python_code | repo_name | file_path
---|---|---
"""Euclidean Knowledge Graph embedding models where embeddings are in real space."""
import numpy as np
import torch
from torch import nn
from models.base import KGModel
from utils.euclidean import euc_sqdistance, givens_rotations, givens_reflection
EUC_MODELS = ["TransE", "CP", "MurE", "RotE", "RefE", "AttE"]
class BaseE(KGModel):
"""Euclidean Knowledge Graph Embedding models.
Attributes:
sim: similarity metric to use (dist for distance and dot for dot product)
"""
def __init__(self, args):
super(BaseE, self).__init__(args.sizes, args.rank, args.dropout, args.gamma, args.dtype, args.bias,
args.init_size)
self.entity.weight.data = self.init_size * torch.randn((self.sizes[0], self.rank), dtype=self.data_type)
self.rel.weight.data = self.init_size * torch.randn((self.sizes[1], self.rank), dtype=self.data_type)
def get_rhs(self, queries, eval_mode):
"""Get embeddings and biases of target entities."""
if eval_mode:
return self.entity.weight, self.bt.weight
else:
return self.entity(queries[:, 2]), self.bt(queries[:, 2])
def similarity_score(self, lhs_e, rhs_e, eval_mode):
"""Compute similarity scores of queries against targets in embedding space."""
if self.sim == "dot":
if eval_mode:
score = lhs_e @ rhs_e.transpose(0, 1)
else:
score = torch.sum(lhs_e * rhs_e, dim=-1, keepdim=True)
else:
score = - euc_sqdistance(lhs_e, rhs_e, eval_mode)
return score
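# Illustrative sketch (not part of the original file): the two scoring modes
# used by similarity_score in eval mode, assuming euc_sqdistance broadcasts
# every query against every candidate entity.
def _similarity_score_demo():
    lhs_e = torch.randn(4, 32)   # 4 query embeddings
    rhs_e = torch.randn(10, 32)  # 10 candidate entity embeddings
    dot_scores = lhs_e @ rhs_e.transpose(0, 1)                     # (4, 10)
    dist_scores = -((lhs_e[:, None] - rhs_e[None]) ** 2).sum(-1)   # (4, 10)
    return dot_scores, dist_scores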
class TransE(BaseE):
"""Euclidean translations https://www.utc.fr/~bordesan/dokuwiki/_media/en/transe_nips13.pdf"""
def __init__(self, args):
super(TransE, self).__init__(args)
self.sim = "dist"
def get_queries(self, queries):
head_e = self.entity(queries[:, 0])
rel_e = self.rel(queries[:, 1])
lhs_e = head_e + rel_e
lhs_biases = self.bh(queries[:, 0])
return lhs_e, lhs_biases
class CP(BaseE):
"""Canonical tensor decomposition https://arxiv.org/pdf/1806.07297.pdf"""
def __init__(self, args):
super(CP, self).__init__(args)
self.sim = "dot"
def get_queries(self, queries: torch.Tensor):
"""Compute embedding and biases of queries."""
return self.entity(queries[:, 0]) * self.rel(queries[:, 1]), self.bh(queries[:, 0])
class MurE(BaseE):
"""Diagonal scaling https://arxiv.org/pdf/1905.09791.pdf"""
def __init__(self, args):
super(MurE, self).__init__(args)
self.rel_diag = nn.Embedding(self.sizes[1], self.rank)
self.rel_diag.weight.data = 2 * torch.rand((self.sizes[1], self.rank), dtype=self.data_type) - 1.0
self.sim = "dist"
def get_queries(self, queries: torch.Tensor):
"""Compute embedding and biases of queries."""
lhs_e = self.rel_diag(queries[:, 1]) * self.entity(queries[:, 0]) + self.rel(queries[:, 1])
lhs_biases = self.bh(queries[:, 0])
return lhs_e, lhs_biases
class RotE(BaseE):
"""Euclidean 2x2 Givens rotations"""
def __init__(self, args):
super(RotE, self).__init__(args)
self.rel_diag = nn.Embedding(self.sizes[1], self.rank)
self.rel_diag.weight.data = 2 * torch.rand((self.sizes[1], self.rank), dtype=self.data_type) - 1.0
self.sim = "dist"
def get_queries(self, queries: torch.Tensor):
"""Compute embedding and biases of queries."""
lhs_e = givens_rotations(self.rel_diag(queries[:, 1]), self.entity(queries[:, 0])) + self.rel(queries[:, 1])
lhs_biases = self.bh(queries[:, 0])
return lhs_e, lhs_biases
class RefE(BaseE):
"""Euclidean 2x2 Givens reflections"""
def __init__(self, args):
super(RefE, self).__init__(args)
self.rel_diag = nn.Embedding(self.sizes[1], self.rank)
self.rel_diag.weight.data = 2 * torch.rand((self.sizes[1], self.rank), dtype=self.data_type) - 1.0
self.sim = "dist"
def get_queries(self, queries):
"""Compute embedding and biases of queries."""
lhs = givens_reflection(self.rel_diag(queries[:, 1]), self.entity(queries[:, 0]))
rel = self.rel(queries[:, 1])
lhs_biases = self.bh(queries[:, 0])
return lhs + rel, lhs_biases
class AttE(BaseE):
"""Euclidean attention model combining translations, reflections and rotations"""
def __init__(self, args):
super(AttE, self).__init__(args)
self.sim = "dist"
# reflection
self.ref = nn.Embedding(self.sizes[1], self.rank)
self.ref.weight.data = 2 * torch.rand((self.sizes[1], self.rank), dtype=self.data_type) - 1.0
# rotation
self.rot = nn.Embedding(self.sizes[1], self.rank)
self.rot.weight.data = 2 * torch.rand((self.sizes[1], self.rank), dtype=self.data_type) - 1.0
# attention
self.context_vec = nn.Embedding(self.sizes[1], self.rank)
self.act = nn.Softmax(dim=1)
self.scale = torch.Tensor([1. / np.sqrt(self.rank)]).cuda()
def get_reflection_queries(self, queries):
lhs_ref_e = givens_reflection(
self.ref(queries[:, 1]), self.entity(queries[:, 0])
)
return lhs_ref_e
def get_rotation_queries(self, queries):
lhs_rot_e = givens_rotations(
self.rot(queries[:, 1]), self.entity(queries[:, 0])
)
return lhs_rot_e
def get_queries(self, queries):
"""Compute embedding and biases of queries."""
lhs_ref_e = self.get_reflection_queries(queries).view((-1, 1, self.rank))
lhs_rot_e = self.get_rotation_queries(queries).view((-1, 1, self.rank))
# self-attention mechanism
cands = torch.cat([lhs_ref_e, lhs_rot_e], dim=1)
context_vec = self.context_vec(queries[:, 1]).view((-1, 1, self.rank))
att_weights = torch.sum(context_vec * cands * self.scale, dim=-1, keepdim=True)
att_weights = self.act(att_weights)
lhs_e = torch.sum(att_weights * cands, dim=1) + self.rel(queries[:, 1])
return lhs_e, self.bh(queries[:, 0])
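# Illustrative sketch (not part of the original file): how AttE mixes the
# reflection and rotation candidates with a per-relation context vector.
# Shapes mirror get_queries above; the values are random placeholders.
def _atte_attention_demo():
    batch, rank = 4, 32
    cands = torch.randn(batch, 2, rank)        # stacked [reflection, rotation] queries
    context_vec = torch.randn(batch, 1, rank)  # per-relation context vector
    scale = 1.0 / np.sqrt(rank)
    att_weights = torch.softmax((context_vec * cands * scale).sum(-1, keepdim=True), dim=1)
    return (att_weights * cands).sum(dim=1)    # (batch, rank) attended query embedding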
| KGEmb-master | models/euclidean.py |
"""Knowledge Graph embedding model optimizer."""
import numpy as np
import torch
import torch.nn.functional as F
import tqdm
from torch import nn
class KGOptimizer(object):
"""Knowledge Graph embedding model optimizer.
KGOptimizer performs loss computations with negative sampling and gradient descent steps.
Attributes:
model: models.base.KGModel
regularizer: regularizers.Regularizer
optimizer: torch.optim.Optimizer
batch_size: An integer for the training batch size
neg_sample_size: An integer for the number of negative samples
double_neg: A boolean (True to sample both head and tail entities)
"""
def __init__(
self, model, regularizer, optimizer, batch_size, neg_sample_size, double_neg, verbose=True):
"""Inits KGOptimizer."""
self.model = model
self.regularizer = regularizer
self.optimizer = optimizer
self.batch_size = batch_size
self.verbose = verbose
self.double_neg = double_neg
self.loss_fn = nn.CrossEntropyLoss(reduction='mean')
self.neg_sample_size = neg_sample_size
self.n_entities = model.sizes[0]
def reduce_lr(self, factor=0.8):
"""Reduce learning rate.
Args:
factor: float for the learning rate decay
"""
for param_group in self.optimizer.param_groups:
param_group['lr'] *= factor
def get_neg_samples(self, input_batch):
"""Sample negative examples.
Args:
input_batch: torch.LongTensor of shape (batch_size x 3) with ground truth training triples
Returns:
negative_batch: torch.LongTensor of shape ((batch_size * neg_sample_size) x 3) with negative examples
"""
negative_batch = input_batch.repeat(self.neg_sample_size, 1)
batch_size = input_batch.shape[0]
negsamples = torch.Tensor(np.random.randint(
self.n_entities,
size=batch_size * self.neg_sample_size)
).to(input_batch.dtype)
negative_batch[:, 2] = negsamples
if self.double_neg:
negsamples = torch.Tensor(np.random.randint(
self.n_entities,
size=batch_size * self.neg_sample_size)
).to(input_batch.dtype)
negative_batch[:, 0] = negsamples
return negative_batch
def neg_sampling_loss(self, input_batch):
"""Compute KG embedding loss with negative sampling.
Args:
input_batch: torch.LongTensor of shape (batch_size x 3) with ground truth training triples.
Returns:
loss: torch.Tensor with negative sampling embedding loss
factors: torch.Tensor with embeddings weights to regularize
"""
# positive samples
positive_score, factors = self.model(input_batch)
positive_score = F.logsigmoid(positive_score)
# negative samples
neg_samples = self.get_neg_samples(input_batch)
negative_score, _ = self.model(neg_samples)
negative_score = F.logsigmoid(-negative_score)
loss = - torch.cat([positive_score, negative_score], dim=0).mean()
return loss, factors
def no_neg_sampling_loss(self, input_batch):
"""Compute KG embedding loss without negative sampling.
Args:
input_batch: torch.LongTensor of shape (batch_size x 3) with ground truth training triples
Returns:
loss: torch.Tensor with embedding loss
factors: torch.Tensor with embeddings weights to regularize
"""
predictions, factors = self.model(input_batch, eval_mode=True)
truth = input_batch[:, 2]
log_prob = F.logsigmoid(-predictions)
idx = torch.arange(0, truth.shape[0], dtype=truth.dtype)
pos_scores = F.logsigmoid(predictions[idx, truth]) - F.logsigmoid(-predictions[idx, truth])
log_prob[idx, truth] += pos_scores
loss = - log_prob.mean()
loss += self.regularizer.forward(factors)
return loss, factors
def calculate_loss(self, input_batch):
"""Compute KG embedding loss and regularization loss.
Args:
input_batch: torch.LongTensor of shape (batch_size x 3) with ground truth training triples
Returns:
loss: torch.Tensor with embedding loss and regularization loss
"""
if self.neg_sample_size > 0:
loss, factors = self.neg_sampling_loss(input_batch)
else:
predictions, factors = self.model(input_batch, eval_mode=True)
truth = input_batch[:, 2]
loss = self.loss_fn(predictions, truth)
# loss, factors = self.no_neg_sampling_loss(input_batch)
# regularization loss
loss += self.regularizer.forward(factors)
return loss
def calculate_valid_loss(self, examples):
"""Compute KG embedding loss over validation examples.
Args:
examples: torch.LongTensor of shape (N_valid x 3) with validation triples
Returns:
loss: torch.Tensor with loss averaged over all validation examples
"""
b_begin = 0
loss = 0.0
counter = 0
with torch.no_grad():
while b_begin < examples.shape[0]:
input_batch = examples[
b_begin:b_begin + self.batch_size
].cuda()
b_begin += self.batch_size
loss += self.calculate_loss(input_batch)
counter += 1
loss /= counter
return loss
def epoch(self, examples):
"""Runs one epoch of training for the KG embedding model.
Args:
examples: torch.LongTensor of shape (N_train x 3) with training triples
Returns:
loss: torch.Tensor with loss averaged over all training examples
"""
actual_examples = examples[torch.randperm(examples.shape[0]), :]
with tqdm.tqdm(total=examples.shape[0], unit='ex', disable=not self.verbose) as bar:
bar.set_description(f'train loss')
b_begin = 0
total_loss = 0.0
counter = 0
while b_begin < examples.shape[0]:
input_batch = actual_examples[
b_begin:b_begin + self.batch_size
].cuda()
# gradient step
l = self.calculate_loss(input_batch)
self.optimizer.zero_grad()
l.backward()
self.optimizer.step()
b_begin += self.batch_size
total_loss += l
counter += 1
bar.update(input_batch.shape[0])
bar.set_postfix(loss=f'{l.item():.4f}')
total_loss /= counter
return total_loss
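# Minimal usage sketch (not part of the original file): how a training loop
# might drive KGOptimizer. Model/regularizer construction and all
# hyper-parameters below are illustrative, not taken from this repository.
#
#   optim = KGOptimizer(model, regularizer,
#                       torch.optim.Adagrad(model.parameters(), lr=1e-1),
#                       batch_size=500, neg_sample_size=50, double_neg=False)
#   for _ in range(n_epochs):
#       train_loss = optim.epoch(train_examples)                 # (N_train x 3) LongTensor
#       valid_loss = optim.calculate_valid_loss(valid_examples)  # (N_valid x 3) LongTensor
#       optim.reduce_lr()                                        # optional decay between epochs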
| KGEmb-master | optimizers/kg_optimizer.py |
from .kg_optimizer import KGOptimizer
from .regularizers import *
| KGEmb-master | optimizers/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from abc import ABC, abstractmethod
from typing import Tuple
import torch
from torch import nn
class Regularizer(nn.Module, ABC):
@abstractmethod
def forward(self, factors: Tuple[torch.Tensor]):
pass
class F2(Regularizer):
def __init__(self, weight: float):
super(F2, self).__init__()
self.weight = weight
def forward(self, factors):
norm = 0
for f in factors:
norm += self.weight * torch.sum(f ** 2)
return norm / factors[0].shape[0]
class N3(Regularizer):
def __init__(self, weight: float):
super(N3, self).__init__()
self.weight = weight
def forward(self, factors):
"""Regularized complex embeddings https://arxiv.org/pdf/1806.07297.pdf"""
norm = 0
for f in factors:
norm += self.weight * torch.sum(
torch.abs(f) ** 3
)
return norm / factors[0].shape[0]
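# Illustrative sketch (not part of the original file): applying both
# regularizers to dummy embedding factors; weights are placeholders.
def _regularizer_demo():
    factors = (torch.randn(8, 32), torch.randn(8, 32), torch.randn(8, 32))
    f2_penalty = F2(weight=1e-3)(factors)  # weighted sum of squared entries
    n3_penalty = N3(weight=1e-3)(factors)  # weighted sum of cubed absolute values
    return f2_penalty, n3_penalty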
| KGEmb-master | optimizers/regularizers.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import default_argument_parser, default_setup, launch
from adapteacher import add_ateacher_config
from adapteacher.engine.trainer import ATeacherTrainer, BaselineTrainer
# hacky way to register
from adapteacher.modeling.meta_arch.rcnn import TwoStagePseudoLabGeneralizedRCNN, DAobjTwoStagePseudoLabGeneralizedRCNN
from adapteacher.modeling.meta_arch.vgg import build_vgg_backbone # noqa
from adapteacher.modeling.proposal_generator.rpn import PseudoLabRPN
from adapteacher.modeling.roi_heads.roi_heads import StandardROIHeadsPseudoLab
import adapteacher.data.datasets.builtin
from adapteacher.modeling.meta_arch.ts_ensemble import EnsembleTSModel
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_ateacher_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if cfg.SEMISUPNET.Trainer == "ateacher":
Trainer = ATeacherTrainer
elif cfg.SEMISUPNET.Trainer == "baseline":
Trainer = BaselineTrainer
else:
raise ValueError("Trainer Name is not found.")
if args.eval_only:
if cfg.SEMISUPNET.Trainer == "ateacher":
model = Trainer.build_model(cfg)
model_teacher = Trainer.build_model(cfg)
ensem_ts_model = EnsembleTSModel(model_teacher, model)
DetectionCheckpointer(
ensem_ts_model, save_dir=cfg.OUTPUT_DIR
).resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
res = Trainer.test(cfg, ensem_ts_model.modelTeacher)
else:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
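# Typical invocation sketch (config path, GPU count and output directory are
# illustrative only, not taken from this file):
#   python train_net.py --num-gpus 8 \
#       --config-file configs/faster_rcnn_VGG_cross_city.yaml \
#       OUTPUT_DIR output/exp_city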
| adaptive_teacher-main | train_net.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.config import CfgNode as CN
def add_ateacher_config(cfg):
"""
Add config for semisupnet.
"""
_C = cfg
_C.TEST.VAL_LOSS = True
_C.MODEL.RPN.UNSUP_LOSS_WEIGHT = 1.0
_C.MODEL.RPN.LOSS = "CrossEntropy"
_C.MODEL.ROI_HEADS.LOSS = "CrossEntropy"
_C.SOLVER.IMG_PER_BATCH_LABEL = 1
_C.SOLVER.IMG_PER_BATCH_UNLABEL = 1
_C.SOLVER.FACTOR_LIST = (1,)
_C.DATASETS.TRAIN_LABEL = ("coco_2017_train",)
_C.DATASETS.TRAIN_UNLABEL = ("coco_2017_train",)
_C.DATASETS.CROSS_DATASET = True
_C.TEST.EVALUATOR = "COCOeval"
_C.SEMISUPNET = CN()
# Output dimension of the MLP projector after `res5` block
_C.SEMISUPNET.MLP_DIM = 128
# Semi-supervised training
_C.SEMISUPNET.Trainer = "ateacher"
_C.SEMISUPNET.BBOX_THRESHOLD = 0.7
_C.SEMISUPNET.PSEUDO_BBOX_SAMPLE = "thresholding"
_C.SEMISUPNET.TEACHER_UPDATE_ITER = 1
_C.SEMISUPNET.BURN_UP_STEP = 12000
_C.SEMISUPNET.EMA_KEEP_RATE = 0.0
_C.SEMISUPNET.UNSUP_LOSS_WEIGHT = 4.0
_C.SEMISUPNET.SUP_LOSS_WEIGHT = 0.5
_C.SEMISUPNET.LOSS_WEIGHT_TYPE = "standard"
_C.SEMISUPNET.DIS_TYPE = "res4"
_C.SEMISUPNET.DIS_LOSS_WEIGHT = 0.1
# dataloader
# supervision level
_C.DATALOADER.SUP_PERCENT = 100.0 # 5 = 5% dataset as labeled set
_C.DATALOADER.RANDOM_DATA_SEED = 0 # random seed to read data
_C.DATALOADER.RANDOM_DATA_SEED_PATH = "dataseed/COCO_supervision.txt"
_C.EMAMODEL = CN()
_C.EMAMODEL.SUP_CONSIST = True
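# Minimal usage sketch (mirrors setup() in train_net.py; the yaml path is
# illustrative only):
#   from detectron2.config import get_cfg
#   cfg = get_cfg()
#   add_ateacher_config(cfg)
#   cfg.merge_from_file("configs/faster_rcnn_VGG_cross_city.yaml")
#   cfg.freeze()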
| adaptive_teacher-main | adapteacher/config.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .config import add_ateacher_config
| adaptive_teacher-main | adapteacher/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.checkpoint.c2_model_loading import align_and_update_state_dicts
from detectron2.checkpoint import DetectionCheckpointer
# for load_student_model
from typing import Any
from fvcore.common.checkpoint import _strip_prefix_if_present, _IncompatibleKeys
class DetectionTSCheckpointer(DetectionCheckpointer):
def _load_model(self, checkpoint):
if checkpoint.get("__author__", None) == "Caffe2":
# pretrained model weight: only update student model
if checkpoint.get("matching_heuristics", False):
self._convert_ndarray_to_tensor(checkpoint["model"])
# convert weights by name-matching heuristics
model_state_dict = self.model.modelStudent.state_dict()
align_and_update_state_dicts(
model_state_dict,
checkpoint["model"],
c2_conversion=checkpoint.get("__author__", None) == "Caffe2",
)
checkpoint["model"] = model_state_dict
# for non-caffe2 models, use standard ways to load it
incompatible = self._load_student_model(checkpoint)
model_buffers = dict(self.model.modelStudent.named_buffers(recurse=False))
for k in ["pixel_mean", "pixel_std"]:
# Ignore missing key message about pixel_mean/std.
# Though they may be missing in old checkpoints, they will be correctly
# initialized from config anyway.
if k in model_buffers:
try:
incompatible.missing_keys.remove(k)
except ValueError:
pass
return incompatible
else: # whole model
if checkpoint.get("matching_heuristics", False):
self._convert_ndarray_to_tensor(checkpoint["model"])
# convert weights by name-matching heuristics
model_state_dict = self.model.state_dict()
align_and_update_state_dicts(
model_state_dict,
checkpoint["model"],
c2_conversion=checkpoint.get("__author__", None) == "Caffe2",
)
checkpoint["model"] = model_state_dict
# for non-caffe2 models, use standard ways to load it
incompatible = super()._load_model(checkpoint)
model_buffers = dict(self.model.named_buffers(recurse=False))
for k in ["pixel_mean", "pixel_std"]:
# Ignore missing key message about pixel_mean/std.
# Though they may be missing in old checkpoints, they will be correctly
# initialized from config anyway.
if k in model_buffers:
try:
incompatible.missing_keys.remove(k)
except ValueError:
pass
return incompatible
def _load_student_model(self, checkpoint: Any) -> _IncompatibleKeys: # pyre-ignore
checkpoint_state_dict = checkpoint.pop("model")
self._convert_ndarray_to_tensor(checkpoint_state_dict)
# if the state_dict comes from a model that was wrapped in a
# DataParallel or DistributedDataParallel during serialization,
# remove the "module" prefix before performing the matching.
_strip_prefix_if_present(checkpoint_state_dict, "module.")
# work around https://github.com/pytorch/pytorch/issues/24139
model_state_dict = self.model.modelStudent.state_dict()
incorrect_shapes = []
for k in list(checkpoint_state_dict.keys()):
if k in model_state_dict:
shape_model = tuple(model_state_dict[k].shape)
shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
if shape_model != shape_checkpoint:
incorrect_shapes.append((k, shape_checkpoint, shape_model))
checkpoint_state_dict.pop(k)
# pyre-ignore
incompatible = self.model.modelStudent.load_state_dict(
checkpoint_state_dict, strict=False
)
return _IncompatibleKeys(
missing_keys=incompatible.missing_keys,
unexpected_keys=incompatible.unexpected_keys,
incorrect_shapes=incorrect_shapes,
)
# class DetectionCheckpointer(Checkpointer):
# """
# Same as :class:`Checkpointer`, but is able to handle models in detectron & detectron2
# model zoo, and apply conversions for legacy models.
# """
# def __init__(self, model, save_dir="", *, save_to_disk=None, **checkpointables):
# is_main_process = comm.is_main_process()
# super().__init__(
# model,
# save_dir,
# save_to_disk=is_main_process if save_to_disk is None else save_to_disk,
# **checkpointables,
# )
# def _load_file(self, filename):
# if filename.endswith(".pkl"):
# with PathManager.open(filename, "rb") as f:
# data = pickle.load(f, encoding="latin1")
# if "model" in data and "__author__" in data:
# # file is in Detectron2 model zoo format
# self.logger.info("Reading a file from '{}'".format(data["__author__"]))
# return data
# else:
# # assume file is from Caffe2 / Detectron1 model zoo
# if "blobs" in data:
# # Detection models have "blobs", but ImageNet models don't
# data = data["blobs"]
# data = {k: v for k, v in data.items() if not k.endswith("_momentum")}
# return {"model": data, "__author__": "Caffe2", "matching_heuristics": True}
# loaded = super()._load_file(filename) # load native pth checkpoint
# if "model" not in loaded:
# loaded = {"model": loaded}
# return loaded
# def _load_model(self, checkpoint):
# if checkpoint.get("matching_heuristics", False):
# self._convert_ndarray_to_tensor(checkpoint["model"])
# # convert weights by name-matching heuristics
# model_state_dict = self.model.state_dict()
# align_and_update_state_dicts(
# model_state_dict,
# checkpoint["model"],
# c2_conversion=checkpoint.get("__author__", None) == "Caffe2",
# )
# checkpoint["model"] = model_state_dict
# # for non-caffe2 models, use standard ways to load it
# super()._load_model(checkpoint)
| adaptive_teacher-main | adapteacher/checkpoint/detection_checkpoint.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from detectron2.config import CfgNode
from detectron2.solver.lr_scheduler import WarmupCosineLR, WarmupMultiStepLR
from .lr_scheduler import WarmupTwoStageMultiStepLR
def build_lr_scheduler(
cfg: CfgNode, optimizer: torch.optim.Optimizer
) -> torch.optim.lr_scheduler._LRScheduler:
"""
Build a LR scheduler from config.
"""
name = cfg.SOLVER.LR_SCHEDULER_NAME
if name == "WarmupMultiStepLR":
return WarmupMultiStepLR(
optimizer,
cfg.SOLVER.STEPS,
cfg.SOLVER.GAMMA,
warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
warmup_iters=cfg.SOLVER.WARMUP_ITERS,
warmup_method=cfg.SOLVER.WARMUP_METHOD,
)
elif name == "WarmupCosineLR":
return WarmupCosineLR(
optimizer,
cfg.SOLVER.MAX_ITER,
warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
warmup_iters=cfg.SOLVER.WARMUP_ITERS,
warmup_method=cfg.SOLVER.WARMUP_METHOD,
)
elif name == "WarmupTwoStageMultiStepLR":
return WarmupTwoStageMultiStepLR(
optimizer,
cfg.SOLVER.STEPS,
factor_list=cfg.SOLVER.FACTOR_LIST,
gamma=cfg.SOLVER.GAMMA,
warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
warmup_iters=cfg.SOLVER.WARMUP_ITERS,
warmup_method=cfg.SOLVER.WARMUP_METHOD,
)
else:
raise ValueError("Unknown LR scheduler: {}".format(name))
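# Minimal usage sketch (assumes a cfg and a torch optimizer built elsewhere):
#   scheduler = build_lr_scheduler(cfg, optimizer)
#   for _ in range(cfg.SOLVER.MAX_ITER):
#       ...  # one training step
#       scheduler.step()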
| adaptive_teacher-main | adapteacher/solver/build.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from bisect import bisect_right
from typing import List
import torch
from detectron2.solver.lr_scheduler import _get_warmup_factor_at_iter
class WarmupTwoStageMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer: torch.optim.Optimizer,
milestones: List[int],
factor_list: List[int],
gamma: float = 0.1,
warmup_factor: float = 0.001,
warmup_iters: int = 1000,
warmup_method: str = "linear",
last_epoch: int = -1,
):
if not list(milestones) == sorted(milestones):
raise ValueError(
"Milestones should be a list of increasing integers. Got {}".format(milestones)
)
if len(milestones) + 1 != len(factor_list):
raise ValueError("Length of milestones should match length of factor_list.")
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
self.factor_list = factor_list
super().__init__(optimizer, last_epoch)
def get_lr(self) -> List[float]:
warmup_factor = _get_warmup_factor_at_iter(
self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
)
return [
base_lr
* warmup_factor
* self.factor_list[bisect_right(self.milestones, self.last_epoch)]
for base_lr in self.base_lrs
]
def _compute_values(self) -> List[float]:
# The new interface
return self.get_lr()
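# Illustrative sketch (not part of the original file): stepping the two-stage
# scheduler on a dummy optimizer; milestones, factors and warmup length are
# placeholders.
def _two_stage_lr_demo():
    params = [torch.nn.Parameter(torch.zeros(1))]
    opt = torch.optim.SGD(params, lr=0.1)
    sched = WarmupTwoStageMultiStepLR(
        opt, milestones=[300, 600], factor_list=[1.0, 0.5, 0.1], warmup_iters=100
    )
    lrs = []
    for _ in range(700):
        lrs.append(opt.param_groups[0]["lr"])
        opt.step()
        sched.step()
    return lrs  # warmup ramp, then drops at iterations 300 and 600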
| adaptive_teacher-main | adapteacher/solver/lr_scheduler.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
from detectron2.modeling.meta_arch.rcnn import GeneralizedRCNN
from detectron2.config import configurable
# from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
# from detectron2.modeling.meta_arch.rcnn import GeneralizedRCNN
import logging
from typing import Dict, Tuple, List, Optional
from collections import OrderedDict
from detectron2.modeling.proposal_generator import build_proposal_generator
from detectron2.modeling.backbone import build_backbone, Backbone
from detectron2.modeling.roi_heads import build_roi_heads
from detectron2.utils.events import get_event_storage
from detectron2.structures import ImageList
############### Image discriminator ##############
class FCDiscriminator_img(nn.Module):
def __init__(self, num_classes, ndf1=256, ndf2=128):
super(FCDiscriminator_img, self).__init__()
self.conv1 = nn.Conv2d(num_classes, ndf1, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(ndf1, ndf2, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(ndf2, ndf2, kernel_size=3, padding=1)
self.classifier = nn.Conv2d(ndf2, 1, kernel_size=3, padding=1)
self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
def forward(self, x):
x = self.conv1(x)
x = self.leaky_relu(x)
x = self.conv2(x)
x = self.leaky_relu(x)
x = self.conv3(x)
x = self.leaky_relu(x)
x = self.classifier(x)
return x
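# Illustrative sketch (not part of the original file): the image-level
# discriminator preserves spatial size and emits one domain logit per location.
# The channel count and feature-map size below are placeholders.
def _discriminator_shape_demo():
    disc = FCDiscriminator_img(num_classes=512)
    logits = disc(torch.zeros(2, 512, 38, 50))
    return logits.shape  # torch.Size([2, 1, 38, 50])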
#################################
################ Gradient reverse function
class GradReverse(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output.neg()
def grad_reverse(x):
return GradReverse.apply(x)
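# Illustrative sketch (not part of the original file): GradReverse is the
# identity in the forward pass but negates gradients in the backward pass.
def _grad_reverse_demo():
    x = torch.randn(3, requires_grad=True)
    grad_reverse(x).sum().backward()
    return x.grad  # every entry is -1: gradients flow back with flipped sign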
#######################
@META_ARCH_REGISTRY.register()
class DAobjTwoStagePseudoLabGeneralizedRCNN(GeneralizedRCNN):
@configurable
def __init__(
self,
*,
backbone: Backbone,
proposal_generator: nn.Module,
roi_heads: nn.Module,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
input_format: Optional[str] = None,
vis_period: int = 0,
dis_type: str,
# dis_loss_weight: float = 0,
):
"""
Args:
backbone: a backbone module, must follow detectron2's backbone interface
proposal_generator: a module that generates proposals using backbone features
roi_heads: a ROI head that performs per-region computation
pixel_mean, pixel_std: list or tuple with #channels element, representing
the per-channel mean and std to be used to normalize the input image
input_format: describe the meaning of channels of input. Needed by visualization
vis_period: the period to run visualization. Set to 0 to disable.
"""
super(GeneralizedRCNN, self).__init__()
self.backbone = backbone
self.proposal_generator = proposal_generator
self.roi_heads = roi_heads
self.input_format = input_format
self.vis_period = vis_period
if vis_period > 0:
assert input_format is not None, "input_format is required for visualization!"
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
assert (
self.pixel_mean.shape == self.pixel_std.shape
), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
# @yujheli: you may need to build your discriminator here
self.dis_type = dis_type
self.D_img = None
# self.D_img = FCDiscriminator_img(self.backbone._out_feature_channels['res4']) # Need to know the channel
# self.D_img = None
self.D_img = FCDiscriminator_img(self.backbone._out_feature_channels[self.dis_type]) # Need to know the channel
# self.bceLoss_func = nn.BCEWithLogitsLoss()
def build_discriminator(self):
self.D_img = FCDiscriminator_img(self.backbone._out_feature_channels[self.dis_type]).to(self.device) # Need to know the channel
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
return {
"backbone": backbone,
"proposal_generator": build_proposal_generator(cfg, backbone.output_shape()),
"roi_heads": build_roi_heads(cfg, backbone.output_shape()),
"input_format": cfg.INPUT.FORMAT,
"vis_period": cfg.VIS_PERIOD,
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
"dis_type": cfg.SEMISUPNET.DIS_TYPE,
# "dis_loss_ratio": cfg.xxx,
}
def preprocess_image_train(self, batched_inputs: List[Dict[str, torch.Tensor]]):
"""
Normalize, pad and batch the input images.
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
images_t = [x["image_unlabeled"].to(self.device) for x in batched_inputs]
images_t = [(x - self.pixel_mean) / self.pixel_std for x in images_t]
images_t = ImageList.from_tensors(images_t, self.backbone.size_divisibility)
return images, images_t
def forward(
self, batched_inputs, branch="supervised", given_proposals=None, val_mode=False
):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances (optional): groundtruth :class:`Instances`
* proposals (optional): :class:`Instances`, precomputed proposals.
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
list[dict]:
Each dict is the output for one input image.
The dict contains one key "instances" whose value is a :class:`Instances`.
The :class:`Instances` object has the following keys:
"pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints"
"""
if self.D_img is None:
self.build_discriminator()
if (not self.training) and (not val_mode): # only conduct when testing mode
return self.inference(batched_inputs)
source_label = 0
target_label = 1
if branch == "domain":
# self.D_img.train()
# source_label = 0
# target_label = 1
# images = self.preprocess_image(batched_inputs)
images_s, images_t = self.preprocess_image_train(batched_inputs)
features = self.backbone(images_s.tensor)
# import pdb
# pdb.set_trace()
features_s = grad_reverse(features[self.dis_type])
D_img_out_s = self.D_img(features_s)
loss_D_img_s = F.binary_cross_entropy_with_logits(D_img_out_s, torch.FloatTensor(D_img_out_s.data.size()).fill_(source_label).to(self.device))
features_t = self.backbone(images_t.tensor)
features_t = grad_reverse(features_t[self.dis_type])
# features_t = grad_reverse(features_t['p2'])
D_img_out_t = self.D_img(features_t)
loss_D_img_t = F.binary_cross_entropy_with_logits(D_img_out_t, torch.FloatTensor(D_img_out_t.data.size()).fill_(target_label).to(self.device))
# import pdb
# pdb.set_trace()
losses = {}
losses["loss_D_img_s"] = loss_D_img_s
losses["loss_D_img_t"] = loss_D_img_t
return losses, [], [], None
# self.D_img.eval()
images = self.preprocess_image(batched_inputs)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
features = self.backbone(images.tensor)
# TODO: remove the usage of if else here. This needs to be re-organized
if branch == "supervised":
features_s = grad_reverse(features[self.dis_type])
D_img_out_s = self.D_img(features_s)
loss_D_img_s = F.binary_cross_entropy_with_logits(D_img_out_s, torch.FloatTensor(D_img_out_s.data.size()).fill_(source_label).to(self.device))
# Region proposal network
proposals_rpn, proposal_losses = self.proposal_generator(
images, features, gt_instances
)
# roi_head lower branch
_, detector_losses = self.roi_heads(
images,
features,
proposals_rpn,
compute_loss=True,
targets=gt_instances,
branch=branch,
)
# visualization
if self.vis_period > 0:
storage = get_event_storage()
if storage.iter % self.vis_period == 0:
self.visualize_training(batched_inputs, proposals_rpn, branch)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
losses["loss_D_img_s"] = loss_D_img_s*0.001
return losses, [], [], None
elif branch == "supervised_target":
# features_t = grad_reverse(features_t[self.dis_type])
# D_img_out_t = self.D_img(features_t)
# loss_D_img_t = F.binary_cross_entropy_with_logits(D_img_out_t, torch.FloatTensor(D_img_out_t.data.size()).fill_(target_label).to(self.device))
# Region proposal network
proposals_rpn, proposal_losses = self.proposal_generator(
images, features, gt_instances
)
# roi_head lower branch
_, detector_losses = self.roi_heads(
images,
features,
proposals_rpn,
compute_loss=True,
targets=gt_instances,
branch=branch,
)
# visualization
if self.vis_period > 0:
storage = get_event_storage()
if storage.iter % self.vis_period == 0:
self.visualize_training(batched_inputs, proposals_rpn, branch)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
# losses["loss_D_img_t"] = loss_D_img_t*0.001
# losses["loss_D_img_s"] = loss_D_img_s*0.001
return losses, [], [], None
elif branch == "unsup_data_weak":
"""
unsupervised weak branch: input image without any ground-truth label; output proposals of rpn and roi-head
"""
# Region proposal network
proposals_rpn, _ = self.proposal_generator(
images, features, None, compute_loss=False
)
# roi_head lower branch (keep this for further production)
# notice that we do not use any target in ROI head to do inference!
proposals_roih, ROI_predictions = self.roi_heads(
images,
features,
proposals_rpn,
targets=None,
compute_loss=False,
branch=branch,
)
# if self.vis_period > 0:
# storage = get_event_storage()
# if storage.iter % self.vis_period == 0:
# self.visualize_training(batched_inputs, proposals_rpn, branch)
return {}, proposals_rpn, proposals_roih, ROI_predictions
elif branch == "unsup_data_strong":
raise NotImplementedError()
elif branch == "val_loss":
raise NotImplementedError()
def visualize_training(self, batched_inputs, proposals, branch=""):
"""
This function differs from the original one:
- it adds "branch" to the `vis_name`.
A function used to visualize images and proposals. It shows ground truth
bounding boxes on the original image and up to 20 predicted object
proposals on the original image. Users can implement different
visualization functions for different models.
Args:
batched_inputs (list): a list that contains input to the model.
proposals (list): a list that contains predicted proposals. Both
batched_inputs and proposals should have the same length.
"""
from detectron2.data.detection_utils import convert_image_to_rgb
from detectron2.utils.visualizer import Visualizer
storage = get_event_storage()
max_vis_prop = 20
for input, prop in zip(batched_inputs, proposals):
img = input["image"]
img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)
v_gt = Visualizer(img, None)
v_gt = v_gt.overlay_instances(boxes=input["instances"].gt_boxes)
anno_img = v_gt.get_image()
box_size = min(len(prop.proposal_boxes), max_vis_prop)
v_pred = Visualizer(img, None)
v_pred = v_pred.overlay_instances(
boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy()
)
prop_img = v_pred.get_image()
vis_img = np.concatenate((anno_img, prop_img), axis=1)
vis_img = vis_img.transpose(2, 0, 1)
vis_name = (
"Left: GT bounding boxes "
+ branch
+ "; Right: Predicted proposals "
+ branch
)
storage.put_image(vis_name, vis_img)
break # only visualize one image in a batch
@META_ARCH_REGISTRY.register()
class TwoStagePseudoLabGeneralizedRCNN(GeneralizedRCNN):
def forward(
self, batched_inputs, branch="supervised", given_proposals=None, val_mode=False
):
if (not self.training) and (not val_mode):
return self.inference(batched_inputs)
images = self.preprocess_image(batched_inputs)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
features = self.backbone(images.tensor)
if branch == "supervised":
# Region proposal network
proposals_rpn, proposal_losses = self.proposal_generator(
images, features, gt_instances
)
# # roi_head lower branch
_, detector_losses = self.roi_heads(
images, features, proposals_rpn, gt_instances, branch=branch
)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses, [], [], None
elif branch == "unsup_data_weak":
# Region proposal network
proposals_rpn, _ = self.proposal_generator(
images, features, None, compute_loss=False
)
# roi_head lower branch (keep this for further production) # notice that we do not use any target in ROI head to do inference !
proposals_roih, ROI_predictions = self.roi_heads(
images,
features,
proposals_rpn,
targets=None,
compute_loss=False,
branch=branch,
)
return {}, proposals_rpn, proposals_roih, ROI_predictions
elif branch == "val_loss":
# Region proposal network
proposals_rpn, proposal_losses = self.proposal_generator(
images, features, gt_instances, compute_val_loss=True
)
# roi_head lower branch
_, detector_losses = self.roi_heads(
images,
features,
proposals_rpn,
gt_instances,
branch=branch,
compute_val_loss=True,
)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses, [], [], None
| adaptive_teacher-main | adapteacher/modeling/meta_arch/rcnn.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch.nn as nn
import copy
import torch
from typing import Union, List, Dict, Any, cast
from detectron2.modeling.backbone import (
ResNet,
Backbone,
build_resnet_backbone,
BACKBONE_REGISTRY
)
from detectron2.modeling.backbone.fpn import FPN, LastLevelMaxPool, LastLevelP6P7
def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential:
layers: List[nn.Module] = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
v = cast(int, v)
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfgs: Dict[str, List[Union[str, int]]] = {
'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class vgg_backbone(Backbone):
"""
VGG-16 backbone (bottom-up).
Hierarchy:
vgg0, vgg1, ..., vgg4: one stage per VGG-16 convolution block,
each stage ending in a 2x2 max-pool.
Output features:
The outputs from each "stage", i.e. vggX.
"""
def __init__(self, cfg):
super().__init__()
self.vgg = make_layers(cfgs['vgg16'],batch_norm=True)
self._initialize_weights()
# self.stage_names_index = {'vgg1':3, 'vgg2':8 , 'vgg3':15, 'vgg4':22, 'vgg5':29}
_out_feature_channels = [64, 128, 256, 512, 512]
_out_feature_strides = [2, 4, 8, 16, 32]
# stages, shape_specs = build_fbnet(
# cfg,
# name="trunk",
# in_channels=cfg.MODEL.FBNET_V2.STEM_IN_CHANNELS
# )
# nn.Sequential(*list(self.vgg.features._modules.values())[:14])
self.stages = [nn.Sequential(*list(self.vgg._modules.values())[0:7]),\
nn.Sequential(*list(self.vgg._modules.values())[7:14]),\
nn.Sequential(*list(self.vgg._modules.values())[14:24]),\
nn.Sequential(*list(self.vgg._modules.values())[24:34]),\
nn.Sequential(*list(self.vgg._modules.values())[34:]),]
self._out_feature_channels = {}
self._out_feature_strides = {}
self._stage_names = []
for i, stage in enumerate(self.stages):
name = "vgg{}".format(i)
self.add_module(name, stage)
self._stage_names.append(name)
self._out_feature_channels[name] = _out_feature_channels[i]
self._out_feature_strides[name] = _out_feature_strides[i]
self._out_features = self._stage_names
del self.vgg
def forward(self, x):
features = {}
for name, stage in zip(self._stage_names, self.stages):
x = stage(x)
# if name in self._out_features:
# outputs[name] = x
features[name] = x
# import pdb
# pdb.set_trace()
return features
def _initialize_weights(self) -> None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
@BACKBONE_REGISTRY.register() #already register in baseline model
def build_vgg_backbone(cfg, _):
return vgg_backbone(cfg)
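# Illustrative sketch (not part of the original file): feature-map shapes from
# the plain VGG backbone. vgg_backbone.__init__ ignores cfg, so None suffices
# here; the input resolution is a placeholder.
def _vgg_backbone_shape_demo():
    backbone = vgg_backbone(cfg=None)
    feats = backbone(torch.zeros(1, 3, 224, 224))
    return {name: tuple(f.shape) for name, f in feats.items()}
    # e.g. vgg0 -> (1, 64, 112, 112), ..., vgg4 -> (1, 512, 7, 7)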
@BACKBONE_REGISTRY.register() #already register in baseline model
def build_vgg_fpn_backbone(cfg, _):
# backbone = FPN(
# bottom_up=build_vgg_backbone(cfg),
# in_features=cfg.MODEL.FPN.IN_FEATURES,
# out_channels=cfg.MODEL.FPN.OUT_CHANNELS,
# norm=cfg.MODEL.FPN.NORM,
# top_block=LastLevelMaxPool(),
# )
bottom_up = vgg_backbone(cfg)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelMaxPool(),
# fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
# return backbone
return backbone
| adaptive_teacher-main | adapteacher/modeling/meta_arch/vgg.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from torch.nn.parallel import DataParallel, DistributedDataParallel
import torch.nn as nn
class EnsembleTSModel(nn.Module):
def __init__(self, modelTeacher, modelStudent):
super(EnsembleTSModel, self).__init__()
if isinstance(modelTeacher, (DistributedDataParallel, DataParallel)):
modelTeacher = modelTeacher.module
if isinstance(modelStudent, (DistributedDataParallel, DataParallel)):
modelStudent = modelStudent.module
self.modelTeacher = modelTeacher
self.modelStudent = modelStudent
| adaptive_teacher-main | adapteacher/modeling/meta_arch/ts_ensemble.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Dict, Optional
import torch
from detectron2.structures import ImageList, Instances
from detectron2.modeling.proposal_generator import RPN
from detectron2.modeling.proposal_generator.build import PROPOSAL_GENERATOR_REGISTRY
@PROPOSAL_GENERATOR_REGISTRY.register()
class PseudoLabRPN(RPN):
"""
Region Proposal Network, introduced by :paper:`Faster R-CNN`.
"""
def forward(
self,
images: ImageList,
features: Dict[str, torch.Tensor],
gt_instances: Optional[Instances] = None,
compute_loss: bool = True,
compute_val_loss: bool = False,
):
features = [features[f] for f in self.in_features]
anchors = self.anchor_generator(features)
pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features)
pred_objectness_logits = [
# (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A)
score.permute(0, 2, 3, 1).flatten(1)
for score in pred_objectness_logits
]
pred_anchor_deltas = [
# (N, A*B, Hi, Wi) -> (N, A, B, Hi, Wi) -> (N, Hi, Wi, A, B) -> (N, Hi*Wi*A, B)
x.view(
x.shape[0], -1, self.anchor_generator.box_dim, x.shape[-2], x.shape[-1]
)
.permute(0, 3, 4, 1, 2)
.flatten(1, -2)
for x in pred_anchor_deltas
]
if (self.training and compute_loss) or compute_val_loss:
gt_labels, gt_boxes = self.label_and_sample_anchors(anchors, gt_instances)
losses = self.losses(
anchors, pred_objectness_logits, gt_labels, pred_anchor_deltas, gt_boxes
)
losses = {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()}
else: # inference
losses = {}
proposals = self.predict_proposals(
anchors, pred_objectness_logits, pred_anchor_deltas, images.image_sizes
)
return proposals, losses
| adaptive_teacher-main | adapteacher/modeling/proposal_generator/rpn.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.modeling.roi_heads.fast_rcnn import (
FastRCNNOutputLayers,
FastRCNNOutputs,
)
# focal loss
class FastRCNNFocaltLossOutputLayers(FastRCNNOutputLayers):
def __init__(self, cfg, input_shape):
super(FastRCNNFocaltLossOutputLayers, self).__init__(cfg, input_shape)
self.num_classes = cfg.MODEL.ROI_HEADS.NUM_CLASSES
def losses(self, predictions, proposals):
"""
Args:
predictions: return values of :meth:`forward()`.
proposals (list[Instances]): proposals that match the features
that were used to compute predictions.
"""
scores, proposal_deltas = predictions
losses = FastRCNNFocalLoss(
self.box2box_transform,
scores,
proposal_deltas,
proposals,
self.smooth_l1_beta,
self.box_reg_loss_type,
num_classes=self.num_classes,
).losses()
return losses
class FastRCNNFocalLoss(FastRCNNOutputs):
"""
A class that stores information about outputs of a Fast R-CNN head.
It provides methods that are used to decode the outputs of a Fast R-CNN head.
"""
def __init__(
self,
box2box_transform,
pred_class_logits,
pred_proposal_deltas,
proposals,
smooth_l1_beta=0.0,
box_reg_loss_type="smooth_l1",
num_classes=80,
):
super(FastRCNNFocalLoss, self).__init__(
box2box_transform,
pred_class_logits,
pred_proposal_deltas,
proposals,
smooth_l1_beta,
box_reg_loss_type,
)
self.num_classes = num_classes
def losses(self):
return {
"loss_cls": self.compute_focal_loss(),
"loss_box_reg": self.box_reg_loss(),
}
def compute_focal_loss(self):
if self._no_instances:
return 0.0 * self.pred_class_logits.sum()
else:
FC_loss = FocalLoss(
gamma=1.5,
num_classes=self.num_classes,
)
total_loss = FC_loss(input=self.pred_class_logits, target=self.gt_classes)
total_loss = total_loss / self.gt_classes.shape[0]
return total_loss
class FocalLoss(nn.Module):
def __init__(
self,
weight=None,
gamma=1.0,
num_classes=80,
):
super(FocalLoss, self).__init__()
assert gamma >= 0
self.gamma = gamma
self.weight = weight
self.num_classes = num_classes
def forward(self, input, target):
# focal loss
CE = F.cross_entropy(input, target, reduction="none")
p = torch.exp(-CE)
loss = (1 - p) ** self.gamma * CE
return loss.sum()
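# Illustrative sketch (not part of the original file): the focal term
# (1 - p) ** gamma down-weights well-classified examples relative to plain
# cross-entropy. Class count and gamma below are placeholders.
def _focal_loss_demo():
    logits = torch.randn(8, 81)             # 80 foreground classes + background
    targets = torch.randint(0, 81, (8,))
    ce = F.cross_entropy(logits, targets, reduction="sum")
    focal = FocalLoss(gamma=1.5, num_classes=80)(logits, targets)
    return ce, focal                        # focal <= ce, since (1 - p) ** gamma <= 1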
| adaptive_teacher-main | adapteacher/modeling/roi_heads/fast_rcnn.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from typing import Dict, List, Optional, Tuple, Union
from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou
from detectron2.modeling.proposal_generator.proposal_utils import (
add_ground_truth_to_proposals,
)
from detectron2.utils.events import get_event_storage
from detectron2.modeling.roi_heads.box_head import build_box_head
from detectron2.layers import ShapeSpec
from detectron2.modeling.roi_heads import (
ROI_HEADS_REGISTRY,
StandardROIHeads,
)
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers
from adapteacher.modeling.roi_heads.fast_rcnn import FastRCNNFocaltLossOutputLayers
import numpy as np
from detectron2.modeling.poolers import ROIPooler
@ROI_HEADS_REGISTRY.register()
class StandardROIHeadsPseudoLab(StandardROIHeads):
@classmethod
def _init_box_head(cls, cfg, input_shape):
# fmt: off
in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
# fmt: on
in_channels = [input_shape[f].channels for f in in_features]
# Check all channel counts are equal
assert len(set(in_channels)) == 1, in_channels
in_channels = in_channels[0]
box_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type=pooler_type,
)
box_head = build_box_head(
cfg,
ShapeSpec(
channels=in_channels, height=pooler_resolution, width=pooler_resolution
),
)
if cfg.MODEL.ROI_HEADS.LOSS == "CrossEntropy":
box_predictor = FastRCNNOutputLayers(cfg, box_head.output_shape)
elif cfg.MODEL.ROI_HEADS.LOSS == "FocalLoss":
box_predictor = FastRCNNFocaltLossOutputLayers(cfg, box_head.output_shape)
else:
raise ValueError("Unknown ROI head loss.")
return {
"box_in_features": in_features,
"box_pooler": box_pooler,
"box_head": box_head,
"box_predictor": box_predictor,
}
def forward(
self,
images: ImageList,
features: Dict[str, torch.Tensor],
proposals: List[Instances],
targets: Optional[List[Instances]] = None,
compute_loss=True,
branch="",
compute_val_loss=False,
) -> Tuple[List[Instances], Dict[str, torch.Tensor]]:
del images
if self.training and compute_loss: # apply if training loss
assert targets
# 1000 --> 512
proposals = self.label_and_sample_proposals(
proposals, targets, branch=branch
)
elif compute_val_loss: # apply if val loss
assert targets
# 1000 --> 512
temp_proposal_append_gt = self.proposal_append_gt
self.proposal_append_gt = False
proposals = self.label_and_sample_proposals(
proposals, targets, branch=branch
) # do not apply target on proposals
self.proposal_append_gt = temp_proposal_append_gt
del targets
if (self.training and compute_loss) or compute_val_loss:
losses, _ = self._forward_box(
features, proposals, compute_loss, compute_val_loss, branch
)
return proposals, losses
else:
pred_instances, predictions = self._forward_box(
features, proposals, compute_loss, compute_val_loss, branch
)
return pred_instances, predictions
def _forward_box(
self,
features: Dict[str, torch.Tensor],
proposals: List[Instances],
compute_loss: bool = True,
compute_val_loss: bool = False,
branch: str = "",
) -> Union[Dict[str, torch.Tensor], List[Instances]]:
features = [features[f] for f in self.box_in_features]
box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
box_features = self.box_head(box_features)
predictions = self.box_predictor(box_features)
del box_features
if (
self.training and compute_loss
) or compute_val_loss: # apply if training loss or val loss
losses = self.box_predictor.losses(predictions, proposals)
if self.train_on_pred_boxes:
with torch.no_grad():
pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(
predictions, proposals
)
for proposals_per_image, pred_boxes_per_image in zip(
proposals, pred_boxes
):
proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image)
return losses, predictions
else:
pred_instances, _ = self.box_predictor.inference(predictions, proposals)
return pred_instances, predictions
@torch.no_grad()
def label_and_sample_proposals(
self, proposals: List[Instances], targets: List[Instances], branch: str = ""
) -> List[Instances]:
gt_boxes = [x.gt_boxes for x in targets]
if self.proposal_append_gt:
proposals = add_ground_truth_to_proposals(gt_boxes, proposals)
proposals_with_gt = []
num_fg_samples = []
num_bg_samples = []
for proposals_per_image, targets_per_image in zip(proposals, targets):
has_gt = len(targets_per_image) > 0
match_quality_matrix = pairwise_iou(
targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
)
matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
sampled_idxs, gt_classes = self._sample_proposals(
matched_idxs, matched_labels, targets_per_image.gt_classes
)
proposals_per_image = proposals_per_image[sampled_idxs]
proposals_per_image.gt_classes = gt_classes
if has_gt:
sampled_targets = matched_idxs[sampled_idxs]
for (trg_name, trg_value) in targets_per_image.get_fields().items():
if trg_name.startswith("gt_") and not proposals_per_image.has(
trg_name
):
proposals_per_image.set(trg_name, trg_value[sampled_targets])
else:
gt_boxes = Boxes(
targets_per_image.gt_boxes.tensor.new_zeros((len(sampled_idxs), 4))
)
proposals_per_image.gt_boxes = gt_boxes
num_bg_samples.append((gt_classes == self.num_classes).sum().item())
num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
proposals_with_gt.append(proposals_per_image)
storage = get_event_storage()
storage.put_scalar(
"roi_head/num_target_fg_samples_" + branch, np.mean(num_fg_samples)
)
storage.put_scalar(
"roi_head/num_target_bg_samples_" + branch, np.mean(num_bg_samples)
)
return proposals_with_gt
| adaptive_teacher-main | adapteacher/modeling/roi_heads/roi_heads.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from .coco_evaluation import COCOEvaluator
from .pascal_voc_evaluation import PascalVOCDetectionEvaluator
# __all__ = [k for k in globals().keys() if not k.startswith("_")]
__all__ = [
"COCOEvaluator",
"PascalVOCDetectionEvaluator"
]
| adaptive_teacher-main | adapteacher/evaluation/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import copy
import io
import itertools
import json
import logging
import numpy as np
import os
import pickle
from collections import OrderedDict
import pycocotools.mask as mask_util
import torch
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tabulate import tabulate
import detectron2.utils.comm as comm
from detectron2.config import CfgNode
from detectron2.data import MetadataCatalog
from detectron2.data.datasets.coco import convert_to_coco_dict
from detectron2.evaluation.fast_eval_api import COCOeval_opt
from detectron2.structures import Boxes, BoxMode, pairwise_iou
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import create_small_table
from detectron2.evaluation import DatasetEvaluator
from iopath.common.file_io import file_lock
logger = logging.getLogger(__name__)
def convert_to_coco_json(dataset_name, output_file, allow_cached=True):
"""
Converts dataset into COCO format and saves it to a json file.
dataset_name must be registered in DatasetCatalog and in detectron2's standard format.
Args:
dataset_name:
reference from the config file to the catalogs
must be registered in DatasetCatalog and in detectron2's standard format
output_file: path of json file that will be saved to
allow_cached: if json file is already present then skip conversion
"""
# TODO: The dataset or the conversion script *may* change,
# a checksum would be useful for validating the cached data
PathManager.mkdirs(os.path.dirname(output_file))
with file_lock(output_file):
if PathManager.exists(output_file) and allow_cached:
logger.warning(
f"Using previously cached COCO format annotations at '{output_file}'. "
"You need to clear the cache file if your dataset has been modified."
)
else:
logger.info(f"Converting annotations of dataset '{dataset_name}' to COCO format ...")
coco_dict = convert_to_coco_dict(dataset_name)
logger.info(f"Caching COCO format annotations at '{output_file}' ...")
tmp_file = output_file #+ ".tmp"
# with PathManager.open(tmp_file, "w") as f:
# json.dump(coco_dict, f)
# shutil.move(tmp_file, output_file)
with PathManager.open(tmp_file, "w") as f:
json.dump(coco_dict, f)
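# Minimal usage sketch (dataset name and output path are illustrative only;
# the dataset must already be registered in DatasetCatalog):
#   convert_to_coco_json("my_dataset_train", "./output/my_dataset_train_coco_format.json")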
class COCOEvaluator(DatasetEvaluator):
"""
Evaluate AR for object proposals, AP for instance detection/segmentation, AP
for keypoint detection outputs using COCO's metrics.
See http://cocodataset.org/#detection-eval and
http://cocodataset.org/#keypoints-eval to understand its metrics.
The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means
the metric cannot be computed (e.g. due to no predictions made).
In addition to COCO, this evaluator is able to support any bounding box detection,
instance segmentation, or keypoint detection dataset.
"""
def __init__(
self,
dataset_name,
tasks=None,
distributed=True,
output_dir=None,
*,
use_fast_impl=True,
kpt_oks_sigmas=(),
):
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
It must have either the following corresponding metadata:
"json_file": the path to the COCO format annotation
Or it must be in detectron2's standard dataset format
so it can be converted to COCO format automatically.
tasks (tuple[str]): tasks that can be evaluated under the given
configuration. A task is one of "bbox", "segm", "keypoints".
By default, will infer this automatically from predictions.
distributed (True): if True, will collect results from all ranks and run evaluation
in the main process.
Otherwise, will only evaluate the results in the current process.
output_dir (str): optional, an output directory to dump all
results predicted on the dataset. The dump contains two files:
1. "instances_predictions.pth" a file that can be loaded with `torch.load` and
contains all the results in the format they are produced by the model.
2. "coco_instances_results.json" a json file in COCO's result format.
use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
Although the results should be very close to the official implementation in COCO
API, it is still recommended to compute results with the official API for use in
papers. The faster implementation also uses more RAM.
kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS.
See http://cocodataset.org/#keypoints-eval
When empty, it will use the defaults in COCO.
Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
"""
self._logger = logging.getLogger(__name__)
self._distributed = distributed
self._output_dir = output_dir
self._use_fast_impl = use_fast_impl
if tasks is not None and isinstance(tasks, CfgNode):
kpt_oks_sigmas = (
tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas
)
self._logger.warn(
"COCO Evaluator instantiated using config, this is deprecated behavior."
" Please pass in explicit arguments instead."
)
self._tasks = None # Inferring it from predictions should be better
else:
self._tasks = tasks
self._cpu_device = torch.device("cpu")
self._metadata = MetadataCatalog.get(dataset_name)
if not hasattr(self._metadata, "json_file"):
self._logger.info(
f"'{dataset_name}' is not registered by `register_coco_instances`."
" Therefore trying to convert it to COCO format ..."
)
cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json")
self._metadata.json_file = cache_path
convert_to_coco_json(dataset_name, cache_path)
json_file = PathManager.get_local_path(self._metadata.json_file)
with contextlib.redirect_stdout(io.StringIO()):
self._coco_api = COCO(json_file)
# Test set json files do not contain annotations (evaluation must be
# performed using the COCO evaluation server).
self._do_evaluation = "annotations" in self._coco_api.dataset
if self._do_evaluation:
self._kpt_oks_sigmas = kpt_oks_sigmas
def reset(self):
self._predictions = []
def process(self, inputs, outputs):
"""
Args:
inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
It is a list of dict. Each dict corresponds to an image and
contains keys like "height", "width", "file_name", "image_id".
outputs: the outputs of a COCO model. It is a list of dicts with key
"instances" that contains :class:`Instances`.
"""
for input, output in zip(inputs, outputs):
prediction = {"image_id": input["image_id"]}
if "instances" in output:
instances = output["instances"].to(self._cpu_device)
prediction["instances"] = instances_to_coco_json(instances, input["image_id"])
if "proposals" in output:
prediction["proposals"] = output["proposals"].to(self._cpu_device)
if len(prediction) > 1:
self._predictions.append(prediction)
def evaluate(self, img_ids=None):
"""
Args:
img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset
"""
if self._distributed:
comm.synchronize()
predictions = comm.gather(self._predictions, dst=0)
predictions = list(itertools.chain(*predictions))
if not comm.is_main_process():
return {}
else:
predictions = self._predictions
if len(predictions) == 0:
self._logger.warning("[COCOEvaluator] Did not receive valid predictions.")
return {}
if self._output_dir:
PathManager.mkdirs(self._output_dir)
file_path = os.path.join(self._output_dir, "instances_predictions.pth")
with PathManager.open(file_path, "wb") as f:
torch.save(predictions, f)
self._results = OrderedDict()
if "proposals" in predictions[0]:
self._eval_box_proposals(predictions)
if "instances" in predictions[0]:
self._eval_predictions(predictions, img_ids=img_ids)
# Copy so the caller can do whatever with results
return copy.deepcopy(self._results)
def _tasks_from_predictions(self, predictions):
"""
Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions.
"""
tasks = {"bbox"}
for pred in predictions:
if "segmentation" in pred:
tasks.add("segm")
if "keypoints" in pred:
tasks.add("keypoints")
return sorted(tasks)
def _eval_predictions(self, predictions, img_ids=None):
"""
Evaluate predictions. Fill self._results with the metrics of the tasks.
"""
self._logger.info("Preparing results for COCO format ...")
coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
tasks = self._tasks or self._tasks_from_predictions(coco_results)
# unmap the category ids for COCO
if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id
all_contiguous_ids = list(dataset_id_to_contiguous_id.values())
num_classes = len(all_contiguous_ids)
assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1
reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}
for result in coco_results:
category_id = result["category_id"]
assert category_id < num_classes, (
f"A prediction has class={category_id}, "
f"but the dataset only has {num_classes} classes and "
f"predicted class id should be in [0, {num_classes - 1}]."
)
result["category_id"] = reverse_id_mapping[category_id]
if self._output_dir:
file_path = os.path.join(self._output_dir, "coco_instances_results.json")
self._logger.info("Saving results to {}".format(file_path))
with PathManager.open(file_path, "w") as f:
f.write(json.dumps(coco_results))
f.flush()
if not self._do_evaluation:
self._logger.info("Annotations are not available for evaluation.")
return
self._logger.info(
"Evaluating predictions with {} COCO API...".format(
"unofficial" if self._use_fast_impl else "official"
)
)
for task in sorted(tasks):
assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!"
coco_eval = (
_evaluate_predictions_on_coco(
self._coco_api,
coco_results,
task,
kpt_oks_sigmas=self._kpt_oks_sigmas,
use_fast_impl=self._use_fast_impl,
img_ids=img_ids,
)
if len(coco_results) > 0
else None # cocoapi does not handle empty results very well
)
res = self._derive_coco_results(
coco_eval, task, class_names=self._metadata.get("thing_classes")
)
self._results[task] = res
def _eval_box_proposals(self, predictions):
"""
Evaluate the box proposals in predictions.
Fill self._results with the metrics for "box_proposals" task.
"""
if self._output_dir:
# Saving generated box proposals to file.
# Predicted box_proposals are in XYXY_ABS mode.
bbox_mode = BoxMode.XYXY_ABS.value
ids, boxes, objectness_logits = [], [], []
for prediction in predictions:
ids.append(prediction["image_id"])
boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy())
objectness_logits.append(prediction["proposals"].objectness_logits.numpy())
proposal_data = {
"boxes": boxes,
"objectness_logits": objectness_logits,
"ids": ids,
"bbox_mode": bbox_mode,
}
with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f:
pickle.dump(proposal_data, f)
if not self._do_evaluation:
self._logger.info("Annotations are not available for evaluation.")
return
self._logger.info("Evaluating bbox proposals ...")
res = {}
areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
for limit in [100, 1000]:
for area, suffix in areas.items():
stats = _evaluate_box_proposals(predictions, self._coco_api, area=area, limit=limit)
key = "AR{}@{:d}".format(suffix, limit)
res[key] = float(stats["ar"].item() * 100)
self._logger.info("Proposal metrics: \n" + create_small_table(res))
self._results["box_proposals"] = res
def _derive_coco_results(self, coco_eval, iou_type, class_names=None):
"""
Derive the desired score numbers from summarized COCOeval.
Args:
coco_eval (None or COCOEval): None represents no predictions from model.
iou_type (str):
            class_names (None or list[str]): if provided, will use it to compute
                per-category AP.
Returns:
a dict of {metric name: score}
"""
metrics = {
"bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
"segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
"keypoints": ["AP", "AP50", "AP75", "APm", "APl"],
}[iou_type]
if coco_eval is None:
self._logger.warn("No predictions from the model!")
return {metric: float("nan") for metric in metrics}
# the standard metrics
results = {
metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan")
for idx, metric in enumerate(metrics)
}
self._logger.info(
"Evaluation results for {}: \n".format(iou_type) + create_small_table(results)
)
if not np.isfinite(sum(results.values())):
self._logger.info("Some metrics cannot be computed and is shown as NaN.")
if class_names is None or len(class_names) <= 1:
return results
# Compute per-category AP
# from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa
precisions = coco_eval.eval["precision"]
# precision has dims (iou, recall, cls, area range, max dets)
assert len(class_names) == precisions.shape[2]
results_per_category = []
for idx, name in enumerate(class_names):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
ap = np.mean(precision) if precision.size else float("nan")
results_per_category.append(("{}".format(name), float(ap * 100)))
# tabulate it
N_COLS = min(6, len(results_per_category) * 2)
results_flatten = list(itertools.chain(*results_per_category))
results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
table = tabulate(
results_2d,
tablefmt="pipe",
floatfmt=".3f",
headers=["category", "AP"] * (N_COLS // 2),
numalign="left",
)
self._logger.info("Per-category {} AP: \n".format(iou_type) + table)
# results.update({"AP-" + name: ap for name, ap in results_per_category})
results_per_category_AP50 = []
for idx, name in enumerate(class_names):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
            t = np.where(coco_eval.params.iouThrs == 0.5)[0]
precisions_50 = precisions[t]
precisions_50 = precisions_50[:, :, idx, 0, -1]
precisions_50 = precisions_50[precisions_50 > -1]
ap = np.mean(precisions_50) if precisions_50.size else float("nan")
results_per_category_AP50.append(("{}".format(name), float(ap * 100)))
# tabulate it
N_COLS = min(6, len(results_per_category_AP50) * 2)
results_flatten = list(itertools.chain(*results_per_category_AP50))
results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
table = tabulate(
results_2d,
tablefmt="pipe",
floatfmt=".3f",
headers=["category", "AP50"] * (N_COLS // 2),
numalign="left",
)
self._logger.info("Per-category {} AP50: \n".format(iou_type) + table)
results.update({"AP50-" + name: ap for name, ap in results_per_category_AP50})
return results
def instances_to_coco_json(instances, img_id):
"""
Dump an "Instances" object to a COCO-format json that's used for evaluation.
Args:
instances (Instances):
img_id (int): the image id
Returns:
list[dict]: list of json annotations in COCO format.
"""
num_instance = len(instances)
if num_instance == 0:
return []
boxes = instances.pred_boxes.tensor.numpy()
boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
boxes = boxes.tolist()
scores = instances.scores.tolist()
classes = instances.pred_classes.tolist()
has_mask = instances.has("pred_masks")
if has_mask:
        # use RLE to encode the masks, because they are too large and take too much
        # memory, since this evaluator stores outputs of the entire dataset
rles = [
mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0]
for mask in instances.pred_masks
]
for rle in rles:
# "counts" is an array encoded by mask_util as a byte-stream. Python3's
# json writer which always produces strings cannot serialize a bytestream
# unless you decode it. Thankfully, utf-8 works out (which is also what
# the pycocotools/_mask.pyx does).
rle["counts"] = rle["counts"].decode("utf-8")
has_keypoints = instances.has("pred_keypoints")
if has_keypoints:
keypoints = instances.pred_keypoints
results = []
for k in range(num_instance):
result = {
"image_id": img_id,
"category_id": classes[k],
"bbox": boxes[k],
"score": scores[k],
}
if has_mask:
result["segmentation"] = rles[k]
if has_keypoints:
# In COCO annotations,
# keypoints coordinates are pixel indices.
# However our predictions are floating point coordinates.
# Therefore we subtract 0.5 to be consistent with the annotation format.
# This is the inverse of data loading logic in `datasets/coco.py`.
keypoints[k][:, :2] -= 0.5
result["keypoints"] = keypoints[k].flatten().tolist()
results.append(result)
return results
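# Illustrative sketch (not part of the original file): each element of the list
# returned by instances_to_coco_json looks roughly like the dict below. "bbox" is
# in XYWH_ABS format; "segmentation" and "keypoints" are present only when the
# model predicted masks / keypoints. All concrete values are placeholders.
#
#   {
#       "image_id": 42,
#       "category_id": 0,
#       "bbox": [x, y, w, h],
#       "score": 0.98,
#       "segmentation": {"size": [h, w], "counts": "<RLE string>"},  # optional
#       "keypoints": [x1, y1, v1, x2, y2, v2, ...],                  # optional
#   }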
# inspired from Detectron:
# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa
def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None):
"""
Evaluate detection proposal recall metrics. This function is a much
faster alternative to the official COCO API recall evaluation code. However,
it produces slightly different results.
"""
# Record max overlap value for each gt box
# Return vector of overlap values
areas = {
"all": 0,
"small": 1,
"medium": 2,
"large": 3,
"96-128": 4,
"128-256": 5,
"256-512": 6,
"512-inf": 7,
}
area_ranges = [
[0 ** 2, 1e5 ** 2], # all
[0 ** 2, 32 ** 2], # small
[32 ** 2, 96 ** 2], # medium
[96 ** 2, 1e5 ** 2], # large
[96 ** 2, 128 ** 2], # 96-128
[128 ** 2, 256 ** 2], # 128-256
[256 ** 2, 512 ** 2], # 256-512
        [512 ** 2, 1e5 ** 2],  # 512-inf
    ]
assert area in areas, "Unknown area range: {}".format(area)
area_range = area_ranges[areas[area]]
gt_overlaps = []
num_pos = 0
for prediction_dict in dataset_predictions:
predictions = prediction_dict["proposals"]
# sort predictions in descending order
# TODO maybe remove this and make it explicit in the documentation
inds = predictions.objectness_logits.sort(descending=True)[1]
predictions = predictions[inds]
ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"])
anno = coco_api.loadAnns(ann_ids)
gt_boxes = [
BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
for obj in anno
if obj["iscrowd"] == 0
]
gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes
gt_boxes = Boxes(gt_boxes)
gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])
if len(gt_boxes) == 0 or len(predictions) == 0:
continue
valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
gt_boxes = gt_boxes[valid_gt_inds]
num_pos += len(gt_boxes)
if len(gt_boxes) == 0:
continue
if limit is not None and len(predictions) > limit:
predictions = predictions[:limit]
overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes)
_gt_overlaps = torch.zeros(len(gt_boxes))
for j in range(min(len(predictions), len(gt_boxes))):
# find which proposal box maximally covers each gt box
# and get the iou amount of coverage for each gt box
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
# find which gt box is 'best' covered (i.e. 'best' = most iou)
gt_ovr, gt_ind = max_overlaps.max(dim=0)
assert gt_ovr >= 0
# find the proposal box that covers the best covered gt box
box_ind = argmax_overlaps[gt_ind]
# record the iou coverage of this gt box
_gt_overlaps[j] = overlaps[box_ind, gt_ind]
assert _gt_overlaps[j] == gt_ovr
# mark the proposal box and the gt box as used
overlaps[box_ind, :] = -1
overlaps[:, gt_ind] = -1
# append recorded iou coverage level
gt_overlaps.append(_gt_overlaps)
gt_overlaps = (
torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32)
)
gt_overlaps, _ = torch.sort(gt_overlaps)
if thresholds is None:
step = 0.05
thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
recalls = torch.zeros_like(thresholds)
# compute recall for each iou threshold
for i, t in enumerate(thresholds):
recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
# ar = 2 * np.trapz(recalls, thresholds)
ar = recalls.mean()
return {
"ar": ar,
"recalls": recalls,
"thresholds": thresholds,
"gt_overlaps": gt_overlaps,
"num_pos": num_pos,
}
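# Hypothetical usage sketch (not part of the original file): this mirrors how
# `_eval_box_proposals` above calls this helper; `predictions` and `coco_api`
# stand for the gathered prediction dicts and the COCO ground-truth API object.
#
#   stats = _evaluate_box_proposals(predictions, coco_api, area="small", limit=100)
#   ar_small_100 = float(stats["ar"].item() * 100)  # reported above as "ARs@100"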
def _evaluate_predictions_on_coco(
coco_gt, coco_results, iou_type, kpt_oks_sigmas=None, use_fast_impl=True, img_ids=None
):
"""
Evaluate the coco results using COCOEval API.
"""
assert len(coco_results) > 0
if iou_type == "segm":
coco_results = copy.deepcopy(coco_results)
# When evaluating mask AP, if the results contain bbox, cocoapi will
# use the box area as the area of the instance, instead of the mask area.
# This leads to a different definition of small/medium/large.
# We remove the bbox field to let mask AP use mask area.
for c in coco_results:
c.pop("bbox", None)
coco_dt = coco_gt.loadRes(coco_results)
coco_eval = (COCOeval_opt if use_fast_impl else COCOeval)(coco_gt, coco_dt, iou_type)
if img_ids is not None:
coco_eval.params.imgIds = img_ids
if iou_type == "keypoints":
# Use the COCO default keypoint OKS sigmas unless overrides are specified
if kpt_oks_sigmas:
assert hasattr(coco_eval.params, "kpt_oks_sigmas"), "pycocotools is too old!"
coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas)
# COCOAPI requires every detection and every gt to have keypoints, so
# we just take the first entry from both
num_keypoints_dt = len(coco_results[0]["keypoints"]) // 3
num_keypoints_gt = len(next(iter(coco_gt.anns.values()))["keypoints"]) // 3
num_keypoints_oks = len(coco_eval.params.kpt_oks_sigmas)
assert num_keypoints_oks == num_keypoints_dt == num_keypoints_gt, (
f"[COCOEvaluator] Prediction contain {num_keypoints_dt} keypoints. "
f"Ground truth contains {num_keypoints_gt} keypoints. "
f"The length of cfg.TEST.KEYPOINT_OKS_SIGMAS is {num_keypoints_oks}. "
"They have to agree with each other. For meaning of OKS, please refer to "
"http://cocodataset.org/#keypoints-eval."
)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return coco_eval
| adaptive_teacher-main | adapteacher/evaluation/coco_evaluation.py |
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
import os
import tempfile
import xml.etree.ElementTree as ET
from collections import OrderedDict, defaultdict
from functools import lru_cache
import torch
from detectron2.data import MetadataCatalog
from detectron2.utils import comm
from detectron2.utils.file_io import PathManager
from detectron2.evaluation import DatasetEvaluator
class PascalVOCDetectionEvaluator(DatasetEvaluator):
"""
Evaluate Pascal VOC style AP for Pascal VOC dataset.
It contains a synchronization, therefore has to be called from all ranks.
Note that the concept of AP can be implemented in different ways and may not
produce identical results. This class mimics the implementation of the official
Pascal VOC Matlab API, and should produce similar but not identical results to the
official API.
"""
def __init__(self, dataset_name, target_classnames=None):
"""
Args:
dataset_name (str): name of the dataset, e.g., "voc_2007_test"
"""
self._dataset_name = dataset_name
meta = MetadataCatalog.get(dataset_name)
# Too many tiny files, download all to local for speed.
annotation_dir_local = PathManager.get_local_path(
os.path.join(meta.dirname, "Annotations/")
)
self._anno_file_template = os.path.join(annotation_dir_local, "{}.xml")
self._image_set_path = os.path.join(meta.dirname, "ImageSets", "Main", meta.split + ".txt")
self._class_names = meta.thing_classes
assert meta.year in [2007, 2012], meta.year
self._is_2007 = meta.year == 2007
self._cpu_device = torch.device("cpu")
self._logger = logging.getLogger(__name__)
        if target_classnames is None:
self.target_classnames = self._class_names
else:
self.target_classnames = target_classnames
def reset(self):
self._predictions = defaultdict(list) # class name -> list of prediction strings
def process(self, inputs, outputs):
for input, output in zip(inputs, outputs):
image_id = input["image_id"]
instances = output["instances"].to(self._cpu_device)
boxes = instances.pred_boxes.tensor.numpy()
scores = instances.scores.tolist()
classes = instances.pred_classes.tolist()
for box, score, cls in zip(boxes, scores, classes):
xmin, ymin, xmax, ymax = box
# The inverse of data loading logic in `datasets/pascal_voc.py`
xmin += 1
ymin += 1
self._predictions[cls].append(
f"{image_id} {score:.3f} {xmin:.1f} {ymin:.1f} {xmax:.1f} {ymax:.1f}"
)
def evaluate(self):
"""
Returns:
            dict: has a key "bbox", whose value is a dict of "AP", "AP50", and "AP75".
"""
all_predictions = comm.gather(self._predictions, dst=0)
if not comm.is_main_process():
return
predictions = defaultdict(list)
for predictions_per_rank in all_predictions:
for clsid, lines in predictions_per_rank.items():
predictions[clsid].extend(lines)
del all_predictions
self._logger.info(
"Evaluating {} using {} metric. "
"Note that results do not use the official Matlab API.".format(
self._dataset_name, 2007 if self._is_2007 else 2012
)
)
with tempfile.TemporaryDirectory(prefix="pascal_voc_eval_") as dirname:
res_file_template = os.path.join(dirname, "{}.txt")
aps = defaultdict(list) # iou -> ap per class
for cls_id, cls_name in enumerate(self._class_names):
if cls_name not in self.target_classnames:
continue
lines = predictions.get(cls_id, [""])
with open(res_file_template.format(cls_name), "w") as f:
f.write("\n".join(lines))
for thresh in range(50, 100, 5):
rec, prec, ap = voc_eval(
res_file_template,
self._anno_file_template,
self._image_set_path,
cls_name,
ovthresh=thresh / 100.0,
use_07_metric=self._is_2007,
)
aps[thresh].append(ap * 100)
ret = OrderedDict()
mAP = {iou: np.mean(x) for iou, x in aps.items()}
ret["bbox"] = {"AP": np.mean(list(mAP.values())), "AP50": mAP[50], "AP75": mAP[75]}
        # Add per-class AP50 entries to the returned results
for idx, name in enumerate(self.target_classnames):
ret["bbox"].update({"AP50-" + name: aps[50][idx]})
return ret
##############################################################################
#
# Below code is modified from
# https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/voc_eval.py
# --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Bharath Hariharan
# --------------------------------------------------------
"""Python implementation of the PASCAL VOC devkit's AP evaluation code."""
@lru_cache(maxsize=None)
def parse_rec(filename):
"""Parse a PASCAL VOC xml file."""
with PathManager.open(filename) as f:
tree = ET.parse(f)
objects = []
for obj in tree.findall("object"):
obj_struct = {}
obj_struct["name"] = obj.find("name").text
obj_struct["pose"] = obj.find("pose").text
obj_struct["truncated"] = int(obj.find("truncated").text)
obj_struct["difficult"] = int(obj.find("difficult").text)
bbox = obj.find("bndbox")
obj_struct["bbox"] = [
int(bbox.find("xmin").text),
int(bbox.find("ymin").text),
int(bbox.find("xmax").text),
int(bbox.find("ymax").text),
]
objects.append(obj_struct)
return objects
def voc_ap(rec, prec, use_07_metric=False):
"""Compute VOC AP given precision and recall. If use_07_metric is true, uses
the VOC 07 11-point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.0
for t in np.arange(0.0, 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.0
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.0], rec, [1.0]))
mpre = np.concatenate(([0.0], prec, [0.0]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
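# Worked example (not part of the original file): with two detections that give
# rec = [0.5, 1.0] and prec = [1.0, 0.5], the interpolated ("correct") AP is
# 0.5 * 1.0 + 0.5 * 0.5 = 0.75, while the VOC07 11-point metric averages
# p = 1.0 over the six thresholds t <= 0.5 and p = 0.5 over the remaining ones,
# giving roughly 0.77.
#
#   voc_ap(np.array([0.5, 1.0]), np.array([1.0, 0.5]))                       # 0.75
#   voc_ap(np.array([0.5, 1.0]), np.array([1.0, 0.5]), use_07_metric=True)   # ~0.77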
def voc_eval(detpath, annopath, imagesetfile, classname, ovthresh=0.5, use_07_metric=False):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# first load gt
# read list of images
with PathManager.open(imagesetfile, "r") as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
# load annots
recs = {}
for imagename in imagenames:
recs[imagename] = parse_rec(annopath.format(imagename))
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj["name"] == classname]
bbox = np.array([x["bbox"] for x in R])
difficult = np.array([x["difficult"] for x in R]).astype(np.bool)
# difficult = np.array([False for x in R]).astype(np.bool) # treat all "difficult" as GT
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {"bbox": bbox, "difficult": difficult, "det": det}
# read dets
detfile = detpath.format(classname)
with open(detfile, "r") as f:
lines = f.readlines()
splitlines = [x.strip().split(" ") for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines]).reshape(-1, 4)
# sort by confidence
sorted_ind = np.argsort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R["bbox"].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
ih = np.maximum(iymax - iymin + 1.0, 0.0)
inters = iw * ih
# union
uni = (
(bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0)
+ (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0)
- inters
)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R["difficult"][jmax]:
if not R["det"][jmax]:
tp[d] = 1.0
R["det"][jmax] = 1
else:
fp[d] = 1.0
else:
fp[d] = 1.0
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap
| adaptive_teacher-main | adapteacher/evaluation/pascal_voc_evaluation.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import numpy as np
import operator
import json
import torch.utils.data
from detectron2.utils.comm import get_world_size
from detectron2.data.common import (
DatasetFromList,
MapDataset,
)
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.samplers import (
InferenceSampler,
RepeatFactorTrainingSampler,
TrainingSampler,
)
from detectron2.data.build import (
trivial_batch_collator,
worker_init_reset_seed,
get_detection_dataset_dicts,
build_batch_data_loader,
)
from adapteacher.data.common import (
AspectRatioGroupedSemiSupDatasetTwoCrop,
)
"""
This file contains the default logic to build a dataloader for training or testing.
"""
def divide_label_unlabel(
dataset_dicts, SupPercent, random_data_seed, random_data_seed_path
):
num_all = len(dataset_dicts)
num_label = int(SupPercent / 100.0 * num_all)
# read from pre-generated data seed
with open(random_data_seed_path) as COCO_sup_file:
coco_random_idx = json.load(COCO_sup_file)
labeled_idx = np.array(coco_random_idx[str(SupPercent)][str(random_data_seed)])
assert labeled_idx.shape[0] == num_label, "Number of READ_DATA is mismatched."
label_dicts = []
unlabel_dicts = []
labeled_idx = set(labeled_idx)
for i in range(len(dataset_dicts)):
if i in labeled_idx:
label_dicts.append(dataset_dicts[i])
else:
unlabel_dicts.append(dataset_dicts[i])
return label_dicts, unlabel_dicts
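# Illustrative sketch (not part of the original file): the pre-generated seed file
# read above is expected to be a JSON dict keyed first by the supervision
# percentage and then by the data seed, each entry holding the list of labeled
# image indices (all values below are placeholders):
#
#   {
#       "1.0": {"0": [4, 17, 523, ...], "1": [...]},
#       "10.0": {"0": [...]}
#   }
#
# so that coco_random_idx[str(SupPercent)][str(random_data_seed)] selects one split.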
# used by the supervised-only baseline trainer
def build_detection_semisup_train_loader(cfg, mapper=None):
dataset_dicts = get_detection_dataset_dicts(
cfg.DATASETS.TRAIN,
filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
if cfg.MODEL.KEYPOINT_ON
else 0,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN
if cfg.MODEL.LOAD_PROPOSALS
else None,
)
# Divide into labeled and unlabeled sets according to supervision percentage
label_dicts, unlabel_dicts = divide_label_unlabel(
dataset_dicts,
cfg.DATALOADER.SUP_PERCENT,
cfg.DATALOADER.RANDOM_DATA_SEED,
cfg.DATALOADER.RANDOM_DATA_SEED_PATH,
)
dataset = DatasetFromList(label_dicts, copy=False)
if mapper is None:
mapper = DatasetMapper(cfg, True)
dataset = MapDataset(dataset, mapper)
sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
logger = logging.getLogger(__name__)
logger.info("Using training sampler {}".format(sampler_name))
if sampler_name == "TrainingSampler":
sampler = TrainingSampler(len(dataset))
elif sampler_name == "RepeatFactorTrainingSampler":
repeat_factors = (
RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
label_dicts, cfg.DATALOADER.REPEAT_THRESHOLD
)
)
sampler = RepeatFactorTrainingSampler(repeat_factors)
else:
raise ValueError("Unknown training sampler: {}".format(sampler_name))
# list num of labeled and unlabeled
logger.info("Number of training samples " + str(len(dataset)))
logger.info("Supervision percentage " + str(cfg.DATALOADER.SUP_PERCENT))
return build_batch_data_loader(
dataset,
sampler,
cfg.SOLVER.IMS_PER_BATCH,
aspect_ratio_grouping=cfg.DATALOADER.ASPECT_RATIO_GROUPING,
num_workers=cfg.DATALOADER.NUM_WORKERS,
)
# used by evaluation
def build_detection_test_loader(cfg, dataset_name, mapper=None):
dataset_dicts = get_detection_dataset_dicts(
[dataset_name],
filter_empty=False,
proposal_files=[
cfg.DATASETS.PROPOSAL_FILES_TEST[
list(cfg.DATASETS.TEST).index(dataset_name)
]
]
if cfg.MODEL.LOAD_PROPOSALS
else None,
)
dataset = DatasetFromList(dataset_dicts)
if mapper is None:
mapper = DatasetMapper(cfg, False)
dataset = MapDataset(dataset, mapper)
sampler = InferenceSampler(len(dataset))
batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 1, drop_last=False)
data_loader = torch.utils.data.DataLoader(
dataset,
num_workers=cfg.DATALOADER.NUM_WORKERS,
batch_sampler=batch_sampler,
collate_fn=trivial_batch_collator,
)
return data_loader
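# Hypothetical usage sketch (not part of the original file): the dataset name must
# already be registered (see adapteacher/data/datasets/builtin.py); the name below
# is one of the splits registered there.
#
#   test_loader = build_detection_test_loader(cfg, "cityscapes_foggy_val")
#   # yields lists of length 1, since the evaluation batch size is fixed to 1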
# used by the unbiased teacher trainer
def build_detection_semisup_train_loader_two_crops(cfg, mapper=None):
if cfg.DATASETS.CROSS_DATASET: # cross-dataset (e.g., coco-additional)
label_dicts = get_detection_dataset_dicts(
cfg.DATASETS.TRAIN_LABEL,
filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
if cfg.MODEL.KEYPOINT_ON
else 0,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN
if cfg.MODEL.LOAD_PROPOSALS
else None,
)
unlabel_dicts = get_detection_dataset_dicts(
cfg.DATASETS.TRAIN_UNLABEL,
filter_empty=False,
min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
if cfg.MODEL.KEYPOINT_ON
else 0,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN
if cfg.MODEL.LOAD_PROPOSALS
else None,
)
else: # different degree of supervision (e.g., COCO-supervision)
dataset_dicts = get_detection_dataset_dicts(
cfg.DATASETS.TRAIN,
filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
if cfg.MODEL.KEYPOINT_ON
else 0,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN
if cfg.MODEL.LOAD_PROPOSALS
else None,
)
# Divide into labeled and unlabeled sets according to supervision percentage
label_dicts, unlabel_dicts = divide_label_unlabel(
dataset_dicts,
cfg.DATALOADER.SUP_PERCENT,
cfg.DATALOADER.RANDOM_DATA_SEED,
cfg.DATALOADER.RANDOM_DATA_SEED_PATH,
)
label_dataset = DatasetFromList(label_dicts, copy=False)
# exclude the labeled set from unlabeled dataset
unlabel_dataset = DatasetFromList(unlabel_dicts, copy=False)
# include the labeled set in unlabel dataset
# unlabel_dataset = DatasetFromList(dataset_dicts, copy=False)
if mapper is None:
mapper = DatasetMapper(cfg, True)
label_dataset = MapDataset(label_dataset, mapper)
unlabel_dataset = MapDataset(unlabel_dataset, mapper)
sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
logger = logging.getLogger(__name__)
logger.info("Using training sampler {}".format(sampler_name))
if sampler_name == "TrainingSampler":
label_sampler = TrainingSampler(len(label_dataset))
unlabel_sampler = TrainingSampler(len(unlabel_dataset))
elif sampler_name == "RepeatFactorTrainingSampler":
raise NotImplementedError("{} not yet supported.".format(sampler_name))
else:
raise ValueError("Unknown training sampler: {}".format(sampler_name))
return build_semisup_batch_data_loader_two_crop(
(label_dataset, unlabel_dataset),
(label_sampler, unlabel_sampler),
cfg.SOLVER.IMG_PER_BATCH_LABEL,
cfg.SOLVER.IMG_PER_BATCH_UNLABEL,
aspect_ratio_grouping=cfg.DATALOADER.ASPECT_RATIO_GROUPING,
num_workers=cfg.DATALOADER.NUM_WORKERS,
)
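# Illustrative sketch (not part of the original file): assuming a mapper such as
# DatasetMapperTwoCropSeparate that returns (strong, weak) pairs, each element
# yielded by the returned loader is a 4-tuple of lists of dataset dicts in the
# order (label_strong, label_weak, unlabel_strong, unlabel_weak); see
# AspectRatioGroupedSemiSupDatasetTwoCrop in adapteacher/data/common.py.
#
#   loader = build_detection_semisup_train_loader_two_crops(cfg, mapper)
#   label_strong, label_weak, unlabel_strong, unlabel_weak = next(iter(loader))
#   # per-GPU: len(label_strong) == cfg.SOLVER.IMG_PER_BATCH_LABEL // get_world_size()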
# batch data loader
def build_semisup_batch_data_loader_two_crop(
dataset,
sampler,
total_batch_size_label,
total_batch_size_unlabel,
*,
aspect_ratio_grouping=False,
num_workers=0
):
world_size = get_world_size()
assert (
total_batch_size_label > 0 and total_batch_size_label % world_size == 0
), "Total label batch size ({}) must be divisible by the number of gpus ({}).".format(
total_batch_size_label, world_size
)
assert (
total_batch_size_unlabel > 0 and total_batch_size_unlabel % world_size == 0
), "Total unlabel batch size ({}) must be divisible by the number of gpus ({}).".format(
        total_batch_size_unlabel, world_size
)
batch_size_label = total_batch_size_label // world_size
batch_size_unlabel = total_batch_size_unlabel // world_size
label_dataset, unlabel_dataset = dataset
label_sampler, unlabel_sampler = sampler
if aspect_ratio_grouping:
label_data_loader = torch.utils.data.DataLoader(
label_dataset,
sampler=label_sampler,
num_workers=num_workers,
batch_sampler=None,
collate_fn=operator.itemgetter(
0
), # don't batch, but yield individual elements
worker_init_fn=worker_init_reset_seed,
) # yield individual mapped dict
unlabel_data_loader = torch.utils.data.DataLoader(
unlabel_dataset,
sampler=unlabel_sampler,
num_workers=num_workers,
batch_sampler=None,
collate_fn=operator.itemgetter(
0
), # don't batch, but yield individual elements
worker_init_fn=worker_init_reset_seed,
) # yield individual mapped dict
return AspectRatioGroupedSemiSupDatasetTwoCrop(
(label_data_loader, unlabel_data_loader),
(batch_size_label, batch_size_unlabel),
)
else:
raise NotImplementedError("ASPECT_RATIO_GROUPING = False is not supported yet") | adaptive_teacher-main | adapteacher/data/build.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .build import (
build_detection_test_loader,
build_detection_semisup_train_loader,
)
| adaptive_teacher-main | adapteacher/data/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import torchvision.transforms as transforms
from adapteacher.data.transforms.augmentation_impl import (
GaussianBlur,
)
def build_strong_augmentation(cfg, is_train):
"""
    Create the strong augmentation applied to training images: color jittering,
    random grayscale, Gaussian blur and several random-erasing patches.
    Returns:
        torchvision.transforms.Compose
"""
logger = logging.getLogger(__name__)
augmentation = []
if is_train:
        # This is similar to SimCLR https://arxiv.org/abs/2002.05709
augmentation.append(
transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8)
)
augmentation.append(transforms.RandomGrayscale(p=0.2))
augmentation.append(transforms.RandomApply([GaussianBlur([0.1, 2.0])], p=0.5))
randcrop_transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.RandomErasing(
p=0.7, scale=(0.05, 0.2), ratio=(0.3, 3.3), value="random"
),
transforms.RandomErasing(
p=0.5, scale=(0.02, 0.2), ratio=(0.1, 6), value="random"
),
transforms.RandomErasing(
p=0.3, scale=(0.02, 0.2), ratio=(0.05, 8), value="random"
),
transforms.ToPILImage(),
]
)
augmentation.append(randcrop_transform)
logger.info("Augmentations used in training: " + str(augmentation))
return transforms.Compose(augmentation) | adaptive_teacher-main | adapteacher/data/detection_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import numpy as np
from PIL import Image
import torch
import detectron2.data.detection_utils as utils
import detectron2.data.transforms as T
from detectron2.data.dataset_mapper import DatasetMapper
from adapteacher.data.detection_utils import build_strong_augmentation
class DatasetMapperTwoCropSeparate(DatasetMapper):
"""
This customized mapper produces two augmented images from a single image
instance. This mapper makes sure that the two augmented images have the same
cropping and thus the same size.
A callable which takes a dataset dict in Detectron2 Dataset format,
    and maps it into a format used by the model.
This is the default callable to be used to map your dataset dict into training data.
You may need to follow it to implement your own one for customized logic,
such as a different way to read or transform images.
See :doc:`/tutorials/data_loading` for details.
The callable currently does the following:
1. Read the image from "file_name"
2. Applies cropping/geometric transforms to the image and annotations
3. Prepare data and annotations to Tensor and :class:`Instances`
"""
def __init__(self, cfg, is_train=True):
self.augmentation = utils.build_augmentation(cfg, is_train)
# include crop into self.augmentation
if cfg.INPUT.CROP.ENABLED and is_train:
self.augmentation.insert(
0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)
)
logging.getLogger(__name__).info(
"Cropping used in training: " + str(self.augmentation[0])
)
self.compute_tight_boxes = True
else:
self.compute_tight_boxes = False
self.strong_augmentation = build_strong_augmentation(cfg, is_train)
# fmt: off
self.img_format = cfg.INPUT.FORMAT
self.mask_on = cfg.MODEL.MASK_ON
self.mask_format = cfg.INPUT.MASK_FORMAT
self.keypoint_on = cfg.MODEL.KEYPOINT_ON
self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
# fmt: on
if self.keypoint_on and is_train:
self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(
cfg.DATASETS.TRAIN
)
else:
self.keypoint_hflip_indices = None
if self.load_proposals:
self.proposal_min_box_size = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE
self.proposal_topk = (
cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
if is_train
else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
)
self.is_train = is_train
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
# utils.check_image_size(dataset_dict, image)
if "sem_seg_file_name" in dataset_dict:
sem_seg_gt = utils.read_image(
dataset_dict.pop("sem_seg_file_name"), "L"
).squeeze(2)
else:
sem_seg_gt = None
aug_input = T.StandardAugInput(image, sem_seg=sem_seg_gt)
transforms = aug_input.apply_augmentations(self.augmentation)
image_weak_aug, sem_seg_gt = aug_input.image, aug_input.sem_seg
image_shape = image_weak_aug.shape[:2] # h, w
if sem_seg_gt is not None:
dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long"))
if self.load_proposals:
utils.transform_proposals(
dataset_dict,
image_shape,
transforms,
proposal_topk=self.proposal_topk,
min_box_size=self.proposal_min_box_size,
)
if not self.is_train:
dataset_dict.pop("annotations", None)
dataset_dict.pop("sem_seg_file_name", None)
return dataset_dict
if "annotations" in dataset_dict:
for anno in dataset_dict["annotations"]:
if not self.mask_on:
anno.pop("segmentation", None)
if not self.keypoint_on:
anno.pop("keypoints", None)
annos = [
utils.transform_instance_annotations(
obj,
transforms,
image_shape,
keypoint_hflip_indices=self.keypoint_hflip_indices,
)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = utils.annotations_to_instances(
annos, image_shape, mask_format=self.mask_format
)
if self.compute_tight_boxes and instances.has("gt_masks"):
instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
bboxes_d2_format = utils.filter_empty_instances(instances)
dataset_dict["instances"] = bboxes_d2_format
# apply strong augmentation
        # We use torchvision augmentations, which are not compatible with
        # detectron2 (detectron2 uses numpy arrays for images). Thus, we need to
        # convert to PIL format first.
image_pil = Image.fromarray(image_weak_aug.astype("uint8"), "RGB")
image_strong_aug = np.array(self.strong_augmentation(image_pil))
dataset_dict["image"] = torch.as_tensor(
np.ascontiguousarray(image_strong_aug.transpose(2, 0, 1))
)
dataset_dict_key = copy.deepcopy(dataset_dict)
dataset_dict_key["image"] = torch.as_tensor(
np.ascontiguousarray(image_weak_aug.transpose(2, 0, 1))
)
assert dataset_dict["image"].size(1) == dataset_dict_key["image"].size(1)
assert dataset_dict["image"].size(2) == dataset_dict_key["image"].size(2)
return (dataset_dict, dataset_dict_key)
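# Illustrative sketch (not part of the original file): for a training example the
# mapper returns a pair of dataset dicts built from the same weakly augmented
# image, so both share geometry and ground truth.
#
#   strong_dict, weak_dict = DatasetMapperTwoCropSeparate(cfg, is_train=True)(dataset_dict)
#   # strong_dict["image"]: weak (geometric) augmentation + color jitter / blur / erasing
#   # weak_dict["image"]:   weak (geometric) augmentation only
#   # both tensors have identical height and width, and both carry the same "instances"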
| adaptive_teacher-main | adapteacher/data/dataset_mapper.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from detectron2.data.common import MapDataset, AspectRatioGroupedDataset
class MapDatasetTwoCrop(MapDataset):
"""
Map a function over the elements in a dataset.
This customized MapDataset transforms an image with two augmentations
as two inputs (queue and key).
Args:
dataset: a dataset where map function is applied.
map_func: a callable which maps the element in dataset. map_func is
responsible for error handling, when error happens, it needs to
return None so the MapDataset will randomly use other
elements from the dataset.
"""
def __getitem__(self, idx):
retry_count = 0
cur_idx = int(idx)
while True:
data = self._map_func(self._dataset[cur_idx])
if data is not None:
self._fallback_candidates.add(cur_idx)
return data
# _map_func fails for this idx, use a random new index from the pool
retry_count += 1
self._fallback_candidates.discard(cur_idx)
cur_idx = self._rng.sample(self._fallback_candidates, k=1)[0]
if retry_count >= 3:
logger = logging.getLogger(__name__)
logger.warning(
"Failed to apply `_map_func` for idx: {}, retry count: {}".format(
idx, retry_count
)
)
class AspectRatioGroupedDatasetTwoCrop(AspectRatioGroupedDataset):
"""
Batch data that have similar aspect ratio together.
In this implementation, images whose aspect ratio < (or >) 1 will
be batched together.
This improves training speed because the images then need less padding
to form a batch.
It assumes the underlying dataset produces dicts with "width" and "height" keys.
It will then produce a list of original dicts with length = batch_size,
all with similar aspect ratios.
"""
def __init__(self, dataset, batch_size):
"""
Args:
dataset: an iterable. Each element must be a dict with keys
"width" and "height", which will be used to batch data.
batch_size (int):
"""
self.dataset = dataset
self.batch_size = batch_size
self._buckets = [[] for _ in range(2)]
self._buckets_key = [[] for _ in range(2)]
# Hard-coded two aspect ratio groups: w > h and w < h.
# Can add support for more aspect ratio groups, but doesn't seem useful
def __iter__(self):
for d in self.dataset:
# d is a tuple with len = 2
# It's two images (same size) from the same image instance
w, h = d[0]["width"], d[0]["height"]
bucket_id = 0 if w > h else 1
# bucket = bucket for normal images
bucket = self._buckets[bucket_id]
bucket.append(d[0])
# buckets_key = bucket for augmented images
buckets_key = self._buckets_key[bucket_id]
buckets_key.append(d[1])
if len(bucket) == self.batch_size:
yield (bucket[:], buckets_key[:])
del bucket[:]
del buckets_key[:]
class AspectRatioGroupedSemiSupDatasetTwoCrop(AspectRatioGroupedDataset):
"""
Batch data that have similar aspect ratio together.
In this implementation, images whose aspect ratio < (or >) 1 will
be batched together.
This improves training speed because the images then need less padding
to form a batch.
It assumes the underlying dataset produces dicts with "width" and "height" keys.
It will then produce a list of original dicts with length = batch_size,
all with similar aspect ratios.
"""
def __init__(self, dataset, batch_size):
"""
Args:
dataset: a tuple containing two iterable generators. (labeled and unlabeled data)
Each element must be a dict with keys "width" and "height", which will be used
to batch data.
batch_size (int):
"""
self.label_dataset, self.unlabel_dataset = dataset
self.batch_size_label = batch_size[0]
self.batch_size_unlabel = batch_size[1]
self._label_buckets = [[] for _ in range(2)]
self._label_buckets_key = [[] for _ in range(2)]
self._unlabel_buckets = [[] for _ in range(2)]
self._unlabel_buckets_key = [[] for _ in range(2)]
# Hard-coded two aspect ratio groups: w > h and w < h.
# Can add support for more aspect ratio groups, but doesn't seem useful
def __iter__(self):
label_bucket, unlabel_bucket = [], []
for d_label, d_unlabel in zip(self.label_dataset, self.unlabel_dataset):
# d is a tuple with len = 2
# It's two images (same size) from the same image instance
# d[0] is with strong augmentation, d[1] is with weak augmentation
# because we are grouping images with their aspect ratio
            # the label and unlabel buckets might not fill at the same rate,
            # i.e., one can reach its batch size while the other has not yet
if len(label_bucket) != self.batch_size_label:
w, h = d_label[0]["width"], d_label[0]["height"]
label_bucket_id = 0 if w > h else 1
label_bucket = self._label_buckets[label_bucket_id]
label_bucket.append(d_label[0])
label_buckets_key = self._label_buckets_key[label_bucket_id]
label_buckets_key.append(d_label[1])
if len(unlabel_bucket) != self.batch_size_unlabel:
w, h = d_unlabel[0]["width"], d_unlabel[0]["height"]
unlabel_bucket_id = 0 if w > h else 1
unlabel_bucket = self._unlabel_buckets[unlabel_bucket_id]
unlabel_bucket.append(d_unlabel[0])
unlabel_buckets_key = self._unlabel_buckets_key[unlabel_bucket_id]
unlabel_buckets_key.append(d_unlabel[1])
            # yield the batch once both buckets are full
if (
len(label_bucket) == self.batch_size_label
and len(unlabel_bucket) == self.batch_size_unlabel
):
                # label_strong, label_weak, unlabel_strong, unlabel_weak
yield (
label_bucket[:],
label_buckets_key[:],
unlabel_bucket[:],
unlabel_buckets_key[:],
)
del label_bucket[:]
del label_buckets_key[:]
del unlabel_bucket[:]
del unlabel_buckets_key[:]
| adaptive_teacher-main | adapteacher/data/common.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import contextlib
from detectron2.data import DatasetCatalog, MetadataCatalog
from fvcore.common.timer import Timer
# from fvcore.common.file_io import PathManager
from iopath.common.file_io import PathManager
from detectron2.data.datasets.pascal_voc import register_pascal_voc
from detectron2.data.datasets.builtin_meta import _get_builtin_metadata
from .cityscapes_foggy import load_cityscapes_instances
import io
import logging
logger = logging.getLogger(__name__)
JSON_ANNOTATIONS_DIR = ""
_SPLITS_COCO_FORMAT = {}
_SPLITS_COCO_FORMAT["coco"] = {
"coco_2017_unlabel": (
"coco/unlabeled2017",
"coco/annotations/image_info_unlabeled2017.json",
),
"coco_2017_for_voc20": (
"coco",
"coco/annotations/google/instances_unlabeledtrainval20class.json",
),
}
def register_coco_unlabel(root):
for _, splits_per_dataset in _SPLITS_COCO_FORMAT.items():
for key, (image_root, json_file) in splits_per_dataset.items():
meta = {}
register_coco_unlabel_instances(
key, meta, os.path.join(root, json_file), os.path.join(root, image_root)
)
def register_coco_unlabel_instances(name, metadata, json_file, image_root):
"""
Register a dataset in COCO's json annotation format for
instance detection, instance segmentation and keypoint detection.
(i.e., Type 1 and 2 in http://cocodataset.org/#format-data.
`instances*.json` and `person_keypoints*.json` in the dataset).
This is an example of how to register a new dataset.
You can do something similar to this function, to register new datasets.
Args:
name (str): the name that identifies a dataset, e.g. "coco_2014_train".
metadata (dict): extra metadata associated with this dataset. You can
leave it as an empty dict.
json_file (str): path to the json instance annotation file.
image_root (str or path-like): directory which contains all the images.
"""
assert isinstance(name, str), name
assert isinstance(json_file, (str, os.PathLike)), json_file
assert isinstance(image_root, (str, os.PathLike)), image_root
# 1. register a function which returns dicts
DatasetCatalog.register(
name, lambda: load_coco_unlabel_json(json_file, image_root, name)
)
# 2. Optionally, add metadata about this dataset,
# since they might be useful in evaluation, visualization or logging
MetadataCatalog.get(name).set(
json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata
)
def load_coco_unlabel_json(
json_file, image_root, dataset_name=None, extra_annotation_keys=None
):
from pycocotools.coco import COCO
timer = Timer()
json_file = PathManager.get_local_path(json_file)
with contextlib.redirect_stdout(io.StringIO()):
coco_api = COCO(json_file)
if timer.seconds() > 1:
logger.info(
"Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())
)
id_map = None
# sort indices for reproducible results
img_ids = sorted(coco_api.imgs.keys())
imgs = coco_api.loadImgs(img_ids)
logger.info("Loaded {} images in COCO format from {}".format(len(imgs), json_file))
dataset_dicts = []
for img_dict in imgs:
record = {}
record["file_name"] = os.path.join(image_root, img_dict["file_name"])
record["height"] = img_dict["height"]
record["width"] = img_dict["width"]
image_id = record["image_id"] = img_dict["id"]
dataset_dicts.append(record)
return dataset_dicts
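# Illustrative sketch (not part of the original file): each record produced above
# carries image-level information only (note there is no "annotations" key); the
# concrete file name and sizes are placeholders.
#
#   {
#       "file_name": "coco/unlabeled2017/000000000008.jpg",
#       "height": 480,
#       "width": 640,
#       "image_id": 8,
#   }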
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_coco_unlabel(_root)
# ==== Predefined splits for raw cityscapes foggy images ===========
_RAW_CITYSCAPES_SPLITS = {
# "cityscapes_foggy_{task}_train": ("cityscape_foggy/leftImg8bit/train/", "cityscape_foggy/gtFine/train/"),
# "cityscapes_foggy_{task}_val": ("cityscape_foggy/leftImg8bit/val/", "cityscape_foggy/gtFine/val/"),
# "cityscapes_foggy_{task}_test": ("cityscape_foggy/leftImg8bit/test/", "cityscape_foggy/gtFine/test/"),
"cityscapes_foggy_train": ("cityscapes_foggy/leftImg8bit/train/", "cityscapes_foggy/gtFine/train/"),
"cityscapes_foggy_val": ("cityscapes_foggy/leftImg8bit/val/", "cityscapes_foggy/gtFine/val/"),
"cityscapes_foggy_test": ("cityscapes_foggy/leftImg8bit/test/", "cityscapes_foggy/gtFine/test/"),
}
def register_all_cityscapes_foggy(root):
# root = "manifold://mobile_vision_dataset/tree/yujheli/dataset"
for key, (image_dir, gt_dir) in _RAW_CITYSCAPES_SPLITS.items():
meta = _get_builtin_metadata("cityscapes")
image_dir = os.path.join(root, image_dir)
gt_dir = os.path.join(root, gt_dir)
# inst_key = key.format(task="instance_seg")
inst_key = key
# DatasetCatalog.register(
# inst_key,
# lambda x=image_dir, y=gt_dir: load_cityscapes_instances(
# x, y, from_json=True, to_polygons=True
# ),
# )
DatasetCatalog.register(
inst_key,
lambda x=image_dir, y=gt_dir: load_cityscapes_instances(
x, y, from_json=False, to_polygons=False
),
)
# MetadataCatalog.get(inst_key).set(
# image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes_instance", **meta
# )
# MetadataCatalog.get(inst_key).set(
# image_dir=image_dir, gt_dir=gt_dir, evaluator_type="pascal_voc", **meta
# )
MetadataCatalog.get(inst_key).set(
image_dir=image_dir, gt_dir=gt_dir, evaluator_type="coco", **meta
)
# ==== Predefined splits for Clipart (PASCAL VOC format) ===========
def register_all_clipart(root):
# root = "manifold://mobile_vision_dataset/tree/yujheli/dataset"
SPLITS = [
("Clipart1k_train", "clipart", "train"),
("Clipart1k_test", "clipart", "test"),
]
for name, dirname, split in SPLITS:
year = 2012
register_pascal_voc(name, os.path.join(root, dirname), split, year)
MetadataCatalog.get(name).evaluator_type = "pascal_voc"
# MetadataCatalog.get(name).evaluator_type = "coco"
# ==== Predefined splits for Watercolor (PASCAL VOC format) ===========
def register_all_water(root):
# root = "manifold://mobile_vision_dataset/tree/yujheli/dataset"
SPLITS = [
("Watercolor_train", "watercolor", "train"),
("Watercolor_test", "watercolor", "test"),
]
for name, dirname, split in SPLITS:
year = 2012
# register_pascal_voc(name, os.path.join(root, dirname), split, year, class_names=["person", "dog","bicycle", "bird", "car", "cat"])
register_pascal_voc(name, os.path.join(root, dirname), split, year)
MetadataCatalog.get(name).evaluator_type = "pascal_voc_water"
# MetadataCatalog.get(name).thing_classes = ["person", "dog","bike", "bird", "car", "cat"]
# MetadataCatalog.get(name).thing_classes = ["person", "dog","bicycle", "bird", "car", "cat"]
# MetadataCatalog.get(name).evaluator_type = "coco"
register_all_cityscapes_foggy(_root)
register_all_clipart(_root)
register_all_water(_root)
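# Usage note (not part of the original file): all registrations above resolve paths
# relative to the DETECTRON2_DATASETS environment variable (default "datasets"),
# so pointing it at your data root before this module is imported is enough; the
# path below is a placeholder.
#
#   import os
#   os.environ["DETECTRON2_DATASETS"] = "/path/to/datasets"
#   import adapteacher.data.datasets.builtin  # registrations run at import time
#
# after which names such as "cityscapes_foggy_train", "Clipart1k_train" and
# "Watercolor_train" are available through DatasetCatalog / MetadataCatalog.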
| adaptive_teacher-main | adapteacher/data/datasets/builtin.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import json
import logging
import multiprocessing as mp
import numpy as np
import os
from itertools import chain
import pycocotools.mask as mask_util
from PIL import Image
from detectron2.structures import BoxMode
from detectron2.utils.comm import get_world_size
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger
try:
import cv2 # noqa
except ImportError:
# OpenCV is an optional dependency at the moment
pass
logger = logging.getLogger(__name__)
load_only_002 = False
def _get_cityscapes_files(image_dir, gt_dir):
files = []
# scan through the directory
cities = PathManager.ls(image_dir)
logger.info(f"{len(cities)} cities found in '{image_dir}'.")
for city in cities:
city_img_dir = os.path.join(image_dir, city)
city_gt_dir = os.path.join(gt_dir, city)
for basename in PathManager.ls(city_img_dir):
if load_only_002 and '0.02.png' not in basename:
continue
image_file = os.path.join(city_img_dir, basename)
# suffix = "leftImg8bit.png"
# assert basename.endswith(suffix), basename
# basename = basename[: -len(suffix)]
suffix = 'leftImg8bit_foggy'
basename = basename.split(suffix)[0]
instance_file = os.path.join(city_gt_dir, basename + "gtFine_instanceIds.png")
label_file = os.path.join(city_gt_dir, basename + "gtFine_labelIds.png")
json_file = os.path.join(city_gt_dir, basename + "gtFine_polygons.json")
files.append((image_file, instance_file, label_file, json_file))
assert len(files), "No images found in {}".format(image_dir)
for f in files[0]:
assert PathManager.isfile(f), f
return files
def load_cityscapes_instances(image_dir, gt_dir, from_json=True, to_polygons=True):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
from_json (bool): whether to read annotations from the raw json file or the png files.
to_polygons (bool): whether to represent the segmentation as polygons
(COCO's format) instead of masks (cityscapes's format).
Returns:
list[dict]: a list of dicts in Detectron2 standard format. (See
`Using Custom Datasets </tutorials/datasets.html>`_ )
"""
if from_json:
assert to_polygons, (
"Cityscapes's json annotations are in polygon format. "
"Converting to mask format is not supported now."
)
files = _get_cityscapes_files(image_dir, gt_dir)
logger.info("Preprocessing cityscapes annotations ...")
    # This is still not fast: all workers will execute duplicate work, and it
    # can take up to 10 minutes on an 8-GPU server.
pool = mp.Pool(processes=max(mp.cpu_count() // get_world_size() // 2, 4))
ret = pool.map(
functools.partial(_cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons),
files,
)
logger.info("Loaded {} images from {}".format(len(ret), image_dir))
pool.close()
# Map cityscape ids to contiguous ids
from cityscapesscripts.helpers.labels import labels
labels = [l for l in labels if l.hasInstances and not l.ignoreInEval]
dataset_id_to_contiguous_id = {l.id: idx for idx, l in enumerate(labels)}
for dict_per_image in ret:
for anno in dict_per_image["annotations"]:
anno["category_id"] = dataset_id_to_contiguous_id[anno["category_id"]]
return ret
def load_cityscapes_semantic(image_dir, gt_dir):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
Returns:
list[dict]: a list of dict, each has "file_name" and
"sem_seg_file_name".
"""
ret = []
    # gt_dir is small but contains many small files; it makes sense to fetch it to local first
gt_dir = PathManager.get_local_path(gt_dir)
for image_file, _, label_file, json_file in _get_cityscapes_files(image_dir, gt_dir):
label_file = label_file.replace("labelIds", "labelTrainIds")
with PathManager.open(json_file, "r") as f:
jsonobj = json.load(f)
ret.append(
{
"file_name": image_file,
"sem_seg_file_name": label_file,
"height": jsonobj["imgHeight"],
"width": jsonobj["imgWidth"],
}
)
assert len(ret), f"No images found in {image_dir}!"
assert PathManager.isfile(
ret[0]["sem_seg_file_name"]
), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa
return ret
def _cityscapes_files_to_dict(files, from_json, to_polygons):
"""
    Parse cityscapes annotation files to an instance segmentation dataset dict.
Args:
files (tuple): consists of (image_file, instance_id_file, label_id_file, json_file)
from_json (bool): whether to read annotations from the raw json file or the png files.
to_polygons (bool): whether to represent the segmentation as polygons
(COCO's format) instead of masks (cityscapes's format).
Returns:
A dict in Detectron2 Dataset format.
"""
from cityscapesscripts.helpers.labels import id2label, name2label
image_file, instance_id_file, _, json_file = files
annos = []
if from_json:
from shapely.geometry import MultiPolygon, Polygon
with PathManager.open(json_file, "r") as f:
jsonobj = json.load(f)
ret = {
"file_name": image_file,
"image_id": os.path.basename(image_file),
"height": jsonobj["imgHeight"],
"width": jsonobj["imgWidth"],
}
# `polygons_union` contains the union of all valid polygons.
polygons_union = Polygon()
# CityscapesScripts draws the polygons in sequential order
# and each polygon *overwrites* existing ones. See
# (https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/json2instanceImg.py) # noqa
# We use reverse order, and each polygon *avoids* early ones.
# This resolves the polygon overlaps in the same way as CityscapesScripts.
for obj in jsonobj["objects"][::-1]:
if "deleted" in obj: # cityscapes data format specific
continue
label_name = obj["label"]
try:
label = name2label[label_name]
except KeyError:
if label_name.endswith("group"): # crowd area
label = name2label[label_name[: -len("group")]]
else:
raise
if label.id < 0: # cityscapes data format
continue
# Cityscapes's raw annotations use integer coordinates
# Therefore +0.5 here
poly_coord = np.asarray(obj["polygon"], dtype="f4") + 0.5
# CityscapesScripts uses PIL.ImageDraw.polygon to rasterize
# polygons for evaluation. This function operates in integer space
# and draws each pixel whose center falls into the polygon.
# Therefore it draws a polygon which is 0.5 "fatter" in expectation.
# We therefore dilate the input polygon by 0.5 as our input.
poly = Polygon(poly_coord).buffer(0.5, resolution=4)
if not label.hasInstances or label.ignoreInEval:
# even if we won't store the polygon it still contributes to overlaps resolution
polygons_union = polygons_union.union(poly)
continue
# Take non-overlapping part of the polygon
poly_wo_overlaps = poly.difference(polygons_union)
if poly_wo_overlaps.is_empty:
continue
polygons_union = polygons_union.union(poly)
anno = {}
anno["iscrowd"] = label_name.endswith("group")
anno["category_id"] = label.id
if isinstance(poly_wo_overlaps, Polygon):
poly_list = [poly_wo_overlaps]
elif isinstance(poly_wo_overlaps, MultiPolygon):
poly_list = poly_wo_overlaps.geoms
else:
raise NotImplementedError("Unknown geometric structure {}".format(poly_wo_overlaps))
poly_coord = []
for poly_el in poly_list:
# COCO API can work only with exterior boundaries now, hence we store only them.
# TODO: store both exterior and interior boundaries once other parts of the
# codebase support holes in polygons.
poly_coord.append(list(chain(*poly_el.exterior.coords)))
anno["segmentation"] = poly_coord
(xmin, ymin, xmax, ymax) = poly_wo_overlaps.bounds
anno["bbox"] = (xmin, ymin, xmax, ymax)
anno["bbox_mode"] = BoxMode.XYXY_ABS
annos.append(anno)
else:
# See also the official annotation parsing scripts at
# https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/instances2dict.py # noqa
with PathManager.open(instance_id_file, "rb") as f:
inst_image = np.asarray(Image.open(f), order="F")
# ids < 24 are stuff labels (filtering them first is about 5% faster)
flattened_ids = np.unique(inst_image[inst_image >= 24])
ret = {
"file_name": image_file,
"image_id": os.path.basename(image_file),
"height": inst_image.shape[0],
"width": inst_image.shape[1],
}
for instance_id in flattened_ids:
# For non-crowd annotations, instance_id // 1000 is the label_id
# Crowd annotations have <1000 instance ids
label_id = instance_id // 1000 if instance_id >= 1000 else instance_id
label = id2label[label_id]
if not label.hasInstances or label.ignoreInEval:
continue
anno = {}
anno["iscrowd"] = instance_id < 1000
anno["category_id"] = label.id
mask = np.asarray(inst_image == instance_id, dtype=np.uint8, order="F")
inds = np.nonzero(mask)
ymin, ymax = inds[0].min(), inds[0].max()
xmin, xmax = inds[1].min(), inds[1].max()
anno["bbox"] = (xmin, ymin, xmax, ymax)
if xmax <= xmin or ymax <= ymin:
continue
anno["bbox_mode"] = BoxMode.XYXY_ABS
if to_polygons:
# This conversion comes from D4809743 and D5171122,
# when Mask-RCNN was first developed.
contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[
-2
]
polygons = [c.reshape(-1).tolist() for c in contours if len(c) >= 3]
# OpenCV can produce invalid polygons
if len(polygons) == 0:
continue
anno["segmentation"] = polygons
else:
anno["segmentation"] = mask_util.encode(mask[:, :, None])[0]
annos.append(anno)
ret["annotations"] = annos
return ret
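# --- Usage sketch (illustrative only; the dataset name and paths below are placeholders) ---
# The loaders above return Detectron2-format dicts and are typically registered
# through DatasetCatalog, e.g.:
#
#   from detectron2.data import DatasetCatalog
#   DatasetCatalog.register(
#       "foggy_cityscapes_train",
#       lambda: load_cityscapes_instances(
#           "datasets/cityscapes_foggy/leftImg8bit/train",
#           "datasets/cityscapes/gtFine/train",
#           from_json=True,
#           to_polygons=True,
#       ),
#   )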
| adaptive_teacher-main | adapteacher/data/datasets/cityscapes_foggy.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import random
from PIL import ImageFilter
class GaussianBlur:
"""
Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709
Adapted from MoCo:
https://github.com/facebookresearch/moco/blob/master/moco/loader.py
Note that this implementation does not seem to be exactly the same as
described in SimCLR.
"""
def __init__(self, sigma=[0.1, 2.0]):
self.sigma = sigma
def __call__(self, x):
sigma = random.uniform(self.sigma[0], self.sigma[1])
x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
return x
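# --- Usage sketch (illustrative only) ---
# The transform operates on PIL images and is usually plugged into a
# torchvision-style Compose, e.g.:
#
#   from PIL import Image
#   blur = GaussianBlur(sigma=[0.1, 2.0])
#   img = Image.new("RGB", (64, 64), color=(128, 128, 128))  # dummy image
#   out = blur(img)  # blurred copy with a radius drawn uniformly from [0.1, 2.0]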
| adaptive_teacher-main | adapteacher/data/transforms/augmentation_impl.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.engine.hooks import HookBase
import detectron2.utils.comm as comm
import torch
import numpy as np
from contextlib import contextmanager
class LossEvalHook(HookBase):
def __init__(self, eval_period, model, data_loader, model_output, model_name=""):
self._model = model
self._period = eval_period
self._data_loader = data_loader
self._model_output = model_output
self._model_name = model_name
def _do_loss_eval(self):
record_acc_dict = {}
with inference_context(self._model), torch.no_grad():
for _, inputs in enumerate(self._data_loader):
record_dict = self._get_loss(inputs, self._model)
# accumulate the losses
for loss_type in record_dict.keys():
if loss_type not in record_acc_dict.keys():
record_acc_dict[loss_type] = record_dict[loss_type]
else:
record_acc_dict[loss_type] += record_dict[loss_type]
# average
for loss_type in record_acc_dict.keys():
record_acc_dict[loss_type] = record_acc_dict[loss_type] / len(
self._data_loader
)
# separate losses from other metrics
loss_acc_dict = {}
for key in record_acc_dict.keys():
if key[:4] == "loss":
loss_acc_dict[key] = record_acc_dict[key]
# only output the results on the main node
if comm.is_main_process():
total_losses_reduced = sum(loss for loss in loss_acc_dict.values())
self.trainer.storage.put_scalar(
"val_total_loss_val" + self._model_name, total_losses_reduced
)
record_acc_dict = {
"val_" + k + self._model_name: record_acc_dict[k]
for k in record_acc_dict.keys()
}
if len(record_acc_dict) > 1:
self.trainer.storage.put_scalars(**record_acc_dict)
def _get_loss(self, data, model):
if self._model_output == "loss_only":
record_dict = model(data)
elif self._model_output == "loss_proposal":
record_dict, _, _, _ = model(data, branch="val_loss", val_mode=True)
elif self._model_output == "meanteacher":
record_dict, _, _, _, _ = model(data)
metrics_dict = {
k: v.detach().cpu().item() if isinstance(v, torch.Tensor) else float(v)
for k, v in record_dict.items()
}
return metrics_dict
def _write_losses(self, metrics_dict):
# gather metrics among all workers for logging
# This assumes we do DDP-style training, which is currently the only
# supported method in detectron2.
comm.synchronize()
all_metrics_dict = comm.gather(metrics_dict, dst=0)
if comm.is_main_process():
# average the remaining metrics
metrics_dict = {
"val_" + k: np.mean([x[k] for x in all_metrics_dict])
for k in all_metrics_dict[0].keys()
}
total_losses_reduced = sum(loss for loss in metrics_dict.values())
self.trainer.storage.put_scalar("val_total_loss_val", total_losses_reduced)
if len(metrics_dict) > 1:
self.trainer.storage.put_scalars(**metrics_dict)
def _detect_anomaly(self, losses, loss_dict):
if not torch.isfinite(losses).all():
raise FloatingPointError(
"Loss became infinite or NaN at iteration={}!\nloss_dict = {}".format(
self.trainer.iter, loss_dict
)
)
def after_step(self):
next_iter = self.trainer.iter + 1
is_final = next_iter == self.trainer.max_iter
if is_final or (self._period > 0 and next_iter % self._period == 0):
self._do_loss_eval()
@contextmanager
def inference_context(model):
"""
A context where the model is temporarily changed to eval mode,
and restored to its previous mode afterwards.
Args:
model: a torch Module
"""
training_mode = model.training
model.eval()
yield
model.train(training_mode)
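# --- Registration sketch (illustrative only; the trainer and test loader below are
# assumed to come from the surrounding project, not defined in this file) ---
# The hook is meant to be appended to a trainer's hook list so that validation
# losses are logged every `eval_period` iterations, e.g.:
#
#   val_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
#   trainer.register_hooks([
#       LossEvalHook(cfg.TEST.EVAL_PERIOD, trainer.model, val_loader,
#                    model_output="loss_only"),
#   ])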
| adaptive_teacher-main | adapteacher/engine/hooks.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.structures import pairwise_iou
class OpenMatchTrainerProbe:
def __init__(self, cfg):
self.BOX_AP = 0.5
self.NUM_CLASSES = cfg.MODEL.ROI_HEADS.NUM_CLASSES
# self.bbox_stat_list = ['compute_fp_gtoutlier', 'compute_num_box', 'compute_ood_acc']
def bbox_stat(self, unlabel_gt, unlabel_pseudo, name, bbox_stat_list):
stats = {}
sum_gpu_names = []
for metric in bbox_stat_list:
stats_per, sum_gpu_names_per = getattr(
self, metric)(unlabel_gt, unlabel_pseudo, name)
stats.update(stats_per)
sum_gpu_names.extend(sum_gpu_names_per)
return stats, sum_gpu_names
def compute_fp_gtoutlier(self, unlabel_gt, unlabel_pseudo, name):
num_gt_ood_object = 0
num_gt_fp_ood_object = 0
sum_iou = 0.0
sum_gpu_names = []
results = {}
if len(unlabel_gt) != 0:
for gt, pseudo in zip(unlabel_gt, unlabel_pseudo):
# import pdb; pdb. set_trace()
if name == "pred":
pp_boxes = pseudo.pred_boxes
elif name == "pseudo_conf" or name == "pseudo_ood":
# filter predicted ood box when evaluating this metric
pseudo = pseudo[pseudo.gt_classes != -1]
pp_boxes = pseudo.gt_boxes
else:
raise ValueError("Unknown name for probe roi bbox.")
if len(gt) != 0 and len(pseudo) != 0:
max_iou, max_idx = pairwise_iou(
gt.gt_boxes.to('cuda'), pp_boxes).max(1)
ood_idx = (gt.gt_classes == -1)
num_gt_ood_object += ood_idx.sum().item()
num_gt_fp_ood_object += (max_iou[ood_idx]
> self.BOX_AP).sum().item()
sum_iou += max_iou[ood_idx].sum().item()
elif len(gt) != 0 and len(pseudo) == 0:
ood_idx = (gt.gt_classes == -1)
num_gt_ood_object += ood_idx.shape[0]
results = {'Analysis_'+name+'/num_gt_ood_object': num_gt_ood_object,
'Analysis_'+name+'/num_gt_fp_ood_object': num_gt_fp_ood_object,
'Analysis_'+name+'/sum_iou': sum_iou}
sum_gpu_names.extend(list(results.keys()))
return results, sum_gpu_names
def compute_num_box(self, unlabel_gt, unlabel_pseudo, name, processed=False):
num_bbox = 0.0
size_bbox = 0.0
avg_conf = 0.0
# count inlier and outlier boxes for open-set SS-OD
num_bbox_in = 0.0
num_bbox_out = 0.0
num_bg = 0.0
# when ground-truth is missing in unlabeled data
if len(unlabel_gt) == 0:
for pp_roi in unlabel_pseudo:
if name == "pred":
pp_boxes = pp_roi.pred_boxes
pp_classes = pp_roi.pred_classes
pp_scores = pp_roi.scores
elif name == "pseudo_conf" or name == "pseudo_ood":
pp_boxes = pp_roi.gt_boxes
pp_classes = pp_roi.gt_classes
pp_scores = pp_roi.scores
elif name == "gt":
pp_boxes = pp_roi.gt_boxes
pp_classes = pp_roi.gt_classes
else:
raise ValueError("Unknown name for probe roi bbox.")
# all boxes (in + out boxes)
if len(pp_roi) != 0:
# bbox number and size
num_bbox += len(pp_roi)
size_bbox += pp_boxes.area().mean().item()
# average box confidence
if name != "gt":
avg_conf += pp_scores.mean()
else:
num_bbox += 0
size_bbox += torch.tensor(0).cuda()
num_valid_img = len(unlabel_pseudo)
else:
# with ground-truth
num_valid_img = 0
for gt, pp_roi in zip(unlabel_gt, unlabel_pseudo):
if name == "pred":
pp_boxes = pp_roi.pred_boxes
pp_classes = pp_roi.pred_classes
pp_scores = pp_roi.scores
elif name == "pseudo_conf" or name == "pseudo_ood":
# filter out ood pseudo-box when doing analysis
pp_roi = pp_roi[pp_roi.gt_classes != -1]
pp_boxes = pp_roi.gt_boxes
pp_classes = pp_roi.gt_classes
pp_scores = pp_roi.scores
elif name == "gt":
pp_boxes = pp_roi.gt_boxes
pp_classes = pp_roi.gt_classes
else:
raise ValueError("Unknown name for probe roi bbox.")
# all boxes (in + out boxes)
if len(pp_roi) != 0:
# bbox number and size
num_bbox += len(pp_roi)
size_bbox += pp_boxes.area().mean().item()
# average box confidence
if name != "gt":
avg_conf += pp_scores.mean()
else:
num_bbox += 0
size_bbox += torch.tensor(0).cuda()
# in and out class
if name == "gt":
pp_roi_in = pp_roi[pp_classes != -1]
num_bbox_in += len(pp_roi_in)
pp_roi_out = pp_roi[pp_classes == -1]
num_bbox_out += len(pp_roi_out)
num_valid_img += 1
elif name == "pred" or name == "pseudo_conf" or name == "pseudo_ood":
if len(gt.gt_boxes.to('cuda'))>0 and len(pp_boxes) > 0:
max_iou, max_idx = pairwise_iou(gt.gt_boxes.to('cuda'), pp_boxes).max(0)
# the ground-truth label for each pseudo-box
gtclass4pseudo = gt.gt_classes[max_idx]
matchgtbox = max_iou > 0.5
# compute the number of boxes (background, inlier, outlier)
num_bg += (~matchgtbox).sum().item()
num_bbox_in += (gtclass4pseudo[matchgtbox]
!= -1).sum().item()
num_bbox_out += (gtclass4pseudo[matchgtbox]
== -1).sum().item()
num_valid_img += 1
else:
raise ValueError("Unknown name for probe roi bbox.")
box_probe = {}
if processed:
name = name+"processed"
if num_bbox == 0:
return box_probe, []
if num_valid_img > 0:
box_probe["Analysis_" + name + "/Num_bbox"] = num_bbox / \
num_valid_img
box_probe["Analysis_" + name + "/Size_bbox"] = size_bbox / \
num_valid_img
box_probe["Analysis_" + name +
"/Num_bbox_inlier"] = num_bbox_in / num_valid_img
box_probe["Analysis_" + name +
"/Num_bbox_outlier"] = num_bbox_out / num_valid_img
if name != "gt": # prediciton, background number
box_probe["Analysis_" + name + "/Conf"] = avg_conf / \
num_valid_img
box_probe["Analysis_" + name +
"/Num_bbox_background"] = num_bg / num_valid_img
box_probe["Analysis_" + name +
"/background_fp_ratio"] = num_bg / num_bbox
box_probe["Analysis_" + name +
"/background_tp_ratio"] = num_bbox_in / num_bbox
else:
box_probe["Analysis_" + name + "/Num_bbox"] = 0.0
box_probe["Analysis_" + name + "/Size_bbox"] = 0.0
box_probe["Analysis_" + name +
"/Num_bbox_inlier"] = 0.0
box_probe["Analysis_" + name +
"/Num_bbox_outlier"] = 0.0
if name != "gt": # prediciton, background number
box_probe["Analysis_" + name + "/Conf"] = 0.0
box_probe["Analysis_" + name +
"/Num_bbox_background"] = 0.0
box_probe["Analysis_" + name +
"/background_fp_ratio"] = num_bg / num_bbox
box_probe["Analysis_" + name +
"/background_tp_ratio"] = num_bbox_in / num_bbox
return box_probe, []
def compute_ood_acc(self, unlabel_gt, unlabel_pseudo, name, BOX_IOU=0.5):
results = {}
sum_gpu_names = []
if len(unlabel_gt) != 0:
for metric in ['acc_outlier', 'recall_outlier']:
for samples in ['_fg', '_all']:
for fraction_part in ['_nume', '_deno']:
results[metric+samples+fraction_part] = 0.0
for gt, pred in zip(unlabel_gt, unlabel_pseudo):
if name == "pred":
pp_boxes = pred.pred_boxes
pp_ood_scores = pred.ood_scores
elif name == "pseudo_conf" or name == "pseudo_ood":
# assume these outliers are suppressed
pred = pred[pred.gt_classes != -1]
pp_boxes = pred.gt_boxes
pp_ood_scores = pred.ood_scores
else:
raise ValueError("Unknown name for probe roi bbox.")
if len(gt) != 0 and len(pred) != 0:
# find the most overlapped ground-truth box for each pseudo-box
max_iou, max_idx = pairwise_iou(
gt.gt_boxes.to('cuda'), pp_boxes).max(0)
# ignore background instances
find_fg_mask = max_iou > BOX_IOU
if find_fg_mask.sum() > 0:
gt_corres = gt[max_idx].gt_classes.to("cuda")
gt_outlier = (gt_corres[find_fg_mask] == -1)
pred_outlier = pp_ood_scores[find_fg_mask][:, 0] > 0.5
# accuracy of ood detection (foreground)
# acc_outlier_fg = (pred_outlier == gt_outlier).sum() /find_fg_mask.sum()
results['acc_outlier_fg_nume'] += (
pred_outlier == gt_outlier).sum()
results['acc_outlier_fg_deno'] += find_fg_mask.sum()
# recall of ood detection (foreground)
# recall_outlier_fg = (pred_outlier[gt_outlier] == gt_outlier[gt_outlier]).sum() /gt_outlier.sum()
results['recall_outlier_fg_nume'] += (
pred_outlier[gt_outlier] == gt_outlier[gt_outlier]).sum()
results['recall_outlier_fg_deno'] += gt_outlier.sum()
# Regard background gt as outliers
gt_corres = gt[max_idx].gt_classes.to("cuda")
# treat all background gt as outliers
gt_corres[~find_fg_mask] = -1
gt_outlier = gt_corres == -1
pred_outlier = pp_ood_scores[:, 0] > 0.5
# accuracy of ood detection (all)
# acc_outlier_all = (pred_outlier == gt_outlier).sum() /len(pred)
results['acc_outlier_all_nume'] += (
pred_outlier == gt_outlier).sum()
results['acc_outlier_all_deno'] += len(pred)
# recall of ood detection (all)
# recall_outlier_all = (pred_outlier[gt_outlier] == gt_outlier[gt_outlier]).sum() /gt_outlier.sum()
results['recall_outlier_all_nume'] += (
pred_outlier[gt_outlier] == gt_outlier[gt_outlier]).sum()
results['recall_outlier_all_deno'] += gt_outlier.sum()
results = {'Analysis_'+name+'/'+k: v for k, v in results.items()}
sum_gpu_names.extend(list(results.keys()))
return results, sum_gpu_names
import torch
def probe(
cfg,
proposals_roih_unsup_k,
unlabel_data_k,
pesudo_proposals_roih_unsup_k,
record_dict,
):
"""
Probe for research development
"""
# [probe] roi result from weak branch (before pseudo-labeling)
record_roih = probe_roih_bbox(
proposals_roih_unsup_k, cfg.MODEL.ROI_HEADS.NUM_CLASSES, "roih"
)
record_dict.update(record_roih)
# [probe] roi result after pseudo-labeling from weak branch
record_roih_pseudo = probe_roih_bbox(
pesudo_proposals_roih_unsup_k, cfg.MODEL.ROI_HEADS.NUM_CLASSES, "roih_pseudo"
)
record_dict.update(record_roih_pseudo)
return record_dict
def probe_roih_bbox(proposals_roih, num_cls, name=""):
num_bbox = 0.0
size_bbox = 0.0
avg_conf = 0.0
pred_cls_list = []
for pp_roi in proposals_roih:
if name == "roih":
pp_boxes = pp_roi.pred_boxes
pp_classes = pp_roi.pred_classes
pp_scores = pp_roi.scores
elif name == "roih_pseudo":
pp_boxes = pp_roi.gt_boxes
pp_classes = pp_roi.gt_classes
pp_scores = pp_roi.scores
elif name == "gt":
pp_boxes = pp_roi.gt_boxes
pp_classes = pp_roi.gt_classes
else:
raise ValueError(f"Unknown name for probe roi bbox '{name}'")
device = pp_classes.device
if pp_roi:
# bbox number and size
num_bbox += len(pp_roi)
size_bbox += pp_boxes.area().mean()
if name != "gt":
avg_conf += pp_scores.mean()
# ratio of majority class
all_idx, cls_count = torch.unique(pp_classes, return_counts=True)
major_cls_idx = all_idx[torch.argmax(cls_count)]
major_cls_ratio = torch.max(cls_count).float() / pp_classes.numel()
# cls_sum
pred_cls_list.append(pp_classes)
else:
num_bbox += 0
size_bbox += torch.tensor(0).to(device)
major_cls_idx = torch.tensor(0).to(device)
major_cls_ratio = torch.tensor(0).to(device)
# boxes monitor
box_probe = {}
box_probe["bbox_probe_" + name + "/Num_bbox"] = num_bbox / len(proposals_roih)
box_probe["bbox_probe_" + name + "/Size_bbox"] = size_bbox.item() / len(
proposals_roih
)
if name != "gt":
box_probe["bbox_probe_" + name + "/Conf"] = avg_conf / len(proposals_roih)
box_probe["bbox_probe_" + name + "/Ratio_major_cls_idx"] = major_cls_idx.item()
box_probe["bbox_probe_" + name + "/Ratio_major_cls"] = major_cls_ratio.item()
return box_probe
| adaptive_teacher-main | adapteacher/engine/probe.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import time
import logging
import torch
from torch.nn.parallel import DistributedDataParallel
from fvcore.nn.precise_bn import get_bn_modules
import numpy as np
from collections import OrderedDict
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.engine import DefaultTrainer, SimpleTrainer, TrainerBase
from detectron2.engine.train_loop import AMPTrainer
from detectron2.utils.events import EventStorage
from detectron2.evaluation import verify_results, DatasetEvaluators
# from detectron2.evaluation import COCOEvaluator, verify_results, DatasetEvaluators
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.engine import hooks
from detectron2.structures.boxes import Boxes
from detectron2.structures.instances import Instances
from detectron2.utils.env import TORCH_VERSION
from detectron2.data import MetadataCatalog
from adapteacher.data.build import (
build_detection_semisup_train_loader,
build_detection_test_loader,
build_detection_semisup_train_loader_two_crops,
)
from adapteacher.data.dataset_mapper import DatasetMapperTwoCropSeparate
from adapteacher.engine.hooks import LossEvalHook
from adapteacher.modeling.meta_arch.ts_ensemble import EnsembleTSModel
from adapteacher.checkpoint.detection_checkpoint import DetectionTSCheckpointer
from adapteacher.solver.build import build_lr_scheduler
from adapteacher.evaluation import PascalVOCDetectionEvaluator, COCOEvaluator
from .probe import OpenMatchTrainerProbe
import copy
# Supervised-only Trainer
class BaselineTrainer(DefaultTrainer):
def __init__(self, cfg):
"""
Args:
cfg (CfgNode):
Use the custom checkpointer, which loads other backbone models
with matching heuristics.
"""
cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())
model = self.build_model(cfg)
optimizer = self.build_optimizer(cfg, model)
data_loader = self.build_train_loader(cfg)
if comm.get_world_size() > 1:
model = DistributedDataParallel(
model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
)
TrainerBase.__init__(self)
self._trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)(
model, data_loader, optimizer
)
self.scheduler = self.build_lr_scheduler(cfg, optimizer)
self.checkpointer = DetectionCheckpointer(
model,
cfg.OUTPUT_DIR,
optimizer=optimizer,
scheduler=self.scheduler,
)
self.start_iter = 0
self.max_iter = cfg.SOLVER.MAX_ITER
self.cfg = cfg
self.register_hooks(self.build_hooks())
def resume_or_load(self, resume=True):
"""
If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by
a `last_checkpoint` file), resume from the file. Resuming means loading all
available states (e.g. optimizer and scheduler) and updating the iteration counter
from the checkpoint. ``cfg.MODEL.WEIGHTS`` will not be used.
Otherwise, this is considered an independent training run. The method will load model
weights from the file `cfg.MODEL.WEIGHTS` (but will not load other states) and start
from iteration 0.
Args:
resume (bool): whether to do resume or not
"""
checkpoint = self.checkpointer.resume_or_load(
self.cfg.MODEL.WEIGHTS, resume=resume
)
if resume and self.checkpointer.has_checkpoint():
self.start_iter = checkpoint.get("iteration", -1) + 1
# The checkpoint stores the training iteration that just finished, thus we start
# at the next iteration (or iter zero if there's no checkpoint).
if isinstance(self.model, DistributedDataParallel):
# broadcast loaded data/model from the first rank, because other
# machines may not have access to the checkpoint file
if TORCH_VERSION >= (1, 7):
self.model._sync_params_and_buffers()
self.start_iter = comm.all_gather(self.start_iter)[0]
def train_loop(self, start_iter: int, max_iter: int):
"""
Args:
start_iter, max_iter (int): See docs above
"""
logger = logging.getLogger(__name__)
logger.info("Starting training from iteration {}".format(start_iter))
self.iter = self.start_iter = start_iter
self.max_iter = max_iter
with EventStorage(start_iter) as self.storage:
try:
self.before_train()
for self.iter in range(start_iter, max_iter):
self.before_step()
self.run_step()
self.after_step()
except Exception:
logger.exception("Exception during training:")
raise
finally:
self.after_train()
def run_step(self):
self._trainer.iter = self.iter
assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
start = time.perf_counter()
data = next(self._trainer._data_loader_iter)
data_time = time.perf_counter() - start
record_dict, _, _, _ = self.model(data, branch="supervised")
num_gt_bbox = 0.0
for element in data:
num_gt_bbox += len(element["instances"])
num_gt_bbox = num_gt_bbox / len(data)
record_dict["bbox_num/gt_bboxes"] = num_gt_bbox
loss_dict = {}
for key in record_dict.keys():
if key[:4] == "loss" and key[-3:] != "val":
loss_dict[key] = record_dict[key]
losses = sum(loss_dict.values())
metrics_dict = record_dict
metrics_dict["data_time"] = data_time
self._write_metrics(metrics_dict)
self.optimizer.zero_grad()
losses.backward()
self.optimizer.step()
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type == "coco":
evaluator_list.append(COCOEvaluator(
dataset_name, output_dir=output_folder))
elif evaluator_type == "pascal_voc":
return PascalVOCDetectionEvaluator(dataset_name)
elif evaluator_type == "pascal_voc_water":
return PascalVOCDetectionEvaluator(dataset_name, target_classnames=["bicycle", "bird", "car", "cat", "dog", "person"])
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(
dataset_name, evaluator_type
)
)
elif len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
@classmethod
def build_train_loader(cls, cfg):
return build_detection_semisup_train_loader(cfg, mapper=None)
@classmethod
def build_test_loader(cls, cfg, dataset_name):
"""
Returns:
iterable
"""
return build_detection_test_loader(cfg, dataset_name)
def build_hooks(self):
"""
Build a list of default hooks, including timing, evaluation,
checkpointing, lr scheduling, precise BN, writing events.
Returns:
list[HookBase]:
"""
cfg = self.cfg.clone()
cfg.defrost()
cfg.DATALOADER.NUM_WORKERS = 0
ret = [
hooks.IterationTimer(),
hooks.LRScheduler(self.optimizer, self.scheduler),
hooks.PreciseBN(
cfg.TEST.EVAL_PERIOD,
self.model,
self.build_train_loader(cfg),
cfg.TEST.PRECISE_BN.NUM_ITER,
)
if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model)
else None,
]
if comm.is_main_process():
ret.append(
hooks.PeriodicCheckpointer(
self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD
)
)
def test_and_save_results():
self._last_eval_results = self.test(self.cfg, self.model)
return self._last_eval_results
ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))
if comm.is_main_process():
ret.append(hooks.PeriodicWriter(self.build_writers(), period=20))
return ret
def _write_metrics(self, metrics_dict: dict):
"""
Args:
metrics_dict (dict): dict of scalar metrics
"""
metrics_dict = {
k: v.detach().cpu().item() if isinstance(v, torch.Tensor) else float(v)
for k, v in metrics_dict.items()
}
# gather metrics among all workers for logging
# This assumes we do DDP-style training, which is currently the only
# supported method in detectron2.
all_metrics_dict = comm.gather(metrics_dict)
if comm.is_main_process():
if "data_time" in all_metrics_dict[0]:
data_time = np.max([x.pop("data_time")
for x in all_metrics_dict])
self.storage.put_scalar("data_time", data_time)
metrics_dict = {
k: np.mean([x[k] for x in all_metrics_dict])
for k in all_metrics_dict[0].keys()
}
loss_dict = {}
for key in metrics_dict.keys():
if key[:4] == "loss":
loss_dict[key] = metrics_dict[key]
total_losses_reduced = sum(loss for loss in loss_dict.values())
self.storage.put_scalar("total_loss", total_losses_reduced)
if len(metrics_dict) > 1:
self.storage.put_scalars(**metrics_dict)
# Adaptive Teacher Trainer
class ATeacherTrainer(DefaultTrainer):
def __init__(self, cfg):
"""
Args:
cfg (CfgNode):
Use the custom checkpointer, which loads other backbone models
with matching heuristics.
"""
cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())
data_loader = self.build_train_loader(cfg)
# create a student model
model = self.build_model(cfg)
optimizer = self.build_optimizer(cfg, model)
# create a teacher model
model_teacher = self.build_model(cfg)
self.model_teacher = model_teacher
# For training, wrap with DDP. But don't need this for inference.
if comm.get_world_size() > 1:
model = DistributedDataParallel(
model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
)
TrainerBase.__init__(self)
self._trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)(
model, data_loader, optimizer
)
self.scheduler = self.build_lr_scheduler(cfg, optimizer)
# The teacher-student ensemble is used for model saving and loading
ensem_ts_model = EnsembleTSModel(model_teacher, model)
self.checkpointer = DetectionTSCheckpointer(
ensem_ts_model,
cfg.OUTPUT_DIR,
optimizer=optimizer,
scheduler=self.scheduler,
)
self.start_iter = 0
self.max_iter = cfg.SOLVER.MAX_ITER
self.cfg = cfg
self.probe = OpenMatchTrainerProbe(cfg)
self.register_hooks(self.build_hooks())
def resume_or_load(self, resume=True):
"""
If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by
a `last_checkpoint` file), resume from the file. Resuming means loading all
available states (e.g. optimizer and scheduler) and updating the iteration counter
from the checkpoint. ``cfg.MODEL.WEIGHTS`` will not be used.
Otherwise, this is considered an independent training run. The method will load model
weights from the file `cfg.MODEL.WEIGHTS` (but will not load other states) and start
from iteration 0.
Args:
resume (bool): whether to do resume or not
"""
checkpoint = self.checkpointer.resume_or_load(
self.cfg.MODEL.WEIGHTS, resume=resume
)
if resume and self.checkpointer.has_checkpoint():
self.start_iter = checkpoint.get("iteration", -1) + 1
# The checkpoint stores the training iteration that just finished, thus we start
# at the next iteration (or iter zero if there's no checkpoint).
if isinstance(self.model, DistributedDataParallel):
# broadcast loaded data/model from the first rank, because other
# machines may not have access to the checkpoint file
if TORCH_VERSION >= (1, 7):
self.model._sync_params_and_buffers()
self.start_iter = comm.all_gather(self.start_iter)[0]
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type == "coco":
evaluator_list.append(COCOEvaluator(
dataset_name, output_dir=output_folder))
elif evaluator_type == "pascal_voc":
return PascalVOCDetectionEvaluator(dataset_name)
elif evaluator_type == "pascal_voc_water":
return PascalVOCDetectionEvaluator(dataset_name, target_classnames=["bicycle", "bird", "car", "cat", "dog", "person"])
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(
dataset_name, evaluator_type
)
)
elif len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
@classmethod
def build_train_loader(cls, cfg):
mapper = DatasetMapperTwoCropSeparate(cfg, True)
return build_detection_semisup_train_loader_two_crops(cfg, mapper)
@classmethod
def build_lr_scheduler(cls, cfg, optimizer):
return build_lr_scheduler(cfg, optimizer)
def train(self):
self.train_loop(self.start_iter, self.max_iter)
if hasattr(self, "_last_eval_results") and comm.is_main_process():
verify_results(self.cfg, self._last_eval_results)
return self._last_eval_results
def train_loop(self, start_iter: int, max_iter: int):
logger = logging.getLogger(__name__)
logger.info("Starting training from iteration {}".format(start_iter))
self.iter = self.start_iter = start_iter
self.max_iter = max_iter
with EventStorage(start_iter) as self.storage:
try:
self.before_train()
for self.iter in range(start_iter, max_iter):
self.before_step()
self.run_step_full_semisup()
self.after_step()
except Exception:
logger.exception("Exception during training:")
raise
finally:
self.after_train()
# =====================================================
# ================== Pseudo-labeling ==================
# =====================================================
def threshold_bbox(self, proposal_bbox_inst, thres=0.7, proposal_type="roih"):
if proposal_type == "rpn":
valid_map = proposal_bbox_inst.objectness_logits > thres
# create instances containing boxes and gt_classes
image_shape = proposal_bbox_inst.image_size
new_proposal_inst = Instances(image_shape)
# create box
new_bbox_loc = proposal_bbox_inst.proposal_boxes.tensor[valid_map, :]
new_boxes = Boxes(new_bbox_loc)
# add boxes to instances
new_proposal_inst.gt_boxes = new_boxes
new_proposal_inst.objectness_logits = proposal_bbox_inst.objectness_logits[
valid_map
]
elif proposal_type == "roih":
valid_map = proposal_bbox_inst.scores > thres
# create instances containing boxes and gt_classes
image_shape = proposal_bbox_inst.image_size
new_proposal_inst = Instances(image_shape)
# create box
new_bbox_loc = proposal_bbox_inst.pred_boxes.tensor[valid_map, :]
new_boxes = Boxes(new_bbox_loc)
# add boxes to instances
new_proposal_inst.gt_boxes = new_boxes
new_proposal_inst.gt_classes = proposal_bbox_inst.pred_classes[valid_map]
new_proposal_inst.scores = proposal_bbox_inst.scores[valid_map]
return new_proposal_inst
def process_pseudo_label(
self, proposals_rpn_unsup_k, cur_threshold, proposal_type, psedo_label_method=""
):
list_instances = []
num_proposal_output = 0.0
for proposal_bbox_inst in proposals_rpn_unsup_k:
# thresholding
if psedo_label_method == "thresholding":
proposal_bbox_inst = self.threshold_bbox(
proposal_bbox_inst, thres=cur_threshold, proposal_type=proposal_type
)
else:
raise ValueError("Unkown pseudo label boxes methods")
num_proposal_output += len(proposal_bbox_inst)
list_instances.append(proposal_bbox_inst)
num_proposal_output = num_proposal_output / len(proposals_rpn_unsup_k)
return list_instances, num_proposal_output
def remove_label(self, label_data):
for label_datum in label_data:
if "instances" in label_datum.keys():
del label_datum["instances"]
return label_data
def add_label(self, unlabled_data, label):
for unlabel_datum, lab_inst in zip(unlabled_data, label):
unlabel_datum["instances"] = lab_inst
return unlabled_data
def get_label(self, label_data):
label_list = []
for label_datum in label_data:
if "instances" in label_datum.keys():
label_list.append(copy.deepcopy(label_datum["instances"]))
return label_list
# def get_label_test(self, label_data):
# label_list = []
# for label_datum in label_data:
# if "instances" in label_datum.keys():
# label_list.append(label_datum["instances"])
# =====================================================
# =================== Training Flow ===================
# =====================================================
def run_step_full_semisup(self):
self._trainer.iter = self.iter
assert self.model.training, "[UBTeacherTrainer] model was changed to eval mode!"
start = time.perf_counter()
data = next(self._trainer._data_loader_iter)
# data_q and data_k from different augmentations (q:strong, k:weak)
# label_strong, label_weak, unlabeled_strong, unlabeled_weak
label_data_q, label_data_k, unlabel_data_q, unlabel_data_k = data
data_time = time.perf_counter() - start
# burn-in stage (supervised training with labeled data)
if self.iter < self.cfg.SEMISUPNET.BURN_UP_STEP:
# input both strongly and weakly augmented labeled data into the model
label_data_q.extend(label_data_k)
record_dict, _, _, _ = self.model(
label_data_q, branch="supervised")
# weight losses
loss_dict = {}
for key in record_dict.keys():
if key[:4] == "loss":
loss_dict[key] = record_dict[key] * 1
losses = sum(loss_dict.values())
else:
if self.iter == self.cfg.SEMISUPNET.BURN_UP_STEP:
# initialize the teacher by copying the whole student model
self._update_teacher_model(keep_rate=0.00)
# self.model.build_discriminator()
elif (
self.iter - self.cfg.SEMISUPNET.BURN_UP_STEP
) % self.cfg.SEMISUPNET.TEACHER_UPDATE_ITER == 0:
self._update_teacher_model(
keep_rate=self.cfg.SEMISUPNET.EMA_KEEP_RATE)
record_dict = {}
######################## For probe #################################
# import pdb; pdb. set_trace()
gt_unlabel_k = self.get_label(unlabel_data_k)
# gt_unlabel_q = self.get_label_test(unlabel_data_q)
# 0. remove unlabeled data labels
unlabel_data_q = self.remove_label(unlabel_data_q)
unlabel_data_k = self.remove_label(unlabel_data_k)
# 1. generate the pseudo-label using teacher model
with torch.no_grad():
(
_,
proposals_rpn_unsup_k,
proposals_roih_unsup_k,
_,
) = self.model_teacher(unlabel_data_k, branch="unsup_data_weak")
######################## For probe #################################
# import pdb; pdb. set_trace()
# probe_metrics = ['compute_fp_gtoutlier', 'compute_num_box']
# probe_metrics = ['compute_num_box']
# analysis_pred, _ = self.probe.compute_num_box(gt_unlabel_k,proposals_roih_unsup_k,'pred')
# record_dict.update(analysis_pred)
######################## For probe END #################################
# 2. Pseudo-labeling
cur_threshold = self.cfg.SEMISUPNET.BBOX_THRESHOLD
joint_proposal_dict = {}
joint_proposal_dict["proposals_rpn"] = proposals_rpn_unsup_k
# Process pseudo labels by thresholding
(
pesudo_proposals_rpn_unsup_k,
nun_pseudo_bbox_rpn,
) = self.process_pseudo_label(
proposals_rpn_unsup_k, cur_threshold, "rpn", "thresholding"
)
# analysis_pred, _ = self.probe.compute_num_box(gt_unlabel_k,pesudo_proposals_rpn_unsup_k,'pred',True)
# record_dict.update(analysis_pred)
joint_proposal_dict["proposals_pseudo_rpn"] = pesudo_proposals_rpn_unsup_k
# Pseudo-labeling for ROI head (bbox location/objectness)
pesudo_proposals_roih_unsup_k, _ = self.process_pseudo_label(
proposals_roih_unsup_k, cur_threshold, "roih", "thresholding"
)
joint_proposal_dict["proposals_pseudo_roih"] = pesudo_proposals_roih_unsup_k
# 3. add pseudo-label to unlabeled data
unlabel_data_q = self.add_label(
unlabel_data_q, joint_proposal_dict["proposals_pseudo_roih"]
)
unlabel_data_k = self.add_label(
unlabel_data_k, joint_proposal_dict["proposals_pseudo_roih"]
)
all_label_data = label_data_q + label_data_k
all_unlabel_data = unlabel_data_q
# 4. input both strongly and weakly augmented labeled data into student model
record_all_label_data, _, _, _ = self.model(
all_label_data, branch="supervised"
)
record_dict.update(record_all_label_data)
# 5. input strongly augmented unlabeled data into model
record_all_unlabel_data, _, _, _ = self.model(
all_unlabel_data, branch="supervised_target"
)
new_record_all_unlabel_data = {}
for key in record_all_unlabel_data.keys():
new_record_all_unlabel_data[key + "_pseudo"] = record_all_unlabel_data[
key
]
record_dict.update(new_record_all_unlabel_data)
# 6. input weakly labeled data (source) and weakly unlabeled data (target) to student model
# mark the target-domain data by adding keys with the "_unlabeled" suffix
for i_index in range(len(unlabel_data_k)):
# unlabel_data_item = {}
for k, v in unlabel_data_k[i_index].items():
# label_data_k[i_index][k + "_unlabeled"] = v
label_data_k[i_index][k + "_unlabeled"] = v
# unlabel_data_k[i_index] = unlabel_data_item
all_domain_data = label_data_k
# all_domain_data = label_data_k + unlabel_data_k
record_all_domain_data, _, _, _ = self.model(all_domain_data, branch="domain")
record_dict.update(record_all_domain_data)
# weight losses
loss_dict = {}
for key in record_dict.keys():
if key.startswith("loss"):
if key == "loss_rpn_loc_pseudo" or key == "loss_box_reg_pseudo":
# pseudo bbox regression <- 0
loss_dict[key] = record_dict[key] * 0
elif key[-6:] == "pseudo": # unsupervised loss
loss_dict[key] = (
record_dict[key] *
self.cfg.SEMISUPNET.UNSUP_LOSS_WEIGHT
)
elif (
key == "loss_D_img_s" or key == "loss_D_img_t"
): # set weight for discriminator
# import pdb
# pdb.set_trace()
loss_dict[key] = record_dict[key] * self.cfg.SEMISUPNET.DIS_LOSS_WEIGHT #Need to modify defaults and yaml
else: # supervised loss
loss_dict[key] = record_dict[key] * 1
losses = sum(loss_dict.values())
metrics_dict = record_dict
metrics_dict["data_time"] = data_time
self._write_metrics(metrics_dict)
self.optimizer.zero_grad()
losses.backward()
self.optimizer.step()
def _write_metrics(self, metrics_dict: dict):
metrics_dict = {
k: v.detach().cpu().item() if isinstance(v, torch.Tensor) else float(v)
for k, v in metrics_dict.items()
}
# gather metrics among all workers for logging
# This assumes we do DDP-style training, which is currently the only
# supported method in detectron2.
all_metrics_dict = comm.gather(metrics_dict)
# all_hg_dict = comm.gather(hg_dict)
if comm.is_main_process():
if "data_time" in all_metrics_dict[0]:
# data_time among workers can have high variance. The actual latency
# caused by data_time is the maximum among workers.
data_time = np.max([x.pop("data_time")
for x in all_metrics_dict])
self.storage.put_scalar("data_time", data_time)
# average the remaining metrics
metrics_dict = {
k: np.mean([x[k] for x in all_metrics_dict])
for k in all_metrics_dict[0].keys()
}
# collect the loss terms
loss_dict = {}
for key in metrics_dict.keys():
if key[:4] == "loss":
loss_dict[key] = metrics_dict[key]
total_losses_reduced = sum(loss for loss in loss_dict.values())
self.storage.put_scalar("total_loss", total_losses_reduced)
if len(metrics_dict) > 1:
self.storage.put_scalars(**metrics_dict)
@torch.no_grad()
def _update_teacher_model(self, keep_rate=0.9996):
if comm.get_world_size() > 1:
student_model_dict = {
key[7:]: value for key, value in self.model.state_dict().items()
}
else:
student_model_dict = self.model.state_dict()
new_teacher_dict = OrderedDict()
for key, value in self.model_teacher.state_dict().items():
if key in student_model_dict.keys():
new_teacher_dict[key] = (
student_model_dict[key] *
(1 - keep_rate) + value * keep_rate
)
else:
raise Exception("{} is not found in student model".format(key))
self.model_teacher.load_state_dict(new_teacher_dict)
@torch.no_grad()
def _copy_main_model(self):
# initialize all parameters
if comm.get_world_size() > 1:
rename_model_dict = {
key[7:]: value for key, value in self.model.state_dict().items()
}
self.model_teacher.load_state_dict(rename_model_dict)
else:
self.model_teacher.load_state_dict(self.model.state_dict())
@classmethod
def build_test_loader(cls, cfg, dataset_name):
return build_detection_test_loader(cfg, dataset_name)
def build_hooks(self):
cfg = self.cfg.clone()
cfg.defrost()
cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN
ret = [
hooks.IterationTimer(),
hooks.LRScheduler(self.optimizer, self.scheduler),
hooks.PreciseBN(
# Run at the same freq as (but before) evaluation.
cfg.TEST.EVAL_PERIOD,
self.model,
# Build a new data loader to not affect training
self.build_train_loader(cfg),
cfg.TEST.PRECISE_BN.NUM_ITER,
)
if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model)
else None,
]
# Do PreciseBN before checkpointer, because it updates the model and needs to
# be saved by checkpointer.
# This is not always the best: if checkpointing has a different frequency,
# some checkpoints may have more precise statistics than others.
if comm.is_main_process():
ret.append(
hooks.PeriodicCheckpointer(
self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD
)
)
def test_and_save_results_student():
self._last_eval_results_student = self.test(self.cfg, self.model)
_last_eval_results_student = {
k + "_student": self._last_eval_results_student[k]
for k in self._last_eval_results_student.keys()
}
return _last_eval_results_student
def test_and_save_results_teacher():
self._last_eval_results_teacher = self.test(
self.cfg, self.model_teacher)
return self._last_eval_results_teacher
ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD,
test_and_save_results_student))
ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD,
test_and_save_results_teacher))
if comm.is_main_process():
# run writers in the end, so that evaluation metrics are written
ret.append(hooks.PeriodicWriter(self.build_writers(), period=20))
return ret
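# --- Driver sketch (illustrative only; a typical train_net-style entry point,
# not part of this file) ---
#   trainer = ATeacherTrainer(cfg)
#   trainer.resume_or_load(resume=False)
#   trainer.train()
#
# Note on the teacher update: after the burn-in stage the teacher weights are an
# exponential moving average of the student,
#   teacher <- keep_rate * teacher + (1 - keep_rate) * student,
# with keep_rate = cfg.SEMISUPNET.EMA_KEEP_RATE (see _update_teacher_model above).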
| adaptive_teacher-main | adapteacher/engine/trainer.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# from d2go.config import CfgNode as CN
def add_aut_config(cfg):
"""
Add discriminator-related config options used by the adaptive teacher runners.
"""
_C = cfg
# Newly added options for the discriminator
_C.UNBIASEDTEACHER.DIS_LOSS_WEIGHT = 0.1
_C.UNBIASEDTEACHER.DIS_TYPE = "concate" #["concate","p2","multi"]
_C.UNBIASEDTEACHER.ISAUG = "Yes"
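# Usage sketch (illustrative only): these options extend a runner's default config;
# in this project they are attached via BaseUnbiasedTeacherRunner.get_default_cfg()
# (see prod_lib/runner/runner.py) and read back as cfg.UNBIASEDTEACHER.DIS_LOSS_WEIGHT,
# cfg.UNBIASEDTEACHER.DIS_TYPE and cfg.UNBIASEDTEACHER.ISAUG.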
| adaptive_teacher-main | prod_lib/config/defaults.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import os
from collections import OrderedDict
from functools import lru_cache
import d2go.utils.abnormal_checker as abnormal_checker
import detectron2.utils.comm as comm
from d2go.config import CONFIG_SCALING_METHOD_REGISTRY, temp_defrost
from d2go.data.dataset_mappers import D2GoDatasetMapper, build_dataset_mapper
from d2go.data.transforms.build import build_transform_gen
from d2go.data.utils import maybe_subsample_n_images
from d2go.modeling import build_model, kmeans_anchors, model_ema
from d2go.runner import GeneralizedRCNNRunner
from d2go.utils.flop_calculator import add_print_flops_callback
from d2go.utils.misc import get_tensorboard_log_dir
from d2go.utils.helper import TensorboardXWriter, D2Trainer
from detectron2.checkpoint import PeriodicCheckpointer
from detectron2.engine import hooks
from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter
from torch.nn.parallel import DataParallel, DistributedDataParallel
from detectron2.evaluation import (
DatasetEvaluators,
)
from detectron2.data import (
MetadataCatalog,
)
from ..evaluation import (
COCOEvaluator,
PascalVOCDetectionEvaluator,
)
from d2go.projects.unbiased_teacher.checkpoint import EnsembleTSModel
from ..config.defaults import add_aut_config
# from ..config.defaults import add_ut_config
# from ..data.build import (
# build_detection_semisup_train_loader_two_crops,
# build_uru_detection_semisup_train_loader,
# inject_uru_dataset,
# )
from d2go.projects.unbiased_teacher.data.build import (
build_detection_semisup_train_loader_two_crops,
build_uru_detection_semisup_train_loader,
)
from d2go.projects.unbiased_teacher.runner.runner import UnbiasedTeacherRunner
from d2go.projects.unbiased_teacher.data.dataset_mapper import DatasetMapperTwoCropSeparate # noqa
from ..data import builtin # noqa; for registering COCO unlabel dataset
from d2go.projects.unbiased_teacher.engine.trainer import UnbiasedTeacherTrainer
from d2go.projects.unbiased_teacher.modeling.meta_arch.rcnn import TwoStagePseudoLabGeneralizedRCNN # noqa
from d2go.projects.unbiased_teacher.modeling.proposal_generator.rpn import PseudoLabRPN # noqa
from d2go.projects.unbiased_teacher.modeling.roi_heads.roi_heads import StandardROIHeadsPseudoLab # noqa
from d2go.projects.unbiased_teacher.solver.build import ut_build_lr_scheduler
# For domain-adaptive (DA) object detection
from ..engine.trainer import DAobjTrainer
from ..modeling.meta_arch.daobj_rcnn import DAobjTwoStagePseudoLabGeneralizedRCNN # noqa
# For the VGG model architecture
from ..modeling.meta_arch.vgg import build_vgg_backbone,build_vgg_fpn_backbone # noqa
ALL_TB_WRITERS = []
@lru_cache()
def _get_tbx_writer(log_dir):
ret = TensorboardXWriter(log_dir)
ALL_TB_WRITERS.append(ret)
return ret
class BaseUnbiasedTeacherRunner(UnbiasedTeacherRunner):
def get_default_cfg(self):
cfg = super().get_default_cfg()
add_aut_config(cfg)
# add_pointrend_config(cfg)
# cfg = CN(cfg) # upgrade from D2's CfgNode to D2Go's CfgNode
return cfg
@staticmethod
def get_evaluator(cfg, dataset_name, output_folder):
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type in ["coco"]:
# D2 is in the process of reducing the use of cfg.
dataset_evaluators = COCOEvaluator(
dataset_name,
output_dir=output_folder,
kpt_oks_sigmas=cfg.TEST.KEYPOINT_OKS_SIGMAS,
)
elif evaluator_type in ["pascal_voc"]:
dataset_evaluators = PascalVOCDetectionEvaluator(dataset_name)
elif evaluator_type in ["pascal_voc_water"]:
dataset_evaluators = PascalVOCDetectionEvaluator(dataset_name, target_classnames=["bicycle", "bird", "car", "cat", "dog", "person"])
else:
dataset_evaluators = D2Trainer.build_evaluator(
cfg, dataset_name, output_folder
)
if not isinstance(dataset_evaluators, DatasetEvaluators):
dataset_evaluators = DatasetEvaluators([dataset_evaluators])
return dataset_evaluators
# class DAobjUnbiasedTeacherRunner(UnbiasedTeacherRunner):
class DAobjUnbiasedTeacherRunner(BaseUnbiasedTeacherRunner):
def get_default_cfg(self):
cfg = super().get_default_cfg()
# add_aut_config(cfg)
# add_pointrend_config(cfg)
# cfg = CN(cfg) # upgrade from D2's CfgNode to D2Go's CfgNode
return cfg
def build_model(self, cfg, eval_only=False):
"""
Build both Student and Teacher models
Student: regular model
Teacher: model that is updated by EMA
"""
# build_model might modify the cfg, thus clone
cfg = cfg.clone()
model = build_model(cfg)
model_teacher = build_model(cfg)
if cfg.MODEL.FROZEN_LAYER_REG_EXP:
raise NotImplementedError()
if cfg.QUANTIZATION.QAT.ENABLED:
raise NotImplementedError()
if eval_only:
raise NotImplementedError()
return EnsembleTSModel(model_teacher, model)
def do_train(self, cfg, model, resume):
# NOTE: d2go's train_net applies a DDP wrapper by default;
# we need to strip it away and only wrap model_student with DDP
if isinstance(model, (DistributedDataParallel, DataParallel)):
model = model.module
model_teacher, model_student = model.model_teacher, model.model_student
if comm.get_world_size() > 1:
model_student = DistributedDataParallel(
model_student,
device_ids=None
if cfg.MODEL.DEVICE == "cpu"
else [comm.get_local_rank()],
broadcast_buffers=False,
find_unused_parameters=cfg.MODEL.DDP_FIND_UNUSED_PARAMETERS,
)
add_print_flops_callback(cfg, model_student, disable_after_callback=True)
optimizer = self.build_optimizer(cfg, model_student)
scheduler = self.build_lr_scheduler(cfg, optimizer)
checkpointer = self.build_checkpointer(
cfg,
model,
save_dir=cfg.OUTPUT_DIR,
optimizer=optimizer,
scheduler=scheduler,
)
checkpoint = checkpointer.resume_or_load(
cfg.MODEL.WEIGHTS, resume=resume or cfg.UNBIASEDTEACHER.RESUME_FROM_ANOTHER
)
start_iter = (
checkpoint.get("iteration", -1)
if resume
and checkpointer.has_checkpoint()
or cfg.UNBIASEDTEACHER.RESUME_FROM_ANOTHER
else -1
)
# The checkpoint stores the training iteration that just finished, thus we start
# at the next iteration (or iter zero if there's no checkpoint).
start_iter += 1
max_iter = cfg.SOLVER.MAX_ITER
periodic_checkpointer = PeriodicCheckpointer(
checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
)
# If resuming from a pre-trained checkpoint, we modify BURN_IN_STEP
# so that the weights of the Student are copied to the Teacher
# at the first iteration when training starts
if cfg.UNBIASEDTEACHER.RESUME_FROM_ANOTHER:
cfg.defrost()
cfg.UNBIASEDTEACHER.BURN_IN_STEP = start_iter
cfg.freeze()
data_loader = self.build_detection_train_loader(cfg)
def _get_model_with_abnormal_checker(model):
if not cfg.ABNORMAL_CHECKER.ENABLED:
return model
tbx_writer = _get_tbx_writer(get_tensorboard_log_dir(cfg.OUTPUT_DIR))
writers = abnormal_checker.get_writers(cfg, tbx_writer)
checker = abnormal_checker.AbnormalLossChecker(start_iter, writers)
ret = abnormal_checker.AbnormalLossCheckerWrapper(model, checker)
return ret
trainer = DAobjTrainer(
cfg,
_get_model_with_abnormal_checker(model_student),
_get_model_with_abnormal_checker(model_teacher),
data_loader,
optimizer,
)
trainer_hooks = [
hooks.IterationTimer(),
self._create_after_step_hook(
cfg, model_student, optimizer, scheduler, periodic_checkpointer
),
hooks.EvalHook(
cfg.TEST.EVAL_PERIOD,
lambda: self.do_test(cfg, model, train_iter=trainer.iter),
),
kmeans_anchors.compute_kmeans_anchors_hook(self, cfg),
self._create_qat_hook(cfg) if cfg.QUANTIZATION.QAT.ENABLED else None,
]
if comm.is_main_process():
tbx_writer = _get_tbx_writer(get_tensorboard_log_dir(cfg.OUTPUT_DIR))
writers = [
CommonMetricPrinter(max_iter),
JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
tbx_writer,
]
trainer_hooks.append(
hooks.PeriodicWriter(writers, period=cfg.WRITER_PERIOD)
)
trainer.register_hooks(trainer_hooks)
trainer.train(start_iter, max_iter)
trained_cfg = cfg.clone()
with temp_defrost(trained_cfg):
trained_cfg.MODEL.WEIGHTS = checkpointer.get_checkpoint_file()
return {"model_final": trained_cfg}
| adaptive_teacher-main | prod_lib/runner/runner.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# from .runner import SemiSupSegRunner, SemiSupHandTrackingRunner # noqa
from .runner import BaseUnbiasedTeacherRunner # noqa
from .runner import DAobjUnbiasedTeacherRunner # noqa
| adaptive_teacher-main | prod_lib/runner/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch.nn as nn
import copy
import torch
from typing import Union, List, Dict, Any, cast
from detectron2.modeling.backbone import (
ResNet,
Backbone,
build_resnet_backbone,
BACKBONE_REGISTRY
)
from detectron2.modeling.backbone.fpn import FPN, LastLevelMaxPool, LastLevelP6P7
def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential:
layers: List[nn.Module] = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
v = cast(int, v)
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfgs: Dict[str, List[Union[str, int]]] = {
'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
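# In the cfg lists above, an integer is the output channel count of a 3x3 convolution
# (followed by BatchNorm/ReLU when batch_norm=True) and 'M' inserts a 2x2 max-pool,
# mirroring the torchvision VGG configuration strings (see make_layers above).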
class vgg_backbone(Backbone):
"""
Backbone (bottom-up) for VGG-16.
Hierarchy:
The torchvision-style layer stack from `make_layers` is split into five
stages, registered as child modules "vgg0" ... "vgg4".
Output features:
The outputs from each stage, i.e. "vgg0" ... "vgg4".
"""
def __init__(self, cfg):
super().__init__()
self.vgg = make_layers(cfgs['vgg16'],batch_norm=True)
self._initialize_weights()
# self.stage_names_index = {'vgg1':3, 'vgg2':8 , 'vgg3':15, 'vgg4':22, 'vgg5':29}
_out_feature_channels = [64, 128, 256, 512, 512]
_out_feature_strides = [2, 4, 8, 16, 32]
# stages, shape_specs = build_fbnet(
# cfg,
# name="trunk",
# in_channels=cfg.MODEL.FBNET_V2.STEM_IN_CHANNELS
# )
# nn.Sequential(*list(self.vgg.features._modules.values())[:14])
self.stages = [nn.Sequential(*list(self.vgg._modules.values())[0:7]),\
nn.Sequential(*list(self.vgg._modules.values())[7:14]),\
nn.Sequential(*list(self.vgg._modules.values())[14:24]),\
nn.Sequential(*list(self.vgg._modules.values())[24:34]),\
nn.Sequential(*list(self.vgg._modules.values())[34:]),]
self._out_feature_channels = {}
self._out_feature_strides = {}
self._stage_names = []
for i, stage in enumerate(self.stages):
name = "vgg{}".format(i)
self.add_module(name, stage)
self._stage_names.append(name)
self._out_feature_channels[name] = _out_feature_channels[i]
self._out_feature_strides[name] = _out_feature_strides[i]
self._out_features = self._stage_names
del self.vgg
def forward(self, x):
features = {}
for name, stage in zip(self._stage_names, self.stages):
x = stage(x)
# if name in self._out_features:
# outputs[name] = x
features[name] = x
# import pdb
# pdb.set_trace()
return features
def _initialize_weights(self) -> None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
@BACKBONE_REGISTRY.register() # already registered in the baseline model
def build_vgg_backbone(cfg, _):
return vgg_backbone(cfg)
@BACKBONE_REGISTRY.register() # already registered in the baseline model
def build_vgg_fpn_backbone(cfg, _):
# backbone = FPN(
# bottom_up=build_vgg_backbone(cfg),
# in_features=cfg.MODEL.FPN.IN_FEATURES,
# out_channels=cfg.MODEL.FPN.OUT_CHANNELS,
# norm=cfg.MODEL.FPN.NORM,
# top_block=LastLevelMaxPool(),
# )
bottom_up = vgg_backbone(cfg)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelMaxPool(),
# fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
# return backbone
return backbone
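# --- Config sketch (illustrative only; the FPN.IN_FEATURES choice is an assumption) ---
# To select these backbones, point the config at the registered builders, e.g.:
#   cfg.MODEL.BACKBONE.NAME = "build_vgg_backbone"        # plain VGG features
#   # or
#   cfg.MODEL.BACKBONE.NAME = "build_vgg_fpn_backbone"    # VGG + FPN
#   cfg.MODEL.FPN.IN_FEATURES = ["vgg2", "vgg3", "vgg4"]  # hypothetical selection
# The stage names exposed by vgg_backbone are "vgg0" ... "vgg4" with strides
# 2, 4, 8, 16, 32 (see _out_feature_strides above).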
| adaptive_teacher-main | prod_lib/modeling/vgg.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from detectron2.data.detection_utils import convert_image_to_rgb
from detectron2.modeling import META_ARCH_REGISTRY, GeneralizedRCNN
from detectron2.utils.events import get_event_storage
import logging
from typing import Dict, Tuple, List, Optional
from collections import OrderedDict
from detectron2.config import configurable
# from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
# from detectron2.modeling.meta_arch.rcnn import GeneralizedRCNN
from detectron2.modeling.proposal_generator import build_proposal_generator
from detectron2.modeling.backbone import build_backbone, Backbone
from detectron2.modeling.roi_heads import build_roi_heads
from detectron2.utils.events import get_event_storage
from detectron2.structures import ImageList
############### Image discriminator ##############
class FCDiscriminator_img(nn.Module):
def __init__(self, num_classes, ndf1=256, ndf2=128):
super(FCDiscriminator_img, self).__init__()
self.conv1 = nn.Conv2d(num_classes, ndf1, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(ndf1, ndf2, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(ndf2, ndf2, kernel_size=3, padding=1)
self.classifier = nn.Conv2d(ndf2, 1, kernel_size=3, padding=1)
self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
def forward(self, x):
x = self.conv1(x)
x = self.leaky_relu(x)
x = self.conv2(x)
x = self.leaky_relu(x)
x = self.conv3(x)
x = self.leaky_relu(x)
x = self.classifier(x)
return x
#################################
################ Gradient reverse function
class GradReverse(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output.neg()
def grad_reverse(x):
return GradReverse.apply(x)
#######################
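# --- hedged sketch (not part of the original repository code) ---
# Illustrates how grad_reverse and FCDiscriminator_img combine into an
# image-level adversarial loss: the forward pass is an identity, while the
# backward pass negates the gradient so the backbone is pushed toward
# domain-confusing features. The tensor shape below is an arbitrary assumption.
def _example_grad_reverse_loss():
    feat = torch.randn(2, 256, 32, 64, requires_grad=True)  # stand-in backbone feature map
    discriminator = FCDiscriminator_img(num_classes=256)
    logits = discriminator(grad_reverse(feat))  # per-location domain logits
    target = torch.zeros_like(logits)  # 0 = "source" label, as in the domain branch
    loss = F.binary_cross_entropy_with_logits(logits, target)
    loss.backward()  # gradients reaching `feat` have their sign flipped by GradReverse
    return loss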
@META_ARCH_REGISTRY.register()
class DAobjTwoStagePseudoLabGeneralizedRCNN(GeneralizedRCNN):
@configurable
def __init__(
self,
*,
backbone: Backbone,
proposal_generator: nn.Module,
roi_heads: nn.Module,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
input_format: Optional[str] = None,
vis_period: int = 0,
dis_type: str,
# dis_loss_weight: float = 0,
):
"""
Args:
backbone: a backbone module, must follow detectron2's backbone interface
proposal_generator: a module that generates proposals using backbone features
roi_heads: a ROI head that performs per-region computation
pixel_mean, pixel_std: list or tuple with #channels element, representing
the per-channel mean and std to be used to normalize the input image
input_format: describe the meaning of channels of input. Needed by visualization
vis_period: the period to run visualization. Set to 0 to disable.
"""
super(GeneralizedRCNN, self).__init__()
self.backbone = backbone
self.proposal_generator = proposal_generator
self.roi_heads = roi_heads
self.input_format = input_format
self.vis_period = vis_period
if vis_period > 0:
assert input_format is not None, "input_format is required for visualization!"
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
assert (
self.pixel_mean.shape == self.pixel_std.shape
), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
# @yujheli: you may need to build your discriminator here
self.dis_type = dis_type
# self.D_img = FCDiscriminator_img(self.backbone._out_feature_channels['res4']) # Need to know the channel
if self.dis_type == "multi":
self.D_img_dict = {}
for k,v in self.backbone._out_feature_channels.items():
self.D_img_dict[k] = FCDiscriminator_img(v)
self.add_module("D_"+k, self.D_img_dict[k])
else:
self.D_img = FCDiscriminator_img(self.backbone._out_feature_channels[self.dis_type]) # Need to know the channel
# self.bceLoss_func = nn.BCEWithLogitsLoss()
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
return {
"backbone": backbone,
"proposal_generator": build_proposal_generator(cfg, backbone.output_shape()),
"roi_heads": build_roi_heads(cfg, backbone.output_shape()),
"input_format": cfg.INPUT.FORMAT,
"vis_period": cfg.VIS_PERIOD,
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
"dis_type": cfg.UNBIASEDTEACHER.DIS_TYPE,
# "dis_loss_ratio": cfg.xxx,
}
def preprocess_image_train(self, batched_inputs: List[Dict[str, torch.Tensor]]):
"""
Normalize, pad and batch the input images.
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
images_t = [x["image_unlabeled"].to(self.device) for x in batched_inputs]
images_t = [(x - self.pixel_mean) / self.pixel_std for x in images_t]
images_t = ImageList.from_tensors(images_t, self.backbone.size_divisibility)
return images, images_t
def forward(
self, batched_inputs, branch="supervised", given_proposals=None, val_mode=False
):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances (optional): groundtruth :class:`Instances`
* proposals (optional): :class:`Instances`, precomputed proposals.
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
list[dict]:
Each dict is the output for one input image.
The dict contains one key "instances" whose value is a :class:`Instances`.
The :class:`Instances` object has the following keys:
"pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints"
"""
        if (not self.training) and (not val_mode):  # inference-only path (not training, not computing val loss)
return self.inference(batched_inputs)
if branch == "domain":
source_label = 0
target_label = 1
# images = self.preprocess_image(batched_inputs)
images_s, images_t = self.preprocess_image_train(batched_inputs)
features = self.backbone(images_s.tensor)
# import pdb
# pdb.set_trace()
if self.dis_type == "multi":
loss_D_img_s = 0
for k, v in features.items():
features_s = grad_reverse(v)
D_img_out_s = self.D_img_dict[k](features_s)
loss_D_img_s += F.binary_cross_entropy_with_logits(D_img_out_s, torch.FloatTensor(D_img_out_s.data.size()).fill_(source_label).to(self.device))
loss_D_img_s /= len(features)
# features_s = grad_reverse(torch.cat((features['p2'],features['p3'],features['p4'],features['p5']),dim=1))
else:
features_s = grad_reverse(features[self.dis_type])
D_img_out_s = self.D_img(features_s)
loss_D_img_s = F.binary_cross_entropy_with_logits(D_img_out_s, torch.FloatTensor(D_img_out_s.data.size()).fill_(source_label).to(self.device))
features_t = self.backbone(images_t.tensor)
if self.dis_type == "multi":
loss_D_img_t = 0
for k, v in features_t.items():
features_tt = grad_reverse(v)
D_img_out_t = self.D_img_dict[k](features_tt)
loss_D_img_t += F.binary_cross_entropy_with_logits(D_img_out_t, torch.FloatTensor(D_img_out_t.data.size()).fill_(target_label).to(self.device))
loss_D_img_t /= len(features_t)
else:
features_t = grad_reverse(features_t[self.dis_type])
# features_t = grad_reverse(features_t['p2'])
D_img_out_t = self.D_img(features_t)
loss_D_img_t = F.binary_cross_entropy_with_logits(D_img_out_t, torch.FloatTensor(D_img_out_t.data.size()).fill_(target_label).to(self.device))
# import pdb
# pdb.set_trace()
losses = {}
losses["loss_D_img_s"] = loss_D_img_s
losses["loss_D_img_t"] = loss_D_img_t
return losses, [], [], None
images = self.preprocess_image(batched_inputs)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
features = self.backbone(images.tensor)
# TODO: remove the usage of if else here. This needs to be re-organized
if branch.startswith("supervised"):
# Region proposal network
proposals_rpn, proposal_losses = self.proposal_generator(
images, features, gt_instances
)
# roi_head lower branch
_, detector_losses = self.roi_heads(
images,
features,
proposals_rpn,
compute_loss=True,
targets=gt_instances,
branch=branch,
)
# visualization
if self.vis_period > 0:
storage = get_event_storage()
if storage.iter % self.vis_period == 0:
self.visualize_training(batched_inputs, proposals_rpn, branch)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses, [], [], None
elif branch == "unsup_data_weak":
"""
unsupervised weak branch: input image without any ground-truth label; output proposals of rpn and roi-head
"""
# Region proposal network
proposals_rpn, _ = self.proposal_generator(
images, features, None, compute_loss=False
)
# roi_head lower branch (keep this for further production)
# notice that we do not use any target in ROI head to do inference!
proposals_roih, ROI_predictions = self.roi_heads(
images,
features,
proposals_rpn,
targets=None,
compute_loss=False,
branch=branch,
)
# if self.vis_period > 0:
# storage = get_event_storage()
# if storage.iter % self.vis_period == 0:
# self.visualize_training(batched_inputs, proposals_rpn, branch)
return {}, proposals_rpn, proposals_roih, ROI_predictions
elif branch == "unsup_data_strong":
raise NotImplementedError()
elif branch == "val_loss":
raise NotImplementedError()
def visualize_training(self, batched_inputs, proposals, branch=""):
"""
        This function differs from the original one:
- it adds "branch" to the `vis_name`.
A function used to visualize images and proposals. It shows ground truth
bounding boxes on the original image and up to 20 predicted object
proposals on the original image. Users can implement different
visualization functions for different models.
Args:
batched_inputs (list): a list that contains input to the model.
proposals (list): a list that contains predicted proposals. Both
batched_inputs and proposals should have the same length.
"""
from detectron2.utils.visualizer import Visualizer
storage = get_event_storage()
max_vis_prop = 20
for input, prop in zip(batched_inputs, proposals):
img = input["image"]
img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)
v_gt = Visualizer(img, None)
v_gt = v_gt.overlay_instances(boxes=input["instances"].gt_boxes)
anno_img = v_gt.get_image()
box_size = min(len(prop.proposal_boxes), max_vis_prop)
v_pred = Visualizer(img, None)
v_pred = v_pred.overlay_instances(
boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy()
)
prop_img = v_pred.get_image()
vis_img = np.concatenate((anno_img, prop_img), axis=1)
vis_img = vis_img.transpose(2, 0, 1)
vis_name = (
"Left: GT bounding boxes "
+ branch
+ "; Right: Predicted proposals "
+ branch
)
storage.put_image(vis_name, vis_img)
break # only visualize one image in a batch
| adaptive_teacher-main | prod_lib/modeling/daobj_rcnn.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from .coco_evaluation import COCOEvaluator
from .pascal_voc_evaluation import PascalVOCDetectionEvaluator
# __all__ = [k for k in globals().keys() if not k.startswith("_")]
__all__ = [
"COCOEvaluator",
"PascalVOCDetectionEvaluator"
]
| adaptive_teacher-main | prod_lib/evaluation/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import copy
import io
import itertools
import json
import logging
import numpy as np
import os
import pickle
from collections import OrderedDict
import pycocotools.mask as mask_util
import torch
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tabulate import tabulate
import detectron2.utils.comm as comm
from detectron2.config import CfgNode
from detectron2.data import MetadataCatalog
from detectron2.data.datasets.coco import convert_to_coco_dict
from detectron2.evaluation.fast_eval_api import COCOeval_opt
from detectron2.structures import Boxes, BoxMode, pairwise_iou
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import create_small_table
from detectron2.evaluation import DatasetEvaluator
from iopath.common.file_io import file_lock
logger = logging.getLogger(__name__)
def convert_to_coco_json(dataset_name, output_file, allow_cached=True):
"""
Converts dataset into COCO format and saves it to a json file.
dataset_name must be registered in DatasetCatalog and in detectron2's standard format.
Args:
dataset_name:
reference from the config file to the catalogs
must be registered in DatasetCatalog and in detectron2's standard format
output_file: path of json file that will be saved to
allow_cached: if json file is already present then skip conversion
"""
# TODO: The dataset or the conversion script *may* change,
# a checksum would be useful for validating the cached data
PathManager.mkdirs(os.path.dirname(output_file))
with file_lock(output_file):
if PathManager.exists(output_file) and allow_cached:
logger.warning(
f"Using previously cached COCO format annotations at '{output_file}'. "
"You need to clear the cache file if your dataset has been modified."
)
else:
logger.info(f"Converting annotations of dataset '{dataset_name}' to COCO format ...)")
coco_dict = convert_to_coco_dict(dataset_name)
logger.info(f"Caching COCO format annotations at '{output_file}' ...")
tmp_file = output_file #+ ".tmp"
# with PathManager.open(tmp_file, "w") as f:
# json.dump(coco_dict, f)
# shutil.move(tmp_file, output_file)
with PathManager.open(tmp_file, "w") as f:
json.dump(coco_dict, f)
class COCOEvaluator(DatasetEvaluator):
"""
Evaluate AR for object proposals, AP for instance detection/segmentation, AP
for keypoint detection outputs using COCO's metrics.
See http://cocodataset.org/#detection-eval and
http://cocodataset.org/#keypoints-eval to understand its metrics.
The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means
the metric cannot be computed (e.g. due to no predictions made).
In addition to COCO, this evaluator is able to support any bounding box detection,
instance segmentation, or keypoint detection dataset.
"""
def __init__(
self,
dataset_name,
tasks=None,
distributed=True,
output_dir=None,
*,
use_fast_impl=True,
kpt_oks_sigmas=(),
):
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
It must have either the following corresponding metadata:
"json_file": the path to the COCO format annotation
Or it must be in detectron2's standard dataset format
so it can be converted to COCO format automatically.
tasks (tuple[str]): tasks that can be evaluated under the given
configuration. A task is one of "bbox", "segm", "keypoints".
By default, will infer this automatically from predictions.
distributed (True): if True, will collect results from all ranks and run evaluation
in the main process.
Otherwise, will only evaluate the results in the current process.
output_dir (str): optional, an output directory to dump all
results predicted on the dataset. The dump contains two files:
1. "instances_predictions.pth" a file that can be loaded with `torch.load` and
contains all the results in the format they are produced by the model.
2. "coco_instances_results.json" a json file in COCO's result format.
use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
Although the results should be very close to the official implementation in COCO
API, it is still recommended to compute results with the official API for use in
papers. The faster implementation also uses more RAM.
kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS.
See http://cocodataset.org/#keypoints-eval
When empty, it will use the defaults in COCO.
Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
"""
self._logger = logging.getLogger(__name__)
self._distributed = distributed
self._output_dir = output_dir
self._use_fast_impl = use_fast_impl
if tasks is not None and isinstance(tasks, CfgNode):
kpt_oks_sigmas = (
tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas
)
self._logger.warn(
"COCO Evaluator instantiated using config, this is deprecated behavior."
" Please pass in explicit arguments instead."
)
            self._tasks = None  # Inferring it from predictions should be better
else:
self._tasks = tasks
self._cpu_device = torch.device("cpu")
self._metadata = MetadataCatalog.get(dataset_name)
if not hasattr(self._metadata, "json_file"):
self._logger.info(
f"'{dataset_name}' is not registered by `register_coco_instances`."
" Therefore trying to convert it to COCO format ..."
)
cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json")
self._metadata.json_file = cache_path
convert_to_coco_json(dataset_name, cache_path)
json_file = PathManager.get_local_path(self._metadata.json_file)
with contextlib.redirect_stdout(io.StringIO()):
self._coco_api = COCO(json_file)
# Test set json files do not contain annotations (evaluation must be
# performed using the COCO evaluation server).
self._do_evaluation = "annotations" in self._coco_api.dataset
if self._do_evaluation:
self._kpt_oks_sigmas = kpt_oks_sigmas
def reset(self):
self._predictions = []
def process(self, inputs, outputs):
"""
Args:
inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
It is a list of dict. Each dict corresponds to an image and
contains keys like "height", "width", "file_name", "image_id".
outputs: the outputs of a COCO model. It is a list of dicts with key
"instances" that contains :class:`Instances`.
"""
for input, output in zip(inputs, outputs):
prediction = {"image_id": input["image_id"]}
if "instances" in output:
instances = output["instances"].to(self._cpu_device)
prediction["instances"] = instances_to_coco_json(instances, input["image_id"])
if "proposals" in output:
prediction["proposals"] = output["proposals"].to(self._cpu_device)
if len(prediction) > 1:
self._predictions.append(prediction)
def evaluate(self, img_ids=None):
"""
Args:
img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset
"""
if self._distributed:
comm.synchronize()
predictions = comm.gather(self._predictions, dst=0)
predictions = list(itertools.chain(*predictions))
if not comm.is_main_process():
return {}
else:
predictions = self._predictions
if len(predictions) == 0:
self._logger.warning("[COCOEvaluator] Did not receive valid predictions.")
return {}
if self._output_dir:
PathManager.mkdirs(self._output_dir)
file_path = os.path.join(self._output_dir, "instances_predictions.pth")
with PathManager.open(file_path, "wb") as f:
torch.save(predictions, f)
self._results = OrderedDict()
if "proposals" in predictions[0]:
self._eval_box_proposals(predictions)
if "instances" in predictions[0]:
self._eval_predictions(predictions, img_ids=img_ids)
# Copy so the caller can do whatever with results
return copy.deepcopy(self._results)
def _tasks_from_predictions(self, predictions):
"""
Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions.
"""
tasks = {"bbox"}
for pred in predictions:
if "segmentation" in pred:
tasks.add("segm")
if "keypoints" in pred:
tasks.add("keypoints")
return sorted(tasks)
def _eval_predictions(self, predictions, img_ids=None):
"""
Evaluate predictions. Fill self._results with the metrics of the tasks.
"""
self._logger.info("Preparing results for COCO format ...")
coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
tasks = self._tasks or self._tasks_from_predictions(coco_results)
# unmap the category ids for COCO
if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id
all_contiguous_ids = list(dataset_id_to_contiguous_id.values())
num_classes = len(all_contiguous_ids)
assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1
reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}
for result in coco_results:
category_id = result["category_id"]
assert category_id < num_classes, (
f"A prediction has class={category_id}, "
f"but the dataset only has {num_classes} classes and "
f"predicted class id should be in [0, {num_classes - 1}]."
)
result["category_id"] = reverse_id_mapping[category_id]
if self._output_dir:
file_path = os.path.join(self._output_dir, "coco_instances_results.json")
self._logger.info("Saving results to {}".format(file_path))
with PathManager.open(file_path, "w") as f:
f.write(json.dumps(coco_results))
f.flush()
if not self._do_evaluation:
self._logger.info("Annotations are not available for evaluation.")
return
self._logger.info(
"Evaluating predictions with {} COCO API...".format(
"unofficial" if self._use_fast_impl else "official"
)
)
for task in sorted(tasks):
assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!"
coco_eval = (
_evaluate_predictions_on_coco(
self._coco_api,
coco_results,
task,
kpt_oks_sigmas=self._kpt_oks_sigmas,
use_fast_impl=self._use_fast_impl,
img_ids=img_ids,
)
if len(coco_results) > 0
else None # cocoapi does not handle empty results very well
)
res = self._derive_coco_results(
coco_eval, task, class_names=self._metadata.get("thing_classes")
)
self._results[task] = res
def _eval_box_proposals(self, predictions):
"""
Evaluate the box proposals in predictions.
Fill self._results with the metrics for "box_proposals" task.
"""
if self._output_dir:
# Saving generated box proposals to file.
# Predicted box_proposals are in XYXY_ABS mode.
bbox_mode = BoxMode.XYXY_ABS.value
ids, boxes, objectness_logits = [], [], []
for prediction in predictions:
ids.append(prediction["image_id"])
boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy())
objectness_logits.append(prediction["proposals"].objectness_logits.numpy())
proposal_data = {
"boxes": boxes,
"objectness_logits": objectness_logits,
"ids": ids,
"bbox_mode": bbox_mode,
}
with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f:
pickle.dump(proposal_data, f)
if not self._do_evaluation:
self._logger.info("Annotations are not available for evaluation.")
return
self._logger.info("Evaluating bbox proposals ...")
res = {}
areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
for limit in [100, 1000]:
for area, suffix in areas.items():
stats = _evaluate_box_proposals(predictions, self._coco_api, area=area, limit=limit)
key = "AR{}@{:d}".format(suffix, limit)
res[key] = float(stats["ar"].item() * 100)
self._logger.info("Proposal metrics: \n" + create_small_table(res))
self._results["box_proposals"] = res
def _derive_coco_results(self, coco_eval, iou_type, class_names=None):
"""
Derive the desired score numbers from summarized COCOeval.
Args:
coco_eval (None or COCOEval): None represents no predictions from model.
iou_type (str):
class_names (None or list[str]): if provided, will use it to predict
per-category AP.
Returns:
a dict of {metric name: score}
"""
metrics = {
"bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
"segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
"keypoints": ["AP", "AP50", "AP75", "APm", "APl"],
}[iou_type]
if coco_eval is None:
self._logger.warn("No predictions from the model!")
return {metric: float("nan") for metric in metrics}
# the standard metrics
results = {
metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan")
for idx, metric in enumerate(metrics)
}
self._logger.info(
"Evaluation results for {}: \n".format(iou_type) + create_small_table(results)
)
if not np.isfinite(sum(results.values())):
self._logger.info("Some metrics cannot be computed and is shown as NaN.")
if class_names is None or len(class_names) <= 1:
return results
# Compute per-category AP
# from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa
precisions = coco_eval.eval["precision"]
# precision has dims (iou, recall, cls, area range, max dets)
assert len(class_names) == precisions.shape[2]
results_per_category = []
for idx, name in enumerate(class_names):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
ap = np.mean(precision) if precision.size else float("nan")
results_per_category.append(("{}".format(name), float(ap * 100)))
# tabulate it
N_COLS = min(6, len(results_per_category) * 2)
results_flatten = list(itertools.chain(*results_per_category))
results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
table = tabulate(
results_2d,
tablefmt="pipe",
floatfmt=".3f",
headers=["category", "AP"] * (N_COLS // 2),
numalign="left",
)
self._logger.info("Per-category {} AP: \n".format(iou_type) + table)
# results.update({"AP-" + name: ap for name, ap in results_per_category})
results_per_category_AP50 = []
for idx, name in enumerate(class_names):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
t = np.where(.5 == coco_eval.params.iouThrs)[0]
precisions_50 = precisions[t]
precisions_50 = precisions_50[:, :, idx, 0, -1]
precisions_50 = precisions_50[precisions_50 > -1]
ap = np.mean(precisions_50) if precisions_50.size else float("nan")
results_per_category_AP50.append(("{}".format(name), float(ap * 100)))
# tabulate it
N_COLS = min(6, len(results_per_category_AP50) * 2)
results_flatten = list(itertools.chain(*results_per_category_AP50))
results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
table = tabulate(
results_2d,
tablefmt="pipe",
floatfmt=".3f",
headers=["category", "AP50"] * (N_COLS // 2),
numalign="left",
)
self._logger.info("Per-category {} AP50: \n".format(iou_type) + table)
results.update({"AP50-" + name: ap for name, ap in results_per_category_AP50})
return results
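# --- hedged usage sketch (not part of the original repository code) ---
# Typical wiring with detectron2's evaluation loop; the dataset name and
# output directory below are placeholders.
def _example_run_coco_evaluation(cfg, model):
    from detectron2.data import build_detection_test_loader
    from detectron2.evaluation import inference_on_dataset

    evaluator = COCOEvaluator("coco_2017_val", output_dir="./eval_out")
    data_loader = build_detection_test_loader(cfg, "coco_2017_val")
    results = inference_on_dataset(model, data_loader, evaluator)
    return results  # e.g. results["bbox"]["AP50"]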
def instances_to_coco_json(instances, img_id):
"""
Dump an "Instances" object to a COCO-format json that's used for evaluation.
Args:
instances (Instances):
img_id (int): the image id
Returns:
list[dict]: list of json annotations in COCO format.
"""
num_instance = len(instances)
if num_instance == 0:
return []
boxes = instances.pred_boxes.tensor.numpy()
boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
boxes = boxes.tolist()
scores = instances.scores.tolist()
classes = instances.pred_classes.tolist()
has_mask = instances.has("pred_masks")
if has_mask:
# use RLE to encode the masks, because they are too large and takes memory
# since this evaluator stores outputs of the entire dataset
rles = [
mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0]
for mask in instances.pred_masks
]
for rle in rles:
# "counts" is an array encoded by mask_util as a byte-stream. Python3's
# json writer which always produces strings cannot serialize a bytestream
# unless you decode it. Thankfully, utf-8 works out (which is also what
# the pycocotools/_mask.pyx does).
rle["counts"] = rle["counts"].decode("utf-8")
has_keypoints = instances.has("pred_keypoints")
if has_keypoints:
keypoints = instances.pred_keypoints
results = []
for k in range(num_instance):
result = {
"image_id": img_id,
"category_id": classes[k],
"bbox": boxes[k],
"score": scores[k],
}
if has_mask:
result["segmentation"] = rles[k]
if has_keypoints:
# In COCO annotations,
# keypoints coordinates are pixel indices.
# However our predictions are floating point coordinates.
# Therefore we subtract 0.5 to be consistent with the annotation format.
# This is the inverse of data loading logic in `datasets/coco.py`.
keypoints[k][:, :2] -= 0.5
result["keypoints"] = keypoints[k].flatten().tolist()
results.append(result)
return results
# inspired from Detectron:
# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa
def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None):
"""
Evaluate detection proposal recall metrics. This function is a much
faster alternative to the official COCO API recall evaluation code. However,
it produces slightly different results.
"""
# Record max overlap value for each gt box
# Return vector of overlap values
areas = {
"all": 0,
"small": 1,
"medium": 2,
"large": 3,
"96-128": 4,
"128-256": 5,
"256-512": 6,
"512-inf": 7,
}
area_ranges = [
[0 ** 2, 1e5 ** 2], # all
[0 ** 2, 32 ** 2], # small
[32 ** 2, 96 ** 2], # medium
[96 ** 2, 1e5 ** 2], # large
[96 ** 2, 128 ** 2], # 96-128
[128 ** 2, 256 ** 2], # 128-256
[256 ** 2, 512 ** 2], # 256-512
[512 ** 2, 1e5 ** 2],
] # 512-inf
assert area in areas, "Unknown area range: {}".format(area)
area_range = area_ranges[areas[area]]
gt_overlaps = []
num_pos = 0
for prediction_dict in dataset_predictions:
predictions = prediction_dict["proposals"]
# sort predictions in descending order
# TODO maybe remove this and make it explicit in the documentation
inds = predictions.objectness_logits.sort(descending=True)[1]
predictions = predictions[inds]
ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"])
anno = coco_api.loadAnns(ann_ids)
gt_boxes = [
BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
for obj in anno
if obj["iscrowd"] == 0
]
gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes
gt_boxes = Boxes(gt_boxes)
gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])
if len(gt_boxes) == 0 or len(predictions) == 0:
continue
valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
gt_boxes = gt_boxes[valid_gt_inds]
num_pos += len(gt_boxes)
if len(gt_boxes) == 0:
continue
if limit is not None and len(predictions) > limit:
predictions = predictions[:limit]
overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes)
_gt_overlaps = torch.zeros(len(gt_boxes))
for j in range(min(len(predictions), len(gt_boxes))):
# find which proposal box maximally covers each gt box
# and get the iou amount of coverage for each gt box
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
# find which gt box is 'best' covered (i.e. 'best' = most iou)
gt_ovr, gt_ind = max_overlaps.max(dim=0)
assert gt_ovr >= 0
# find the proposal box that covers the best covered gt box
box_ind = argmax_overlaps[gt_ind]
# record the iou coverage of this gt box
_gt_overlaps[j] = overlaps[box_ind, gt_ind]
assert _gt_overlaps[j] == gt_ovr
# mark the proposal box and the gt box as used
overlaps[box_ind, :] = -1
overlaps[:, gt_ind] = -1
# append recorded iou coverage level
gt_overlaps.append(_gt_overlaps)
gt_overlaps = (
torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32)
)
gt_overlaps, _ = torch.sort(gt_overlaps)
if thresholds is None:
step = 0.05
thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
recalls = torch.zeros_like(thresholds)
# compute recall for each iou threshold
for i, t in enumerate(thresholds):
recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
# ar = 2 * np.trapz(recalls, thresholds)
ar = recalls.mean()
return {
"ar": ar,
"recalls": recalls,
"thresholds": thresholds,
"gt_overlaps": gt_overlaps,
"num_pos": num_pos,
}
def _evaluate_predictions_on_coco(
coco_gt, coco_results, iou_type, kpt_oks_sigmas=None, use_fast_impl=True, img_ids=None
):
"""
Evaluate the coco results using COCOEval API.
"""
assert len(coco_results) > 0
if iou_type == "segm":
coco_results = copy.deepcopy(coco_results)
# When evaluating mask AP, if the results contain bbox, cocoapi will
# use the box area as the area of the instance, instead of the mask area.
# This leads to a different definition of small/medium/large.
# We remove the bbox field to let mask AP use mask area.
for c in coco_results:
c.pop("bbox", None)
coco_dt = coco_gt.loadRes(coco_results)
coco_eval = (COCOeval_opt if use_fast_impl else COCOeval)(coco_gt, coco_dt, iou_type)
if img_ids is not None:
coco_eval.params.imgIds = img_ids
if iou_type == "keypoints":
# Use the COCO default keypoint OKS sigmas unless overrides are specified
if kpt_oks_sigmas:
assert hasattr(coco_eval.params, "kpt_oks_sigmas"), "pycocotools is too old!"
coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas)
# COCOAPI requires every detection and every gt to have keypoints, so
# we just take the first entry from both
num_keypoints_dt = len(coco_results[0]["keypoints"]) // 3
num_keypoints_gt = len(next(iter(coco_gt.anns.values()))["keypoints"]) // 3
num_keypoints_oks = len(coco_eval.params.kpt_oks_sigmas)
assert num_keypoints_oks == num_keypoints_dt == num_keypoints_gt, (
f"[COCOEvaluator] Prediction contain {num_keypoints_dt} keypoints. "
f"Ground truth contains {num_keypoints_gt} keypoints. "
f"The length of cfg.TEST.KEYPOINT_OKS_SIGMAS is {num_keypoints_oks}. "
"They have to agree with each other. For meaning of OKS, please refer to "
"http://cocodataset.org/#keypoints-eval."
)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return coco_eval
| adaptive_teacher-main | prod_lib/evaluation/coco_evaluation.py |
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
import os
import tempfile
import xml.etree.ElementTree as ET
from collections import OrderedDict, defaultdict
from functools import lru_cache
import torch
from detectron2.data import MetadataCatalog
from detectron2.utils import comm
from detectron2.utils.file_io import PathManager
from detectron2.evaluation import DatasetEvaluator
class PascalVOCDetectionEvaluator(DatasetEvaluator):
"""
Evaluate Pascal VOC style AP for Pascal VOC dataset.
It contains a synchronization, therefore has to be called from all ranks.
Note that the concept of AP can be implemented in different ways and may not
produce identical results. This class mimics the implementation of the official
Pascal VOC Matlab API, and should produce similar but not identical results to the
official API.
"""
def __init__(self, dataset_name, target_classnames=None):
"""
Args:
dataset_name (str): name of the dataset, e.g., "voc_2007_test"
"""
self._dataset_name = dataset_name
meta = MetadataCatalog.get(dataset_name)
# Too many tiny files, download all to local for speed.
annotation_dir_local = PathManager.get_local_path(
os.path.join(meta.dirname, "Annotations/")
)
self._anno_file_template = os.path.join(annotation_dir_local, "{}.xml")
self._image_set_path = os.path.join(meta.dirname, "ImageSets", "Main", meta.split + ".txt")
self._class_names = meta.thing_classes
assert meta.year in [2007, 2012], meta.year
self._is_2007 = meta.year == 2007
self._cpu_device = torch.device("cpu")
self._logger = logging.getLogger(__name__)
        if target_classnames is None:
self.target_classnames = self._class_names
else:
self.target_classnames = target_classnames
def reset(self):
self._predictions = defaultdict(list) # class name -> list of prediction strings
def process(self, inputs, outputs):
for input, output in zip(inputs, outputs):
image_id = input["image_id"]
instances = output["instances"].to(self._cpu_device)
boxes = instances.pred_boxes.tensor.numpy()
scores = instances.scores.tolist()
classes = instances.pred_classes.tolist()
for box, score, cls in zip(boxes, scores, classes):
xmin, ymin, xmax, ymax = box
# The inverse of data loading logic in `datasets/pascal_voc.py`
xmin += 1
ymin += 1
self._predictions[cls].append(
f"{image_id} {score:.3f} {xmin:.1f} {ymin:.1f} {xmax:.1f} {ymax:.1f}"
)
def evaluate(self):
"""
Returns:
dict: has a key "segm", whose value is a dict of "AP", "AP50", and "AP75".
"""
all_predictions = comm.gather(self._predictions, dst=0)
if not comm.is_main_process():
return
predictions = defaultdict(list)
for predictions_per_rank in all_predictions:
for clsid, lines in predictions_per_rank.items():
predictions[clsid].extend(lines)
del all_predictions
self._logger.info(
"Evaluating {} using {} metric. "
"Note that results do not use the official Matlab API.".format(
self._dataset_name, 2007 if self._is_2007 else 2012
)
)
with tempfile.TemporaryDirectory(prefix="pascal_voc_eval_") as dirname:
res_file_template = os.path.join(dirname, "{}.txt")
aps = defaultdict(list) # iou -> ap per class
for cls_id, cls_name in enumerate(self._class_names):
if cls_name not in self.target_classnames:
continue
lines = predictions.get(cls_id, [""])
with open(res_file_template.format(cls_name), "w") as f:
f.write("\n".join(lines))
for thresh in range(50, 100, 5):
rec, prec, ap = voc_eval(
res_file_template,
self._anno_file_template,
self._image_set_path,
cls_name,
ovthresh=thresh / 100.0,
use_07_metric=self._is_2007,
)
aps[thresh].append(ap * 100)
ret = OrderedDict()
mAP = {iou: np.mean(x) for iou, x in aps.items()}
ret["bbox"] = {"AP": np.mean(list(mAP.values())), "AP50": mAP[50], "AP75": mAP[75]}
        # Add per-class AP50 entries
for idx, name in enumerate(self.target_classnames):
ret["bbox"].update({"AP50-" + name: aps[50][idx]})
return ret
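# --- hedged usage sketch (not part of the original repository code) ---
# The evaluator only needs a dataset registered in VOC style (with `dirname`,
# `split`, and `year` metadata); the dataset name and class names below are
# placeholders. Passing target_classnames restricts the reported per-class
# AP50 entries.
def _example_voc_evaluator():
    evaluator = PascalVOCDetectionEvaluator(
        "voc_2007_test", target_classnames=["person", "car"]
    )
    evaluator.reset()
    # evaluator.process(inputs, outputs) is called once per batch, then
    # evaluator.evaluate() returns {"bbox": {"AP": ..., "AP50": ..., "AP75": ...}}.
    return evaluator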
##############################################################################
#
# Below code is modified from
# https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/voc_eval.py
# --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Bharath Hariharan
# --------------------------------------------------------
"""Python implementation of the PASCAL VOC devkit's AP evaluation code."""
@lru_cache(maxsize=None)
def parse_rec(filename):
"""Parse a PASCAL VOC xml file."""
with PathManager.open(filename) as f:
tree = ET.parse(f)
objects = []
for obj in tree.findall("object"):
obj_struct = {}
obj_struct["name"] = obj.find("name").text
obj_struct["pose"] = obj.find("pose").text
obj_struct["truncated"] = int(obj.find("truncated").text)
obj_struct["difficult"] = int(obj.find("difficult").text)
bbox = obj.find("bndbox")
obj_struct["bbox"] = [
int(bbox.find("xmin").text),
int(bbox.find("ymin").text),
int(bbox.find("xmax").text),
int(bbox.find("ymax").text),
]
objects.append(obj_struct)
return objects
def voc_ap(rec, prec, use_07_metric=False):
"""Compute VOC AP given precision and recall. If use_07_metric is true, uses
the VOC 07 11-point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.0
for t in np.arange(0.0, 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.0
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.0], rec, [1.0]))
mpre = np.concatenate(([0.0], prec, [0.0]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
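# --- hedged numeric check (not part of the original repository code) ---
# Tiny hand-made precision/recall curve run through voc_ap; with
# use_07_metric=True the 11-point interpolation is used instead of the exact
# area under the interpolated PR curve.
def _example_voc_ap():
    rec = np.array([0.1, 0.4, 0.7, 1.0])
    prec = np.array([1.0, 0.8, 0.6, 0.5])
    return voc_ap(rec, prec, use_07_metric=False), voc_ap(rec, prec, use_07_metric=True)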
def voc_eval(detpath, annopath, imagesetfile, classname, ovthresh=0.5, use_07_metric=False):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# first load gt
# read list of images
with PathManager.open(imagesetfile, "r") as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
# load annots
recs = {}
for imagename in imagenames:
recs[imagename] = parse_rec(annopath.format(imagename))
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj["name"] == classname]
bbox = np.array([x["bbox"] for x in R])
        difficult = np.array([x["difficult"] for x in R]).astype(bool)  # np.bool is removed in NumPy >= 1.24
        # difficult = np.array([False for x in R]).astype(bool) # treat all "difficult" as GT
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {"bbox": bbox, "difficult": difficult, "det": det}
# read dets
detfile = detpath.format(classname)
with open(detfile, "r") as f:
lines = f.readlines()
splitlines = [x.strip().split(" ") for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines]).reshape(-1, 4)
# sort by confidence
sorted_ind = np.argsort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R["bbox"].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
ih = np.maximum(iymax - iymin + 1.0, 0.0)
inters = iw * ih
# union
uni = (
(bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0)
+ (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0)
- inters
)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R["difficult"][jmax]:
if not R["det"][jmax]:
tp[d] = 1.0
R["det"][jmax] = 1
else:
fp[d] = 1.0
else:
fp[d] = 1.0
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap
| adaptive_teacher-main | prod_lib/evaluation/pascal_voc_evaluation.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import contextlib
import io
import logging
import os
import json
from detectron2.data import DatasetCatalog, MetadataCatalog
from d2go.data.utils import CallFuncWithJsonFile
from detectron2.utils.file_io import PathManager
from fvcore.common.timer import Timer
from detectron2.data.datasets.pascal_voc import register_pascal_voc
from detectron2.data.datasets.builtin_meta import _get_builtin_metadata
from .cityscapes_foggy import load_cityscapes_instances
logger = logging.getLogger(__name__)
_SPLITS_COCO_FORMAT = {}
_SPLITS_COCO_FORMAT["coco"] = {
"coco_2017_unlabel": (
"memcache_manifold://mobile_vision_dataset/tree/coco_unlabel2017",
"memcache_manifold://mobile_vision_dataset/tree/coco_unlabel2017/coco_jsons/image_info_unlabeled2017.json",
),
"goi_v5_unlabel": (
"memcache_manifold://portal_ai_data/tree/goi_v5/train",
"memcache_manifold://mobile_vision_dataset/tree/goi/v5/coco_jsons/openimages_v5_train_unlabel.json",
),
}
def register_coco_unlabel():
for _, splits_per_dataset in _SPLITS_COCO_FORMAT.items():
for key, (image_root, json_file) in splits_per_dataset.items():
meta = {}
register_coco_unlabel_instances(key, meta, json_file, image_root)
def register_coco_unlabel_instances(name, metadata, json_file, image_root):
"""
Register a dataset in COCO's json annotation format for
instance detection, instance segmentation and keypoint detection.
(i.e., Type 1 and 2 in http://cocodataset.org/#format-data.
`instances*.json` and `person_keypoints*.json` in the dataset).
This is an example of how to register a new dataset.
You can do something similar to this function, to register new datasets.
Args:
name (str): the name that identifies a dataset, e.g. "coco_2014_train".
metadata (dict): extra metadata associated with this dataset. You can
leave it as an empty dict.
json_file (str): path to the json instance annotation file.
image_root (str or path-like): directory which contains all the images.
"""
assert isinstance(name, str), name
assert isinstance(json_file, (str, os.PathLike)), json_file
assert isinstance(image_root, (str, os.PathLike)), image_root
# 1. register a function which returns dicts
DatasetCatalog.register(
name, lambda: load_coco_unlabel_json(json_file, image_root, name)
)
# 2. Optionally, add metadata about this dataset,
# since they might be useful in evaluation, visualization or logging
MetadataCatalog.get(name).set(
json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata
)
def load_coco_unlabel_json(
json_file, image_root, dataset_name=None, extra_annotation_keys=None
):
"""
Load a json file with COCO's instances annotation format.
Currently supports instance detection, instance segmentation,
and person keypoints annotations.
Args:
json_file (str): full path to the json file in COCO instances annotation format.
image_root (str or path-like): the directory where the images in this json file exists.
dataset_name (str): the name of the dataset (e.g., coco_2017_train).
If provided, this function will also put "thing_classes" into
the metadata associated with this dataset.
extra_annotation_keys (list[str]): list of per-annotation keys that should also be
loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints",
"category_id", "segmentation"). The values for these keys will be returned as-is.
For example, the densepose annotations are loaded in this way.
Returns:
list[dict]: a list of dicts in Detectron2 standard dataset dicts format. (See
`Using Custom Datasets </tutorials/datasets.html>`_ )
Notes:
1. This function does not read the image files.
The results do not have the "image" field.
"""
from pycocotools.coco import COCO
timer = Timer()
json_file = PathManager.get_local_path(json_file)
with contextlib.redirect_stdout(io.StringIO()):
coco_api = COCO(json_file)
if timer.seconds() > 1:
logger.info(
"Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())
)
# sort indices for reproducible results
img_ids = sorted(coco_api.imgs.keys())
imgs = coco_api.loadImgs(img_ids)
logger.info(
"Loaded {} unlabeled images in COCO format from {}".format(len(imgs), json_file)
)
dataset_dicts = []
for img_dict in imgs:
record = {}
record["file_name"] = os.path.join(image_root, img_dict["file_name"])
record["height"] = img_dict["height"]
record["width"] = img_dict["width"]
record["image_id"] = img_dict["id"]
dataset_dicts.append(record)
return dataset_dicts
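# --- hedged usage sketch (not part of the original repository code) ---
# After register_coco_unlabel() runs, the unlabeled split can be fetched like
# any other registered dataset. Note that the manifold:// paths above are
# internal, so this only resolves in an environment where they exist.
def _example_fetch_unlabeled_dicts():
    register_coco_unlabel()
    dicts = DatasetCatalog.get("coco_2017_unlabel")  # list of {"file_name", "height", ...}
    return len(dicts)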
_UNLABELED_DATASETS = {
# 1-2 people images extracted from UGC ig images or ig profiles using fetch_image flow
"UGC_unlabel_ig_1M_20210514_1or2people": "manifold://pai_mobile/tree/datasets/semi_supervised/unlabeled_UGC/sweep_4m_20210514_20210515_1or2people.json",
# hand non-UGC long range frames extracted from collected videos
"hand_nonUGC_long_range_384K_20210521": "manifold://pai_mobile/tree/datasets/hand_unlabeled_nonUGC/long_range.json",
# hand non-UGC short range images cropped from the annotated bounding boxes in long-range videos
"hand_nonUGC_short_range_183K_20210521": "manifold://pai_mobile/tree/datasets/hand_unlabeled_nonUGC/short_range.json",
}
def load_json(json_file):
"""
Simply load and return the json_file
"""
with PathManager.open(json_file, "r") as f:
json_data = json.load(f)
return json_data
def register_unlabeled():
"""
Register the unlabeled datasets
The json_file needs to be in D2's format
"""
for name, json_file in _UNLABELED_DATASETS.items():
# 1. register a function which returns dicts
DatasetCatalog.register(
name,
CallFuncWithJsonFile(
func=load_json,
json_file=json_file
)
)
# 2. Optionally, add metadata about this dataset,
# since they might be useful in evaluation, visualization or logging
MetadataCatalog.get(name).set(
json_file=json_file, image_root="", evaluator_type="coco"
)
# ==== Predefined splits for raw cityscapes foggy images ===========
_RAW_CITYSCAPES_SPLITS = {
# "cityscapes_foggy_{task}_train": ("cityscape_foggy/leftImg8bit/train/", "cityscape_foggy/gtFine/train/"),
# "cityscapes_foggy_{task}_val": ("cityscape_foggy/leftImg8bit/val/", "cityscape_foggy/gtFine/val/"),
# "cityscapes_foggy_{task}_test": ("cityscape_foggy/leftImg8bit/test/", "cityscape_foggy/gtFine/test/"),
"cityscapes_foggy_train": ("cityscape_foggy/leftImg8bit/train/", "cityscape_foggy/gtFine/train/"),
"cityscapes_foggy_val": ("cityscape_foggy/leftImg8bit/val/", "cityscape_foggy/gtFine/val/"),
"cityscapes_foggy_test": ("cityscape_foggy/leftImg8bit/test/", "cityscape_foggy/gtFine/test/"),
}
def register_all_cityscapes_foggy():
root = "manifold://mobile_vision_dataset/tree/yujheli/dataset"
for key, (image_dir, gt_dir) in _RAW_CITYSCAPES_SPLITS.items():
meta = _get_builtin_metadata("cityscapes")
image_dir = os.path.join(root, image_dir)
gt_dir = os.path.join(root, gt_dir)
# inst_key = key.format(task="instance_seg")
inst_key = key
# DatasetCatalog.register(
# inst_key,
# lambda x=image_dir, y=gt_dir: load_cityscapes_instances(
# x, y, from_json=True, to_polygons=True
# ),
# )
DatasetCatalog.register(
inst_key,
lambda x=image_dir, y=gt_dir: load_cityscapes_instances(
x, y, from_json=False, to_polygons=False
),
)
# MetadataCatalog.get(inst_key).set(
# image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes_instance", **meta
# )
# MetadataCatalog.get(inst_key).set(
# image_dir=image_dir, gt_dir=gt_dir, evaluator_type="pascal_voc", **meta
# )
MetadataCatalog.get(inst_key).set(
image_dir=image_dir, gt_dir=gt_dir, evaluator_type="coco", **meta
)
# ==== Predefined splits for Clipart (PASCAL VOC format) ===========
def register_all_clipart():
root = "manifold://mobile_vision_dataset/tree/yujheli/dataset"
SPLITS = [
("Clipart1k_train", "clipart", "train"),
("Clipart1k_test", "clipart", "test"),
]
for name, dirname, split in SPLITS:
year = 2012
register_pascal_voc(name, os.path.join(root, dirname), split, year)
MetadataCatalog.get(name).evaluator_type = "pascal_voc"
# MetadataCatalog.get(name).evaluator_type = "coco"
register_all_cityscapes_foggy()
register_all_clipart()
# register_coco_unlabel()
# register_unlabeled()
def register_all_water():
root = "manifold://mobile_vision_dataset/tree/yujheli/dataset" #Need to modify to the correct folder containing the dataset.
SPLITS = [
("Watercolor_train", "watercolor", "train"),
("Watercolor_test", "watercolor", "test"),
]
for name, dirname, split in SPLITS:
year = 2012
# register_pascal_voc(name, os.path.join(root, dirname), split, year, class_names=["person", "dog","bicycle", "bird", "car", "cat"])
register_pascal_voc(name, os.path.join(root, dirname), split, year)
MetadataCatalog.get(name).evaluator_type = "pascal_voc_water"
register_all_water()
def register_all_clipart_ws():
root = "manifold://mobile_vision_dataset/tree/yujheli/dataset"
SPLITS = [
("Clipart1k_train_w", "clipart", "train"),
("Clipart1k_test_w", "clipart", "test"),
]
for name, dirname, split in SPLITS:
year = 2012
register_pascal_voc(name, os.path.join(root, dirname), split, year)
MetadataCatalog.get(name).evaluator_type = "pascal_voc_water"
# MetadataCatalog.get(name).evaluator_type = "coco"
register_all_clipart_ws()
| adaptive_teacher-main | prod_lib/data/builtin.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import json
import logging
import multiprocessing as mp
import numpy as np
import os
from itertools import chain
import pycocotools.mask as mask_util
from PIL import Image
from detectron2.structures import BoxMode
from detectron2.utils.comm import get_world_size
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger
try:
import cv2 # noqa
except ImportError:
# OpenCV is an optional dependency at the moment
pass
logger = logging.getLogger(__name__)
def _get_cityscapes_files(image_dir, gt_dir):
files = []
# scan through the directory
cities = PathManager.ls(image_dir)
logger.info(f"{len(cities)} cities found in '{image_dir}'.")
for city in cities:
city_img_dir = os.path.join(image_dir, city)
city_gt_dir = os.path.join(gt_dir, city)
for basename in PathManager.ls(city_img_dir):
image_file = os.path.join(city_img_dir, basename)
# suffix = "leftImg8bit.png"
# assert basename.endswith(suffix), basename
# basename = basename[: -len(suffix)]
suffix = 'leftImg8bit_foggy'
basename = basename.split(suffix)[0]
instance_file = os.path.join(city_gt_dir, basename + "gtFine_instanceIds.png")
label_file = os.path.join(city_gt_dir, basename + "gtFine_labelIds.png")
json_file = os.path.join(city_gt_dir, basename + "gtFine_polygons.json")
files.append((image_file, instance_file, label_file, json_file))
assert len(files), "No images found in {}".format(image_dir)
for f in files[0]:
assert PathManager.isfile(f), f
return files
def load_cityscapes_instances(image_dir, gt_dir, from_json=True, to_polygons=True):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
from_json (bool): whether to read annotations from the raw json file or the png files.
to_polygons (bool): whether to represent the segmentation as polygons
(COCO's format) instead of masks (cityscapes's format).
Returns:
list[dict]: a list of dicts in Detectron2 standard format. (See
`Using Custom Datasets </tutorials/datasets.html>`_ )
"""
if from_json:
assert to_polygons, (
"Cityscapes's json annotations are in polygon format. "
"Converting to mask format is not supported now."
)
files = _get_cityscapes_files(image_dir, gt_dir)
logger.info("Preprocessing cityscapes annotations ...")
# This is still not fast: all workers will execute duplicate works and will
# take up to 10m on a 8GPU server.
pool = mp.Pool(processes=max(mp.cpu_count() // get_world_size() // 2, 4))
ret = pool.map(
functools.partial(_cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons),
files,
)
logger.info("Loaded {} images from {}".format(len(ret), image_dir))
# Map cityscape ids to contiguous ids
from cityscapesscripts.helpers.labels import labels
labels = [l for l in labels if l.hasInstances and not l.ignoreInEval]
dataset_id_to_contiguous_id = {l.id: idx for idx, l in enumerate(labels)}
for dict_per_image in ret:
for anno in dict_per_image["annotations"]:
anno["category_id"] = dataset_id_to_contiguous_id[anno["category_id"]]
return ret
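# --- hedged usage sketch (not part of the original repository code) ---
# Loading the foggy training split directly; the paths are placeholders and
# must point at a Cityscapes-Foggy layout (leftImg8bit_foggy images plus
# gtFine annotations), with cityscapesscripts installed.
def _example_load_foggy_train():
    dicts = load_cityscapes_instances(
        "~/cityscape_foggy/leftImg8bit/train",
        "~/cityscape_foggy/gtFine/train",
        from_json=False,
        to_polygons=False,
    )
    return len(dicts)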
def load_cityscapes_semantic(image_dir, gt_dir):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
Returns:
list[dict]: a list of dict, each has "file_name" and
"sem_seg_file_name".
"""
ret = []
# gt_dir is small and contain many small files. make sense to fetch to local first
gt_dir = PathManager.get_local_path(gt_dir)
for image_file, _, label_file, json_file in _get_cityscapes_files(image_dir, gt_dir):
label_file = label_file.replace("labelIds", "labelTrainIds")
with PathManager.open(json_file, "r") as f:
jsonobj = json.load(f)
ret.append(
{
"file_name": image_file,
"sem_seg_file_name": label_file,
"height": jsonobj["imgHeight"],
"width": jsonobj["imgWidth"],
}
)
assert len(ret), f"No images found in {image_dir}!"
assert PathManager.isfile(
ret[0]["sem_seg_file_name"]
), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa
return ret
def _cityscapes_files_to_dict(files, from_json, to_polygons):
"""
    Parse cityscapes annotation files to an instance segmentation dataset dict.
Args:
files (tuple): consists of (image_file, instance_id_file, label_id_file, json_file)
from_json (bool): whether to read annotations from the raw json file or the png files.
to_polygons (bool): whether to represent the segmentation as polygons
(COCO's format) instead of masks (cityscapes's format).
Returns:
A dict in Detectron2 Dataset format.
"""
from cityscapesscripts.helpers.labels import id2label, name2label
image_file, instance_id_file, _, json_file = files
annos = []
if from_json:
from shapely.geometry import MultiPolygon, Polygon
with PathManager.open(json_file, "r") as f:
jsonobj = json.load(f)
ret = {
"file_name": image_file,
"image_id": os.path.basename(image_file),
"height": jsonobj["imgHeight"],
"width": jsonobj["imgWidth"],
}
# `polygons_union` contains the union of all valid polygons.
polygons_union = Polygon()
# CityscapesScripts draw the polygons in sequential order
# and each polygon *overwrites* existing ones. See
# (https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/json2instanceImg.py) # noqa
# We use reverse order, and each polygon *avoids* early ones.
        # This will resolve the polygon overlaps in the same way as CityscapesScripts.
for obj in jsonobj["objects"][::-1]:
if "deleted" in obj: # cityscapes data format specific
continue
label_name = obj["label"]
try:
label = name2label[label_name]
except KeyError:
if label_name.endswith("group"): # crowd area
label = name2label[label_name[: -len("group")]]
else:
raise
if label.id < 0: # cityscapes data format
continue
# Cityscapes's raw annotations uses integer coordinates
# Therefore +0.5 here
poly_coord = np.asarray(obj["polygon"], dtype="f4") + 0.5
# CityscapesScript uses PIL.ImageDraw.polygon to rasterize
# polygons for evaluation. This function operates in integer space
# and draws each pixel whose center falls into the polygon.
# Therefore it draws a polygon which is 0.5 "fatter" in expectation.
# We therefore dilate the input polygon by 0.5 as our input.
poly = Polygon(poly_coord).buffer(0.5, resolution=4)
if not label.hasInstances or label.ignoreInEval:
# even if we won't store the polygon it still contributes to overlaps resolution
polygons_union = polygons_union.union(poly)
continue
# Take non-overlapping part of the polygon
poly_wo_overlaps = poly.difference(polygons_union)
if poly_wo_overlaps.is_empty:
continue
polygons_union = polygons_union.union(poly)
anno = {}
anno["iscrowd"] = label_name.endswith("group")
anno["category_id"] = label.id
if isinstance(poly_wo_overlaps, Polygon):
poly_list = [poly_wo_overlaps]
elif isinstance(poly_wo_overlaps, MultiPolygon):
poly_list = poly_wo_overlaps.geoms
else:
raise NotImplementedError("Unknown geometric structure {}".format(poly_wo_overlaps))
poly_coord = []
for poly_el in poly_list:
# COCO API can work only with exterior boundaries now, hence we store only them.
# TODO: store both exterior and interior boundaries once other parts of the
# codebase support holes in polygons.
poly_coord.append(list(chain(*poly_el.exterior.coords)))
anno["segmentation"] = poly_coord
(xmin, ymin, xmax, ymax) = poly_wo_overlaps.bounds
anno["bbox"] = (xmin, ymin, xmax, ymax)
anno["bbox_mode"] = BoxMode.XYXY_ABS
annos.append(anno)
else:
# See also the official annotation parsing scripts at
# https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/instances2dict.py # noqa
with PathManager.open(instance_id_file, "rb") as f:
inst_image = np.asarray(Image.open(f), order="F")
# ids < 24 are stuff labels (filtering them first is about 5% faster)
flattened_ids = np.unique(inst_image[inst_image >= 24])
ret = {
"file_name": image_file,
"image_id": os.path.basename(image_file),
"height": inst_image.shape[0],
"width": inst_image.shape[1],
}
for instance_id in flattened_ids:
# For non-crowd annotations, instance_id // 1000 is the label_id
# Crowd annotations have <1000 instance ids
label_id = instance_id // 1000 if instance_id >= 1000 else instance_id
label = id2label[label_id]
if not label.hasInstances or label.ignoreInEval:
continue
anno = {}
anno["iscrowd"] = instance_id < 1000
anno["category_id"] = label.id
mask = np.asarray(inst_image == instance_id, dtype=np.uint8, order="F")
inds = np.nonzero(mask)
ymin, ymax = inds[0].min(), inds[0].max()
xmin, xmax = inds[1].min(), inds[1].max()
anno["bbox"] = (xmin, ymin, xmax, ymax)
if xmax <= xmin or ymax <= ymin:
continue
anno["bbox_mode"] = BoxMode.XYXY_ABS
if to_polygons:
# This conversion comes from D4809743 and D5171122,
# when Mask-RCNN was first developed.
contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[
-2
]
polygons = [c.reshape(-1).tolist() for c in contours if len(c) >= 3]
# OpenCV can produce invalid polygons
if len(polygons) == 0:
continue
anno["segmentation"] = polygons
else:
anno["segmentation"] = mask_util.encode(mask[:, :, None])[0]
annos.append(anno)
ret["annotations"] = annos
return ret
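# Illustrative sketch (not part of the original file): the reverse-order overlap
# resolution used above, shown on two toy shapely polygons. The later-drawn
# polygon (processed first when iterating in reverse) keeps its full area, and the
# earlier one keeps only the part it does not cover, mirroring how
# cityscapesScripts rasterizes overlapping annotations.
def _overlap_resolution_demo():
    from shapely.geometry import Polygon

    earlier = Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])  # drawn first
    later = Polygon([(2, 0), (6, 0), (6, 4), (2, 4)])  # drawn last, overwrites
    union = Polygon()
    visible = []
    for poly in [later, earlier]:  # iterate in reverse drawing order
        visible.append(poly.difference(union))  # keep only the uncovered part
        union = union.union(poly)
    # `later` keeps area 16, `earlier` keeps only 8 of its original 16
    return [p.area for p in visible]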
| adaptive_teacher-main | prod_lib/data/cityscapes_foggy.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from detectron2.structures import pairwise_iou
class OpenMatchTrainerProbe:
def __init__(self, cfg):
self.BOX_AP = 0.5
self.NUM_CLASSES = cfg.MODEL.ROI_HEADS.NUM_CLASSES
# self.bbox_stat_list = ['compute_fp_gtoutlier', 'compute_num_box', 'compute_ood_acc']
def bbox_stat(self, unlabel_gt, unlabel_pseudo, name, bbox_stat_list):
stats = {}
sum_gpu_names = []
for metric in bbox_stat_list:
stats_per, sum_gpu_names_per = getattr(
self, metric)(unlabel_gt, unlabel_pseudo, name)
stats.update(stats_per)
sum_gpu_names.extend(sum_gpu_names_per)
return stats, sum_gpu_names
def compute_fp_gtoutlier(self, unlabel_gt, unlabel_pseudo, name):
num_gt_ood_object = 0
num_gt_fp_ood_object = 0
sum_iou = 0.0
sum_gpu_names = []
results = {}
if len(unlabel_gt) != 0:
for gt, pseudo in zip(unlabel_gt, unlabel_pseudo):
# import pdb; pdb. set_trace()
if name == "pred":
pp_boxes = pseudo.pred_boxes
elif name == "pseudo_conf" or name == "pseudo_ood":
# filter predicted ood box when evaluating this metric
pseudo = pseudo[pseudo.gt_classes != -1]
pp_boxes = pseudo.gt_boxes
else:
raise ValueError("Unknown name for probe roi bbox.")
if len(gt) != 0 and len(pseudo) != 0:
max_iou, max_idx = pairwise_iou(
gt.gt_boxes.to('cuda'), pp_boxes).max(1)
ood_idx = (gt.gt_classes == -1)
num_gt_ood_object += ood_idx.sum().item()
num_gt_fp_ood_object += (max_iou[ood_idx]
> self.BOX_AP).sum().item()
sum_iou += max_iou[ood_idx].sum().item()
elif len(gt) != 0 and len(pseudo) == 0:
ood_idx = (gt.gt_classes == -1)
num_gt_ood_object += ood_idx.shape[0]
results = {'Analysis_'+name+'/num_gt_ood_object': num_gt_ood_object,
'Analysis_'+name+'/num_gt_fp_ood_object': num_gt_fp_ood_object,
'Analysis_'+name+'/sum_iou': sum_iou}
sum_gpu_names.extend(list(results.keys()))
return results, sum_gpu_names
def compute_num_box(self, unlabel_gt, unlabel_pseudo, name):
num_bbox = 0.0
size_bbox = 0.0
avg_conf = 0.0
# measure in and out box for openset SS-OD
num_bbox_in = 0.0
num_bbox_out = 0.0
num_bg = 0.0
# when ground-truth is missing in unlabeled data
if len(unlabel_gt) == 0:
for pp_roi in unlabel_pseudo:
if name == "pred":
pp_boxes = pp_roi.pred_boxes
pp_classes = pp_roi.pred_classes
pp_scores = pp_roi.scores
elif name == "pseudo_conf" or name == "pseudo_ood":
pp_boxes = pp_roi.gt_boxes
pp_classes = pp_roi.gt_classes
pp_scores = pp_roi.scores
elif name == "gt":
pp_boxes = pp_roi.gt_boxes
pp_classes = pp_roi.gt_classes
else:
raise ValueError("Unknown name for probe roi bbox.")
# all boxes (in + out boxes)
if len(pp_roi) != 0:
# bbox number and size
num_bbox += len(pp_roi)
size_bbox += pp_boxes.area().mean().item()
# average box confidence
if name != "gt":
avg_conf += pp_scores.mean()
else:
num_bbox += 0
size_bbox += torch.tensor(0).cuda()
num_valid_img = len(unlabel_pseudo)
else:
# with ground-truth
num_valid_img = 0
for gt, pp_roi in zip(unlabel_gt, unlabel_pseudo):
if name == "pred":
pp_boxes = pp_roi.pred_boxes
pp_classes = pp_roi.pred_classes
pp_scores = pp_roi.scores
elif name == "pseudo_conf" or name == "pseudo_ood":
# filter out ood pseudo-box when doing analysis
pp_roi = pp_roi[pp_roi.gt_classes != -1]
pp_boxes = pp_roi.gt_boxes
pp_classes = pp_roi.gt_classes
pp_scores = pp_roi.scores
elif name == "gt":
pp_boxes = pp_roi.gt_boxes
pp_classes = pp_roi.gt_classes
else:
raise ValueError("Unknown name for probe roi bbox.")
# all boxes (in + out boxes)
if len(pp_roi) != 0:
# bbox number and size
num_bbox += len(pp_roi)
size_bbox += pp_boxes.area().mean().item()
# average box confidence
if name != "gt":
avg_conf += pp_scores.mean()
else:
num_bbox += 0
size_bbox += torch.tensor(0).cuda()
# in and out class
if name == "gt":
pp_roi_in = pp_roi[pp_classes != -1]
num_bbox_in += len(pp_roi_in)
pp_roi_out = pp_roi[pp_classes == -1]
num_bbox_out += len(pp_roi_out)
num_valid_img += 1
elif name == "pred" or name == "pseudo_conf" or name == "pseudo_ood":
if len(gt.gt_boxes.to('cuda'))>0 and len(pp_boxes) > 0:
max_iou, max_idx = pairwise_iou(gt.gt_boxes.to('cuda'), pp_boxes).max(0)
# for the ground-truth label for each pseudo-box
gtclass4pseudo = gt.gt_classes[max_idx]
matchgtbox = max_iou > 0.5
# compute the number of boxes (background, inlier, outlier)
num_bg += (~matchgtbox).sum().item()
num_bbox_in += (gtclass4pseudo[matchgtbox]
!= -1).sum().item()
num_bbox_out += (gtclass4pseudo[matchgtbox]
== -1).sum().item()
num_valid_img += 1
else:
raise ValueError("Unknown name for probe roi bbox.")
box_probe = {}
if num_valid_img >0 :
box_probe["Analysis_" + name + "/Num_bbox"] = num_bbox / \
num_valid_img
box_probe["Analysis_" + name + "/Size_bbox"] = size_bbox / \
num_valid_img
box_probe["Analysis_" + name +
"/Num_bbox_inlier"] = num_bbox_in / num_valid_img
box_probe["Analysis_" + name +
"/Num_bbox_outlier"] = num_bbox_out / num_valid_img
if name != "gt": # prediciton, background number
box_probe["Analysis_" + name + "/Conf"] = avg_conf / \
num_valid_img
box_probe["Analysis_" + name +
"/Num_bbox_background"] = num_bg / num_valid_img
box_probe["Analysis_" + name +
"/background_fp_ratio"] = num_bg / num_bbox
box_probe["Analysis_" + name +
"/background_tp_ratio"] = num_bbox_in / num_bbox
else:
box_probe["Analysis_" + name + "/Num_bbox"] = 0.0
box_probe["Analysis_" + name + "/Size_bbox"] = 0.0
box_probe["Analysis_" + name +
"/Num_bbox_inlier"] = 0.0
box_probe["Analysis_" + name +
"/Num_bbox_outlier"] = 0.0
if name != "gt": # prediciton, background number
box_probe["Analysis_" + name + "/Conf"] = 0.0
box_probe["Analysis_" + name +
"/Num_bbox_background"] = 0.0
box_probe["Analysis_" + name +
"/background_fp_ratio"] = num_bg / num_bbox
box_probe["Analysis_" + name +
"/background_tp_ratio"] = num_bbox_in / num_bbox
return box_probe, []
def compute_ood_acc(self, unlabel_gt, unlabel_pseudo, name, BOX_IOU=0.5):
results = {}
sum_gpu_names = []
if len(unlabel_gt) != 0:
for metric in ['acc_outlier', 'recall_outlier']:
for samples in ['_fg', '_all']:
for fraction_part in ['_nume', '_deno']:
results[metric+samples+fraction_part] = 0.0
for gt, pred in zip(unlabel_gt, unlabel_pseudo):
if name == "pred":
pp_boxes = pred.pred_boxes
pp_ood_scores = pred.ood_scores
elif name == "pseudo_conf" or name == "pseudo_ood":
# assume these outliers are suppressed
pred = pred[pred.gt_classes != -1]
pp_boxes = pred.gt_boxes
pp_ood_scores = pred.ood_scores
else:
raise ValueError("Unknown name for probe roi bbox.")
if len(gt) != 0 and len(pred) != 0:
# find the most overlapped ground-truth box for each pseudo-box
max_iou, max_idx = pairwise_iou(
gt.gt_boxes.to('cuda'), pp_boxes).max(0)
# ignore background instances
find_fg_mask = max_iou > BOX_IOU
if find_fg_mask.sum() > 0:
gt_corres = gt[max_idx].gt_classes.to("cuda")
gt_outlier = (gt_corres[find_fg_mask] == -1)
pred_outlier = pp_ood_scores[find_fg_mask][:, 0] > 0.5
# accuracy of ood detection (foreground)
# acc_outlier_fg = (pred_outlier == gt_outlier).sum() /find_fg_mask.sum()
results['acc_outlier_fg_nume'] += (
pred_outlier == gt_outlier).sum()
results['acc_outlier_fg_deno'] += find_fg_mask.sum()
# recall of ood detection (foreground)
# recall_outlier_fg = (pred_outlier[gt_outlier] == gt_outlier[gt_outlier]).sum() /gt_outlier.sum()
results['recall_outlier_fg_nume'] += (
pred_outlier[gt_outlier] == gt_outlier[gt_outlier]).sum()
results['recall_outlier_fg_deno'] += gt_outlier.sum()
# Regard background gt as outlier
gt_corres = gt[max_idx].gt_classes.to("cuda")
# convert all background gt as outlier
gt_corres[~find_fg_mask] = -1
gt_outlier = gt_corres == -1
pred_outlier = pp_ood_scores[:, 0] > 0.5
# accuracy of ood detection (all)
# acc_outlier_all = (pred_outlier == gt_outlier).sum() /len(pred)
results['acc_outlier_all_nume'] += (
pred_outlier == gt_outlier).sum()
results['acc_outlier_all_deno'] += len(pred)
# recall of ood detection (all)
# recall_outlier_all = (pred_outlier[gt_outlier] == gt_outlier[gt_outlier]).sum() /gt_outlier.sum()
results['recall_outlier_all_nume'] += (
pred_outlier[gt_outlier] == gt_outlier[gt_outlier]).sum()
results['recall_outlier_all_deno'] += gt_outlier.sum()
results = {'Analysis_'+name+'/'+k: v for k, v in results.items()}
sum_gpu_names.extend(list(results.keys()))
return results, sum_gpu_names
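# Illustrative sketch (not part of the original file): the core matching step the
# probes above rely on -- for each pseudo-box, find the best-overlapping
# ground-truth box with pairwise_iou and threshold the IoU. The tensors and the
# 0.5 threshold below are made-up toy values; the real probes additionally move
# boxes to CUDA and read fields such as scores and ood_scores.
def _iou_matching_demo():
    import torch
    from detectron2.structures import Boxes, Instances, pairwise_iou

    gt = Instances((100, 100))
    gt.gt_boxes = Boxes(torch.tensor([[10.0, 10.0, 50.0, 50.0]]))
    gt.gt_classes = torch.tensor([3])

    pseudo = Instances((100, 100))
    pseudo.gt_boxes = Boxes(torch.tensor([[12.0, 12.0, 48.0, 52.0],
                                          [60.0, 60.0, 90.0, 90.0]]))

    # max over gt boxes (dim 0): best-matching gt for each pseudo-box
    max_iou, max_idx = pairwise_iou(gt.gt_boxes, pseudo.gt_boxes).max(0)
    matched = max_iou > 0.5  # same style of thresholding as compute_num_box
    return max_iou, max_idx, matched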
| adaptive_teacher-main | prod_lib/engine/probe.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import time
from collections import OrderedDict
from typing import Dict
import detectron2.utils.comm as comm
import numpy as np
import torch
from detectron2.engine import SimpleTrainer
from detectron2.structures import BitMasks, Boxes, Instances, Keypoints
from detectron2.utils.events import get_event_storage
from d2go.projects.unbiased_teacher.engine.trainer import UnbiasedTeacherTrainer
from d2go.projects.unbiased_teacher.utils.probe import probe
import copy
logger = logging.getLogger(__name__)
class DAobjTrainer(UnbiasedTeacherTrainer):
"""
A trainer for Teacher-Student mutual learning following this paper:
"Unbiased Teacher for Semi-Supervised Object Detection"
It assumes that in every step, you:
For the Teacher:
1. Perform a forward pass on weakly augmented unlabeled data from the data_loader.
2. Generate pseudo-labels on the weakly augmented unlabeled data.
For the Student:
3. Perform a forward pass on strongly augmented unlabeled data from the data_loader.
4. Perform a forward pass on labeled data from the data_loader.
5. Use the pseudo-labels generated by the Teacher as targets and compute the
loss on the strongly augmented unlabeled data.
6. Compute the gradients with the above losses on labeled and unlabeled data.
7. Update the Student model with the optimizer.
8. EMA-update the Teacher model.
"""
# def __init__(self, cfg, model, model_teacher, data_loader, optimizer):
# """
# Args:
# model: a torch Module. Takes a data from data_loader and returns a
# dict of losses.
# data_loader: an iterable. Contains data to be used to call model.
# optimizer: a torch optimizer.
# """
# super().__init__(model, data_loader, optimizer)
# self.cfg = cfg
# self.model_teacher = model_teacher
def run_step(self):
assert (
self.model.training
), "Student model was changed to eval mode during training"
start = time.perf_counter()
data = next(self._data_loader_iter)
# q (queue): strongly augmented, k (key): weakly augmented
#TODO Need to further use the weak samples for domain adaptation
label_data_q, label_data_k, unlabel_data_q, unlabel_data_k = data
data_time = time.perf_counter() - start
if (
self.cfg.UNBIASEDTEACHER.BURN_IN_STEP != 0
and self.iter < self.cfg.UNBIASEDTEACHER.BURN_IN_STEP
):
# Burn-In stage: train the Student model with supervision only.
losses, loss_dict, record_dict = self.burn_in(label_data_q, label_data_k)
else:
# Copy the Student model to the Teacher (using keep_rate = 0)
if self.iter == self.cfg.UNBIASEDTEACHER.BURN_IN_STEP:
logger.info("Copying Student weights to the Teacher .....")
self._update_teacher_model(keep_rate=0.0)
elif (
self.iter - self.cfg.UNBIASEDTEACHER.BURN_IN_STEP
) % self.cfg.UNBIASEDTEACHER.TEACHER_UPDATE_ITER == 0:
self._update_teacher_model(
keep_rate=self.cfg.UNBIASEDTEACHER.EMA.KEEP_RATE
)
# Teacher-Student Mutual Learning
losses, loss_dict, record_dict = self.teacher_student_learning(
label_data_q, label_data_k, unlabel_data_q, unlabel_data_k
)
self.optimizer.zero_grad()
losses.backward()
self._write_metrics(record_dict, data_time)
"""
If you need gradient clipping/scaling or other processing, you can
wrap the optimizer with your custom `step()` method. But it is
suboptimal as explained in https://arxiv.org/abs/2006.15704 Sec 3.2.4
"""
self.optimizer.step()
def burn_in(self, label_data_q, label_data_k):
"""
Perform Burn-In stage with labeled data
"""
# combine label_data_q + label_data_k
label_data_q.extend(label_data_k)
record_dict, _, _, _ = self.model(label_data_q, branch="supervised")
# weight losses
loss_dict = self.weight_losses(record_dict)
losses = sum(loss_dict.values())
return losses, loss_dict, record_dict
def teacher_student_learning(
self, label_data_q, label_data_k, unlabel_data_q, unlabel_data_k
):
"""
Perform Teacher-Student Mutual Learning with labeled and unlabeled data
"""
# q (queue): strongly augmented, k (key): weakly augmented
record_dict = {}
######################## For probe #################################
# import pdb; pdb. set_trace()
gt_unlabel_k = self.get_label(unlabel_data_k)
# 0. remove potential ground-truth labels in the unlabeled data
unlabel_data_q = self.remove_label(unlabel_data_q)
unlabel_data_k = self.remove_label(unlabel_data_k)
# 1. generate the pseudo-label using teacher model
# TODO: why is the Teacher not in .eval() mode?
with torch.no_grad():
(
_,
proposals_rpn_unsup_k,
proposals_roih_unsup_k,
_,
) = self.model_teacher(unlabel_data_k, branch="unsup_data_weak")
######################## For probe #################################
# import pdb; pdb. set_trace()
# analysis_pred, _ = self.probe.compute_num_box(gt_unlabel_k,proposals_roih_unsup_k,'pred')
# record_dict.update(analysis_pred)
# 2. Pseudo-labeling
# Pseudo-labeling for RPN head (bbox location/objectness)
joint_proposal_dict = {}
## The RPN pseudo-labels below are not strictly needed
joint_proposal_dict["proposals_rpn"] = proposals_rpn_unsup_k
(
pesudo_proposals_rpn_unsup_k,
num_pseudo_bbox_rpn,
) = self.process_pseudo_label(
proposals_rpn_unsup_k,
self.cfg.UNBIASEDTEACHER.BBOX_THRESHOLD,
self.cfg.UNBIASEDTEACHER.MASK_THRESHOLD,
self.cfg.UNBIASEDTEACHER.KEYPOINT_THRESHOLD,
"rpn",
"thresholding",
)
joint_proposal_dict["proposals_pseudo_rpn"] = pesudo_proposals_rpn_unsup_k
## end of the not-strictly-needed RPN pseudo-labeling
# Pseudo-labeling for ROI head (bbox location/objectness)
pesudo_proposals_roih_unsup_k, _ = self.process_pseudo_label(
proposals_roih_unsup_k,
self.cfg.UNBIASEDTEACHER.BBOX_THRESHOLD,
self.cfg.UNBIASEDTEACHER.MASK_THRESHOLD,
self.cfg.UNBIASEDTEACHER.KEYPOINT_THRESHOLD,
"roih",
"thresholding",
)
joint_proposal_dict["proposals_pseudo_roih"] = pesudo_proposals_roih_unsup_k
######################## For probe #################################
analysis_pred, _ = self.probe.compute_num_box(gt_unlabel_k,pesudo_proposals_roih_unsup_k,'pred')
record_dict.update(analysis_pred)
# Probe for analysis (usually for research development)
if self.cfg.UNBIASEDTEACHER.PROBE:
record_dict = probe(
self.cfg,
proposals_roih_unsup_k,
unlabel_data_k,
pesudo_proposals_roih_unsup_k,
record_dict,
)
# 3. add pseudo-label to unlabeled data
unlabel_data_q = self.add_label(
unlabel_data_q, joint_proposal_dict["proposals_pseudo_roih"]
)
unlabel_data_k = self.add_label(
unlabel_data_k, joint_proposal_dict["proposals_pseudo_roih"]
)
# all_label_data = label_data_q + label_data_k
if self.cfg.UNBIASEDTEACHER.ISAUG == "No":
all_label_data = label_data_k
all_unlabel_data = unlabel_data_k
else:
all_label_data = label_data_q + label_data_k
all_unlabel_data = unlabel_data_q
# 4. input both strongly and weakly augmented labeled data into student model
# all_unlabel_data = unlabel_data_q
record_all_label_data, _, _, _ = self.model(all_label_data, branch="supervised")
record_dict.update(record_all_label_data)
# 5. input strongly augmented unlabeled data into model
record_all_unlabel_data, _, _, _ = self.model(
all_unlabel_data, branch="supervised-pseudo"
)
# rename unsupervised loss
# NOTE: names of the recorded output from model are hard-coded
# we rename them accordingly for unlabeled data
new_record_all_unlabel_data = {}
for key in record_all_unlabel_data.keys():
new_record_all_unlabel_data[key + "_pseudo"] = record_all_unlabel_data[key]
record_dict.update(new_record_all_unlabel_data)
# 6. input weakly labeled data (source) and weakly unlabeled data (target) to student model
# give sign to the target data
for i_index in range(len(unlabel_data_k)):
# unlabel_data_item = {}
for k, v in unlabel_data_k[i_index].items():
# label_data_k[i_index][k + "_unlabeled"] = v
label_data_k[i_index][k + "_unlabeled"] = v
# unlabel_data_k[i_index] = unlabel_data_item
all_domain_data = label_data_k
# all_domain_data = label_data_k + unlabel_data_k
record_all_domain_data, _, _, _ = self.model(all_domain_data, branch="domain")
record_dict.update(record_all_domain_data)
# 7. distill teacher
# for distill back to teacher
with torch.no_grad():
(
_,
proposals_rpn_unsup_dis,
proposals_roih_unsup_dis,
_,
) = self.model(unlabel_data_k, branch="unsup_data_weak")
pesudo_proposals_roih_unsup_k, _ = self.process_pseudo_label(
proposals_roih_unsup_dis,
self.cfg.UNBIASEDTEACHER.BBOX_THRESHOLD,
self.cfg.UNBIASEDTEACHER.MASK_THRESHOLD,
self.cfg.UNBIASEDTEACHER.KEYPOINT_THRESHOLD,
"roih",
"thresholding",
)
unlabel_data_k = self.remove_label(unlabel_data_k)
unlabel_data_k = self.add_label(
unlabel_data_k, pesudo_proposals_roih_unsup_k
)
record_distill_data, _, _, _ = self.model_teacher(
unlabel_data_k, branch="supervised-pseudo"
)
new_record_all_distill_data = {}
for key in record_distill_data.keys():
new_record_all_distill_data[key + "_distill"] = record_distill_data[key]
record_dict.update(new_record_all_distill_data)
# weighting losses
loss_dict = self.weight_losses(record_dict)
#Add discriminator loss here
#loss_dict.update(...)
losses = sum(loss_dict.values())
return losses, loss_dict, record_dict
def weight_losses(self, record_dict):
loss_dict = {}
REGRESSION_LOSS_WEIGHT = 0
for key in record_dict.keys():
if key.startswith("loss"):
if key == "loss_rpn_cls_pseudo":
loss_dict[key] = (
record_dict[key]
* self.cfg.UNBIASEDTEACHER.UNSUP_LOSS_WEIGHT_RPN_CLS
)
elif (
key == "loss_rpn_loc_pseudo" or key == "loss_box_reg_pseudo"
): # set pseudo bbox regression to 0
loss_dict[key] = record_dict[key] * REGRESSION_LOSS_WEIGHT
elif (
key == "loss_rpn_loc_distill" or key == "loss_box_reg_distill"
): # set pseudo bbox regression to 0
loss_dict[key] = record_dict[key] * REGRESSION_LOSS_WEIGHT
elif key.endswith("mask_pseudo"): # unsupervised loss for segmentation
loss_dict[key] = (
record_dict[key]
* self.cfg.UNBIASEDTEACHER.UNSUP_LOSS_WEIGHT_MASK
)
elif key.endswith("keypoint_pseudo"): # unsupervised loss for keypoint
loss_dict[key] = (
record_dict[key]
* self.cfg.UNBIASEDTEACHER.UNSUP_LOSS_WEIGHT_KEYPOINT
)
elif key.endswith("pseudo"): # unsupervised loss
loss_dict[key] = (
record_dict[key] * self.cfg.UNBIASEDTEACHER.UNSUP_LOSS_WEIGHT
)
elif (
key == "loss_D_img_s" or key == "loss_D_img_t"
): # set weight for discriminator
# import pdb
# pdb.set_trace()
loss_dict[key] = record_dict[key] * self.cfg.UNBIASEDTEACHER.DIS_LOSS_WEIGHT #Need to modify defaults and yaml
else: # supervised loss
loss_dict[key] = record_dict[key] * 1
return loss_dict
def threshold_bbox(
self,
proposal_bbox_inst,
thres=0.7,
mask_thres=0.5,
keypoint_thres=0.5,
proposal_type="roih",
):
if proposal_type == "rpn":
valid_map = proposal_bbox_inst.objectness_logits > thres
# create instances containing boxes and gt_classes
image_shape = proposal_bbox_inst.image_size
new_proposal_inst = Instances(image_shape)
# create box
new_bbox_loc = proposal_bbox_inst.proposal_boxes.tensor[valid_map, :]
new_boxes = Boxes(new_bbox_loc)
# add boxes to instances
new_proposal_inst.gt_boxes = new_boxes
new_proposal_inst.pred_boxes = new_boxes
new_proposal_inst.objectness_logits = proposal_bbox_inst.objectness_logits[
valid_map
]
elif proposal_type == "roih":
valid_map = proposal_bbox_inst.scores > thres
# create instances containing boxes and gt_classes
image_shape = proposal_bbox_inst.image_size
new_proposal_inst = Instances(image_shape)
# create box
new_bbox_loc = proposal_bbox_inst.pred_boxes.tensor[valid_map, :]
new_boxes = Boxes(new_bbox_loc)
# add boxes to instances
new_proposal_inst.gt_boxes = new_boxes
new_proposal_inst.pred_boxes = new_boxes
new_proposal_inst.gt_classes = proposal_bbox_inst.pred_classes[valid_map]
new_proposal_inst.pred_classes = proposal_bbox_inst.pred_classes[valid_map]
new_proposal_inst.scores = proposal_bbox_inst.scores[valid_map]
if self.cfg.MODEL.MASK_ON and new_boxes:
# put predicted output into gt_masks with thresholding
new_masks = proposal_bbox_inst.pred_masks[valid_map].squeeze(1)
new_masks = new_masks >= mask_thres
new_proposal_inst.gt_masks = BitMasks(new_masks)
if self.cfg.MODEL.KEYPOINT_ON and new_boxes:
# we use the keypoint score as the basis for thresholding
new_keypoints = proposal_bbox_inst.pred_keypoints[valid_map, :]
invalid_keypoints = new_keypoints[:, :, 2] < keypoint_thres
# (x, y, visibility): visibility flag = 0 -> not labeled (in which case x=y=0)
new_keypoints[invalid_keypoints] = torch.FloatTensor([0, 0, 0]).to(
new_keypoints.device
)
new_proposal_inst.gt_keypoints = Keypoints(new_keypoints)
return new_proposal_inst
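# Illustrative sketch (not part of the original file): an EMA update of the kind
# `_update_teacher_model` (inherited from the unbiased-teacher trainer) is
# expected to perform. This is a generic reimplementation for clarity, not the
# exact upstream code; with keep_rate=0.0 it simply copies Student weights into
# the Teacher, matching the burn-in hand-off above. In a DistributedDataParallel
# setup the Student keys may additionally carry a "module." prefix.
def _ema_update_sketch(model_teacher, model_student, keep_rate=0.9996):
    import torch

    student_state = model_student.state_dict()
    new_teacher_state = {}
    with torch.no_grad():
        for key, value in model_teacher.state_dict().items():
            # exponential moving average of Student weights into the Teacher
            new_teacher_state[key] = (
                student_state[key] * (1.0 - keep_rate) + value * keep_rate
            )
    model_teacher.load_state_dict(new_teacher_state)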
| adaptive_teacher-main | prod_lib/engine/trainer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
'''
Author: Mulong Luo
Date: 2022.4.1
Usage: defines various replacement policy to be used in AutoCAT
'''
import block
import random
INVALID_TAG = '--------'
# interface for cache replacement policy per set
class rep_policy:
def __init__(self):
self.verbose = False
def touch(self, tag, timestamp):
pass
def reset(self, tag, timestamp):
pass
def invalidate(self, tag):
pass
def find_victim(self, timestamp):
pass
def vprint(self, *args):
if self.verbose == 1:
print( " "+" ".join(map(str,args))+" ")
# LRU policy
class lru_policy(rep_policy):
def __init__(self, associativity, block_size, verbose=False):
self.associativity = associativity
self.block_size = block_size
self.blocks = {}
self.verbose = verbose
def touch(self, tag, timestamp):
assert(tag in self.blocks)
self.blocks[tag].last_accessed = timestamp
def reset(self, tag, timestamp):
return self.touch(tag, timestamp)
def instantiate_entry(self, tag, timestamp):
assert(tag == INVALID_TAG or tag not in self.blocks)
assert(len(self.blocks) < self.associativity)
self.blocks[tag] = block.Block(self.block_size, timestamp, False, 0)
#def reset(self, tag):
def invalidate(self, tag):
assert(tag in self.blocks)
del self.blocks[tag]
#def reset(self, tag):
def invalidate_unsafe(self, tag):
if tag in self.blocks:
del self.blocks[tag]
def find_victim(self, timestamp):
in_cache = list(self.blocks.keys())
#print(len(in_cache))
#print(in_cache)
victim_tag = in_cache[0]
for b in in_cache:
self.vprint(b + ' '+ str(self.blocks[b].last_accessed))
if self.blocks[b].last_accessed < self.blocks[victim_tag].last_accessed:
victim_tag = b
return victim_tag
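# Illustrative usage sketch (not part of the original file): exercising the LRU
# policy above on a 4-way set. Tags are arbitrary strings here; timestamps only
# need to be monotonically increasing for the last_accessed comparisons to work.
def _lru_policy_demo():
    lru = lru_policy(associativity=4, block_size=1)
    for t, tag in enumerate(['a', 'b', 'c', 'd']):
        lru.instantiate_entry(tag, t)  # fill the set at times 0..3
    lru.touch('a', 4)  # 'a' becomes the most recently used
    return lru.find_victim(5)  # 'b' now has the oldest last_accessed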
# random replacement policy
class rand_policy(rep_policy):
def __init__(self, associativity, block_size, verbose=False):
self.associativity = associativity
self.block_size = block_size
self.blocks = {}
self.verbose = verbose
def touch(self, tag, timestamp):
assert(tag in self.blocks)
self.blocks[tag].last_accessed = timestamp
def reset(self, tag, timestamp):
return self.touch(tag, timestamp)
def instantiate_entry(self, tag, timestamp):
assert(tag not in self.blocks)
self.blocks[tag] = block.Block(self.block_size, timestamp, False, 0)
def invalidate(self, tag):
assert(tag in self.blocks)
del self.blocks[tag]
def find_victim(self, timestamp):
in_cache = list(self.blocks.keys())
index = random.randint(0,len(in_cache)-1)
victim_tag = in_cache[index]
return victim_tag
# still needs debugging
import math
# based on c implementation of tree_plru
# https://github.com/gem5/gem5/blob/87c121fd954ea5a6e6b0760d693a2e744c2200de/src/mem/cache/replacement_policies/tree_plru_rp.cc
class tree_plru_policy(rep_policy):
import math
def __init__(self, associativity, block_size, verbose = False):
self.associativity = associativity
self.block_size = block_size
self.num_leaves = associativity
self.plrutree = [ False ] * ( self.num_leaves - 1 )
self.count = 0
self.candidate_tags = [ INVALID_TAG ] * self.num_leaves
self.verbose = verbose
self.vprint(self.plrutree)
self.vprint(self.candidate_tags)
#self.tree_instance = # holds the latest temporary tree instance created by
def parent_index(self,index):
return math.floor((index - 1) / 2)
def left_subtree_index(self,index):
return 2 * index + 1
def right_subtree_index(self,index):
return 2 * index + 2
def is_right_subtree(self, index):
return index % 2 == 0
def touch(self, tag, timestamp):
# find the index
tree_index = 0
self.vprint(tree_index)
while tree_index < len(self.candidate_tags):
if self.candidate_tags[tree_index] == tag:
break
else:
tree_index += 1
tree_index += ( self.num_leaves - 1)
# set the path
right = self.is_right_subtree(tree_index)
tree_index = self.parent_index(tree_index)
self.plrutree[tree_index] = not right
while tree_index != 0:
right = self.is_right_subtree(tree_index)
tree_index = self.parent_index(tree_index)
#exit(-1)
self.plrutree[tree_index] = not right
self.vprint(self.plrutree)
self.vprint(self.candidate_tags)
def reset(self, tag, timestamp):
self.touch(tag, timestamp)
#def reset(self, tag):
def invalidate(self, tag):
# find index of tag
self.vprint('invalidate ' + tag)
tree_index = 0
while tree_index < len(self.candidate_tags):
if self.candidate_tags[tree_index] == tag:
break
else:
tree_index += 1
#print(tree_index)
self.candidate_tags[tree_index] = INVALID_TAG
tree_index += (self.num_leaves - 1 )
# invalidate the path
right = self.is_right_subtree(tree_index)
tree_index = self.parent_index(tree_index)
self.plrutree[tree_index] = right
while tree_index != 0:
right = self.is_right_subtree(tree_index)
tree_index = self.parent_index(tree_index)
self.plrutree[tree_index] = right
self.vprint(self.plrutree)
self.vprint(self.candidate_tags)
def find_victim(self, timestamp):
tree_index = 0
while tree_index < len(self.plrutree):
if self.plrutree[tree_index] == 1:
tree_index = self.right_subtree_index(tree_index)
else:
tree_index = self.left_subtree_index(tree_index)
victim_tag = self.candidate_tags[tree_index - (self.num_leaves - 1) ]
return victim_tag
# notice the usage of instantiate_entry() here is
# different from instantiateEntry() in gem5
# in gem5 the function is only called during cache initialization
# while here instantiate_entry is used when a line is evicted and a new line is installed
def instantiate_entry(self, tag, timestamp):
# find a tag that can be invalidated
index = 0
while index < len(self.candidate_tags):
if self.candidate_tags[index] == INVALID_TAG:
break
index += 1
assert(self.candidate_tags[index] == INVALID_TAG) # this does not always hold for tree-plru
self.candidate_tags[index] = tag
# touch the entry
self.touch(tag, timestamp)
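# Illustrative usage sketch (not part of the original file): the tree-PLRU policy
# above on a 4-way set. After filling the set and re-touching 'a', find_victim
# walks the plrutree bits toward the least-recently-touched side and returns a
# pseudo-LRU victim (an approximation, so it need not equal the true LRU tag).
def _tree_plru_demo():
    plru = tree_plru_policy(associativity=4, block_size=1)
    for t, tag in enumerate(['a', 'b', 'c', 'd']):
        plru.instantiate_entry(tag, t)  # fill the 4 leaves
    plru.touch('a', 4)  # flip the tree bits away from 'a'
    return plru.find_victim(5)  # a pseudo-LRU victim tag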
class bit_plru(rep_policy):
def __init__(self, associativity, block_size, verbose = False):
self.associativity = associativity
self.block_size = block_size
self.blocks = {}
self.verbose = verbose
def touch(self, tag, timestamp):
assert(tag in self.blocks)
self.blocks[tag].last_accessed = 1
def reset(self, tag, timestamp):
return self.touch(tag, timestamp)
def instantiate_entry(self, tag, timestamp):
assert(tag not in self.blocks)
timestamp = 1
self.blocks[tag] = block.Block(self.block_size, timestamp, False, 0)
#def reset(self, tag):
def invalidate(self, tag):
assert(tag in self.blocks)
del self.blocks[tag]
def find_victim(self, timestamp):
in_cache = list(self.blocks.keys())
victim_tag = in_cache[0]
found = False
for b in in_cache:
self.vprint(b + ' '+ str(self.blocks[b].last_accessed))
# find the smallest last_accessed address
if self.blocks[b].last_accessed == 0:
victim_tag = b
found = True
break
if found == True:
return victim_tag
else:
# reset all last_accessed to 0
for b in in_cache:
self.blocks[b].last_accessed = 0
# find the leftmost tag
for b in in_cache:
if self.blocks[b].last_accessed == 0:
victim_tag = b
break
return victim_tag
#pl cache option
PL_NOTSET = 0
PL_LOCK = 1
PL_UNLOCK = 2
class plru_pl_policy(rep_policy):
def __init__(self, associativity, block_size, verbose = False):
self.associativity = associativity
self.block_size = block_size
self.num_leaves = associativity
self.plrutree = [ False ] * ( self.num_leaves - 1 )
self.count = 0
self.candidate_tags = [ INVALID_TAG ] * self.num_leaves
self.lockarray = [ PL_UNLOCK ] * self.num_leaves
self.verbose = verbose
self.vprint(self.plrutree)
self.vprint(self.lockarray)
self.vprint(self.candidate_tags)
#self.tree_instance = # holds the latest temporary tree instance created by
def parent_index(self,index):
return math.floor((index - 1) / 2)
def left_subtree_index(self,index):
return 2 * index + 1
def right_subtree_index(self,index):
return 2 * index + 2
def is_right_subtree(self, index):
return index % 2 == 0
def touch(self, tag, timestamp):
# find the index
tree_index = 0
self.vprint(tree_index)
while tree_index < len(self.candidate_tags):
if self.candidate_tags[tree_index] == tag:
break
else:
tree_index += 1
tree_index += ( self.num_leaves - 1)
# set the path
right = self.is_right_subtree(tree_index)
tree_index = self.parent_index(tree_index)
self.plrutree[tree_index] = not right
while tree_index != 0:
right = self.is_right_subtree(tree_index)
tree_index = self.parent_index(tree_index)
#exit(-1)
self.plrutree[tree_index] = not right
self.vprint(self.plrutree)
self.vprint(self.lockarray)
self.vprint(self.candidate_tags)
def reset(self, tag, timestamp):
self.touch(tag, timestamp)
#def reset(self, tag):
def invalidate(self, tag):
# find index of tag
self.vprint('invalidate ' + tag)
tree_index = 0
while tree_index < len(self.candidate_tags):
if self.candidate_tags[tree_index] == tag:
break
else:
tree_index += 1
#print(tree_index)
self.candidate_tags[tree_index] = INVALID_TAG
tree_index += (self.num_leaves - 1 )
# invalidate the path
right = self.is_right_subtree(tree_index)
tree_index = self.parent_index(tree_index)
self.plrutree[tree_index] = right
while tree_index != 0:
right = self.is_right_subtree(tree_index)
tree_index = self.parent_index(tree_index)
self.plrutree[tree_index] = right
self.vprint(self.plrutree)
self.vprint(self.lockarray)
self.vprint(self.candidate_tags)
def find_victim(self, timestamp):
tree_index = 0
while tree_index < len(self.plrutree):
if self.plrutree[tree_index] == 1:
tree_index = self.right_subtree_index(tree_index)
else:
tree_index = self.left_subtree_index(tree_index)
index = tree_index - (self.num_leaves - 1)
# pl cache
if self.lockarray[index] == PL_UNLOCK:
victim_tag = self.candidate_tags[index]
return victim_tag
else:
return INVALID_TAG
# notice the usage of instantiate_entry() here is
# different from instantiateEntry() in gem5
# in gem5 the function is only called during cache initialization
# while here instantiate_entry is used when a line is evicted and a new line is installed
def instantiate_entry(self, tag, timestamp):
# find a tag that can be invalidated
index = 0
while index < len(self.candidate_tags):
if self.candidate_tags[index] == INVALID_TAG:
break
index += 1
assert(self.candidate_tags[index] == INVALID_TAG)
self.candidate_tags[index] = tag
###while index < self.num_leaves:
### if self.candidate_tags[index] == INVALID:
### self.candidate_tags[index] = tag
### break
### else:
### index += 1
# touch the entry
self.touch(tag, timestamp)
# pl cache set lock scenario
def setlock(self, tag, lock):
self.vprint("setlock "+ tag + ' ' + str(lock))
# find the index
index = 0
self.vprint(index)
while index < len(self.candidate_tags):
if self.candidate_tags[index] == tag:
break
else:
index += 1
# set / unset lock
self.lockarray[index] = lock
#implementation based on https://github.com/gem5/gem5/blob/87c121fd954ea5a6e6b0760d693a2e744c2200de/src/mem/cache/replacement_policies/brrip_rp.cc
# testcase based on https://dl.acm.org/doi/pdf/10.1145/1816038.1815971
class brrip_policy(rep_policy):
def __init__(self, associativity, block_size, verbose = False):
self.associativity = associativity
self.block_size = block_size
self.count = 0
self.candidate_tags = [ INVALID_TAG ] * self.associativity
self.verbose = verbose
self.num_rrpv_bits = 2
self.rrpv_max = int(math.pow(2, self.num_rrpv_bits)) - 1
self.rrpv = [ self.rrpv_max ] * associativity
self.hit_priority = False
self.btp = 100
self.vprint(self.candidate_tags)
self.vprint(self.rrpv)
#self.tree_instance = # holds the latest temporary tree instance created by
def instantiate_entry(self, tag, timestamp):
# find a tag that can be invalidated
index = 0
while index < len(self.candidate_tags):
if self.candidate_tags[index] == INVALID_TAG:
self.candidate_tags[index] = tag
self.rrpv[index] = self.rrpv_max
break
index += 1
# touch the entry
self.touch(tag, timestamp, hit = False)
def touch(self, tag, timestamp, hit = True):
# find the index
index = 0
self.vprint(index)
while index < len(self.candidate_tags):
if self.candidate_tags[index] == tag:
break
else:
index += 1
if self.hit_priority == True:
self.rrpv[index] = 0
else:
if self.rrpv[index] > 0:
if hit == True:
self.rrpv[index] = 0
else:
self.rrpv[index] -= 1
self.vprint(self.candidate_tags)
self.vprint(self.rrpv)
def reset(self, tag, timestamp):
index = 0
self.vprint(index)
while index < len(self.candidate_tags):
if self.candidate_tags[index] == tag:
break
else:
index += 1
if random.randint(1,100) <= self.btp:
if self.rrpv[index] > 0:
self.rrpv[index] -= 1
self.vprint(self.candidate_tags)
self.vprint(self.rrpv)
#def reset(self, tag):
def invalidate(self, tag):
# find index of tag
self.vprint('invalidate ' + tag)
index = 0
while index < len(self.candidate_tags):
if self.candidate_tags[index] == tag:
break
else:
index += 1
#print(tree_index)
self.candidate_tags[index] = INVALID_TAG
self.rrpv[index] = self.rrpv_max
self.vprint(self.candidate_tags)
self.vprint(self.rrpv)
def find_victim(self, timestamp):
max_index = 0
index = 0
while index < len(self.candidate_tags):
if self.rrpv[index] > self.rrpv[max_index]:
max_index = index
index += 1
# invalidate the path
diff = self.rrpv_max - self.rrpv[max_index]
self.rrpv[max_index] = self.rrpv_max
if diff > 0:
index = 0
while index < len(self.candidate_tags):
self.rrpv[index] += diff
index += 1
#self.vprint(self.plrutree)
#self.vprint(self.candidate_tags)
self.vprint(self.candidate_tags)
self.vprint(self.rrpv)
return self.candidate_tags[max_index]
| AutoCAT-main | src/replacement_policy.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# Author: Mulong Luo
# date 2021.12.3
# description: environment for study RL for side channel attack
from calendar import c
from collections import deque
import numpy as np
import random
import os
import yaml, logging
import sys
import replacement_policy
from itertools import permutations
from cache_simulator import print_cache
import gym
from gym import spaces
from omegaconf.omegaconf import open_dict
from cache_simulator import *
import time
"""
Description:
An L1 cache with total_size, num_ways
assume cache_line_size == 1B
Observation:
# book-keep all relevant information in the observation space,
since the agent itself has no memory of past steps;
it is a 2D matrix
self.observation_space = (
[
3, #cache latency
len(self.attacker_address_space) + 1, # last action
self.window_size + 2, #current steps
2, #whether the victim has accessed yet
] * self.window_size
)
Actions:
action is one-hot encoding
v = | attacker_addr | ( flush_attacker_addr ) | v | victim_guess_addr | ( guess victim not access ) |
Reward:
Starting state:
fresh cache with no lines
Episode termination:
when the attacker makes a guess,
when there is a length violation,
or when there is a guess-before-victim-access violation,
the episode terminates
"""
class CacheGuessingGameEnv(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self, env_config={
"length_violation_reward":-10000,
"double_victim_access_reward": -10000,
"force_victim_hit": False,
"victim_access_reward":-10,
"correct_reward":200,
"wrong_reward":-9999,
"step_reward":-1,
"window_size":0,
"attacker_addr_s":4,
"attacker_addr_e":7,
"victim_addr_s":0,
"victim_addr_e":3,
"flush_inst": False,
"allow_victim_multi_access": True,
"verbose":0,
"reset_limit": 1, # specify how many reset to end an epoch?????
"cache_configs": {
# YAML config file for cache simulation
"architecture": {
"word_size": 1, #bytes
"block_size": 1, #bytes
"write_back": True
},
"cache_1": {#required
"blocks": 4,
"associativity": 1,
"hit_time": 1 #cycles
},
"mem": {#required
"hit_time": 1000 #cycles
}
}
}
):
# prefetcher
# prefetcher: "none" "nextline" "stream"
# cf https://my.eng.utah.edu/~cs7810/pres/14-7810-13-pref.pdf
self.prefetcher = env_config["prefetcher"] if "prefetcher" in env_config else "none"
# remapping function for randomized cache
self.rerandomize_victim = env_config["rerandomize_victim"] if "rerandomize_victim" in env_config else False
self.ceaser_remap_period = env_config["ceaser_remap_period"] if "ceaser_remap_period" in env_config else 200000
# set-based channel or address-based channel
self.allow_empty_victim_access = env_config["allow_empty_victim_access"] if "allow_empty_victim_access" in env_config else False
# enable escaping HPC-based detection (StealthyStreamline)
self.force_victim_hit =env_config["force_victim_hit"] if "force_victim_hit" in env_config else False
self.length_violation_reward = env_config["length_violation_reward"] if "length_violation_reward" in env_config else -10000
self.victim_access_reward = env_config["victim_access_reward"] if "victim_access_reward" in env_config else -10
self.victim_miss_reward = env_config["victim_miss_reward"] if "victim_miss_reward" in env_config else -10000 if self.force_victim_hit else self.victim_access_reward
self.double_victim_access_reward = env_config["double_victim_access_reward"] if "double_victim_access_reward" in env_config else -10000
self.allow_victim_multi_access = env_config["allow_victim_multi_access"] if "allow_victim_multi_access" in env_config else True
self.correct_reward = env_config["correct_reward"] if "correct_reward" in env_config else 200
self.wrong_reward = env_config["wrong_reward"] if "wrong_reward" in env_config else -9999
self.step_reward = env_config["step_reward"] if "step_reward" in env_config else 0
self.reset_limit = env_config["reset_limit"] if "reset_limit" in env_config else 1
self.cache_state_reset = env_config["cache_state_reset"] if "cache_state_reset" in env_config else True
window_size = env_config["window_size"] if "window_size" in env_config else 0
attacker_addr_s = env_config["attacker_addr_s"] if "attacker_addr_s" in env_config else 4
attacker_addr_e = env_config["attacker_addr_e"] if "attacker_addr_e" in env_config else 7
victim_addr_s = env_config["victim_addr_s"] if "victim_addr_s" in env_config else 0
victim_addr_e = env_config["victim_addr_e"] if "victim_addr_e" in env_config else 3
flush_inst = env_config["flush_inst"] if "flush_inst" in env_config else False
self.verbose = env_config["verbose"] if "verbose" in env_config else 0
self.super_verbose = env_config["super_verbose"] if "super_verbose" in env_config else 0
self.logger = logging.getLogger()
self.fh = logging.FileHandler('log')
self.sh = logging.StreamHandler()
self.logger.addHandler(self.fh)
self.logger.addHandler(self.sh)
self.fh_format = logging.Formatter('%(message)s')
self.fh.setFormatter(self.fh_format)
self.sh.setFormatter(self.fh_format)
self.logger.setLevel(logging.INFO)
if "cache_configs" in env_config:
self.configs = env_config["cache_configs"]
else:
self.config_file_name = os.path.dirname(os.path.abspath(__file__))+'/../configs/config_simple_L1'
self.config_file = open(self.config_file_name)
self.logger.info('Loading config from file ' + self.config_file_name)
self.configs = yaml.load(self.config_file, yaml.CLoader)
self.vprint(self.configs)
# cache configuration
self.num_ways = self.configs['cache_1']['associativity']
self.cache_size = self.configs['cache_1']['blocks']
self.flush_inst = flush_inst
self.reset_time = 0
if "rep_policy" not in self.configs['cache_1']:
self.configs['cache_1']['rep_policy'] = 'lru'
if 'cache_1_core_2' in self.configs:
if "rep_policy" not in self.configs['cache_1_core_2']:
self.configs['cache_1_core_2']['rep_policy'] = 'lru'
self.configs['cache_1_core_2']['prefetcher'] = self.prefetcher
#with open_dict(self.configs):
self.configs['cache_1']['prefetcher'] = self.prefetcher
'''
check window size
'''
if window_size == 0:
#self.window_size = self.cache_size * 8 + 8 #10
self.window_size = self.cache_size * 4 + 8 #10
else:
self.window_size = window_size
self.feature_size = 4
'''
instantiate the cache
'''
self.hierarchy = build_hierarchy(self.configs, self.logger)
self.step_count = 0
self.attacker_address_min = attacker_addr_s
self.attacker_address_max = attacker_addr_e
self.attacker_address_space = range(self.attacker_address_min,
self.attacker_address_max + 1) # start with one attacker cache line
self.victim_address_min = victim_addr_s
self.victim_address_max = victim_addr_e
self.victim_address_space = range(self.victim_address_min,
self.victim_address_max + 1) #
'''
for randomized address mapping rerandomization
'''
if self.rerandomize_victim == True:
addr_space = max(self.victim_address_max, self.attacker_address_max) + 1
self.perm = [i for i in range(addr_space)]
# keeping track of the victim remap length
self.ceaser_access_count = 0
self.mapping_func = lambda addr : addr
# initially do a remap for the remapped cache
self.remap()
'''
define the action space
'''
# using tightened action space
if self.flush_inst == False:
# one-hot encoding
if self.allow_empty_victim_access == True:
# | attacker_addr | v | victim_guess_addr | guess victim not access |
self.action_space = spaces.Discrete(
len(self.attacker_address_space) + 1 + len(self.victim_address_space) + 1
)
else:
# | attacker_addr | v | victim_guess_addr |
self.action_space = spaces.Discrete(
len(self.attacker_address_space) + 1 + len(self.victim_address_space)
)
else:
# one-hot encoding
if self.allow_empty_victim_access == True:
# | attacker_addr | flush_attacker_addr | v | victim_guess_addr | guess victim not access |
self.action_space = spaces.Discrete(
2 * len(self.attacker_address_space) + 1 + len(self.victim_address_space) + 1
)
else:
# | attacker_addr | flush_attacker_addr | v | victim_guess_addr |
self.action_space = spaces.Discrete(
2 * len(self.attacker_address_space) + 1 + len(self.victim_address_space)
)
'''
define the observation space
'''
self.max_box_value = max(self.window_size + 2, 2 * len(self.attacker_address_space) + 1 + len(self.victim_address_space) + 1)#max(self.window_size + 2, len(self.attacker_address_space) + 1)
self.observation_space = spaces.Box(low=-1, high=self.max_box_value, shape=(self.window_size, self.feature_size))
self.state = deque([[-1, -1, -1, -1]] * self.window_size)
'''
initialize the environment configurations
'''
self.vprint('Initializing...')
self.l1 = self.hierarchy['cache_1']
#self.lv = self.hierarchy['cache_1']
# check multicore
if 'cache_1_core_2' in self.hierarchy:
self.lv = self.hierarchy['cache_1_core_2']
else:
self.lv = self.hierarchy['cache_1']
self.current_step = 0
self.victim_accessed = False
if self.allow_empty_victim_access == True:
self.victim_address = random.randint(self.victim_address_min, self.victim_address_max + 1)
else:
self.victim_address = random.randint(self.victim_address_min, self.victim_address_max)
self._randomize_cache()
'''
For PLCache
'''
if self.configs['cache_1']["rep_policy"] == "plru_pl": # pl cache victim access always uses locked access
assert(self.victim_address_min == self.victim_address_max) # for plru_pl cache, only one address is allowed
self.vprint("[init] victim access (hex) %x locked cache line" % self.victim_address_max)
self.l1.read(hex(self.ceaser_mapping(self.victim_address_max))[2:], self.current_step, replacement_policy.PL_LOCK, domain_id='v')
'''
internal guessing buffer
does not change after reset
'''
self.guess_buffer_size = 100
self.guess_buffer = [False] * self.guess_buffer_size
self.last_state = None
'''
clear the history buffer that calculates the correctness rate
'''
def clear_guess_buffer_history(self):
self.guess_buffer = [False] * self.guess_buffer_size
'''
set the seed for randomization
'''
def seed(self, seed):
random.seed(seed)
'''
remap the victim address range
'''
def remap(self):
if self.rerandomize_victim == False:
self.mapping_func = lambda addr : addr
else:
self.vprint("doing remapping!")
random.shuffle(self.perm)
'''
CEASER-style remapping
addr is an integer, not a string
'''
def ceaser_mapping(self, addr):
if self.rerandomize_victim == False:
return addr
else:
self.ceaser_access_count += 1
return self.perm[addr]
'''
gym API: step
this is the function that implements most of the logic
'''
def step(self, action):
# print_cache(self.l1)
'''
For Cyclone, default values of the cyclic set and way indices
'''
cyclic_set_index = -1
cyclic_way_index = -1
self.vprint('Step...')
info = {}
'''
unpack the action to adapt to slightly different RL frameworks
'''
if isinstance(action, np.ndarray):
action = action.item()
'''
parse the action
'''
original_action = action
action = self.parse_action(original_action)
address = hex(action[0]+self.attacker_address_min)[2:] # attacker address in attacker_address_space
is_guess = action[1] # check whether to guess or not
is_victim = action[2] # check whether to invoke victim
is_flush = action[3] # check whether to flush
victim_addr = hex(action[4] + self.victim_address_min)[2:] # victim address
'''
The actual stepping logic
1. first check if the episode length exceeds the window_size; if not, go to 2, otherwise terminate
2. then check if it is a victim access; if so go to 3, otherwise go to 4
3. check if the victim can be accessed under the configured options; if so make the access, otherwise terminate
4. check if it is a guess; if so, evaluate the guess and terminate, otherwise go to 5
5. do the access: first check if it is a flush; if so do the flush, otherwise do a normal access
'''
victim_latency = None
# if self.current_step > self.window_size : # if current_step is too long, terminate
if self.step_count >= self.window_size - 1:
r = 2 #
self.vprint("length violation!")
reward = self.length_violation_reward #-10000
done = True
else:
if is_victim == True:
if self.allow_victim_multi_access == True or self.victim_accessed == False:
r = 2 #
self.victim_accessed = True
if True: #self.configs['cache_1']["rep_policy"] == "plru_pl": no need to distinuish pl and normal rep_policy
if self.victim_address <= self.victim_address_max:
self.vprint("victim access (hex) %x " % self.victim_address)
t, cyclic_set_index, cyclic_way_index, _ = self.lv.read(hex(self.ceaser_mapping(self.victim_address))[2:], self.current_step, domain_id='v')
t = t.time # do not need to lock again
else:
self.vprint("victim make a empty access!") # do not need to actually do something
t = 1 # empty access will be treated as HIT??? does that make sense???
#t = self.l1.read(str(self.victim_address), self.current_step).time
if t > 500: # for LRU attack, has to force victim access being hit
victim_latency = 1
self.current_step += 1
reward = self.victim_miss_reward #-5000
if self.force_victim_hit == True:
done = True
self.vprint("victim access has to be hit! terminate!")
else:
done = False
else:
victim_latency = 0
self.current_step += 1
reward = self.victim_access_reward #-10
done = False
else:
r = 2
self.vprint("does not allow multi victim access in this config, terminate!")
self.current_step += 1
reward = self.double_victim_access_reward # -10000
done = True
else:
if is_guess == True:
r = 2 #
'''
this includes two scenarios
1. normal scenario
2. empty victim access scenario: the parsed victim_addr is victim_addr_e + 1,
and self.victim_address is also victim_addr_e + 1
'''
if self.victim_accessed and victim_addr == hex(self.victim_address)[2:]:
if victim_addr != hex(self.victim_address_max + 1)[2:]:
self.vprint("correct guess (hex) " + victim_addr)
else:
self.vprint("correct guess empty access!")
# update the guess buffer
self.guess_buffer.append(True)
self.guess_buffer.pop(0)
reward = self.correct_reward # 200
done = True
else:
if victim_addr != hex(self.victim_address_max + 1)[2:]:
self.vprint("wrong guess (hex) " + victim_addr )
else:
self.vprint("wrong guess empty access!")
# update the guess buffer
self.guess_buffer.append(False)
self.guess_buffer.pop(0)
reward = self.wrong_reward #-9999
done = True
elif is_flush == False or self.flush_inst == False:
lat, cyclic_set_index, cyclic_way_index, _ = self.l1.read(hex(self.ceaser_mapping(int('0x' + address, 16)))[2:], self.current_step, domain_id='a')
lat = lat.time # measure the access latency
if lat > 500:
self.vprint("access (hex) " + address + " miss")
r = 1 # cache miss
else:
self.vprint("access (hex) " + address + " hit" )
r = 0 # cache hit
self.current_step += 1
reward = self.step_reward #-1
done = False
else: # is_flush == True
self.l1.cflush(hex(self.ceaser_mapping(int('0x' + address, 16)))[2:], self.current_step, domain_id='X')
#cflush = 1
self.vprint("cflush (hex) " + address )
#self.vprint("mapped (hex) " + hex(self.ceaser_mapping(int('0x' + address, 16)))[2:])
r = 2
self.current_step += 1
reward = self.step_reward
done = False
#return observation, reward, done, info
if done == True and is_guess != 0:
info["is_guess"] = True
if reward > 0:
info["guess_correct"] = True
else:
info["guess_correct"] = False
else:
info["is_guess"] = False
# the observation (r.time) in this case
# must be consistent with the observation space
# return observation, reward, done?, info
#return r, reward, done, info
current_step = self.current_step
if self.victim_accessed == True:
victim_accessed = 1
else:
victim_accessed = 0
'''
append the current observation to the sliding window
'''
self.state.append([r, victim_accessed, original_action, self.step_count])
self.state.popleft()
self.step_count += 1
'''
support for multiple guess per episode
'''
if done == True:
self.reset_time += 1
if self.reset_time == self.reset_limit: # really need to end the simulation
self.reset_time = 0
done = True # reset will be called by the agent/framework
#self.vprint('correct rate:' + str(self.calc_correct_rate()))
else:
done = False # fake reset
self._reset() # manually reset
'''
the observation should not obverve the victim latency
thus, we put victim latency in the info
the detector (ccHunter, Cyclone) can take advantage of the victim latency
'''
if victim_latency is not None:
info["victim_latency"] = victim_latency
if self.last_state is None:
cache_state_change = None
else:
cache_state_change = victim_latency ^ self.last_state
self.last_state = victim_latency
else:
if r == 2:
cache_state_change = 0
else:
if self.last_state is None:
cache_state_change = None
else:
cache_state_change = r ^ self.last_state
self.last_state = r
'''
this info is for use of various wrappers like cchunter_wrapper and cyclone_wrapper
'''
info["cache_state_change"] = cache_state_change
info["cyclic_way_index"] = cyclic_way_index
info["cyclic_set_index"] = cyclic_set_index
if self.super_verbose == True:
for cache in self.hierarchy:
if self.hierarchy[cache].next_level:
print_cache(self.hierarchy[cache])
return np.array(list(reversed(self.state))), reward, done, info
'''
Gym API: reset the cache state
'''
def reset(self,
victim_address=-1,
reset_cache_state=False,
reset_observation=True,
seed = -1):
if self.ceaser_access_count > self.ceaser_remap_period:
self.remap() # do the remap, generating a new mapping function if remap is set true
self.ceaser_access_count = 0
if self.cache_state_reset or reset_cache_state or seed != -1:
self.vprint('Reset...(also the cache state)')
self.hierarchy = build_hierarchy(self.configs, self.logger)
self.l1 = self.hierarchy['cache_1']
# check multicore
if 'cache_1_core_2' in self.hierarchy:
self.lv = self.hierarchy['cache_1_core_2']
else:
self.lv = self.hierarchy['cache_1']
if seed == -1:
self._randomize_cache()
else:
self.seed_randomization(seed)
else:
self.vprint('Reset...(cache state the same)')
self._reset(victim_address) # fake reset
'''
reset the observation space
'''
if reset_observation:
self.state = deque([[-1, -1, -1, -1]] * self.window_size)
self.step_count = 0
self.reset_time = 0
if self.configs['cache_1']["rep_policy"] == "plru_pl": # pl cache victim access always uses locked access
assert(self.victim_address_min == self.victim_address_max) # for plru_pl cache, only one address is allowed
self.vprint("[reset] victim access %d locked cache line" % self.victim_address_max)
lat, cyclic_set_index, cyclic_way_index, _ = self.lv.read(hex(self.ceaser_mapping(self.victim_address_max))[2:], self.current_step, replacement_policy.PL_LOCK, domain_id='v')
self.last_state = None
if self.super_verbose == True:
for cache in self.hierarchy:
if self.hierarchy[cache].next_level:
print_cache(self.hierarchy[cache])
return np.array(list(reversed(self.state)))
'''
function to calculate the correctness rate
using a sliding window
'''
def calc_correct_rate(self):
return self.guess_buffer.count(True) / len(self.guess_buffer)
'''
evaluate the correctness of an action sequence (action + latency)
action_buffer: list [(action, latency)]
'''
def calc_correct_seq(self, action_buffer):
last_action, _ = action_buffer[-1]
last_action = self.parse_action(last_action)
#print(last_action)
guess_addr = last_action[4] + self.victim_address_min
#print(guess_addr)
self.reset(victim_address=guess_addr)
self.total_guess = 0
self.correct_guess = 0
while self.total_guess < 20:
self.reset(victim_address=guess_addr)
for i in range(0, len(action_buffer)):
p = action_buffer[i]
state, _, _, _ = self.step(p[0])
latency = state[0]
if latency != p[1]:
break
if i < len(action_buffer) - 1:
continue
else:
self.total_guess += 1
if guess_addr == self.victim_address:
self.correct_guess += 1
return self.correct_guess / self.total_guess
def set_victim(self, victim_address=-1):
self.victim_address = victim_address
'''
fake reset the environment, just set a new victim addr
the actual physical state of the cache does not change
'''
def _reset(self, victim_address=-1):
self.current_step = 0
self.victim_accessed = False
if victim_address == -1:
if self.allow_empty_victim_access == False:
self.victim_address = random.randint(self.victim_address_min, self.victim_address_max)
else: # when generating random addr use self.victim_address_max + 1 to represent empty access
self.victim_address = random.randint(self.victim_address_min, self.victim_address_max + 1)
else:
assert(victim_address >= self.victim_address_min)
if self.allow_empty_victim_access == True:
assert(victim_address <= self.victim_address_max + 1 )
else:
assert(victim_address <= self.victim_address_max )
self.victim_address = victim_address
if self.victim_address <= self.victim_address_max:
self.vprint("victim address (hex) " + hex(self.victim_address))
else:
self.vprint("victim has empty access")
'''
    used to render the result
not implemented
'''
def render(self, mode='human'):
return
'''
    not implemented
'''
def close(self):
return
'''
use a given seed to randomize the cache
so that we can set the same state for randomization
'''
def seed_randomization(self, seed=-1):
return self._randomize_cache(mode="union", seed=seed)
'''
randomize the cache so that the attacker has to do a prime step
'''
def _randomize_cache(self, mode="union", seed=-1):
# use seed so that we can get identical initialization states
if seed != -1:
random.seed(seed)
if mode == "attacker":
self.l1.read(hex(self.ceaser_mapping(0))[2:], -2, domain_id='X')
self.l1.read(hex(self.ceaser_mapping(1))[2:], -1, domain_id='X')
return
if mode == "none":
return
self.current_step = -self.cache_size * 2
for _ in range(self.cache_size * 2):
if mode == "victim":
addr = random.randint(self.victim_address_min, self.victim_address_max)
elif mode == "attacker":
addr = random.randint(self.attacker_address_min, self.attacker_address_max)
elif mode == "union":
addr = random.randint(self.victim_address_min, self.victim_address_max) if random.randint(0,1) == 1 else random.randint(self.attacker_address_min, self.attacker_address_max)
elif mode == "random":
addr = random.randint(0, sys.maxsize)
else:
                raise RuntimeError(f"unknown cache randomization mode: {mode}")
self.l1.read(hex(self.ceaser_mapping(addr))[2:], self.current_step, domain_id='X')
self.current_step += 1
'''
    returns the dimension of the observation space
'''
def get_obs_space_dim(self):
return int(np.prod(self.observation_space.shape))
'''
    returns the action space dimension as an int
'''
def get_act_space_dim(self):
return int(np.prod(self.action_space.shape))
'''
same as print() when self.verbose == 1
otherwise does not do anything
'''
def vprint(self, *args):
if self.verbose == 1:
print( " "+" ".join(map(str,args))+" ")
'''
parse the action in the degenerate space (no redundant actions)
returns list of 5 elements representing
address, is_guess, is_victim, is_flush, victim_addr
'''
def parse_action(self, action):
address = 0
is_guess = 0
is_victim = 0
is_flush = 0
victim_addr = 0
if self.flush_inst == False:
if action < len(self.attacker_address_space):
address = action
elif action == len(self.attacker_address_space):
is_victim = 1
else:
is_guess = 1
victim_addr = action - ( len(self.attacker_address_space) + 1 )
else:
if action < len(self.attacker_address_space):
address = action
elif action < 2 * len(self.attacker_address_space):
                is_flush = 1
                address = action - len(self.attacker_address_space)
elif action == 2 * len(self.attacker_address_space):
is_victim = 1
else:
is_guess = 1
victim_addr = action - ( 2 * len(self.attacker_address_space) + 1 )
return [ address, is_guess, is_victim, is_flush, victim_addr ]
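# Illustrative worked example (added comment, not in the original source) of the
# degenerate action encoding above, assuming flush_inst == False and a hypothetical
# attacker address space of 4 addresses:
#   action 0-3 -> attacker access to address 0-3
#   action 4   -> trigger a victim access (is_victim = 1)
#   action 5+  -> guess, with victim_addr = action - 5
# so parse_action(6) would return [0, 1, 0, 0, 1].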
| AutoCAT-main | src/cache_guessing_game_env_impl.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import math, random, block, response
import pprint
from replacement_policy import *
class Cache:
def __init__(self, name, word_size, block_size, n_blocks, associativity, hit_time, write_time, write_back, logger, next_level=None, rep_policy='', prefetcher="none", verbose=False):
#Parameters configured by the user
self.name = name
self.word_size = word_size
self.block_size = block_size
self.n_blocks = n_blocks
self.associativity = associativity
self.hit_time = hit_time
        self.cflush_time = hit_time  # assume a flush takes as long as a hit
self.write_time = write_time
self.write_back = write_back
self.logger = logger
self.same_level_caches = []
self.logger.disabled = False#True
self.set_rep_policy = {}
self.verbose = verbose
if rep_policy == 'lru':
self.vprint("use lru")
self.rep_policy = lru_policy
elif rep_policy == 'tree_plru':
self.vprint("use tree_plru")
self.rep_policy = tree_plru_policy
elif rep_policy == 'rand':
self.vprint("use rand")
self.rep_policy = rand_policy
elif rep_policy == 'plru_pl':
self.vprint("use plru_pl")
self.rep_policy = plru_pl_policy
elif rep_policy == 'brrip':
self.vprint("use brrip")
self.rep_policy = brrip_policy
else:
self.rep_policy = lru_policy
if name == 'cache_1':
self.vprint("no rep_policy specified or policy specified not exist")
self.vprint("use lru_policy")
self.vprint("use " + prefetcher + " prefetcher")
# prefetcher == "none" "nextline" "stream"
self.prefetcher = prefetcher
if self.prefetcher == "stream":
self.prefetcher_table =[]
self.num_prefetcher_entry = 2
for i in range(self.num_prefetcher_entry):
temp = {"first": -1, "second": -1}
self.prefetcher_table.append(temp)
#Total number of sets in the cache
self.n_sets =int( n_blocks / associativity )
#Dictionary that holds the actual cache data
self.data = {}
self.set = {}
self.domain_id_tags = {} # for cyclone
#Pointer to the next lowest level of memory
#Main memory gets the default None value
self.next_level = next_level
#Figure out spans to cut the binary addresses into block_offset, index, and tag
self.block_offset_size = int(math.log(self.block_size, 2))
self.index_size = int(math.log(self.n_sets, 2))
#Initialize the data dictionary
if next_level:
for i in range(self.n_sets):
index = str(bin(i))[2:].zfill(self.index_size)
if index == '':
index = '0'
self.data[index] = [] # use array of blocks for each set
self.domain_id_tags[index] = [] # for cyclone
for j in range(associativity):
                    # instantiate with empty tags
self.data[index].append((INVALID_TAG, block.Block(self.block_size, 0, False, 'x')))
self.domain_id_tags[index].append(('X','X')) # for cyclone
self.set_rep_policy[index] = self.rep_policy(associativity, block_size)
def vprint(self, *args):
if self.verbose == 1:
print( " "+" ".join(map(str,args))+" ")
    # flush the cache line that contains the address from the whole cache hierarchy
    # a flush does not affect the memory domain_id
def cflush(self, address, current_step, domain_id = 'X'):
address = address.zfill(8)
# cyclone
cyclic_set_index = -1
cyclic_way_index = -1
r = response.Response({self.name:True}, self.cflush_time) #flush regardless
#Parse our address to look through this cache
block_offset, index, tag = self.parse_address(address)
#Get the tags in this set
in_cache = []
for i in range( 0, len(self.data[index]) ):
if self.data[index][i][0] != INVALID_TAG:#'x':
in_cache.append(self.data[index][i][0])
#If this tag exists in the set, this is a hit
if tag in in_cache:
#print(tag + ' in cache')
for i in range( 0, len(self.data[index])):
if self.data[index][i][0] == tag:
#print(self.data[index][i][1].address)
self.data[index][i] = (INVALID_TAG, block.Block(self.block_size, current_step, False, ''))
break
self.set_rep_policy[index].invalidate(tag)
# clflush from the next level of memory
if self.next_level != None and self.next_level.name != "mem":
self.next_level.cflush(address, current_step)
return r, cyclic_set_index, cyclic_way_index
    # for multicore caches: register another cache at the same level that shares the next level
def add_same_level_cache(self, cache):
self.same_level_caches.append(cache)
# read with prefetcher
def read(self, address, current_step, pl_opt= -1, domain_id = 'X'):
address = address.zfill(8)
if self.prefetcher == "none":
return self.read_no_prefetch(address, current_step, pl_opt, domain_id)
elif self.prefetcher == "nextline":
            # demand access first so the returned latency corresponds to the requested address
            ret = self.read_no_prefetch(address, current_step, pl_opt, domain_id)
            # then prefetch the next line
            # print("nextline prefetch " + hex(int(address, 16) + 1)[2:])
            self.read_no_prefetch(hex(int(address, 16) + 1)[2:], current_step, pl_opt, domain_id)
return ret
elif self.prefetcher == "stream":
ret = self.read_no_prefetch(address, current_step, pl_opt, domain_id)
# {"first": -1, "second": -1}
            # first, check whether this access matches the next expected address of a tracked stream
found= False
for i in range(len(self.prefetcher_table)):
entry=self.prefetcher_table[i]
if int(address, 16) + entry["first"] == entry["second"] * 2 and entry["first"] != -1 and entry["second"] != -1 :
found = True
pref_addr = entry["second"] + int(address, 16) - entry["first"]
# do prefetch
if pref_addr >= 0:
# print("stream prefetech "+ hex(pref_addr)[2:])
self.read_no_prefetch(hex(pref_addr)[2:], current_step, pl_opt, domain_id)
# update the table
self.prefetcher_table[i] = {"first": entry["second"], "second":pref_addr}
elif int(address, 16) == entry["first"] + 1 or int(address, 16) == entry["first"] -1 and entry["first"] != -1:
# second search if it is second access
self.prefetcher_table[i]["second"] = int(address, 16)
found = True
elif entry["first"] == int(address, 16):
found = True
# then search if it is in first access
            # randomly evict an entry
if found == False:
i = random.randint(0, self.num_prefetcher_entry-1)
self.prefetcher_table[i] = {"first": int(address, 16), "second":-1}
return ret
else:
            # unknown prefetcher
assert(False)
# pl_opt: indicates the PL cache option
# pl_opt = -1: normal read
# pl_opt = PL_LOCK: lock the cache line
# pl_opt = PL_UNLOCK: unlock the cache line
def read_no_prefetch(self, address, current_step, pl_opt= -1, domain_id = 'X'):
# cyclone
cyclic_set_index = -1
cyclic_way_index = -1
#print('pl_opt ' + str(pl_opt))
r = None
#Check if this is main memory
#Main memory is always a hit
if not self.next_level:
r = response.Response({self.name:True}, self.hit_time)
evict_addr = -1
else:
#Parse our address to look through this cache
block_offset, index, tag = self.parse_address(address)
#print(block_offset)
#print(index)
#print(tag)
#Get the tags in this set
in_cache = []
for i in range( 0, len(self.data[index]) ):
if self.data[index][i][0] != INVALID_TAG:#'x':
in_cache.append(self.data[index][i][0])
#If this tag exists in the set, this is a hit
if tag in in_cache:
#print(tag + 'in cache')
for i in range( 0, len(self.data[index])):
if self.data[index][i][0] == tag:
self.data[index][i][1].read(current_step)
if domain_id != 'X':
if domain_id == self.domain_id_tags[index][i][1] and self.domain_id_tags[index][i][1] != self.domain_id_tags[index][i][0]:
cyclic_set_index = int(index,2)
cyclic_way_index = i
self.domain_id_tags[index][i] = (domain_id, self.domain_id_tags[index][i][0])
break
self.set_rep_policy[index].touch(tag, current_step)
# pl cache
if pl_opt != -1:
self.set_rep_policy[index].setlock(tag, pl_opt)
r = response.Response({self.name:True}, self.hit_time)
                evict_addr = -1  # no eviction needed
else:
#Read from the next level of memory
r, cyclic_set_index, cyclic_way_index, evict_addr = self.next_level.read(address, current_step, pl_opt)
# coherent eviction
# inclusive eviction (evicting in L1 if evicted by the higher level)
if evict_addr != -1:
###print('evict_addr '+ evict_addr)
###print(evict_addr)
#assert(False)
evict_block_offset, evict_index, evict_tag = self.parse_address(hex(int(evict_addr,2))[2:].zfill(8))#9 - len(hex(int(evict_addr,2))[2:])))
####print(evict_block_offset)
####print(evict_index)
####print(evict_tag)
for i in range(0,len(self.data[evict_index])):
if self.data[evict_index][i][0] == evict_tag:
#print('\tEvict addr ' + evict_addr + ' for inclusive cache')
self.data[evict_index][i] = (INVALID_TAG, block.Block(self.block_size, current_step, False, 'x'))
self.set_rep_policy[evict_index].invalidate(evict_tag)
#self.set_rep_policy[evict_index].instantiate_entry(INVALID_TAG, current_step)
break
                    # coherent eviction for other same-level caches
for slc in self.same_level_caches:
evict_block_offset, evict_index, evict_tag = slc.parse_address(hex(int(evict_addr,2))[2:].zfill(8))#9 - len(hex(int(evict_addr,2))[2:])))
for i in range(0,len(slc.data[evict_index])):
if slc.data[evict_index][i][0] == evict_tag:
#slc.logger.info
#print('\tcoherent Evict addr ' + evict_addr + ' for inclusive cache')
slc.data[evict_index][i] = (INVALID_TAG, block.Block(slc.block_size, current_step, False, 'x'))
slc.set_rep_policy[evict_index].invalidate(evict_tag)
#slc.set_rep_policy[evict_index].instantiate_entry(INVALID_TAG, current_step)
break
r.deepen(self.write_time, self.name)
                # refresh in_cache after the coherent eviction
in_cache = []
for i in range( 0, len(self.data[index]) ):
if self.data[index][i][0] != INVALID_TAG:#'x':
in_cache.append(self.data[index][i][0])
#If there's space in this set, add this block to it
if len(in_cache) < self.associativity:
#print('a')
for i in range( 0, len(self.data[index])):
if self.data[index][i][0] == INVALID_TAG:#'x':
if domain_id != 'X':
if domain_id == self.domain_id_tags[index][i][1] and self.domain_id_tags[index][i][1] != self.domain_id_tags[index][i][0]:
cyclic_set_index = int(index, 2)
cyclic_way_index = i
self.domain_id_tags[index][i] = (domain_id, self.domain_id_tags[index][i][0])
self.data[index][i] = (tag, block.Block(self.block_size, current_step, False, address))
break
self.set_rep_policy[index].instantiate_entry(tag, current_step)
###if inst_victim_tag != INVALID_TAG: #instantiated entry sometimes does not replace an empty tag
####we have to evict it from the cache in this scenario
### del self.data[index][inst_victim_tag]
if pl_opt != -1:
self.set_rep_policy[index].setlock(tag, pl_opt)
else:
#print('B')
#print(len(in_cache))
#Find the victim block and replace it
victim_tag = self.set_rep_policy[index].find_victim(current_step)
##print('victim tag '+ victim_tag)
#print('index ' + index )
# pl cache may find the victim that is partition locked
if victim_tag != INVALID_TAG:
# Write the block back down if it's dirty and we're using write back
if self.write_back:
for i in range( 0, len(self.data[index])):
if self.data[index][i][0] == victim_tag:
if self.data[index][i][1].is_dirty():
self.logger.info('\tWriting back block ' + address + ' to ' + self.next_level.name)
temp, _, _ = self.next_level.write(self.data[index][i][1].address, True, current_step)
r.time += temp.time
break
# Delete the old block and write the new one
for i in range( 0, len(self.data[index])):
if self.data[index][i][0] == victim_tag:
if domain_id != 'X':
if domain_id == self.domain_id_tags[index][i][1] and self.domain_id_tags[index][i][1] != self.domain_id_tags[index][i][0]:
cyclic_set_index = int(index, 2)
cyclic_way_index = i
self.domain_id_tags[index][i] = (domain_id, self.domain_id_tags[index][i][0])
self.data[index][i] = (tag, block.Block(self.block_size, current_step, False, address))
break
if int(self.n_blocks/ self.associativity) == 1:
indexi = ''
else:
indexi = index
evict_addr = victim_tag + indexi + '0' * int(math.log(self.block_size,2))# assume line size is always 1B for different level
#print('index ' + index)
#print('victim tag ' + victim_tag)
self.set_rep_policy[index].invalidate(victim_tag)
self.set_rep_policy[index].instantiate_entry(tag, current_step)
if pl_opt != -1:
self.set_rep_policy[index].setlock(tag, pl_opt)
else:
evict_addr = -1
#if evict_addr != -1:
#print(evict_addr)
#evict_addr = hex(int(evict_addr))[2:].zfill(8 - len(hex(int(evict_addr))[2:]))
#print('evict_addr ' + evict_addr)
return r, cyclic_set_index, cyclic_way_index, evict_addr
# pl_opt: indicates the PL cache option
    # pl_opt = -1: normal access (no PL action)
# pl_opt = 1: lock the cache line
# pl_opt = 2: unlock the cache line
def write(self, address, from_cpu, current_step, pl_opt = -1, domain_id = 'X'):
address = address.zfill(8)
        # cyclone
cyclic_set_index = -1
cyclic_way_index = -1
#wat is cache pls
r = None
if not self.next_level:
r = response.Response({self.name:True}, self.write_time)
else:
block_offset, index, tag = self.parse_address(address)
in_cache = []
for i in range( 0, len(self.data[index]) ):
if self.data[index][i][0] != INVALID_TAG:#'x':
in_cache.append(self.data[index][i][0])
if tag in in_cache:
#Set dirty bit to true if this block was in cache
for i in range( 0, len(self.data[index])):
if self.data[index][i][0] == tag:
if domain_id != 'X':
if domain_id == self.domain_id_tags[index][i][1] and self.domain_id_tags[index][i][1] != self.domain_id_tags[index][i][0]:
cyclic_set_index = int(index, 2)
cyclic_way_index = i
self.domain_id_tags[index][i] = (domain_id, self.domain_id_tags[index][i][0])
self.data[index][i][1].write(current_step)
break
self.set_rep_policy[index].touch(tag, current_step) # touch in the replacement policy
if pl_opt != -1:
self.set_rep_policy[index].setlock(tag, pl_opt)
if self.write_back:
r = response.Response({self.name:True}, self.write_time)
else:
#Send to next level cache and deepen results if we have write through
self.logger.info('\tWriting through block ' + address + ' to ' + self.next_level.name)
r = self.next_level.write(address, from_cpu, current_step)
r.deepen(self.write_time, self.name)
elif len(in_cache) < self.associativity:
#If there is space in this set, create a new block and set its dirty bit to true if this write is coming from the CPU
for i in range( 0, len(self.data[index])):
if self.data[index][i][0] == INVALID_TAG:#'x':
if domain_id != 'X':
if domain_id == self.domain_id_tags[index][i][1] and self.domain_id_tags[index][i][1] != self.domain_id_tags[index][i][0]:
cyclic_set_index = int(index,2)
cyclic_way_index = i
self.domain_id_tags[index][i] = (domain_id, self.domain_id_tags[index][i][0])
self.data[index][i] = (tag, block.Block(self.block_size, current_step, False, address))
break
self.set_rep_policy[index].instantiate_entry(tag, current_step)
if self.write_back:
r = response.Response({self.name:False}, self.write_time)
else:
self.logger.info('\tWriting through block ' + address + ' to ' + self.next_level.name)
r = self.next_level.write(address, from_cpu, current_step)
r.deepen(self.write_time, self.name)
if pl_opt != -1:
self.set_rep_policy[index].setlock(tag, pl_opt)
elif len(in_cache) == self.associativity:
#If this set is full, find the oldest block, write it back if it's dirty, and replace it
victim_tag = self.set_rep_policy[index].find_victim(current_step)
# pl cache may find the victim that is partition locked
# the Pl cache condition for write is not tested
if victim_tag != INVALID_TAG:
if self.write_back:
for i in range( 0, len(self.data[index])):
if self.data[index][i][0] == victim_tag:
if self.data[index][i][1].is_dirty():
#if self.data[index][victim_tag].is_dirty():
self.logger.info('\tWriting back block ' + address + ' to ' + self.next_level.name)
r, _, _ = self.next_level.write(self.data[index][i][1].address, from_cpu, current_step)
r.deepen(self.write_time, self.name)
break
else:
self.logger.info('\tWriting through block ' + address + ' to ' + self.next_level.name)
r, cyclic_set_index, cyclic_way_index = self.next_level.write(address, from_cpu, current_step)
r.deepen(self.write_time, self.name)
for i in range( 0, len(self.data[index])):
if self.data[index][i][0] == victim_tag:
if domain_id != 'X':
if domain_id == self.domain_id_tags[index][i][1] and self.domain_id_tags[index][i][1] != self.domain_id_tags[index][i][0]:
cyclic_set_index = int(index,2)
cyclic_way_index = i
self.domain_id_tags[index][i] = (domain_id, self.domain_id_tags[index][i][0])
self.data[index][i] = (tag, block.Block(self.block_size, current_step, False, address))
break
                    # print('victim_tag ' + victim_tag)  # leftover debug output
self.set_rep_policy[index].invalidate(victim_tag)
self.set_rep_policy[index].instantiate_entry(tag, current_step)
# pl cache
if pl_opt != -1:
self.set_rep_policy[index].setlock(tag, pl_opt)
if not r:
r = response.Response({self.name:False}, self.write_time)
return r, cyclic_set_index, cyclic_way_index
def parse_address(self, address):
#Calculate our address length and convert the address to binary string
address_size = len(address) * 4
binary_address = bin(int(address, 16))[2:].zfill(address_size)
if self.block_offset_size > 0:
block_offset = binary_address[-self.block_offset_size:]
index = binary_address[-(self.block_offset_size+self.index_size):-self.block_offset_size]
if index == '':
index = '0'
tag = binary_address[:-(self.block_offset_size+self.index_size)]
else:
block_offset = '0'
if self.index_size != 0:
index = binary_address[-(self.index_size):]
tag = binary_address[:-self.index_size]
else:
index = '0'
tag = binary_address
return (block_offset, index, tag)
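# Worked example (illustrative comment, not in the original source), assuming a cache
# with block_size = 4 (block_offset_size = 2) and n_sets = 4 (index_size = 2):
#   parse_address('000000a7') zero-fills to 32 bits -> ...10100111
#   block_offset = '11', index = '01', tag = the remaining upper 28 bits.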
class InvalidOpError(Exception):
pass
| AutoCAT-main | src/cache.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
class Response:
def __init__(self, hit_list, time, data=''):
self.hit_list = hit_list
self.time = time
self.data = data
def deepen(self, time, name):
self.hit_list[name] = False
self.time += time
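# Illustrative comment (not in the original source): a read that misses in cache_1 and
# hits in main memory starts as Response({'mem': True}, mem.hit_time); cache_1 then calls
# r.deepen(write_time, 'cache_1'), leaving hit_list == {'mem': True, 'cache_1': False}
# and time == mem.hit_time + write_time.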
| AutoCAT-main | src/response.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# simple SVM-based detector
# based on Cyclone
# window_size = 4
# interval_size = 20
# 1 bucket
import copy
from typing import Any, Dict, Sequence, Tuple
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
import gym
from sklearn import svm
from sklearn.model_selection import cross_val_score
from cache_guessing_game_env_impl import CacheGuessingGameEnv
import pickle
import os
class CycloneWrapper(gym.Env):
def __init__(self,
env_config: Dict[str, Any],
svm_data_path='/home/mulong/cyclone_svm_data.txt',
keep_latency: bool = True) -> None:
env_config["cache_state_reset"] = False
self.reset_observation = env_config.get("reset_observation", False)
self.keep_latency = keep_latency
self.env_config = env_config
self.episode_length = env_config.get("episode_length", 160)
#self.threshold = env_config.get("threshold", 0.8)
path = os.getcwdb().decode('utf-8') + '/../../../svm.txt'
self.clf = pickle.load(open(path, 'rb'))
self.cyclone_window_size = env_config.get("cyclone_window_size", 4)
self.cyclone_interval_size = env_config.get("cyclone_interval_size", 40)
self.cyclone_num_buckets = env_config.get("cyclone_num_buckets", 4)
# self.cyclone_bucket_size = self.env_config.cache_configs.cache_1.blocks / self.cyclone_num_buckets
self.cyclone_bucket_size = self.env_config["cache_configs"]["cache_1"][
"blocks"] / self.cyclone_num_buckets
self.cyclone_collect_data = env_config.get("cyclone_collect_data", False)
self.cyclone_malicious_trace = env_config.get("cyclone_malicious_trace", False)
self.X = []
self.Y = []
#self.cyclone_counters = [[0]* self.cyclone_num_buckets ] * self.cyclone_window_size
self.cyclone_counters = []
for j in range(self.cyclone_num_buckets):
temp =[]
for i in range(self.cyclone_window_size):
temp.append(0)
self.cyclone_counters.append(temp)
self.cyclone_coeff = env_config.get("cyclone_coeff", 1.0)
self.cyclone_heatmap = [[], [], [], []]
# self.cc_hunter_detection_reward = env_config.get(
# "cc_hunter_detection_reward", -1.0)
#self.cc_hunter_coeff = env_config.get("cc_hunter_coeff", 1.0)
#self.cc_hunter_check_length = env_config.get("cc_hunter_check_length",
# 4)
self._env = CacheGuessingGameEnv(env_config)
self.validation_env = CacheGuessingGameEnv(env_config)
self.observation_space = self._env.observation_space
self.action_space = self._env.action_space
self.victim_address_min = self._env.victim_address_min
self.victim_address_max = self._env.victim_address_max
self.attacker_address_max = self._env.attacker_address_max
self.attacker_address_min = self._env.attacker_address_min
self.victim_address = self._env.victim_address
self.svm_data_path = svm_data_path
self.cnt = 0
self.step_count = 0
#self.cc_hunter_history = []
self.no_guess = True
self.no_guess_reward = env_config["no_guess_reward"]
def load_svm_model(self):
from numpy import loadtxt
data = loadtxt('all.txt.svm.txt')
X = data[:,1:]
Y = data[:,0]
clf = svm.SVC(random_state=0)
clf.fit(X, Y)
def set_victim(self, victim_addr):
self._env.victim_address = victim_addr
def save_svm_data(self):
fp = open(self.svm_data_path, 'a')
for i in range(len(self.X)):
str1 = ' '.join(str(e) for e in self.X[i])
str1 = str(self.Y[i]) + ' ' + str1 + '\n'
fp.write(str1)
fp.close()
    # if save_data == True, save the collected SVM training data before resetting
def reset(self,
victim_address=-1,
save_data=False,
set_victim=False,
seed: int = -1):
if save_data == True:
self.save_svm_data()
            # draw figure
print(self.cyclone_heatmap)
#p=sns.heatmap(self.cyclone_heatmap, vmin=0, vmax=20)
#p.set_xlabel('Time intervals (40 cycles)')
#p.set_ylabel('Set index')
#fig= p.get_figure()
#fig.set_size_inches(3, 3)
#fig_path ='/home/mulong/RL_SCA/src/CacheSimulator/src/heatmap.png'
##fig_path = os.getcwdb().decode('utf-8') + '/../heatmap.png'
#fig.savefig(fig_path)
if set_victim == True and victim_address != -1:
obs = self._env.reset(victim_address=victim_address,
reset_cache_state=False,
seed=seed)
return obs
# reset cyclone counter
#self.cyclone_counters = [[0]* self.cyclone_num_buckets ] * self.cyclone_window_size
self.cyclone_counters = []
for j in range(self.cyclone_num_buckets):
temp =[]
for i in range(self.cyclone_window_size):
temp.append(0)
self.cyclone_counters.append(temp)
self.step_count = 0
self.cnt = 0
#self.cc_hunter_history = []
obs = self._env.reset(victim_address=victim_address,
reset_cache_state=True,
seed=seed)
self.victim_address = self._env.victim_address
self.no_guess = True
return obs
####def autocorr(self, x: np.ndarray, p: int) -> float:
#### if p == 0:
#### return 1.0
#### mean = x.mean()
#### var = x.var()
#### return ((x[:-p] - mean) * (x[p:] - mean)).mean() / var
def cyclone_attack(self, cyclone_counters):
# collect data to train svm
#print(cyclone_counters)
for i in range(len(cyclone_counters)):
self.cyclone_heatmap[i] += cyclone_counters[i]
if self.cyclone_collect_data == True:
x = np.array(cyclone_counters).reshape(-1)
if self.cyclone_malicious_trace == True:
y = 1
else:
y = 0
self.X.append(x)
self.Y.append(y)
x = np.array(cyclone_counters).reshape(-1)
#print(x)
######print(x)
######x_mod = np.array(cyclone_counters).reshape(-1)
######x_mod[0] = 0
######y = 1
######y_mod = 0
######X = [x, x_mod]
######Y= [y, y_mod]
######clf = svm.SVC(random_state=0)
######clf.fit(X,Y)
y = self.clf.predict([x])[0]
rew = -y
return rew.item()
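    # Illustrative note (added comment, not in the original source): the num_buckets x
    # window_size counter matrix is flattened and fed to the pre-trained SVM; a predicted
    # label of 1 (attack-like interval pattern) yields rew = -1, otherwise rew = 0.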
def step(self, action):
obs, reward, done, info = self._env.step(action)
if info["is_guess"]:
self.no_guess = False
# is_guess = (self._env.parse_action(action)[1] == 1)
cur_step_obs = obs[0, :]
latency = cur_step_obs[0] if self.keep_latency else -1
# self.cc_hunter_history.append(latency)
# self.cc_hunter_history.append(None if latency == 2 else latency)
# Mulong Luo
# cyclone
if "cyclic_set_index" in info and info["cyclic_set_index"] != -1:
set = int(info["cyclic_set_index"])
if self.step_count < self.episode_length:
self.cyclone_counters[int(set / self.cyclone_bucket_size) ][int(self.step_count / self.cyclone_interval_size) ] += 1
self.step_count += 1
# self.cc_hunter_history.append(info.get("cache_state_change", None))
if done:
            self.cnt += 1  # TODO(Mulong) fix the logic so that only a guess increments the cnt
obs = self._env.reset(victim_address=-1,
reset_cache_state=False,
reset_observation=self.reset_observation)
self.victim_address = self._env.victim_address
if self.step_count < self.episode_length:
done = False
# else:
# #rew, cnt = self.cc_hunter_attack(self.cc_hunter_history)
# rew = self.cyclone_attack(self.cyclone_counters)
# reward += self.cyclone_coeff * rew
# info["cyclone_attack"] = (rew != 0.0) #self.cnt
#
# if self.no_guess:
# reward += self.no_guess_reward
if self.step_count >= self.episode_length:
rew = self.cyclone_attack(self.cyclone_counters)
reward += self.cyclone_coeff * rew
info["cyclone_attack"] = (rew != 0.0) # self.cnt
if self.no_guess:
reward += self.no_guess_reward
done = True
return obs, reward, done, info
def seed(self, seed: int) -> None:
self._env.seed(seed)
| AutoCAT-main | src/cyclone_wrapper.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
#!/usr/bin/env python
# encoding: utf-8
import logging
# now we patch Python code to add color support to logging.StreamHandler
def add_coloring_to_emit_windows(fn):
# add methods we need to the class
def _out_handle(self):
import ctypes
return ctypes.windll.kernel32.GetStdHandle(self.STD_OUTPUT_HANDLE)
out_handle = property(_out_handle)
def _set_color(self, code):
import ctypes
# Constants from the Windows API
self.STD_OUTPUT_HANDLE = -11
hdl = ctypes.windll.kernel32.GetStdHandle(self.STD_OUTPUT_HANDLE)
ctypes.windll.kernel32.SetConsoleTextAttribute(hdl, code)
setattr(logging.StreamHandler, '_set_color', _set_color)
def new(*args):
FOREGROUND_BLUE = 0x0001 # text color contains blue.
FOREGROUND_GREEN = 0x0002 # text color contains green.
FOREGROUND_RED = 0x0004 # text color contains red.
FOREGROUND_INTENSITY = 0x0008 # text color is intensified.
FOREGROUND_WHITE = FOREGROUND_BLUE|FOREGROUND_GREEN |FOREGROUND_RED
# winbase.h
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# wincon.h
FOREGROUND_BLACK = 0x0000
FOREGROUND_BLUE = 0x0001
FOREGROUND_GREEN = 0x0002
FOREGROUND_CYAN = 0x0003
FOREGROUND_RED = 0x0004
FOREGROUND_MAGENTA = 0x0005
FOREGROUND_YELLOW = 0x0006
FOREGROUND_GREY = 0x0007
FOREGROUND_INTENSITY = 0x0008 # foreground color is intensified.
BACKGROUND_BLACK = 0x0000
BACKGROUND_BLUE = 0x0010
BACKGROUND_GREEN = 0x0020
BACKGROUND_CYAN = 0x0030
BACKGROUND_RED = 0x0040
BACKGROUND_MAGENTA = 0x0050
BACKGROUND_YELLOW = 0x0060
BACKGROUND_GREY = 0x0070
BACKGROUND_INTENSITY = 0x0080 # background color is intensified.
levelno = args[1].levelno
if(levelno>=50):
            color = BACKGROUND_MAGENTA | FOREGROUND_RED | FOREGROUND_INTENSITY | BACKGROUND_INTENSITY
elif(levelno>=40):
color = FOREGROUND_RED | FOREGROUND_INTENSITY
elif(levelno>=30):
color = FOREGROUND_YELLOW | FOREGROUND_INTENSITY
elif(levelno>=20):
color = FOREGROUND_GREEN
elif(levelno>=10):
color = FOREGROUND_MAGENTA
else:
color = FOREGROUND_WHITE
args[0]._set_color(color)
ret = fn(*args)
args[0]._set_color( FOREGROUND_WHITE )
#print "after"
return ret
return new
def add_coloring_to_emit_ansi(fn):
# add methods we need to the class
def new(*args):
levelno = args[1].levelno
if(levelno>=50):
color = '\x1b[31m' # red
elif(levelno>=40):
color = '\x1b[31m' # red
elif(levelno>=30):
color = '\x1b[33m' # yellow
elif(levelno>=20):
color = '\x1b[32m' # green
elif(levelno>=10):
color = '\x1b[35m' # pink
else:
color = '\x1b[0m' # normal
args[1].msg = color + args[1].msg + '\x1b[0m' # normal
#print "after"
return fn(*args)
return new
import platform
if platform.system()=='Windows':
# Windows does not support ANSI escapes and we are using API calls to set the console color
logging.StreamHandler.emit = add_coloring_to_emit_windows(logging.StreamHandler.emit)
else:
# all non-Windows platforms are supporting ANSI escapes so we use them
logging.StreamHandler.emit = add_coloring_to_emit_ansi(logging.StreamHandler.emit)
#log = logging.getLogger()
#log.addFilter(log_filter())
#//hdlr = logging.StreamHandler()
#//hdlr.setFormatter(formatter())
| AutoCAT-main | src/colorer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import copy
from typing import Any, Dict, Sequence, Tuple
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import gym
from autocorrelation import autocorrelation
from cache_guessing_game_env_impl import CacheGuessingGameEnv
class CCHunterWrapper(gym.Env):
def __init__(self,
env_config: Dict[str, Any],
keep_latency: bool = True) -> None:
env_config["cache_state_reset"] = False
self.reset_observation = env_config.get("reset_observation", False)
self.keep_latency = keep_latency
self.env_config = env_config
self.episode_length = env_config.get("episode_length", 80)
self.threshold = env_config.get("threshold", 0.8)
# self.cc_hunter_detection_reward = env_config.get(
# "cc_hunter_detection_reward", -1.0)
self.cc_hunter_coeff = env_config.get("cc_hunter_coeff", 1.0)
self.cc_hunter_check_length = env_config.get("cc_hunter_check_length",
4)
self._env = CacheGuessingGameEnv(env_config)
self.validation_env = CacheGuessingGameEnv(env_config)
self.observation_space = self._env.observation_space
self.action_space = self._env.action_space
self.victim_address_min = self._env.victim_address_min
self.victim_address_max = self._env.victim_address_max
self.attacker_address_max = self._env.attacker_address_max
self.attacker_address_min = self._env.attacker_address_min
self.victim_address = self._env.victim_address
self.step_count = 0
self.cc_hunter_history = []
self.no_guess = True
self.no_guess_reward = env_config["no_guess_reward"]
def reset(self, victim_address=-1, seed: int = -1):
self.step_count = 0
self.cc_hunter_history = []
obs = self._env.reset(victim_address=victim_address,
reset_cache_state=True,
seed=seed)
self.victim_address = self._env.victim_address
self.no_guess = True
return obs
def cc_hunter_attack(self, data: Sequence[int]) -> Tuple[float, int]:
# Mulong: only calculate 4 * size_cache size lag
n = min(len(data), self._env.cache_size * self.cc_hunter_check_length)
x = np.asarray(data)
corr = [autocorrelation(x, i) for i in range(n)]
corr = np.asarray(corr[1:])
corr = np.nan_to_num(corr)
mask = corr > self.threshold
rew = -np.square(corr).mean().item() if len(corr) > 0 else 0.0
cnt = mask.sum().item()
return rew, cnt
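    # Worked example (illustrative comment, not in the original source): with
    # threshold = 0.8 and corr = [0.9, 0.1, 0.05] after dropping lag 0, the reward is
    # rew = -(0.9**2 + 0.1**2 + 0.05**2) / 3 ≈ -0.274 and cnt = 1 (one lag above threshold).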
def step(self, action):
obs, reward, done, info = self._env.step(action)
self.step_count += 1
# is_guess = (self._env.parse_action(action)[1] == 1)
cur_step_obs = obs[0, :]
latency = cur_step_obs[0] if self.keep_latency else -1
# self.cc_hunter_history.append(latency)
# self.cc_hunter_history.append(None if latency == 2 else latency)
# Mulong Luo
# change the semantics of cc_hunter_history following the paper
# only append when there is a conflict miss (i.e., victim_latency is 1(miss))
# then check the action
# if the action is attacker access, then it is T->S append 1
# else if the action is trigger victim, then it is S->T append 0
if "victim_latency" in info and info["victim_latency"] == 1:
self.cc_hunter_history.append(0)
elif latency == 1:
self.cc_hunter_history.append(1)
if info["is_guess"]:
self.no_guess = False
# self.cc_hunter_history.append(info.get("cache_state_change", None))
if done:
obs = self._env.reset(victim_address=-1,
reset_cache_state=False,
reset_observation=self.reset_observation)
self.victim_address = self._env.victim_address
if self.step_count < self.episode_length:
done = False
# else:
# rew, cnt = self.cc_hunter_attack(self.cc_hunter_history)
# reward += self.cc_hunter_coeff * rew
# info["cc_hunter_attack"] = cnt
#
# if self.no_guess:
# reward += self.no_guess_reward
if self.step_count >= self.episode_length:
rew, cnt = self.cc_hunter_attack(self.cc_hunter_history)
reward += self.cc_hunter_coeff * rew
info["cc_hunter_attack"] = cnt
if self.no_guess:
reward += self.no_guess_reward
done = True
return obs, reward, done, info
def seed(self, seed: int) -> None:
self._env.seed(seed)
| AutoCAT-main | src/cchunter_wrapper.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import numpy as np
def autocorrelation(x: np.ndarray, p: int, normalized: bool = True) -> float:
if p == 0:
return 1.0
mean = x.mean()
if normalized:
return ((x[:-p] - mean) * (x[p:] - mean)).mean() / x.var()
return ((x[:-p] - mean) * (x[p:] - mean)).sum() / np.square(x - mean).sum()
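# Illustrative usage (added example, not part of the original module): the lag-p
# autocorrelation of a strictly periodic 0/1 trace peaks at multiples of the period,
# which is the property the cc-hunter style detector relies on.
if __name__ == "__main__":
    trace = np.array([1, 0, 0, 0] * 8, dtype=float)
    print([round(autocorrelation(trace, p), 2) for p in range(8)])
    # prints 1.0 at p == 0 and p == 4, smaller (negative) values at the other lags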
| AutoCAT-main | src/autocorrelation.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
class Block:
def __init__(self, block_size, current_step, dirty, address, domain_id = -1):
self.size = block_size
self.dirty_bit = dirty
self.last_accessed = current_step
self.address = address
        self.domain_id = domain_id  # for cyclone
def is_dirty(self):
return self.dirty_bit
def write(self, current_step):
self.dirty_bit = True
self.last_accessed = current_step
def clean(self):
self.dirty_bit = False
def read(self, current_step):
self.last_accessed = current_step
| AutoCAT-main | src/block.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
#!/usr/bin/env python
import yaml, cache, argparse, logging, pprint
from terminaltables.other_tables import UnixTable
from replacement_policy import *
def main():
#Set up our arguments
parser = argparse.ArgumentParser(description='Simulate a cache')
    parser.add_argument('-c','--config-file', help='Configuration file for the memory hierarchy', required=True)
parser.add_argument('-t', '--trace-file', help='Tracefile containing instructions', required=True)
parser.add_argument('-l', '--log-file', help='Log file name', required=False)
parser.add_argument('-p', '--pretty', help='Use pretty colors', required=False, action='store_true')
parser.add_argument('-d', '--draw-cache', help='Draw cache layouts', required=False, action='store_true')
parser.add_argument('-f', '--result-file', help='Result trace', required=False)
arguments = vars(parser.parse_args())
if arguments['pretty']:
import colorer
log_filename = 'cache_simulator.log'
if arguments['log_file']:
log_filename = arguments['log_file']
result_file = 'result.txt'
if arguments['result_file']:
result_file = arguments['result_file']
with open(result_file, 'w'):
pass
#Clear the log file if it exists
with open(log_filename, 'w'):
pass
logger = logging.getLogger()
fh = logging.FileHandler(log_filename)
sh = logging.StreamHandler()
logger.addHandler(fh)
logger.addHandler(sh)
fh_format = logging.Formatter('%(message)s')
fh.setFormatter(fh_format)
sh.setFormatter(fh_format)
logger.setLevel(logging.INFO)
logger.info('Loading config...')
config_file = open(arguments['config_file'])
configs = yaml.full_load(config_file)
hierarchy = build_hierarchy(configs, logger)
logger.info('Memory hierarchy built.')
logger.info('Loading tracefile...')
trace_file = open(arguments['trace_file'])
trace = trace_file.read().splitlines()
trace = [item for item in trace if not item.startswith('#')]
logger.info('Loaded tracefile ' + arguments['trace_file'])
logger.info('Begin simulation!')
simulate(hierarchy, trace, logger, result_file = result_file)
if arguments['draw_cache']:
for cache in hierarchy:
if hierarchy[cache].next_level:
print_cache(hierarchy[cache])
#Print the contents of a cache as a table
#If the table is too long, it will print the first few sets,
#break, and then print the last set
def print_cache(cache):
table_size = 5
ways = [""]
sets = []
set_indexes = sorted(cache.data.keys())
if len(cache.data.keys()) > 0:
first_key = list(cache.data.keys())[0]
way_no = 0
#Label the columns
for way in range(cache.associativity):
ways.append("Way " + str(way_no))
way_no += 1
#Print either all the sets if the cache is small, or just a few
#sets and then the last set
sets.append(ways)
if len(set_indexes) > table_size + 4 - 1:
for s in range(min(table_size, len(set_indexes) - 4)):
temp_way = ["Set " + str(s)]
for w in range(0, cache.associativity):
temp_way.append(cache.data[set_indexes[s]][w][1].address)
sets.append(temp_way)
for i in range(3):
temp_way = ['.']
for w in range(cache.associativity):
temp_way.append('')
sets.append(temp_way)
##set_ways = cache.data[set_indexes[len(set_indexes) - 1]].keys()
temp_way = ['Set ' + str(len(set_indexes) - 1)]
for w in range(0, cache.associativity):
temp_way.append(cache.data[set_indexes[len(set_indexes) - 1]][w][1].address)
sets.append(temp_way)
else:
for s in range(len(set_indexes)):
temp_way = ["Set " + str(s)]
for w in range(0, cache.associativity):
temp_way.append(cache.data[set_indexes[s]][w][1].address)
sets.append(temp_way)
table = UnixTable(sets)
table.title = cache.name
table.inner_row_border = True
print(table.table)
#Loop through the instructions in the tracefile and use
#the given memory hierarchy to find AMAT
def simulate(hierarchy, trace, logger, result_file=''):
responses = []
if result_file != '':
f = open(result_file, 'w')
#We only interface directly with L1. Reads and writes will automatically
#interact with lower levels of the hierarchy
l1 = hierarchy['cache_1']
if 'cache_1_core_2' in hierarchy:
l1_c2 = hierarchy['cache_1_core_2']
for current_step in range(len(trace)):
instruction = trace[current_step]
address, op = instruction.split()
#Call read for this address on our memory hierarchy
if op == 'R' or op == 'R2':
logger.info(str(current_step) + ':\tReading ' + address + ' ' + op)
if op == 'R2':
l = l1_c2
else:
l = l1
r, _, _, _ = l.read(address, current_step)
logger.warning('\thit_list: ' + pprint.pformat(r.hit_list) + '\ttime: ' + str(r.time) + '\n')
responses.append(r)
elif op == 'RL' or op == 'RL2': # pl cache lock cacheline
assert(l1.rep_policy == plru_pl_policy) # must be pl cache
            # multicore not implemented
assert(op == 'RL')
logger.info(str(current_step) + ':\tReading ' + address + ' ' + op)
r, _, _, _ = l1.read(address, current_step, pl_opt = PL_LOCK )
logger.warning('\thit_list: ' + pprint.pformat(r.hit_list) + '\ttime: ' + str(r.time) + '\n')
responses.append(r)
elif op == 'RU' or op == 'RU2': # pl cache unlock cacheline
assert(l1.rep_policy == plru_pl_policy)
            # multicore not implemented
assert(op == 'RU')
logger.info(str(current_step) + ':\tReading ' + address + ' ' + op)
r, _, _, _ = l1.read(address, current_step, pl_opt = PL_UNLOCK )
logger.warning('\thit_list: ' + pprint.pformat(r.hit_list) + '\ttime: ' + str(r.time) + '\n')
responses.append(r)
#Call write
elif op == 'W' or op == 'W2':
            # multicore not implemented
#assert(op == 'W')
logger.info(str(current_step) + ':\tWriting ' + address + ' ' + op)
r, _, _= l1.write(address, True, current_step)
logger.warning('\thit_list: ' + pprint.pformat(r.hit_list) + '\ttime: ' + str(r.time) + '\n')
responses.append(r)
#Call cflush
elif op == 'F' or op == 'F2':
            ## multicore not implemented
#assert(op == 'F')
logger.info(str(current_step) + ':\tFlushing ' + address + ' ' + op)
r, _, _ = l1.cflush(address, current_step)
#logger.warning('\thit_list: ' + pprint.pformat(r.hit_list) + '\ttime: ' + str(r.time) + '\n')
else:
raise InvalidOpError
#if result_file != '':
# print the trace
print(address + ' ' + str(r.time), file = f )
for cache in hierarchy:
if hierarchy[cache].next_level:
print_cache(hierarchy[cache])
logger.info('Simulation complete')
analyze_results(hierarchy, responses, logger)
def analyze_results(hierarchy, responses, logger):
#Parse all the responses from the simulation
n_instructions = len(responses)
total_time = 0
for r in responses:
total_time += r.time
logger.info('\nNumber of instructions: ' + str(n_instructions))
logger.info('\nTotal cycles taken: ' + str(total_time) + '\n')
amat = compute_amat(hierarchy['cache_1'], responses, logger)
logger.info('\nAMATs:\n'+pprint.pformat(amat))
def compute_amat(level, responses, logger, results={}):
#Check if this is main memory
#Main memory has a non-variable hit time
if not level.next_level:
results[level.name] = level.hit_time
else:
#Find out how many times this level of cache was accessed
#And how many of those accesses were misses
n_miss = 0
n_access = 0
for r in responses:
if level.name in r.hit_list.keys():
n_access += 1
if r.hit_list[level.name] == False:
n_miss += 1
if n_access > 0:
miss_rate = float(n_miss)/n_access
#Recursively compute the AMAT of this level of cache by computing
#the AMAT of lower levels
results[level.name] = level.hit_time + miss_rate * compute_amat(level.next_level, responses, logger)[level.next_level.name] #wat
else:
results[level.name] = 0 * compute_amat(level.next_level, responses, logger)[level.next_level.name] #trust me, this is good
logger.info(level.name)
logger.info('\tNumber of accesses: ' + str(n_access))
logger.info('\tNumber of hits: ' + str(n_access - n_miss))
logger.info('\tNumber of misses: ' + str(n_miss))
return results
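# Worked example (illustrative comment, not in the original source) of the recursion above,
# assuming a two-level hierarchy where cache_1 has hit_time = 1, a 50% miss rate, and main
# memory has hit_time = 100:
#   AMAT(mem)     = 100
#   AMAT(cache_1) = 1 + 0.5 * 100 = 51 cycles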
def build_hierarchy(configs, logger):
#Build the cache hierarchy with the given configuration
hierarchy = {}
#Main memory is required
main_memory = build_cache(configs, 'mem', None, logger)
prev_level = main_memory
hierarchy['mem'] = main_memory
if 'cache_3' in configs.keys():
cache_3 = build_cache(configs, 'cache_3', prev_level, logger)
prev_level = cache_3
hierarchy['cache_3'] = cache_3
if 'cache_2' in configs.keys():
cache_2 = build_cache(configs, 'cache_2', prev_level, logger)
prev_level = cache_2
hierarchy['cache_2'] = cache_2
if 'cache_1_core_2' in configs.keys():
cache_1_core_2 = build_cache(configs, 'cache_1_core_2', prev_level, logger)
prev_level = cache_2
hierarchy['cache_1_core_2'] = cache_1_core_2
#Cache_1 is required
cache_1 = build_cache(configs, 'cache_1', prev_level, logger)
if 'cache_1_core_2' in configs.keys():
cache_1.add_same_level_cache(cache_1_core_2)
cache_1_core_2.add_same_level_cache(cache_1)
#print(len(cache_1.same_level_caches))
hierarchy['cache_1'] = cache_1
return hierarchy
def build_cache(configs, name, next_level_cache, logger):
return cache.Cache(name,
configs['architecture']['word_size'],
configs['architecture']['block_size'],
configs[name]['blocks'] if (name != 'mem') else -1,
configs[name]['associativity'] if (name != 'mem') else -1,
configs[name]['hit_time'],
configs[name]['hit_time'],
configs['architecture']['write_back'],
logger,
next_level_cache,
rep_policy = configs[name]['rep_policy'] if 'rep_policy' in configs[name] else '',
prefetcher = configs[name]['prefetcher'] if 'prefetcher' in configs[name] else "none",
verbose = configs['verbose'] if 'verbose' in configs else 'False' )
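# Illustrative sketch (added comment, not in the original source) of the shape of the YAML
# configuration consumed by build_hierarchy/build_cache; the field names follow how
# build_cache indexes the configs dict, and the values are arbitrary:
#   architecture: {word_size: 1, block_size: 1, write_back: True}
#   mem:          {hit_time: 100}
#   cache_1:      {blocks: 4, associativity: 1, hit_time: 1, rep_policy: lru}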
if __name__ == '__main__':
main()
| AutoCAT-main | src/cache_simulator.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import torch
import torch.nn as nn
import torch.nn.functional as F
class ResidualBlock(nn.Module):
def __init__(self, dim: int) -> None:
super(ResidualBlock, self).__init__()
self.dim = dim
layers = []
layers.append(nn.ReLU())
layers.append(nn.Linear(self.dim, self.dim))
layers.append(nn.ReLU())
layers.append(nn.Linear(self.dim, self.dim))
self.layers = nn.Sequential(*layers)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return x + self.layers(x)
class DNNEncoder(nn.Module):
def __init__(self,
input_dim: int,
hidden_dim: int,
output_dim: int,
num_blocks: int = 1) -> None:
super(DNNEncoder, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.num_blocks = num_blocks
layers = []
layers.append(nn.Linear(self.input_dim, self.hidden_dim))
for _ in range(self.num_blocks):
layers.append(ResidualBlock(self.hidden_dim))
layers.append(nn.ReLU())
layers.append(nn.Linear(self.hidden_dim, self.output_dim))
self.layers = nn.Sequential(*layers)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.layers(x)
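# Illustrative usage (added example, not part of the original module): a DNNEncoder that maps
# a flattened 64-dimensional observation to a 32-dimensional feature vector through a single
# residual block; the dimensions are arbitrary and chosen only for demonstration.
if __name__ == "__main__":
    encoder = DNNEncoder(input_dim=64, hidden_dim=128, output_dim=32, num_blocks=1)
    features = encoder(torch.randn(8, 64))  # a batch of 8 flattened observations
    print(features.shape)                   # torch.Size([8, 32])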
| AutoCAT-main | src/models/dnn.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import os
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from models.dnn import DNNEncoder
class CacheBackbone(nn.Module):
def __init__(self,
latency_dim: int,
victim_acc_dim: int,
action_dim: int,
step_dim: int,
window_size: int,
action_embed_dim: int,
step_embed_dim: int,
hidden_dim: int,
num_blocks: int = 1) -> None:
super().__init__()
self.latency_dim = latency_dim
self.victim_acc_dim = victim_acc_dim
self.action_dim = action_dim
self.step_dim = step_dim
self.window_size = window_size
self.action_embed_dim = action_embed_dim
self.step_embed_dim = step_embed_dim
self.input_dim = (self.latency_dim + self.victim_acc_dim +
self.action_embed_dim +
self.step_embed_dim) * self.window_size
self.hidden_dim = hidden_dim
self.num_blocks = num_blocks
self.action_embed = nn.Embedding(self.action_dim,
self.action_embed_dim)
self.step_embed = nn.Embedding(self.step_dim, self.step_embed_dim)
self.dnn_encoder = DNNEncoder(self.input_dim, self.hidden_dim,
self.hidden_dim, self.num_blocks)
def make_one_hot(self, src: torch.Tensor,
num_classes: int) -> torch.Tensor:
mask = (src == -1)
src = src.masked_fill(mask, 0)
ret = F.one_hot(src, num_classes)
return ret.masked_fill(mask.unsqueeze(-1), 0.0)
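    # Illustrative note (added comment, not in the original source): -1 entries act as padding,
    # e.g. make_one_hot(torch.tensor([2, -1]), 3) yields [[0, 0, 1], [0, 0, 0]] instead of
    # raising on the negative index.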
def make_embedding(self, src: torch.Tensor,
embed: nn.Embedding) -> torch.Tensor:
mask = (src == -1)
src = src.masked_fill(mask, 0)
ret = embed(src)
return ret.masked_fill(mask.unsqueeze(-1), 0.0)
def forward(self, obs: torch.Tensor) -> torch.Tensor:
obs = obs.to(torch.int64)
assert obs.dim() == 3
batch_size = obs.size(0)
(l, v, act, step) = torch.unbind(obs, dim=-1)
l = self.make_one_hot(l, self.latency_dim)
v = self.make_one_hot(v, self.victim_acc_dim)
act = self.make_embedding(act, self.action_embed)
step = self.make_embedding(step, self.step_embed)
x = torch.cat((l, v, act, step), dim=-1)
x = x.view(batch_size, -1)
y = self.dnn_encoder(x)
return y
| AutoCAT-main | src/models/backbone.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
from typing import Dict, List, Tuple
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
from ray.rllib.models import ModelCatalog
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.annotations import override
from ray.rllib.utils.typing import ModelConfigDict, TensorType
from models.dnn import DNNEncoder
class TransformerModel(TorchModelV2, nn.Module):
def __init__(self, obs_space: gym.spaces.Space,
action_space: gym.spaces.Space, num_outputs: int,
model_config: ModelConfigDict, name: str, **kwargs) -> None:
TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
model_config, name)
nn.Module.__init__(self)
if len(kwargs) > 0:
custom_model_config = kwargs
else:
custom_model_config = model_config["custom_model_config"]
self.latency_dim = custom_model_config["latency_dim"]
self.victim_acc_dim = custom_model_config["victim_acc_dim"]
self.action_dim = custom_model_config["action_dim"]
self.step_dim = custom_model_config["step_dim"]
self.window_size = custom_model_config["window_size"]
self.action_embed_dim = custom_model_config["action_embed_dim"]
self.step_embed_dim = custom_model_config["step_embed_dim"]
self.input_dim = (self.latency_dim + self.victim_acc_dim +
self.action_embed_dim + self.step_embed_dim)
self.hidden_dim = custom_model_config["hidden_dim"]
self.output_dim = num_outputs
self.num_blocks = custom_model_config.get("num_blocks", 1)
self.action_embed = nn.Embedding(self.action_dim,
self.action_embed_dim)
self.step_embed = nn.Embedding(self.step_dim, self.step_embed_dim)
self.linear_i = nn.Linear(self.input_dim, self.hidden_dim)
# self.linear_o = nn.Linear(self.hidden_dim * self.window_size,
# self.hidden_dim)
encoder_layer = nn.TransformerEncoderLayer(d_model=self.hidden_dim,
nhead=8)
self.encoder = nn.TransformerEncoder(encoder_layer, self.num_blocks)
self.linear_a = nn.Linear(self.hidden_dim, self.output_dim)
self.linear_v = nn.Linear(self.hidden_dim, 1)
self._device = None
self._features = None
def make_one_hot(self, src: torch.Tensor,
num_classes: int) -> torch.Tensor:
mask = (src == -1)
src = src.masked_fill(mask, 0)
ret = F.one_hot(src, num_classes)
return ret.masked_fill(mask.unsqueeze(-1), 0.0)
def make_embedding(self, src: torch.Tensor,
embed: nn.Embedding) -> torch.Tensor:
mask = (src == -1)
src = src.masked_fill(mask, 0)
ret = embed(src)
return ret.masked_fill(mask.unsqueeze(-1), 0.0)
@override(TorchModelV2)
def forward(self, input_dict: Dict[str,
TensorType], state: List[TensorType],
seq_lens: TensorType) -> Tuple[TensorType, List[TensorType]]:
if self._device is None:
self._device = next(self.parameters()).device
obs = input_dict["obs"].to(self._device)
obs = obs.to(torch.int64)
assert obs.dim() == 3
# batch_size = obs.size(0)
l, v, act, stp = torch.unbind(obs, dim=-1)
l = self.make_one_hot(l, self.latency_dim)
v = self.make_one_hot(v, self.victim_acc_dim)
act = self.make_embedding(act, self.action_embed)
stp = self.make_embedding(stp, self.step_embed)
x = torch.cat((l, v, act, stp), dim=-1)
x = self.linear_i(x)
x = x.transpose(0, 1).contiguous()
h = self.encoder(x)
# h = self.linear_o(h.view(batch_size, -1))
h = h.mean(dim=0)
a = self.linear_a(h)
self._features = h
return a, state
@override(TorchModelV2)
def value_function(self) -> TensorType:
assert self._features is not None
v = self.linear_v(self._features)
return v.squeeze(1)
ModelCatalog.register_custom_model("transformer_model", TransformerModel)
| AutoCAT-main | src/models/transformer_model.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
from typing import Dict, List, Tuple
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
from ray.rllib.models import ModelCatalog
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.annotations import override
from ray.rllib.utils.typing import ModelConfigDict, TensorType
from models.dnn import DNNEncoder
class DNNModel(TorchModelV2, nn.Module):
def __init__(self, obs_space: gym.spaces.Space,
action_space: gym.spaces.Space, num_outputs: int,
model_config: ModelConfigDict, name: str, **kwargs) -> None:
TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
model_config, name)
nn.Module.__init__(self)
if len(kwargs) > 0:
custom_model_config = kwargs
else:
custom_model_config = model_config["custom_model_config"]
self.latency_dim = custom_model_config["latency_dim"]
self.victim_acc_dim = custom_model_config["victim_acc_dim"]
self.action_dim = custom_model_config["action_dim"]
self.step_dim = custom_model_config["step_dim"]
self.window_size = custom_model_config["window_size"]
self.action_embed_dim = custom_model_config["action_embed_dim"]
self.step_embed_dim = custom_model_config["step_embed_dim"]
self.input_dim = (self.latency_dim + self.victim_acc_dim +
self.action_embed_dim + self.step_embed_dim) * self.window_size
self.hidden_dim = custom_model_config["hidden_dim"]
self.output_dim = num_outputs
self.num_blocks = custom_model_config.get("num_blocks", 1)
self.action_embed = nn.Embedding(self.action_dim,
self.action_embed_dim)
self.step_embed = nn.Embedding(self.step_dim, self.step_embed_dim)
self.backbone = DNNEncoder(self.input_dim, self.hidden_dim,
self.hidden_dim, self.num_blocks)
self.linear_a = nn.Linear(self.hidden_dim, self.output_dim)
self.linear_v = nn.Linear(self.hidden_dim, 1)
self._device = None
self._features = None
def make_one_hot(self, src: torch.Tensor,
num_classes: int) -> torch.Tensor:
mask = (src == -1)
src = src.masked_fill(mask, 0)
ret = F.one_hot(src, num_classes)
return ret.masked_fill(mask.unsqueeze(-1), 0.0)
def make_embedding(self, src: torch.Tensor,
embed: nn.Embedding) -> torch.Tensor:
mask = (src == -1)
src = src.masked_fill(mask, 0)
ret = embed(src)
return ret.masked_fill(mask.unsqueeze(-1), 0.0)
@override(TorchModelV2)
def forward(self, input_dict: Dict[str,
TensorType], state: List[TensorType],
seq_lens: TensorType) -> Tuple[TensorType, List[TensorType]]:
if self._device is None:
self._device = next(self.parameters()).device
obs = input_dict["obs"].to(self._device)
obs = obs.to(torch.int64)
assert obs.dim() == 3
batch_size = obs.size(0)
(l, v, act, step) = torch.unbind(obs, dim=-1)
l = self.make_one_hot(l, self.latency_dim)
v = self.make_one_hot(v, self.victim_acc_dim)
act = self.make_embedding(act, self.action_embed)
step = self.make_embedding(step, self.step_embed)
x = torch.cat((l, v, act, step), dim=-1)
x = x.view(batch_size, -1)
h = self.backbone(x)
a = self.linear_a(h)
self._features = h
return a, state
@override(TorchModelV2)
def value_function(self) -> TensorType:
assert self._features is not None
v = self.linear_v(self._features)
return v.squeeze(1)
ModelCatalog.register_custom_model("dnn_model", DNNModel)
| AutoCAT-main | src/models/dnn_model.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
'''
Author Mulong Luo
Date 2022.1.24
usage: restore the ray checkpoint to replay the agent and extract the attack pattern
'''
from copy import deepcopy
import gym
from starlette.requests import Request
import requests
import pprint
import ray
import json
from ray import serve
from test_custom_policy_diversity_works import *
from cache_simulator import print_cache
#from run_gym_rrllib import * # need this to import the config and PPOtrainer
#config["num_workers"] = 1
#config["num_envs_per_worker"] = 1
#print(config)
#tune.register_env("cache_guessing_game_env_fix", CacheGuessingGameEnv)#Fix)
#exit(0)
checkpoint_path = sys.argv[1:][0]
print(checkpoint_path)
#exit(-1)
#'/home/ml2558/ray_results/PPO_cache_guessing_game_env_fix_2022-01-24_21-18-203pft9506/checkpoint_000136/checkpoint-136'
i = checkpoint_path.rfind('/')
config_path = checkpoint_path[0:i] + '/../params.json'
print(config_path)
config = json.load(open(config_path))
##if os.path.isfile(config_path):
## print('load env configuration in', config_path)
## #exit(0)
## with open(config_path, 'rb') as handle:
## config["env_config"] = pickle.load(handle)
## print(config["env_config"])
##else:
## print('env.config not found! using defualt one')
## print('be careful to that the env.cofnig matches the env which generate the checkpoint')
## print(config["env_config"])
config["env_config"]["verbose"] = 1
print(config)
trainer = PPOCustomTrainer(config=config)
trainer.restore(checkpoint_path)
#local_worker = trainer.workers.local_worker()
#env = local_worker.env_context
env = CacheGuessingGameEnv(config["env_config"])
#obs = env.reset()
#for _ in range(1000):
# print(f"-> Sending observation {obs}")
# # Setting explore=False should always return the same action.
# action = trainer.compute_single_action(obs, explore=False)
# print(f"<- Received response {action}")
# obs, reward, done, info = env.step(action)
# if done == True:
# obs = env.reset()
#
## no cache randomization
## no randomized inference
#pattern_buffer = []
#for victim_addr in range(env.victim_address_min, env.victim_address_max + 1):
# obs = env.reset(victim_address=victim_addr)
# action_buffer = []
# done = False
# while done == False:
# print(f"-> Sending observation {obs}")
# action = trainer.compute_single_action(obs, explore=False)
# print(f"<- Received response {action}")
# obs, reward, done, info = env.step(action)
# action_buffer.append((action, obs[0]))
# if reward > 0:
# correct = True
# else:
# correct = False
# pattern_buffer.append((victim_addr, action_buffer, correct))
#pprint.pprint(pattern_buffer)
def replay_agent():
# no cache randomization
# randomized inference (10 times)
pattern_buffer = []
num_guess = 0
num_correct = 0
pattern_dict = {}
if env.allow_empty_victim_access == False:
end_address = env.victim_address_max + 1
else:
end_address = env.victim_address_max + 1 + 1
for victim_addr in range(env.victim_address_min, end_address):
for repeat in range(1):#000):
obs = env.reset(victim_address=victim_addr)
# for debugging purposes
print_cache(env.l1)
#env._randomize_cache()#"union")#"victim")
action_buffer = []
done = False
legend=[]
step = 0
while done == False:
# for debugging purposes
print_cache(env.l1)
step += 1
#print(f"-> Sending observation {obs}")
action = trainer.compute_single_action(obs, explore=False) # randomized inference
# print the log likelihood for each action
# see https://github.com/ray-project/ray/blob/7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065/rllib/policy/policy.py#L228
local_worker = trainer.workers.local_worker()
pp = local_worker.preprocessors["default_policy"]
###print(obs)
observation = pp.transform(obs)
episodes = None
policy = trainer.get_policy()
logp = policy.compute_log_likelihoods(
actions = [i for i in range(0, env.action_space.n)],
obs_batch = [observation],
)
#prev_action_batch = None,
#prev_reward_batch = None,
#action_normalized=True)
#print(logp)
#print(np.argmax(logp.cpu().numpy()))
import matplotlib.pyplot as plt
plt.plot(logp.cpu().numpy())
#print(action)
legend.append('step '+ str(step))
#print(f"<- Received response {action}")
obs, reward, done, info = env.step(action)
latency = obs[0][0] #
action_buffer.append((action, latency))
if reward > 0:
correct = True
num_correct += 1
else:
correct = False
num_guess += 1
pattern_buffer.append((victim_addr, action_buffer, correct))
print(pattern_buffer)
if pattern_dict.get((victim_addr, tuple(action_buffer), correct)) == None:
pattern_dict[(victim_addr, tuple(action_buffer), correct)] = 1
else:
pattern_dict[(victim_addr, tuple(action_buffer), correct)] += 1
plt.xlabel('action label')
plt.ylabel('logp')
plt.legend(legend)
#plt.show()
secret = open('victim.txt', 'a')
with open('temp.txt', 'a') as out:
for pattern in pattern_buffer:
trajectory = pattern[1]
for point in trajectory:
print(point[0], end=' ', file=out)
print(pattern[0], file = secret)
print(' ', file = out)
print( "overall accuray " + str(1.0 * num_correct / num_guess) )
pprint.pprint(pattern_dict)
print("num distinct patterns "+ str(len(pattern_dict)))
return 1.0 * num_correct / num_guess, pattern_buffer
replay_agent()
#if __name__ == "__main__":
#import pickle
#ickle.loads(pickle.dumps(trainer.get_policy()))
# cache randomization
# no randomized inference
| AutoCAT-main | src/rllib/replay_checkpoint.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
'''
Author: Mulong Luo
Date: 2022.7.10
Description:
split the agent into two different agents
P1: just generate the sequence but not the guess
P2: just make the guess, given the memory access sequence and observations
P1: action space: autoCAT's memory access
observation space: guessability
P2: action space: NOP
observation space: original observation space
P1 wrapper of CacheGuessingGameEnv
blocking the guess actions or collapsing them into a single guess action
when the guess is triggered, calculate the guessability as the reward
observation space becomes concatenated observations
reward becomes the aggregated reward
'''
from random import random
import sys
import os
import gym
import sys
import numpy as np
from gym import spaces
import signal
from sklearn import svm
from sklearn.model_selection import cross_val_score
class CacheSimulatorP1Wrapper(gym.Env):
def __init__(self, env_config):
#sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from cache_guessing_game_env_wrapper import CacheGuessingGameEnvWrapper as CacheGuessingGameEnv
# for offline training, the environment returns filler observations and zero reward
# until the guess
# the step reward is also temporarily accumulated until the end
self.offline_training = True
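        # Illustrative summary (added note): in offline mode, step() below does not run the
        # sub-environments immediately; non-guess actions are appended to offline_action_buffer
        # while the agent sees filler observations, and only when the merged guess action
        # arrives is the whole buffered sequence replayed on every sub-environment and scored
        # with the P2 oracle reward.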
self.copy = 1
self.env_list = []
self.env_config = env_config
self.cache_state_reset = False # has to force no reset
self.env = CacheGuessingGameEnv(env_config)
self.victim_address_min = self.env.victim_address_min
self.victim_address_max = self.env.victim_address_max
self.window_size = self.env.window_size
self.secret_size = self.victim_address_max - self.victim_address_min + 1
self.max_box_value = self.env.max_box_value
self.feature_size = self.env.feature_size
# expand the observation space
self.observation_space = spaces.Box(low=-1, high=self.max_box_value, shape=(self.window_size, self.feature_size * self.secret_size * self.copy))
# merge all guessing into one action
self.action_space_size = (self.env.action_space.n - self.secret_size+1)
print(self.env.action_space.n)
print(self.env.get_act_space_dim())
self.action_space = spaces.Discrete(self.action_space_size)
# instantiate the environment
self.env_list.append(CacheGuessingGameEnv(env_config))
self.env_config['verbose'] = False
for _ in range(1,self.secret_size * self.copy):
self.env_list.append(CacheGuessingGameEnv(env_config))
# instantiate the latency_buffer
# for each permuted secret, latency_buffer stores the latency
self.latency_buffer = []
for i in range(0, self.secret_size * self.copy):
self.latency_buffer.append([])
#permute the victim addresses
self.victim_addr_arr = np.random.permutation(range(self.env.victim_address_min, self.env.victim_address_max+1))
self.victim_addr_arr = []
for i in range(self.victim_address_min, self.victim_address_max+1):
self.victim_addr_arr.append(i)
# reset the addresses
self.env_config['verbose'] = True
self.env_list[0].reset(self.victim_addr_arr[0])
self.env_config['verbose'] = False
self.reset_state = np.array([[]] * self.window_size)
# initialize the offline_state as filler state if we use offline training
if self.offline_training == True:
self.offline_state = self.env.reset(seed=-1)
self.offline_reward = 0
self.offline_action_buffer = []
self.last_offline_state = self.env.reset()
for cp in range(0, self.copy):
seed = -1#random.randint(1, 1000000)
for i in range(0, len(self.victim_addr_arr)):
state = self.env_list[i + cp * len(self.victim_addr_arr)].reset(victim_address = self.victim_addr_arr[i], seed= seed)
self.reset_state = np.concatenate((self.reset_state, state), axis=1)
# the same seed ensures the initial states are the same
def reset(self):
# permute the victim addresses
#self.victim_addr_arr = np.random.permutation(range(self.env.victim_address_min, self.env.victim_address_max+1))
self.victim_addr_arr = []
for i in range(self.victim_address_min, self.victim_address_max+1):
self.victim_addr_arr.append(i)
# restore the total state
total_state = np.array([[]] * self.window_size)
for i in range(len(self.env_list)):
seed = -1#random.randint(1, 1000000)
env = self.env_list[i]
state = env.reset(victim_address = self.victim_addr_arr[i % len(self.victim_addr_arr)], seed = seed)
total_state = np.concatenate((total_state, state), axis=1)
if self.offline_training == True:
state = self.offline_state
self.offline_action_buffer = []
# reset the latency_buffer
self.latency_buffer = []
for i in range(0, self.secret_size * self.copy):
self.latency_buffer.append([])
self.last_offline_state = self.env.reset()
return total_state
#return self.reset_state
# feed the actions to all subenv with different secret
def step(self, action):
early_done_reward = 0
total_reward = 0
total_state = []
total_done = False
done_arr = []
total_state = np.array([[]] * self.window_size)
#parsed_orig_action = action #self.env.parse_action(action)
if action == self.action_space_size - 1: # guessing action
info = {}
# for offline training the total_reward needs to include the history reward
if self.offline_training == True:
# just simulate all actions here
i = 0
print(self.offline_action_buffer)
for env in self.env_list:
for act in self.offline_action_buffer:
#print('simulate in offline_action_buffer')
state, reward, done, info = env.step(act)
total_reward += reward
latency = state[0][0]
self.latency_buffer[i].append(latency) #
if done == True:
break
i += 1
# TODO(Mulong): need to think whether the last observation is needed for the agent
total_state = self.reset_state
self.offline_action_buffer = []
total_reward = self.P2oracle()
else:
#calculate the reward and terminate
for env in self.env_list:
state, reward, done, info = env.step(action)
#total_state = np.concatenate((total_state, state), axis=1)
total_state = self.reset_state
total_reward = self.P2oracle()
total_done = True
else: # use the action and collect and concatenate observation
### for offline RL, we need to mask the state and accumulate reward
# for offline RL, just store the action
if self.offline_training == True:
total_reward = 0
self.offline_action_buffer.append(action)
# referring to cache_guessing_game_env_impl.py to create an empty next state
step_count = 1 + self.last_offline_state[0][3]
if step_count == self.env.window_size:
print('length violation!!!')
total_done = True
#total_reward = len(self.env_list) * self.env.length_violation_reward
i = 0
#print(self.offline_action_buffer)
for env in self.env_list:
for act in self.offline_action_buffer:
#print('simulate in offline_action_buffer')
state, reward, done, info = env.step(act)
total_reward += reward
latency = state[0][0]
self.latency_buffer[i].append(latency) #
if done == True:
break
i += 1
total_done = done
print(total_reward)
original_action = action #self.last_offline_state[0][2]
_, _, is_victim, _, _ = self.env.parse_action(action)
if is_victim == 1:
victim_accessed = 1
else:
if self.last_offline_state[0][1] == 1:
victim_accessed = 1
else:
victim_accessed = 0
r = self.last_offline_state[0][0]
new_obs = np.array([[r, victim_accessed, original_action, step_count]])
#del self.last_offline_state[-1]
self.last_offline_state = np.concatenate((new_obs, self.last_offline_state[0:-1,]), axis= 0)
state = self.last_offline_state
# state is a n * 4 matrix
# r, victim_accessed, original_action, self.step_count
# we only need to mask the r
state[:,0] = self.offline_state[:, 0]
for env in self.env_list:
total_state = np.concatenate((total_state, state), axis=1)
#print(total_state)
#print('step')
info={}
else: #online RL
i = 0
for env in self.env_list:
state, reward, done, info = env.step(action)
latency = state[0][0]
# length violation or other type of violation
if done == True:
env.reset()
total_done = True
self.latency_buffer[i].append(latency) #
total_reward += reward
total_state = np.concatenate((total_state, state), axis=1)
i += 1
info = {}
total_reward = total_reward * 1.0 / len(self.env_list)#self.secret_size
return total_state, total_reward, total_done, info
# given the existing sequence, calculate the P2 oracle reward
# calculate the expected guessing correctness
def P2oracle(self):
# score
# calculate the total score
# which correspond to the number of distinguishable secret
latency_dict = {}
for i in range(0, len(self.latency_buffer)):
latency_dict[tuple(self.latency_buffer[i])] = 1
score = 1.0 * len(latency_dict) / len(self.latency_buffer)
print(self.latency_buffer)
print(' P2oracle score %f'% score)
return score * self.env.correct_reward + ( 1 - score ) * self.env.wrong_reward
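    # Worked example for P2oracle (illustrative, not executed): with two sub-environments
    # whose latency traces are (1, 0) and (1, 0), only one distinct trace exists, so
    # score = 1/2 and the reward is the midpoint between correct_reward and wrong_reward;
    # with traces (1, 0) and (0, 1) the score is 2/2 = 1 and the full correct_reward is returned.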
# use SVM to evaluate the guessability (oracle guessing correctness rate)
def P2SVMOracle(self):
if len(self.latency_buffer[0]) == 0:
score = 0
else:
X = self.latency_buffer
y = []
for cp in range(0, self.copy):
for sec in range(0, len(self.victim_addr_arr)):
y.append(self.victim_addr_arr[sec])
clf = svm.SVC(random_state=0)
print(len(X))
print(len(y))
#print(X)
#print(y)
ans = cross_val_score(clf, X, y, cv=4, scoring='accuracy')
score = ans.mean()
print("P2 SVM accuracy %f" % score)
return score * self.env.correct_reward + ( 1 - score ) * self.env.wrong_reward
if __name__ == "__main__":
from ray.rllib.agents.ppo import PPOTrainer
import ray
import ray.tune as tune
ray.init(include_dashboard=False, ignore_reinit_error=True, num_gpus=1)
if ray.is_initialized():
ray.shutdown()
#tune.register_env("cache_guessing_game_env_fix", CacheSimulatorSIMDWrapper)#
tune.register_env("cache_guessing_game_env_fix", CacheSimulatorP1Wrapper)
config = {
'env': 'cache_guessing_game_env_fix', #'cache_simulator_diversity_wrapper',
'env_config': {
'verbose': 1,
"force_victim_hit": False,
'flush_inst': True,#False,
"allow_victim_multi_access": True,#False,
"attacker_addr_s": 0,
"attacker_addr_e": 7,
"victim_addr_s": 0,
"victim_addr_e": 3,
"reset_limit": 1,
"cache_configs": {
# YAML config file for cache simulation
"architecture": {
"word_size": 1, #bytes
"block_size": 1, #bytes
"write_back": True
},
"cache_1": {#required
"blocks": 4,
"associativity": 1,
"hit_time": 1 #cycles
},
"mem": {#required
"hit_time": 1000 #cycles
}
}
},
#'gamma': 0.9,
'num_gpus': 1,
'num_workers': 1,
'num_envs_per_worker': 1,
#'entropy_coeff': 0.001,
#'num_sgd_iter': 5,
#'vf_loss_coeff': 1e-05,
'model': {
#'custom_model': 'test_model',#'rnn',
#'max_seq_len': 20,
#'custom_model_config': {
# 'cell_size': 32
# }
},
'framework': 'torch',
}
#tune.run(PPOTrainer, config=config)
trainer = PPOTrainer(config=config)
def signal_handler(sig, frame):
print('You pressed Ctrl+C!')
checkpoint = trainer.save()
print("checkpoint saved at", checkpoint)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
while True:
result = trainer.train() | AutoCAT-main | src/rllib/run_gym_rllib_guessability.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
'''
CacheSimulatorSIMDWrapper
wraps multiple environments with different initializations into a single env
'''
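# Illustrative note (added): CacheSimulatorSIMDWrapper below keeps `duplicate` copies of
# CacheGuessingGameEnv that all share the same secret; reset()/step() concatenate the
# per-copy observations into one flat state and sum the per-copy rewards, so a single
# policy acts on all copies at once.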
#from msilib.schema import DuplicateFile
import random
import sys
import os
import gym
from gym import spaces
from cache_guessing_game_env_wrapper import CacheGuessingGameEnvWrapper as CacheGuessingGameEnv
#from cache_guessing_game_env_impl import *
import pdb
import sys
import signal
# random initialization
# same secret
class CacheSimulatorSIMDWrapper(gym.Env):
def __init__(self, env_config, duplicate = 1, victim_addr = -1):
self.duplicate = duplicate
self.env_list = []
self.env_config = env_config
self.victim_addr = victim_addr
self.env = CacheGuessingGameEnv(env_config)
self.victim_address_min = self.env.victim_address_min
self.victim_address_max = self.env.victim_address_max
self.observation_space = spaces.MultiDiscrete(list(self.env.observation_space.nvec) * self.duplicate)
self.action_space = self.env.action_space
self.env_list.append(CacheGuessingGameEnv(env_config))
self.env_config['verbose'] = False
for _ in range(1,self.duplicate):
self.env_list.append(CacheGuessingGameEnv(env_config))
def reset(self, victim_addr = -1):
total_state = []
# same victim_addr (secret) for all environments
if self.victim_addr == -1 and victim_addr == -1:
victim_addr = random.randint(self.env.victim_address_min, self.env.victim_address_max)
elif victim_addr == -1:
victim_addr = self.victim_addr
for env in self.env_list:
state = env.reset(victim_addr)
env._randomize_cache()#mode="union")
total_state += list(state)
return total_state
def step(self, action):
early_done_reward = 0
total_reward = 0
total_state = []
total_done = False
done_arr = []
for env in self.env_list:
state, reward, done, info = env.step(action)
total_reward += reward
total_state += list(state)
done_arr.append(done)
if done:
total_done = True
if total_done:
for done in done_arr:
if done == False:
total_reward -= early_done_reward
info = {}
return total_state, total_reward, total_done, info
# multiple initialization
# multiple secret
class CacheSimulatorMultiGuessWrapper(gym.Env):
def __init__(self, env_config):
self.duplicate = 4
self.block_duplicate = 4
self.env_list = []
self.env_config = env_config
self.env = CacheSimulatorSIMDWrapper(env_config, duplicate=self.duplicate)
#permute the victim addresses
self.secret_size = self.env.victim_address_max - self.env.victim_address_min + 1
self.victim_addr_arr = [] #np.random.permutation(range(self.env.victim_address_min, self.env.victim_address_max+1))
for _ in range(self.block_duplicate):
#for _ in range(self.secret_size):
rand = random.randint(self.env.victim_address_min, self.env.victim_address_max )
self.victim_addr_arr.append(rand)
self.observation_space = spaces.MultiDiscrete(list(self.env.observation_space.nvec) * self.block_duplicate )
self.action_space = spaces.MultiDiscrete([self.env.action_space.n] + [self.secret_size] * self.block_duplicate)
self.env_config['verbose'] = True
self.env_list.append(CacheSimulatorSIMDWrapper(env_config, duplicate=self.duplicate, victim_addr=self.victim_addr_arr[0]))
self.env_config['verbose'] = False
for i in range(1, len(self.victim_addr_arr)):
#for victim_addr in self.victim_addr_arr:
#self.env_list.append(CacheSimulatorSIMDWrapper(env_config, duplicate=self.duplicate, victim_addr = victim_addr))
#self.env_config['verbose'] = False
#for _ in range(0,self.block_duplicate):
self.env_list.append(CacheSimulatorSIMDWrapper(env_config, duplicate=self.duplicate, victim_addr=self.victim_addr_arr[i]))
def reset(self):
total_state = []
# same victim_addr (secret) for all environments
#self.victim_addr_arr = np.random.permutation(range(self.env.victim_address_min, self.env.victim_address_max+1))
self.victim_addr_arr = [] #np.random.permutation(range(self.env.victim_address_min, self.env.victim_address_max+1))
for _ in range(self.block_duplicate):
#for _ in range(self.secret_size):
rand = random.randint(self.env.victim_address_min, self.env.victim_address_max)
#print('self.env.victim_address_min')
#print(self.env.victim_address_min)
#print('self.env.victim_address_max')
#print(self.env.victim_address_max)
#print('rand')
#print(rand)
#pdb.set_trace()
#exit(0)
self.victim_addr_arr.append(rand)
for i in range(len(self.env_list)):
env = self.env_list[i]
#print('len(self.env_list)')
#print(len(self.env_list))
#print('i')
#print(i)
#print('victim_addr_arr')
#print(len(self.victim_addr_arr))
state = env.reset(self.victim_addr_arr[i])
total_state += list(state)
return total_state
def step(self, action):
early_done_reward = 0
total_reward = 0
total_state = []
total_done = False
done_arr = []
orig_action = action[0] # first digit is the original action
parsed_orig_action = self.env.env.parse_action(orig_action)
is_guess = parsed_orig_action[1] # check whether to guess or not
is_victim = parsed_orig_action[2] # check whether to invoke victim
#is_flush = orig_action[3] # check if it is a guess
if is_victim != True and is_guess == True:
guess_addrs = action[1:]
for i in range(0, len(self.env_list)):
env = self.env_list[i]
#pdb.set_trace()
action = orig_action - orig_action % self.secret_size + guess_addrs[i] - self.env.env.victim_address_min
_, is_guesss, _, _, _ = self.env.env.parse_action(action)
state, reward, done, info = env.step(action)
assert(is_guesss == True)
assert(done == True)
total_reward += reward
total_state += list(state)
info = {}
return total_state, total_reward * 1.0 / self.duplicate / self.block_duplicate, True, info
for env in self.env_list:
state, reward, done, info = env.step(orig_action)
total_reward += reward
total_state += list(state)
done_arr.append(done)
if done:
total_done = True
info = {}
return total_state, total_reward * 1.0 / self.duplicate / self.block_duplicate , total_done, info
if __name__ == "__main__":
from ray.rllib.agents.ppo import PPOTrainer
import ray
import ray.tune as tune
ray.init(include_dashboard=False, ignore_reinit_error=True, num_gpus=1)
if ray.is_initialized():
ray.shutdown()
#tune.register_env("cache_guessing_game_env_fix", CacheSimulatorSIMDWrapper)#
tune.register_env("cache_guessing_game_env_fix", CacheSimulatorMultiGuessWrapper)
config = {
'env': 'cache_guessing_game_env_fix', #'cache_simulator_diversity_wrapper',
'env_config': {
'verbose': 1,
"force_victim_hit": False,
'flush_inst': True,#False,
"allow_victim_multi_access": True,#False,
"attacker_addr_s": 0,
"attacker_addr_e": 7,
"victim_addr_s": 0,
"victim_addr_e": 3,
"reset_limit": 1,
"cache_configs": {
# YAML config file for cache simulation
"architecture": {
"word_size": 1, #bytes
"block_size": 1, #bytes
"write_back": True
},
"cache_1": {#required
"blocks": 4,
"associativity": 1,
"hit_time": 1 #cycles
},
"mem": {#required
"hit_time": 1000 #cycles
}
}
},
#'gamma': 0.9,
'num_gpus': 1,
'num_workers': 1,
'num_envs_per_worker': 1,
#'entropy_coeff': 0.001,
#'num_sgd_iter': 5,
#'vf_loss_coeff': 1e-05,
'model': {
#'custom_model': 'test_model',#'rnn',
#'max_seq_len': 20,
#'custom_model_config': {
# 'cell_size': 32
# }
},
'framework': 'torch',
}
#tune.run(PPOTrainer, config=config)
trainer = PPOTrainer(config=config)
def signal_handler(sig, frame):
print('You pressed Ctrl+C!')
checkpoint = trainer.save()
print("checkpoint saved at", checkpoint)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
while True:
result = trainer.train() | AutoCAT-main | src/rllib/run_gym_rllib_simd.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))+ '/third_party/cachequery/tool/')
from cachequery import CacheQuery
class CacheQueryWrapper(CacheQuery):
pass | AutoCAT-main | src/rllib/cache_query_wrapper.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
'''
Author: Mulong Luo
Date: 2022.7.10
Function: Add one reveal action so that the agent has to explicitly reveal the secret;
once the secret is revealed, it must make a guess immediately
'''
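# Illustrative note (added): the wrapper below appends one extra "reveal" action at index
# env.action_space.n; choosing it returns the latest unmasked observation with zero reward,
# while every other action is forwarded to the wrapped CacheGuessingGameEnv with its latency
# column masked until a reveal happens. Guessing before revealing, revealing twice, or
# revealing without guessing next all terminate the episode with wrong_reward.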
from random import random
import sys
import os
###sys.path.append('../src')
from ray.rllib.agents.ppo import PPOTrainer
import ray
import ray.tune as tune
import gym
from gym import spaces
import signal
from sklearn import svm
from sklearn.model_selection import cross_val_score
import numpy as np
class CacheGuessingGameWithRevealEnv(gym.Env):
def __init__(self, env_config):
from cache_guessing_game_env_wrapper import CacheGuessingGameEnvWrapper as CacheGuessingGameEnv
self.env = CacheGuessingGameEnv(env_config)
self.action_space_size = self.env.action_space.n + 1 # increase the action space by one
self.action_space = spaces.Discrete(self.action_space_size)
self.observation_space = self.env.observation_space
self.revealed = False # initially
done = False
reward = 0
info = {}
state = self.env.reset()
self.last_unmasked_tuple = (state, reward, done, info)
def reset(self):
self.revealed = False # reset the revealed
done = False
reward = 0
info = {}
state = self.env.reset()
self.last_unmasked_tuple = (state, reward, done, info)
return state
def step(self, action):
if action == self.action_space_size - 1:
if self.revealed == True:
self.env.vprint("double reveal! terminated!")
state, reward, done, info = self.last_unmasked_tuple
reward = self.env.wrong_reward
done = True
return state, reward, done, info
self.revealed = True
self.env.vprint("reveal observation")
# return the revealed obs, reward, done, info from the last unmasked step
state, reward, done, info = self.last_unmasked_tuple
reward = 0 # reveal action does not cost anything
return state, reward, done, info
elif action < self.action_space_size - 1: # this time the action must be smaller than action_space_size - 1
_, is_guess, _, _, _ = self.env.parse_action(action)
# need to check if revealed first
# if revealed, must make a guess
# if not revealed can do any thing
if self.revealed == True:
if is_guess == 0: # revealed but not guess # huge penalty
self.env.vprint("reveal but no guess! terminate")
done = True
reward = self.env.wrong_reward
info = {}
state = self.env.reset()
return state, reward, done, info
elif is_guess != 0: # this must be guess and terminate
return self.env.step(action)
elif self.revealed == False:
if is_guess != 0:
# guess without reveal --> huge penalty
self.env.vprint("guess without reveal! terminate")
done = True
reward = self.env.wrong_reward
info = {}
state = self.env.reset()
return state, reward, done, info
else:
state, reward, done, info = self.env.step(action)
self.last_unmasked_tuple = ( state.copy(), reward, done, info )
# mask the state so that nothing is revealed
state[:,0] = np.zeros((state.shape[0],))
return state, reward, done, info
if __name__ == "__main__":
ray.init(include_dashboard=False, ignore_reinit_error=True, num_gpus=1, local_mode=True)
if ray.is_initialized():
ray.shutdown()
tune.register_env("cache_guessing_game_env", CacheGuessingGameWithRevealEnv)
config = {
'env': 'cache_guessing_game_env', #'cache_simulator_diversity_wrapper',
'env_config': {
'verbose': 1,
"rerandomize_victim": False,
"force_victim_hit": False,
'flush_inst': False,
"allow_victim_multi_access": True,#False,
"allow_empty_victim_access": True,
"attacker_addr_s": 0,
"attacker_addr_e": 8,#4,#11,#15,
"victim_addr_s": 0,
"victim_addr_e": 0,#7,
"reset_limit": 1,
"cache_configs": {
# YAML config file for cache simulation
"architecture": {
"word_size": 1, #bytes
"block_size": 1, #bytes
"write_back": True
},
"cache_1": {#required
"blocks": 4,#4,
"associativity": 4,
"hit_time": 1, #cycles
"prefetcher": "nextline"
},
"mem": {#required
"hit_time": 1000 #cycles
}
}
},
#'gamma': 0.9,
'num_gpus': 1,
'num_workers': 1,
'num_envs_per_worker': 1,
#'entropy_coeff': 0.001,
#'num_sgd_iter': 5,
#'vf_loss_coeff': 1e-05,
'model': {
#'custom_model': 'test_model',#'rnn',
#'max_seq_len': 20,
#'custom_model_config': {
# 'cell_size': 32
# }
},
'framework': 'torch',
}
#tune.run(PPOTrainer, config=config)
trainer = PPOTrainer(config=config)
def signal_handler(sig, frame):
print('You pressed Ctrl+C!')
checkpoint = trainer.save()
print("checkpoint saved at", checkpoint)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
while True:
result = trainer.train() | AutoCAT-main | src/rllib/run_gym_rllib_reveal_action.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# look at https://github.com/ray-project/ray/blob/ea2bea7e309cd60457aa0e027321be5f10fa0fe5/rllib/examples/custom_env.py#L2
#from CacheSimulator.src.gym_cache.envs.cache_simulator_wrapper import CacheSimulatorWrapper
#from CacheSimulator.src.replay_checkpint import replay_agent
import gym
import ray
import ray.tune as tune
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.models.modelv2 import restore_original_dimensions
import torch.nn as nn
import numpy as np
from ray.rllib.models import ModelCatalog
from ray.rllib.agents.ppo import PPOTrainer
from ray.rllib.agents.sac import SACTrainer
import json
import pprint
import sys
import copy
import torch
def replay_agent(trainer, env, randomize_init=False, non_deterministic=False, repeat_time=-1):
# no cache randomization
# randomized inference (10 times)
pattern_buffer = []
num_guess = 0
num_correct = 0
if randomize_init == False and non_deterministic == False:
    repeat_times = 1
elif repeat_time == -1:
    repeat_times = 50
else:
    repeat_times = repeat_time
for victim_addr in range(env.victim_address_min, env.victim_address_max + 1):
for repeat in range(repeat_times):
obs = env.reset(victim_address=victim_addr)
if randomize_init:
env._randomize_cache("union")
action_buffer = []
done = False
while done == False:
print(f"-> Sending observation {obs}")
action = trainer.compute_single_action(obs, explore = non_deterministic) # randomized inference
print(f"<- Received response {action}")
obs, reward, done, info = env.step(action)
action_buffer.append((action, obs[0]))
if reward > 0:
correct = True
num_correct += 1
else:
correct = False
num_guess += 1
pattern_buffer.append((victim_addr, action_buffer, correct))
pprint.pprint(pattern_buffer)
return 1.0 * num_correct / num_guess, pattern_buffer
if __name__ == "__main__":
import signal
import sys
import pickle
from test_custom_policy_diversity_works import *
if len(sys.argv) > 1:
config_name = sys.argv[1]
print(config_name)
f = open(config_name)
config = json.load(f)
if len(sys.argv) == 5:
nset = int(sys.argv[2])
nway = int(sys.argv[3])
nopt = int(sys.argv[4])
config["env_config"]["cache_configs"]["cache_1"]["associativity"] = nway
config["env_config"]["cache_configs"]["cache_1"]["blocks"] = nset * nway
config["env_config"]["victim_addr_s"] = 0
config["env_config"]["victim_addr_e"] = nset * nway - 1
if nopt == 0: # shared
config["env_config"]["attacker_addr_s"] = 0
config["env_config"]["attacker_addr_e"] = nset * nway - 1
config["env_config"]["flush_inst"] = True
elif nopt == 1: # not shared
config["env_config"]["attacker_addr_s"] = nset * nway
config["env_config"]["attacker_addr_e"] = 2 * nset * nway - 1
config["env_config"]["flush_inst"] = False
elif nopt == 2: # all + clflush allowed
config["env_config"]["attacker_addr_s"] = 0
config["env_config"]["attacker_addr_e"] = 2 * nset * nway - 1
config["env_config"]["flush_inst"] = False
elif nopt == 3: # all + clflush not allowed
config["env_config"]["attacker_addr_s"] = 0
config["env_config"]["attacker_addr_e"] = 2 * nset * nway - 1
config["env_config"]["flush_inst"] = True
#print(config)
#exit(0)
elif len(sys.argv) != 2:
    print("incorrect number of arguments. Exit!!!")
exit(-1)
else:
print("(warning) config file not specified! use default configrations!")
#tune.run(PPOTrainer, config=config)#config={"env": 'Freeway-v0', "num_gpus":1})
from ray.tune.logger import pretty_print
#tune.register_env("cache_guessing_game_env_fix", CacheSimulatorMultiGuessWrapper)
#from run_gym_rllib_simd import *
#config['num_workers'] = 6
#config['num_envs_per_worker']= 2
print(config)
env = CacheGuessingGameEnv(config["env_config"])
#env = CacheSimulatorMultiGuessWrapper(config["env_config"])
trainer = PPOTrainer(config=config)
#trainer = SACTrainer(config=config)
def signal_handler(sig, frame):
print('You pressed Ctrl+C!')
checkpoint = trainer.save()
print("checkpoint saved at", checkpoint)
i = checkpoint.rfind('/')
config_name = checkpoint[0:i] + '/../env.config'
print("env config saved ad ", config_name)
#### dump the binary config file
###with open(config_name, 'wb') as handle:
### pickle.dump(config["env_config"], handle)
#### dump the txt config file
###with open(config_name + '.txt', 'w') as txtfile:
### txtfile.write(json.dumps(config["env_config"]))
policy = trainer.get_policy()
for model in policy.past_models:
print(model.state_dict()['_hidden_layers.1._model.0.weight'])
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
i = 0
thre =0.95 #0.98
#buf = []
all_raw_patterns = []
all_categorized_patterns = []
while True:
# Perform one iteration of training the policy with PPO
result = trainer.train()
print(pretty_print(result))
i += 1
if i % 1 == 0: # give enough interval to achieve small verification overhead
accuracy, patterns = replay_agent(trainer, env, randomize_init=True, non_deterministic=True)
if i == 1:
checkpoint = trainer.save()
print("Initial checkpoint saved at", checkpoint)
i = checkpoint.rfind('/')
config_name = checkpoint[0:i] + '/../env.config'
print("env config saved ad ", config_name)
# dump the binary config file
with open(config_name, 'wb') as handle:
pickle.dump(config["env_config"], handle)
# dump the txt config file
#import pprint
#pp = pprint.PrettyPrinter(indent=4)
#pp.pprint(config["env_config"])
with open(config_name + '.txt', 'w') as txtfile:
#txtfile.write(pp.pprint(config["env_config"]))
txtfile.write(json.dumps(config, indent=4, sort_keys=True))
# just with lower reward
# HOW TO PREVENT THE SAME AGENT FROM BEING ADDED TWICE????
# HOW TO TELL IF THEY ARE CONSIDERED THE SAME AGENT?
# HOW TO FORCE TRAINER TO KNOW THAT THEY ARE STILL DISCOVERING THE SAME AGENT???
if accuracy > thre:
# if the agent is different from the known agent
policy = trainer.get_policy()
if policy.existing_agent(env, trainer) == False:
checkpoint = trainer.save()
print("checkpoint saved at", checkpoint)
# this agent might have high accuracy but
# it could be that it is still the same agent
# add this agent to blacklist
trainer.get_policy().push_current_model()
#buf.append(copy.deepcopy(trainer.get_weights()))
policy = trainer.get_policy()
for model in policy.past_models:
print(model.state_dict()['_hidden_layers.1._model.0.weight'])
#for weight in policy.past_weights:
# print(weight['_value_branch._model.0.bias'])
#print(weight['default_policy']['_value_branch._model.0.bias'])
#print(policy.model.state_dict()['_hidden_layers.1._model.0.weight'])
#for w in buf:
# print(w['default_policy']['_value_branch._model.0.bias']) | AutoCAT-main | src/rllib/run_gym_rllib_agent_blacklist.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import cache_guessing_game_env_impl as env
import sys
import pandas as pd
from pandas.core.arrays import numeric
#def number_of_set(x):
# return x%2 #number_of_set = 2
# suppose "dummy" is a nested list of 7 entries, for example. Will be replaced later
dummy = [[1, 0, 0, 0, 0], [3, 0, 0, 0, 0], [4, 0, 0, 0, 0], [1, 0, 0, 0, 0], [5, 0, 0, 0, 0], [0, 0, 1, 0, 0], [3, 0, 0, 0, 0]]
def read_file(): # will read the files in json. I have just left this function to read 'temp.txt'
f = open('/home/geunbae/CacheSimulator/src/temp.txt', mode='r', encoding='UTF-8')
d=f.read()
d
return d
def parser_action(): # split the input into [(attacker's)addr, is_guess, is_victim, is_flush, victim_addr]
input = pd.DataFrame(dummy)
input = input.astype('int')
input.columns =['addr', 'is_guess', 'is_victim', 'is_flush', 'victim_addr']
#input['set'] = 2
#input['set'] = input['addr']%2
input = input.assign(set=lambda x: x['addr'] % 2)
#input['set'] = input['addr'].apply(number_of_set(x))
def get_set(): # return addr%number_of_set
#input2 = pd.DataFrame(input)
#input2 = input2.astype('int')
#input2 = input.assign(set = 2)
#input[:,'set'] = 2
#input2[:,'set'] = 2
#input2['set'] = input2['addr'].apply(lambda x: x% 2)
#input2['set'] = input2[0].apply(lambda x: x% 2)
#input['set'] = input['addr'].apply(lambda x: x% 2)
#input['set'] = input.columns=['addr']% 2
#input2['set'] = input2[0]% 2
#input2['set'] = input2[0].apply(lambda x: x% 2)
#input2['set'] = input2[0].apply(lambda x: x% 2)
#input['set'] = input.apply(number_of_set)
pass
get_set()
def get_order(): # return (addr)/number_of_set
input_set0 = input[input.set==0]
input_set0['order'] = input_set0['addr'].rank(method='dense',ascending=False).astype(int)
print(input_set0)
input_set1 = input[input.set==1]
input_set1['order'] = input_set1['addr'].rank(method='dense',ascending=True).astype(int)
print(input_set1)
frames = [input_set0, input_set1]
result = pd.concat(frames)
output = pd.DataFrame(result)
output = output.sort_index(axis=0, ascending=True)
return output
output = get_order()
def rename_addr(): # rename the addres in the pattern based on the set and the order appeared in the pattern
# output = [#set, #the order the address appear in the attack, is_guess, is_victim, is_flush, victim_addr]
output = output[['set','order','is_guess', 'is_victim', 'is_flush', 'victim_addr']]
return output
def remove(): # remove repeated access
return output.drop_duplicates()
print(output)
# Defining main function
def main():
    read_file()
    parser_action()  # get_set()/get_order() are nested in and invoked by parser_action(); rename_addr()/remove() remain unused helpers there
# Using the special variable
# __name__
if __name__=="__main__":
main() | AutoCAT-main | src/rllib/categorization.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
| AutoCAT-main | src/rllib/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
'''
Author: Mulong Luo
Date: 2022.7.10
Usage: wrapper function to solve the import issues
'''
import sys
import os
import gym
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
#sys.path.append(os.path.dirname(os.path.abspath(__file__)))
#sys.path.append('../src')
from cache_guessing_game_env_impl import CacheGuessingGameEnv
from cchunter_wrapper import CCHunterWrapper
from cyclone_wrapper import CycloneWrapper
class CacheGuessingGameEnvWrapper(CacheGuessingGameEnv):
pass
class CycloneWrapperWrapper(CycloneWrapper):
pass
class CCHunterWrapperWrapper(CCHunterWrapper):
pass | AutoCAT-main | src/rllib/cache_guessing_game_env_wrapper.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
'''
Author: Mulong Luo
Date: 2022.7.12
Usage: wrapper for cachequery that interacts with the gym environment
the observation space and action space should be the same as the original autocat
'''
from collections import deque
import signal
import numpy as np
import random
import os
import yaml, logging
import sys
from itertools import permutations
import gym
from gym import spaces
import os, cmd, sys, getopt, re, subprocess, configparser
###sys.path.append('../src')
from ray.rllib.agents.ppo import PPOTrainer
import ray
import ray.tune as tune
import gym
from gym import spaces
#sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))+ '/third_party/cachequery/tool/')
from cache_query_wrapper import CacheQueryWrapper as CacheQuery
class CacheQueryEnv(gym.Env):
def __init__(self, env_config):
#sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from cache_guessing_game_env_wrapper import CacheGuessingGameEnvWrapper as CacheGuessingGameEnv
self.env = CacheGuessingGameEnv(env_config)
self.action_space_size = self.env.action_space.n + 1 # increase the action space by one
self.action_space = spaces.Discrete(self.action_space_size)
self.observation_space = self.env.observation_space
self.revealed = False # initially
done = False
reward = 0
info = {}
state = self.env.reset()
self.last_unmasked_tuple = (state, reward, done, info)
'''
instantiate the CacheQuery
'''
# flags
output = None
verbose = False
interactive = False
# options
config_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))+ '/third_party/cachequery/tool/cachequery.ini' # default path
batch = None
# config overwrite
cacheset = None
level = None
cacheset='34'
level = 'L2' # for 4-way cache
# read config
try:
config = configparser.ConfigParser()
config.read(config_path)
# add method for dynamic cache check
def cache(self, prop):
return self.get(self.get('General', 'level'), prop)
def set_cache(self, prop, val):
return self.set(self.get('General', 'level'), prop, val)
setattr(configparser.ConfigParser, 'cache', cache)
setattr(configparser.ConfigParser, 'set_cache', set_cache)
except:
print("[!] Error: invalid config file")
sys.exit(1)
# overwrite options
if level:
config.set('General', 'level', level)
if cacheset:
config.set_cache('set', cacheset)
if output:
config.set('General', 'log_file', output)
# instantiate cq
self.CQ = CacheQuery(config)
self.cq_command = "A B C D E F G H A B" #establish the address alphabet to number mapping
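        # Illustrative note (added): CacheQuery addresses are encoded as letters, with address i
        # mapped to chr(ord('A') + i) when the command string is extended in step() below; an
        # attacker access is suffixed with '?', a flush with '!', and a victim access is appended
        # as a bare letter.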
def reset(self):
self.revealed = False # reset the revealed
done = False
reward = 0
info = {}
state = self.env.reset()
self.last_unmasked_tuple = (state, reward, done, info)
#reset CacheQuery Command
self.cq_command = "A B C D E F G H A B"
return state
def step(self, action):
if action == self.action_space_size - 1:
if self.revealed == True:
self.env.vprint("double reveal! terminated!")
state, reward, done, info = self.last_unmasked_tuple
reward = self.env.wrong_reward
done = True
return state, reward, done, info
self.revealed = True
# return the revealed obs, reward,# return the revealed obs, reward,
state, reward, done, info = self.last_unmasked_tuple
reward = 0 # reveal action does not cost anything
self.env.vprint("reveal observation")
# when doing reveal, launch the actual cachequery
#self.CQ.command(self.cq_command)
answer = self.CQ.run(self.cq_command)[0]
#print(answer)
if answer != None:
lat_cq = answer.split()[answer.split().index('->')+1:]
lat_cq_cnt = len(lat_cq) - 1
for i in range(len(state)):
if state[i][0] != 2 and lat_cq_cnt >= 0:
if int(lat_cq[lat_cq_cnt]) > 50: # hit
state[i][0] = 0
else: # miss
state[i][0] = 1
lat_cq_cnt -= 1
print(state)
return state, reward, done, info
elif action < self.action_space_size - 1: # this time the action must be smaller than action_space_size - 1
tmpaction = self.env.parse_action(action)
address = hex(tmpaction[0]+self.env.attacker_address_min)[2:] # attacker address in attacker_address_space
is_guess = tmpaction[1] # check whether to guess or not
is_victim = tmpaction[2] # check whether to invoke victim
is_flush = tmpaction[3] # check whether to flush
victim_addr = hex(tmpaction[4] + self.env.victim_address_min)[2:] # victim address
# need to check if revealed first
# if revealed, must make a guess
# if not revealed can do any thing
if self.revealed == True:
if is_guess == 0: # revealed but not guess # huge penalty
self.env.vprint("reveal but no guess! terminate")
done = True
reward = self.env.wrong_reward
info = {}
state = self.env.reset()
return state, reward, done, info
elif is_guess != 0: # this must be guess and terminate
done = True
#return self.env.step(action)
if int(victim_addr,16) == self.env.victim_address:
reward = self.env.correct_reward
else:
reward = self.env.wrong_reward
info = {}
state = self.env.reset()
return state, reward, done, info
elif self.revealed == False:
if is_guess != 0:
# guess without reveal --> huge penalty
self.env.vprint("guess without reveal! terminate")
done = True
reward = self.env.wrong_reward
info = {}
state = self.env.reset()
return state, reward, done, info
else:
state, reward, done, info = self.env.step(action)
# append to the cq_command
if is_victim == True:
self.cq_command += (' ' + chr(ord('A') + self.env.victim_address))
elif is_flush == True:
self.cq_command += (' ' + chr(ord('A') + int(address, 16)) + '!')
else:
self.cq_command += (' ' + chr(ord('A') + int(address, 16)) + '?')
self.last_unmasked_tuple = ( state.copy(), reward, done, info )
# mask the state so that nothing is revealed
state[:,0] = - np.ones((state.shape[0],)) # use -1 as the default (unrevealed value)
#print(state)
return state, reward, done, info
if __name__ == "__main__":
ray.init(include_dashboard=False, ignore_reinit_error=True, num_gpus=1, local_mode=True)
if ray.is_initialized():
ray.shutdown()
tune.register_env("cache_guessing_game_env", CacheQueryEnv)
config = {
'env': 'cache_guessing_game_env', #'cache_simulator_diversity_wrapper',
'env_config': {
'verbose': 1,
"prefetcher": "nextline",
"rerandomize_victim": False,
"force_victim_hit": False,
'flush_inst': False,
"allow_victim_multi_access": True,#False,
"allow_empty_victim_access": False,
"attacker_addr_s": 0,
"attacker_addr_e": 7,#4,#11,#15,
"victim_addr_s": 0,
"victim_addr_e": 3,#7,
"reset_limit": 1,
"cache_configs": {
# YAML config file for cache simulation
"architecture": {
"word_size": 1, #bytes
"block_size": 1, #bytes
"write_back": True
},
# for L2 cache of Intel i7-6700
# it is a 4-way cache, this should not be changed
"cache_1": {#required
"blocks": 4,#4,
"associativity": 4,
"hit_time": 1 #cycles
},
"mem": {#required
"hit_time": 1000 #cycles
}
}
},
#'gamma': 0.9,
'num_gpus': 1,
'num_workers': 1,
'num_envs_per_worker': 1,
#'entropy_coeff': 0.001,
#'num_sgd_iter': 5,
#'vf_loss_coeff': 1e-05,
'model': {
#'custom_model': 'test_model',#'rnn',
#'max_seq_len': 20,
#'custom_model_config': {
# 'cell_size': 32
# }
},
'framework': 'torch',
}
#tune.run(PPOTrainer, config=config)
trainer = PPOTrainer(config=config)
def signal_handler(sig, frame):
print('You pressed Ctrl+C!')
checkpoint = trainer.save()
print("checkpoint saved at", checkpoint)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
while True:
result = trainer.train() | AutoCAT-main | src/rllib/cache_query_env.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
'''
Author: Mulong Luo
Date: 2022.7.11
Function: An example rllib training script
'''
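# Example usage (illustrative): run this script directly, e.g. `python run_gym_rllib_example.py`;
# it trains PPO on the registered cache environment in an endless loop, and pressing Ctrl+C
# triggers the signal handler below, which saves a checkpoint before exiting. The config
# assumes a GPU is available (num_gpus=1).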
from random import random
import sys
import os
###sys.path.append('../src')
from ray.rllib.agents.ppo import PPOTrainer
import ray
import ray.tune as tune
import gym
from gym import spaces
from cache_guessing_game_env_wrapper import CacheGuessingGameEnvWrapper as CacheGuessingGameEnv
import signal
import numpy as np
if __name__ == "__main__":
ray.init(include_dashboard=False, ignore_reinit_error=True, num_gpus=1, local_mode=True)
if ray.is_initialized():
ray.shutdown()
tune.register_env("cache_guessing_game_env", CacheGuessingGameEnv)
config = {
'env': 'cache_guessing_game_env', #'cache_simulator_diversity_wrapper',
'env_config': {
'verbose': 1,
"rerandomize_victim": False,
"force_victim_hit": False,
'flush_inst': False,
"allow_victim_multi_access": True,#False,
"allow_empty_victim_access": True,
"attacker_addr_s": 0,
"attacker_addr_e": 8,#4,#11,#15,
"victim_addr_s": 0,
"victim_addr_e": 0,#7,
"reset_limit": 1,
"cache_configs": {
# YAML config file for cache simulation
"architecture": {
"word_size": 1, #bytes
"block_size": 1, #bytes
"write_back": True
},
"cache_1": {#required
"blocks": 4,#4,
"associativity": 4,
"hit_time": 1, #cycles
"prefetcher": "nextline"
},
"mem": {#required
"hit_time": 1000 #cycles
}
}
},
#'gamma': 0.9,
'num_gpus': 1,
'num_workers': 1,
'num_envs_per_worker': 1,
#'entropy_coeff': 0.001,
#'num_sgd_iter': 5,
#'vf_loss_coeff': 1e-05,
'model': {
#'custom_model': 'test_model',#'rnn',
#'max_seq_len': 20,
#'custom_model_config': {
# 'cell_size': 32
# }
},
'framework': 'torch',
}
#tune.run(PPOTrainer, config=config)
trainer = PPOTrainer(config=config)
def signal_handler(sig, frame):
print('You pressed Ctrl+C!')
checkpoint = trainer.save()
print("checkpoint saved at", checkpoint)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
while True:
result = trainer.train() | AutoCAT-main | src/rllib/run_gym_rllib_example.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# using ray 1.92 to run
# python 3.9
from ray.rllib.agents.ppo.ppo_torch_policy import PPOTorchPolicy
from ray.rllib.agents.a3c.a3c_torch_policy import A3CTorchPolicy
from ray.rllib.agents.a3c.a2c import A2CTrainer
from ray.rllib.agents.ppo import PPOTrainer
import gym
import ray.tune as tune
from torch.nn import functional as F
from typing import Optional, Dict
import torch.nn as nn
import ray
from collections import deque
#from ray.rllib.agents.ppo.ppo_torch_policy import ValueNetworkMixin
from ray.rllib.evaluation.episode import MultiAgentEpisode
from ray.rllib.evaluation.postprocessing import compute_gae_for_sample_batch, \
Postprocessing
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.modelv2 import ModelV2
#from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.policy_template import build_policy_class
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import Deprecated
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.torch_ops import apply_grad_clipping, sequence_mask
from ray.rllib.utils.typing import TrainerConfigDict, TensorType, \
PolicyID, LocalOptimizer
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
import copy
import numpy as np
import sys
import math
import pprint
sys.path.append("../src")
torch, nn = try_import_torch()
from cache_guessing_game_env_wrapper import CacheGuessingGameEnvWrapper as CacheGuessingGameEnv
from categorization_parser import *
def custom_init(policy: Policy, obs_space: gym.spaces.Space,
action_space: gym.spaces.Space, config: TrainerConfigDict)->None:
#pass
policy.past_len = 5
policy.past_models = deque(maxlen =policy.past_len)
policy.timestep = 0
def copy_model(model: ModelV2) -> ModelV2:
copied_model = TorchModelV2(
obs_space = model.obs_space,
action_space = model.action_space,
num_outputs = model.num_outputs,
model_config = model.model_config,
name = 'copied')
return copied_model
def compute_div_loss(policy: Policy, model: ModelV2,
dist_class: ActionDistribution,
train_batch: SampleBatch):
#original_weight = copy.deepcopy(policy.get_weights())
logits, _ = model.from_batch(train_batch)
values = model.value_function()
valid_mask = torch.ones_like(values, dtype=torch.bool)
dist = dist_class(logits, model)
#log_probs = dist.logp(train_batch[SampleBatch.ACTIONS])#.reshape(-1)
print('log_probs')
#print(log_probs)
divs = []
#div_metric = nn.KLDivLoss(size_average=False, reduce=False)
div_metric = nn.KLDivLoss(reduction = 'batchmean')
#div_metric = nn.CrossEntropyLoss()
#if len(policy.past_models) > 1:
# assert(policy.past_models[0].state_dict() == policy.past_models[1].state_dict())
for idx, past_model in enumerate(policy.past_models):
#for idx, past_weights in enumerate(policy.past_weights):
#temp_policy = pickle.loads(pickle.dumps(policy))
#temp_policy.set_weights(past_weights)
#temp_model = pickle.loads(pickle.dumps(policy.model))
#temp_model.load_state_dict(past_weights)
#past_model.load_state_dict(policy.past_weights[i])
#past_model = temp_model.set_weights(past_weights)
#assert(False)
past_logits, _ = past_model.from_batch(train_batch)
past_values = past_model.value_function()
past_valid_mask = torch.ones_like(past_values, dtype=torch.bool)
past_dist = dist_class(train_batch[SampleBatch.ACTION_DIST_INPUTS], past_model)
div = math.atan( - policy.timestep_array[idx] + policy.timestep ) * math.exp( ( policy.timestep_array[idx] - policy.timestep ) / policy.timestep_array[idx]) * dist.kl(past_dist)
###print(div)
###print(dist)
###print(past_dist)
###print(train_batch[SampleBatch.ACTION_DIST_INPUTS])
#print(train_batch[SampleBatch.ACTIONS])
#print(log_probs)
#print(past_log_probs)
#print(train_batch[Postprocessing.ADVANTAGES])
#print(log_probs * train_batch[Postprocessing.ADVANTAGES])
#print(past_log_probs * train_batch[Postprocessing.ADVANTAGES])
#div = dist.multi_kl(past_dist) * train_batch[Postprocessing.ADVANTAGES]
#assert(
if idx == 0 and True:#policy.timestep % 10 == 0:
print('past_model.state_dict()')
#print(past_model.state_dict())
print('model.state_dict()')
#print(model.state_dict())
#div = past_dist.multi_kl(dist)
print('div')
#print(div)
div = div.sum().mean(0)
divs.append(div)
print('divs')
#print(divs)
div_loss = 0
div_loss_orig = 0
for div in divs:
div_loss += div
div_loss_orig += div
if len(policy.past_models) > 0:
div_loss = div_loss / len(policy.past_models)#policy.past_len
print('len(policy.past_models)')
print(len(policy.past_models))
#policy.set_weights(original_weight)
return div_loss
def compute_div_loss_weight(policy: Policy, weight,
dist_class: ActionDistribution,
train_batch: SampleBatch):
original_weight = copy.deepcopy(policy.get_weights())
policy.set_weights(weight)
model = policy.model
logits, _ = model.from_batch(train_batch)
values = model.value_function()
valid_mask = torch.ones_like(values, dtype=torch.bool)
dist = dist_class(logits, model)
log_probs = dist.logp(train_batch[SampleBatch.ACTIONS])#.reshape(-1)
print('log_probs')
#print(log_probs)
divs = []
div_metric = nn.KLDivLoss(size_average=False, reduce=False)
#div_metric = nn.CrossEntropyLoss()
#if len(policy.past_models) > 1:
# assert(policy.past_models[0].state_dict() == policy.past_models[1].state_dict())
for idx, past_weight in enumerate(policy.past_weights):
#assert(False)
policy.set_weights(past_weight)
past_model = policy.model
past_logits, _ = past_model.from_batch(train_batch)
past_values = past_model.value_function()
past_valid_mask = torch.ones_like(past_values, dtype=torch.bool)
past_dist = dist_class(past_logits, past_model)
past_log_probs = past_dist.logp(train_batch[SampleBatch.ACTIONS])#.reshape(-1)
div = div_metric(log_probs * train_batch[Postprocessing.ADVANTAGES], past_log_probs* train_batch[Postprocessing.ADVANTAGES])
#div = div_metric(log_probs, past_log_probs) * train_batch[Postprocessing.ADVANTAGES]
#div = dist.multi_kl(past_dist) * train_batch[Postprocessing.ADVANTAGES]
#assert(
if idx == 0 and True:#policy.timestep % 10 == 0:
print('past_model.state_dict()')
#print(past_model.state_dict())
print('model.state_dict()')
#print(model.state_dict())
#div = past_dist.multi_kl(dist)
print('div')
#print(div)
div = div.mean(0)
divs.append(div)
print('divs')
#print(divs)
div_loss = 0
div_loss_orig = 0
for div in divs:
div_loss += div
div_loss_orig += div
if len(policy.past_weights) > 0:
div_loss = div_loss / len(policy.past_weights)#policy.past_len
#print('len(policy.past_weights)')
#print(len(policy.past_weights))
#policy.set_weights(original_weight)
return div_loss
import pickle
def custom_loss(policy: Policy, model: ModelV2,
dist_class: ActionDistribution,
train_batch: SampleBatch) -> TensorType:
logits, _ = model.from_batch(train_batch)
values = model.value_function()
policy.timestep += 1
#if len(policy.devices) > 1:
# copy weights of main model (tower-0) to all other towers type
if policy.timestep % 100 == 0:
copied_model = pickle.loads(pickle.dumps(model))
copied_model.load_state_dict(model.state_dict())
policy.past_models.append(copied_model)
if policy.is_recurrent():
B = len(train_batch[SampleBatch.SEQ_LENS])
max_seq_len = logits.shape[0] // B
mask_orig = sequence_mask(train_batch[SampleBatch.SEQ_LENS],
max_seq_len)
valid_mask = torch.reshape(mask_orig, [-1])
else:
valid_mask = torch.ones_like(values, dtype=torch.bool)
dist = dist_class(logits, model)
log_probs = dist.logp(train_batch[SampleBatch.ACTIONS]).reshape(-1)
#print('log_probs')
#print(log_probs)
pi_err = -torch.sum(
torch.masked_select(log_probs * train_batch[Postprocessing.ADVANTAGES],
valid_mask))
# Compute a value function loss.
if policy.config["use_critic"]:
value_err = 0.5 * torch.sum(
torch.pow(
torch.masked_select(
values.reshape(-1) -
train_batch[Postprocessing.VALUE_TARGETS], valid_mask),
2.0))
# Ignore the value function.
else:
value_err = 0.0
entropy = torch.sum(torch.masked_select(dist.entropy(), valid_mask))
div_loss = compute_div_loss(policy, model, dist_class, train_batch)
total_loss = (pi_err + value_err * policy.config["vf_loss_coeff"] -
entropy * policy.config["entropy_coeff"] - 1000 * div_loss )
print('pi_err')
#print(pi_err)
print('value_err')
#print(value_err)
print('div_loss')
print(div_loss)
print('pi_err')
print(pi_err)
print('total_loss')
print(total_loss)
# Store values for stats function in model (tower), such that for
# multi-GPU, we do not override them during the parallel loss phase.
model.tower_stats["entropy"] = entropy
model.tower_stats["pi_err"] = pi_err
model.tower_stats["value_err"] = value_err
return total_loss
CustomPolicy = A3CTorchPolicy.with_updates(
name="MyCustomA3CTorchPolicy",
loss_fn=custom_loss,
#make_model= make_model,
before_init=custom_init)
CustomTrainer = A2CTrainer.with_updates(
get_policy_class=lambda _: CustomPolicy)
#PPOCustomPolicy = PPOTorchPolicy.with_updates(
# name="MyCustomA3CTorchPolicy",
# loss_fn=custom_loss,
# #make_model= make_model,
# before_init=custom_init)
from typing import Dict, List, Type, Union
from ray.rllib.utils.annotations import override
class CustomPPOTorchPolicy(PPOTorchPolicy):
def __init__(self, observation_space, action_space, config):
self.past_len = 10
#self.categorization_parser = CategorizationParser()
self.past_models = deque(maxlen =self.past_len)
#self.past_weights = deque(maxlen= self.past_len)
self.timestep = 0
self.timestep_array = deque(maxlen=self.past_len)
super(CustomPPOTorchPolicy, self).__init__(observation_space, action_space, config)
#@override(PPOTorchPolicy)
def loss(self, model: ModelV2, dist_class: Type[ActionDistribution],
train_batch: SampleBatch, extern_trigger = True ) -> Union[TensorType, List[TensorType]]:
#return custom_loss(self, model, dist_class, train_batch)
self.timestep += 1
if self.timestep % 20 == 0 and extern_trigger == False:
copied_model = pickle.loads(pickle.dumps(model))
copied_model.load_state_dict(model.state_dict())
self.past_models.append(copied_model)
total_loss = PPOTorchPolicy.loss(self, model, dist_class, train_batch)
#self.past_len
div_loss = 0 #compute_div_loss(self, model, dist_class, train_batch)
#div_loss = compute_div_loss_weight(self, copy.deepcopy(self.get_weights()), dist_class, train_batch)
print('total_loss')
print(total_loss)
print('div_loss')
print(div_loss)
#assert(False)
ret_loss = total_loss - 0.03 * div_loss
return ret_loss
'''
new_loss = []
if issubclass(type(total_loss),TensorType):
return total_loss - compute_div_loss(self, model, dist_class, train_batch)
else:
for loss in total_loss:
new_loss.append(loss - compute_div_loss(self, model, dist_class, train_batch))
return new_loss
'''
def replay_agent(self, env):
        # no cache randomization
        # randomized inference (10 times)
pattern_buffer = []
num_guess = 0
num_correct = 0
for victim_addr in range(env.victim_address_min, env.victim_address_max + 1):
for repeat in range(1):
obs = env.reset(victim_address=victim_addr)
action_buffer = []
done = False
                while not done:
print(f"-> Sending observation {obs}")
action = self.compute_single_action(obs, explore=False) # randomized inference
print(f"<- Received response {action}")
obs, reward, done, info = env.step(action)
action_buffer.append((action, obs[0]))
if reward > 0:
correct = True
num_correct += 1
else:
correct = False
num_guess += 1
pattern_buffer.append((victim_addr, action_buffer, correct))
pprint.pprint(pattern_buffer)
return 1.0 * num_correct / num_guess, pattern_buffer
def push_current_model(self):
#print('len(self.past_weights)')
#print(len(self.past_weights))
model = pickle.loads(pickle.dumps(self.model))
model.load_state_dict(copy.deepcopy(self.model.state_dict()))
self.past_models.append(model)
self.timestep_array.append(self.timestep)
#self.past_weights.append(copy.deepcopy(self.get_weights()))
#self.past_weights.append(copy.deepcopy(agent.get_weights()))
return
    #TODO(Mulong): is there a standard initialization condition?
#def is_same_agent(self, weight1, weight2, env, trainer):
def is_same_agent(self, model1, model2, env, trainer):
categorization_parser = CategorizationParser(env)
original_state_dict = copy.deepcopy(self.model.state_dict())
#original_weights = copy.deepcopy(self.get_weights())
for victim_addr in range(env.victim_address_min, env.victim_address_max + 1):
obs = env.reset(victim_address=victim_addr)
#from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
#pp = trainer.workers.local_worker().preprocessors[DEFAULT_POLICY_ID]
#obs = pp.transform(obs)
done = False
#while done == False:
# self.model.load_state_dict(model1.state_dict())
# #self.set_weights(weight1)
# action1 = trainer.compute_single_action(obs, explore=False) # randomized inference
# self.model.load_state_dict(model2.state_dict())
# #self.set_weights(weight2)
# action2 = trainer.compute_single_action(obs, explore=False) # randomized inference
# if action1 != action2:
# self.model.load_state_dict(original_state_dict)
# #self.set_weights(original_weights)
# return False
# else:
# action = action1
# obs, reward, done, info = env.step(action)
            seq1 = []
            while not done:
                self.model.load_state_dict(model1.state_dict())
                action1 = trainer.compute_single_action(obs, explore=False) # randomized inference
                seq1.append(action1)
                obs, reward, done, info = env.step(action1)
            # replay the same victim address with the second model to collect its pattern
            obs = env.reset(victim_address=victim_addr)
            done = False
            seq2 = []
            while not done:
                self.model.load_state_dict(model2.state_dict())
                action2 = trainer.compute_single_action(obs, explore=False) # randomized inference
                seq2.append(action2)
                obs, reward, done, info = env.step(action2)
            if not categorization_parser.is_same_base_pattern(seq1, seq2):
                self.model.load_state_dict(original_state_dict)
                return False
self.model.load_state_dict(original_state_dict)
#self.set_weights(original_weights)
return True
def existing_agent(self, env, trainer):
print('existing_agent')
current_model = pickle.loads(pickle.dumps(self.model))
#current_weights = copy.deepcopy(self.get_weights())
#current_model.load_state_dict(self.model.state_dict())
for idx, past_model in enumerate(self.past_models):
#for idx, past_weights in enumerate(self.past_weights):
print(idx)
if self.is_same_agent(current_model, past_model, env, trainer):
#if self.is_same_agent(current_weights, past_weights, env, trainer):
return True
return False
PPOCustomTrainer = PPOTrainer.with_updates(
get_policy_class=lambda _: CustomPPOTorchPolicy)
import models.dnn_model
#tune.run(CustomTrainer, config={"env": 'Frostbite-v0', "num_gpus":0})#, 'model': { 'custom_model': 'test_model' }})
tune.register_env("cache_guessing_game_env_fix", CacheGuessingGameEnv)#Fix)
# Two ways of training
# method 2b
config = {
'env': 'cache_guessing_game_env_fix', #'cache_simulator_diversity_wrapper',
"evaluation_num_workers": 1,
"evaluation_interval": 5,
'env_config': {
'verbose': 1,
"force_victim_hit": False,
'flush_inst': False,#True,
"allow_victim_multi_access": True, #False,
"attacker_addr_s": 0,
"attacker_addr_e": 15,
"victim_addr_s": 0,
"victim_addr_e": 7,
"reset_limit": 1,
"length_violation_reward": -1,
"double_victim_access_reward": -0.001, # must be large value if not allow victim multi access
"victim_access_reward": -0.001,
"correct_reward": 0.02,
"wrong_reward": -1,
"step_reward": -0.001,
"cache_configs": {
            # YAML config file for cache simulation
"architecture": {
"word_size": 1, #bytes
"block_size": 1, #bytes
"write_back": True
},
"cache_1": {#required
"blocks": 8,
"associativity": 8,
"hit_time": 1 #cycles
},
"mem": {#required
"hit_time": 1000 #cycles
}
}
},
#'gamma': 0.9,
'num_gpus': 1,
'num_workers': 1,
'num_envs_per_worker': 1,
'lr': 1e-3, # decrease lr if unstable
#'entropy_coeff': 0.001,
#'num_sgd_iter': 5,
#'vf_loss_coeff': 1e-05,
'model': {
### 'custom_model': 'dnn_model',#'rnn',
### 'custom_model_config': {
### 'window_size': 40, #16, #need to match
### 'latency_dim': 3,
### 'victim_acc_dim': 2,
### 'action_dim': 200, # need to be precise
### 'step_dim': 80,#40, # need to be precise
### 'action_embed_dim': 32,#,8, # can be increased 32
### 'step_embed_dim': 6,#4, # can be increased less than 16
### 'hidden_dim': 32,
### 'num_blocks': 1
### }
},
'framework': 'torch',
}
if __name__ == "__main__":
tune.run(PPOCustomTrainer, config=config)#config={"env": 'Freeway-v0', "num_gpus":1})
| AutoCAT-main | src/rllib/test_custom_policy_diversity_works.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
'''
Author: Mulong Luo
Date: 2022.7.11
Function: An example rllib training script
'''
from random import random
import sys
import os
###sys.path.append('../src')
from ray.rllib.agents.ppo import PPOTrainer
import ray
import ray.tune as tune
import gym
from gym import spaces
from cache_guessing_game_env_wrapper import CacheGuessingGameEnvWrapper as CacheGuessingGameEnv
import signal
import numpy as np
if __name__ == "__main__":
    # shut down any stale Ray session first so the settings below take effect
    if ray.is_initialized():
        ray.shutdown()
    ray.init(include_dashboard=False, ignore_reinit_error=True, num_gpus=1, local_mode=True)
tune.register_env("cache_guessing_game_env", CacheGuessingGameEnv)
config = {
'env': 'cache_guessing_game_env', #'cache_simulator_diversity_wrapper',
'env_config': {
'verbose': 1,
#'super_verbose': 1,
"rerandomize_victim": False,
"force_victim_hit": False,
'flush_inst': False,
"allow_victim_multi_access": True,#False,
"allow_empty_victim_access": False,
"attacker_addr_s": 4,
"attacker_addr_e": 7,#4,#11,#15,
"victim_addr_s": 0,
"victim_addr_e": 3,#7,
"reset_limit": 1,
"cache_configs": {
                # YAML config file for cache simulation
"architecture": {
"word_size": 1, #bytes
"block_size": 1, #bytes
"write_back": True
},
"cache_1": {#required
"blocks": 4,#4,
"associativity": 1,
"hit_time": 1, #cycles
"prefetcher": "nextline"
},
"cache_1_core_2": {#required
"blocks": 4,#4,
"associativity": 1,
"hit_time": 1, #cycles
"prefetcher": "nextline"
},
"cache_2": {
"blocks": 4,
"associativity": 1,
"hit_time": 16,
},
"mem": {#required
"hit_time": 1000 #cycles
}
}
},
#'gamma': 0.9,
'num_gpus': 1,
'num_workers': 1,
'num_envs_per_worker': 1,
#'entropy_coeff': 0.001,
#'num_sgd_iter': 5,
#'vf_loss_coeff': 1e-05,
'model': {
#'custom_model': 'test_model',#'rnn',
#'max_seq_len': 20,
#'custom_model_config': {
# 'cell_size': 32
# }
},
'framework': 'torch',
}
#tune.run(PPOTrainer, config=config)
trainer = PPOTrainer(config=config)
def signal_handler(sig, frame):
print('You pressed Ctrl+C!')
checkpoint = trainer.save()
print("checkpoint saved at", checkpoint)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
while True:
result = trainer.train() | AutoCAT-main | src/rllib/run_gym_rllib_example_multicore.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
'''
Author: Mulong Luo
Date: 2022.7.11
Function: An example rllib training script
'''
from random import random
import sys
import os
###sys.path.append('../src')
from ray.rllib.agents.ppo import PPOTrainer
import ray
import ray.tune as tune
import gym
from gym import spaces
from cache_guessing_game_env_wrapper import CacheGuessingGameEnvWrapper as CacheGuessingGameEnv
import signal
import numpy as np
if __name__ == "__main__":
    # shut down any stale Ray session first so the settings below take effect
    if ray.is_initialized():
        ray.shutdown()
    ray.init(include_dashboard=False, ignore_reinit_error=True, num_gpus=1, local_mode=True)
tune.register_env("cache_guessing_game_env", CacheGuessingGameEnv)
config = {
'env': 'cache_guessing_game_env', #'cache_simulator_diversity_wrapper',
'env_config': {
'verbose': 1,
#'super_verbose': 1,
"rerandomize_victim": False,
"force_victim_hit": False,
'flush_inst': False,
"allow_victim_multi_access": True,#False,
"allow_empty_victim_access": False,
"attacker_addr_s": 8,
"attacker_addr_e": 23,#4,#11,#15,
"victim_addr_s": 0,
"victim_addr_e": 7,#7,
"reset_limit": 1,
"cache_configs": {
                # YAML config file for cache simulation
"architecture": {
"word_size": 1, #bytes
"block_size": 1, #bytes
"write_back": True
},
"cache_1": {#required
"blocks": 4,#4,
"associativity": 1,
"hit_time": 1, #cycles
"prefetcher": "nextline"
},
"cache_1_core_2": {#required
"blocks": 4,#4,
"associativity": 1,
"hit_time": 1, #cycles
"prefetcher": "nextline"
},
"cache_2": {
"blocks": 16,
"associativity": 2,
"hit_time": 16,
},
"mem": {#required
"hit_time": 1000 #cycles
}
}
},
#'gamma': 0.9,
'num_gpus': 1,
'num_workers': 1,
'num_envs_per_worker': 1,
#'entropy_coeff': 0.001,
#'num_sgd_iter': 5,
#'vf_loss_coeff': 1e-05,
'model': {
#'custom_model': 'test_model',#'rnn',
#'max_seq_len': 20,
#'custom_model_config': {
# 'cell_size': 32
# }
},
'framework': 'torch',
}
#tune.run(PPOTrainer, config=config)
trainer = PPOTrainer(config=config)
def signal_handler(sig, frame):
print('You pressed Ctrl+C!')
checkpoint = trainer.save()
print("checkpoint saved at", checkpoint)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
while True:
result = trainer.train() | AutoCAT-main | src/rllib/run_gym_rllib_example_multicore_largel3.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import sys
import pandas as pd
from cache_guessing_game_env_wrapper import CacheGuessingGameEnvWrapper as CacheGuessingGameEnv
from pandas.core.arrays import numeric
class CategorizationParser:
    def __init__(self, env=None, number_of_set=2, attacker_address_range_max=8):
        self.gameenv = env
        self.number_of_set = number_of_set
        self.attacker_address_range_max = attacker_address_range_max # exclusive upper bound of attacker addresses
def _get_set(self, row):
"""return set number"""
return row['addr']%self.number_of_set
def readfile(self,filename):# python categorization_parser.py temp.txt
patterns=[]
f = open(filename, mode='r', encoding='UTF-8')
lines = f.readlines()
for l in lines:
l = l.split()
l = [int(i) for i in l]
patterns.append(l)
return patterns
def parse_action(self, action):
#gameenv = CacheGuessingGameEnv()
action = self.gameenv.parse_action(action)
return action
def convert_dataframe(self, input): # split into [(attacker's)addr, is_guess, is_victim, is_flush, victim_addr]
df = pd.DataFrame(input)
df = df.astype('int')
df.columns =['addr', 'is_guess', 'is_victim', 'is_flush', 'victim_addr']
return df
def add_set_column(self, df):
df['set'] = df.apply (lambda row: self._get_set(row), axis=1)
return df
def _make_order(self, df, col_name):
"""return the order of each element in df[col_name]"""
        order = [-1 for i in range(self.attacker_address_range_max)] # could be further optimized if min address is not 0
cnt = 0
for index, row in df.iterrows():
value = row[col_name]
if order[value] == -1:
order[value] = cnt
cnt = cnt + 1
#print(f'order = {order}')
return order
def _get_order(self, row, col_name, order):
"""return the order of each element in df[col_name]"""
return order[row[col_name]]
def rename_column(self,df, col_name):
"""rename the column based on the order the item appear in the column"""
order = self._make_order(df, col_name)
new_col_name = col_name + '_renamed'
df[new_col_name] = df.apply (lambda row: self._get_order(row, col_name, order), axis=1)
return df
def is_same_action_df(self, action1, action2):
if action1['is_victim'] == action2['is_victim'] and action1['is_victim'] == 1: # If both are is_victim==true, ignore rest of the columns
return True
if action1['is_victim'] != action2['is_victim']:
return False
if action1['is_guess'] == action2['is_guess'] and action1['is_guess'] == 1: # If both are is_guess==true, ignore rest of the columns
return True
if action1['is_guess'] != action2['is_guess']:
return False
if action1['addr_renamed'] == action2['addr_renamed'] and action1['set_renamed']== action2['set_renamed']: # else match the address and set
return True
return False
def remove_rep(self, df):
"""remove contiguous repeated access"""
for index_i, row in df.iterrows():
if index_i != 0:
if self.is_same_action_df(last_row, row):
df = df.drop(index=index_i, axis=0)
last_row=row
return df
def is_same_action_list(self, action1, action2):
""" action format [is_guess, is_victim, is_flush, victim_addr, addr_renamed, set_renamed]"""
if action1[1] == action2[1] and action1[1] == 1: # If both are is_victim==true, ignore rest of the columns
return True
if action1[1] != action2[1]: # is_victim is different
return False
if action1[0] == action2[0] and action1[0] == 1: # If both are is_guess==true, ignore rest of the columns
return True
        if action1[0] != action2[0]: # is_guess is different
return False
if action1[4] == action2[4] and action1[5]== action2[5]: # else match the address and set
return True
return False
def is_same_base_pattern(self,pattern1, pattern2):
""" return whether two patterns after renaming is the same"""
if len(pattern1) != len(pattern2):
return False
for i in range(len(pattern1)):
if self.is_same_action_list(pattern1[i],pattern2[i]) == False:
return False
return True
def main_parser(self, pattern):
"""output a pattern after renaming,
format [is_guess, is_victim, is_flush, victim_addr, addr_renamed, set_renamed]"""
pattern_parsed = []
for action in pattern :
action_parsed = self.parse_action(action)
pattern_parsed.append(action_parsed)
df = self.convert_dataframe(pattern_parsed)
print(df)
df = self.add_set_column(df)
df = self.rename_column(df, 'addr') # rename address
df = self.rename_column(df, 'set') # rename set
print(df)
df = self.remove_rep(df) #remove repeated action
df = df.drop(columns=['addr', 'set'], axis=1)
print(df)
output = df.values.tolist()
return output
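    # Illustrative example (made-up addresses, number_of_set = 2): attacker accesses to
    # addresses 5, 5, 1 are renamed by order of first appearance, so address 5 becomes
    # addr_renamed 0 and address 1 becomes addr_renamed 1; both map to set 5 % 2 == 1 % 2 == 1,
    # the first set seen, which is renamed to set_renamed 0. The contiguous repeat of address 5
    # is dropped by remove_rep, so only two access rows (plus any victim/guess rows) remain.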
def main(argv): # Defining main function
filename = argv[1]
print(filename)
categorization_parser = CategorizationParser()
patterns = categorization_parser.readfile(filename)
print(patterns)
base_pattern = categorization_parser.main_parser(patterns[0])
print(base_pattern)
#for pattern in patterns :
# base_pattern = categorization_parser.main_parser(pattern)
if __name__=="__main__": # Using the special variable
main(sys.argv) | AutoCAT-main | src/rllib/categorization_parser.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# author: Mulong Luo
# usage: processes the result.json produced by an RLlib run and plots the correct-rate curves
import json
from matplotlib import pyplot as plt
import numpy as np
import sys
import math
#pathname = '/home/mulong/ray_results/PPO_cache_guessing_game_env_fix_2022-03-30_09-03-46wrptlf7f'
assert(len(sys.argv) == 2)
pathname = '/home/geunbae/ray_results/' + sys.argv[1]
pathname += '/'
filename = pathname + '/result.json'
configname = pathname + '/params.json'
f = open(filename)
config = json.load(open(configname))
correct_reward = config['env_config']['correct_reward']
wrong_reward = config['env_config']['wrong_reward']
step_reward = config['env_config']['step_reward']
episode_reward_mean = []
episode_len_mean = []
num_steps_sampled = []
time_total_s = []
correct_rate_threshold = 0.95
data = f.readline()
while data:
data=json.loads(data)
episode_reward_mean.append(data['episode_reward_mean'])
episode_len_mean.append(data['episode_len_mean'])
num_steps_sampled.append(data['info']['num_steps_sampled'])
time_total_s.append(data['time_total_s'])
data = f.readline()
f.close()
# estimating the correct-guess rate p from the reward based on the following function
# episode_reward_mean = p * correct_reward + ( 1 - p ) * wrong_reward + ( episode_len_mean - 1 ) * step_reward
# thus p = (episode_reward_mean - wrong_reward - (episode_len_mean - 1) * step_reward) / ( correct_reward - wrong_reward )
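# Worked example (illustrative numbers): with correct_reward = 0.02, wrong_reward = -1 and
# step_reward = -0.001, an episode_reward_mean of -0.03 and episode_len_mean of 11 give
# p = (-0.03 - (-1) - 10 * (-0.001)) / (0.02 - (-1)) = 0.98 / 1.02 ~= 0.96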
correct_rate = []
for i in range(0, len(episode_reward_mean)):
p = (episode_reward_mean[i] - wrong_reward - (episode_len_mean[i] - 1) * step_reward) / ( correct_reward - wrong_reward )
correct_rate.append(p)
# find out the converge_time and converge_steps
i = 0
while i < len(correct_rate):
if correct_rate[i] > correct_rate_threshold:
break
i += 1
if i == len(correct_rate):
converge_time = math.nan
converge_steps = math.nan
else:
converge_time = time_total_s[i]
converge_steps = num_steps_sampled[i]
#plotting
#print(correct_rate)
#pathname = ''
plt.plot(num_steps_sampled, correct_rate)
plt.ylim(0,1)
plt.axhline(y=correct_rate_threshold, color='r', linestyle='-')
plt.xlim(left=0)
plt.xlabel('num_steps_sampled')
plt.ylabel('correct_rate')
plt.text(0, correct_rate_threshold - 0.1, 'converge_steps ='+str(converge_steps), color='r')
plt.grid(True)
plt.savefig(pathname + 'correct_rate_steps.png')
plt.close()
plt.plot(time_total_s, correct_rate)
plt.ylim(0,1)
plt.axhline(y=correct_rate_threshold, color='r', linestyle='-')
plt.xlim(left=0)
plt.xlabel('time_total_s')
plt.ylabel('correct_rate')
plt.text(0, correct_rate_threshold - 0.1, 'converge_time ='+str(converge_time), color='r')
plt.grid(True)
plt.savefig(pathname + 'correct_rate_time.png')
plt.close()
plt.plot(num_steps_sampled, episode_len_mean)
#plt.ylim(0,1)
converge_len = np.average(np.array(episode_len_mean[-100:])) # average episode length over the last (up to) 100 iterations
plt.axhline(y=converge_len, color='r', linestyle='-')
plt.text(0, correct_rate_threshold - 0.1, 'converge_len ='+str(converge_len), color ='r')
plt.xlim(left=0)
plt.xlabel('num_steps_sampled')
plt.ylabel('episode_len_mean')
plt.grid(True)
plt.savefig(pathname + 'len_steps.png')
plt.close()
if math.isnan(converge_steps):
    converge_len = math.nan
print(str(converge_steps)+ ' ' + str(converge_time) + ' ' + str(converge_len))
| AutoCAT-main | src/rllib/process_record.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
'''
Author: Mulong Luo
Date: 2022.7.11
Function: An example rllib training script
'''
from random import random
import sys
import os
###sys.path.append('../src')
from ray.rllib.agents.ppo import PPOTrainer
import ray
import ray.tune as tune
import gym
from gym import spaces
from cache_guessing_game_env_wrapper import CacheGuessingGameEnvWrapper as CacheGuessingGameEnv
import signal
import numpy as np
if __name__ == "__main__":
    # shut down any stale Ray session first so the settings below take effect
    if ray.is_initialized():
        ray.shutdown()
    ray.init(include_dashboard=False, ignore_reinit_error=True, num_gpus=1, local_mode=True)
tune.register_env("cache_guessing_game_env", CacheGuessingGameEnv)
config = {
'env': 'cache_guessing_game_env', #'cache_simulator_diversity_wrapper',
'env_config': {
'verbose': 1,
#'super_verbose': 1,
"rerandomize_victim": False,
"force_victim_hit": False,
'flush_inst': True,
"allow_victim_multi_access": False,
"allow_empty_victim_access": False,
"attacker_addr_s": 0,
"attacker_addr_e": 3,#4,#11,#15,
"victim_addr_s": 0,
"victim_addr_e": 3,#7,
"reset_limit": 1,
"cache_configs": {
                # YAML config file for cache simulation
"architecture": {
"word_size": 1, #bytes
"block_size": 1, #bytes
"write_back": True
},
"cache_1": {#required
"blocks": 4,#4,
"associativity": 1,
"hit_time": 1, #cycles
"rep_policy": "lru"
#"prefetcher": "nextline"
},
##"cache_1_core_2": {#required
## "blocks": 4,#4,
## "associativity": 1,
## "hit_time": 1, #cycles
## "prefetcher": "nextline"
##},
##"cache_2": {
## "blocks": 4,
## "associativity": 1,
## "hit_time": 16,
##},
"mem": {#required
"hit_time": 1000 #cycles
}
}
},
#'gamma': 0.9,
'num_gpus': 1,
'num_workers': 1,
'num_envs_per_worker': 1,
#'entropy_coeff': 0.001,
#'num_sgd_iter': 5,
#'vf_loss_coeff': 1e-05,
'model': {
#'custom_model': 'test_model',#'rnn',
#'max_seq_len': 20,
#'custom_model_config': {
# 'cell_size': 32
# }
},
'framework': 'torch',
}
#tune.run(PPOTrainer, config=config)
trainer = PPOTrainer(config=config)
def signal_handler(sig, frame):
print('You pressed Ctrl+C!')
checkpoint = trainer.save()
print("checkpoint saved at", checkpoint)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
while True:
result = trainer.train()
| AutoCAT-main | src/rllib/run_gym_rllib_example_multicore_flush.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
'''
Author: Mulong Luo
Date: 2022.7.11
Function: An example rllib training script
'''
from random import random
import sys
import os
###sys.path.append('../src')
from ray.rllib.agents.ppo import PPOTrainer
import ray
import ray.tune as tune
import gym
from gym import spaces
from cache_guessing_game_env_wrapper import CacheGuessingGameEnvWrapper as CacheGuessingGameEnv
import signal
import numpy as np
if __name__ == "__main__":
    # shut down any stale Ray session first so the settings below take effect
    if ray.is_initialized():
        ray.shutdown()
    ray.init(include_dashboard=False, ignore_reinit_error=True, num_gpus=1, local_mode=True)
tune.register_env("cache_guessing_game_env", CacheGuessingGameEnv)
config = {
'env': 'cache_guessing_game_env', #'cache_simulator_diversity_wrapper',
'env_config': {
'verbose': 1,
#'super_verbose': 1,
"rerandomize_victim": False,
"force_victim_hit": False,
'flush_inst': False,
"allow_victim_multi_access": True,#False,
"allow_empty_victim_access": False,
"attacker_addr_s": 8,
"attacker_addr_e": 23,#4,#11,#15,
"victim_addr_s": 0,
"victim_addr_e": 7,#7,
"reset_limit": 1,
"cache_configs": {
                # YAML config file for cache simulation
"architecture": {
"word_size": 1, #bytes
"block_size": 1, #bytes
"write_back": True
},
"cache_1": {#required
"blocks": 4,#4,
"associativity": 1,
"hit_time": 1, #cycles
"prefetcher": "nextline"
},
"cache_1_core_2": {#required
"blocks": 4,#4,
"associativity": 1,
"hit_time": 1, #cycles
"prefetcher": "nextline"
},
"cache_2": {
"blocks": 16,
"associativity": 2,
"hit_time": 16,
},
"mem": {#required
"hit_time": 1000 #cycles
}
}
},
#'gamma': 0.9,
'num_gpus': 1,
'num_workers': 1,
'num_envs_per_worker': 1,
#'entropy_coeff': 0.001,
#'num_sgd_iter': 5,
#'vf_loss_coeff': 1e-05,
'model': {
#'custom_model': 'test_model',#'rnn',
#'max_seq_len': 20,
#'custom_model_config': {
# 'cell_size': 32
# }
},
'framework': 'torch',
}
#tune.run(PPOTrainer, config=config)
trainer = PPOTrainer(config=config)
def signal_handler(sig, frame):
print('You pressed Ctrl+C!')
checkpoint = trainer.save()
print("checkpoint saved at", checkpoint)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
while True:
result = trainer.train() | AutoCAT-main | src/rllib/run_gym_rllib_example_multicore_largel2.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
baseline_attack=[
0.03511984,
0.01022458,
0.11334784,
0.01202186,
0.02987794,
0.13556209,
0.07939993,
0.16500453,
0.17601161,
0.13473269,
0.15670964,
0.20633484,
0.23557083,
0.23359248,
0.23104890,
0.24362913,
0.24728261,
0.23643975,
0.24783550,
0.24437646,
0.25376656,
0.30623209,
0.34565488,
0.39174696,
0.41054716,
0.51179543,
0.67646532,
0.77440347,
0.84251969,
0.83644189,
0.82610274,
0.73526762,
0.65524702,
0.64337437,
0.48680882,
0.42476432,
0.42444606,
0.37932277,
0.34550360,
0.34787350,
0.31874439,
0.43236410,
0.80618893,
0.90441969,
0.90622214,
0.89821009,
0.89463292,
0.90063120,
0.89780497,
0.89733909,
0.90239971,
0.90200645,
0.90165392,
0.89677078,
0.89704824,
0.89854290,
0.90708044,
0.90394265,
0.90708044,
0.90627240,
0.90166279,
0.89674987,
0.90884718,
0.88950474,
0.86285097,
0.90988893,
0.91219599,
0.90659440,
0.90439462,
0.91699605,
0.88116673,
0.90368120,
0.90907444,
0.89090909,
0.90922062,
0.87500000,
0.90933866,
0.90714799,
0.87893594,
0.86880203,
0.87982756,
0.88706512,
0.90402309,
0.90210167,
0.90341011,
0.91535433,
0.89671867,
0.92821159,
0.89663805,
0.90790415,
0.91018286,
0.89252925,
0.89607349,
0.90305566,
0.90962312,
0.90638066,
0.90054545,
0.91430131,
0.87517883,
0.90473628
]
baseline_correct=[
0.17003057,
0.26112113,
0.36617593,
0.80079364,
0.83162248,
0.96034767,
0.97504492,
0.69745265,
0.89153894,
0.92732785,
0.94235275,
0.94593234,
0.89202204,
0.90819753,
0.91204853,
0.92484351,
0.92874372,
0.94292784,
0.94527474,
0.94597342,
0.94904447,
0.95465319,
0.95844457,
0.96174563,
0.96596844,
0.97051572,
0.97200700,
0.97203498,
0.97435360,
0.97981619,
0.98085362,
0.98440445,
0.98390276,
0.98243678,
0.98590628,
0.98819512,
0.98592214,
0.98514818,
0.98547970,
0.98320738,
0.98784575,
0.98477673,
0.97976684,
0.98268033,
0.98673281,
0.98516250,
0.98183617,
0.98435883,
0.98450603,
0.98959949,
0.98563428,
0.98864385,
0.98359483,
0.99001871,
0.98767029,
0.98543679,
0.98417436,
0.98310969,
0.98617189,
0.98688590,
0.97820682,
0.96949028,
0.94629435,
0.91744240,
0.94476702,
0.94148899,
0.94421695,
0.96886579,
0.95716285,
0.92035446,
0.92037878,
0.95178133,
0.97063129,
0.94932518,
0.96362125,
0.96804477,
0.96131675,
0.95154591,
0.94700002,
0.93731166,
0.94028843,
0.95962645,
0.93830837,
0.96319019,
0.96619115,
0.95473074,
0.96797761,
0.96588566,
0.96378715,
0.92592031,
0.94713073,
0.89421868,
0.95348555,
0.95734321,
0.95691816,
0.96012618,
0.96672242,
0.97520229,
0.94821455,
0.96289019
]
baseline_guess=[
14.34237683,
0.28099324,
1.10482408,
1.66684882,
1.42740026,
0.97997796,
1.03073546,
1.35793291,
1.29377608,
1.53265160,
1.86362811,
2.18787330,
3.11145287,
3.84632074,
4.86057081,
5.65244894,
6.30054348,
7.12322534,
7.85786436,
8.96940795,
10.02160102,
11.24874642,
12.56815355,
13.73150336,
15.22246220,
17.35872501,
20.07062198,
22.75723066,
25.24284180,
27.38432568,
29.80177891,
32.14128672,
33.06599351,
33.41401159,
34.19298880,
34.36022480,
34.50871776,
34.72658501,
34.84604317,
34.91348601,
34.80681614,
34.98085606,
35.96796960,
36.85088034,
36.88251025,
36.89929488,
36.87919584,
37.20685302,
37.14465635,
37.14365336,
37.10100287,
36.99104264,
37.10955006,
37.19622375,
37.25287977,
37.25813995,
37.21562333,
37.24175627,
37.11093276,
37.12867384,
36.65367424,
35.78057102,
36.05737265,
36.03432863,
34.90568754,
35.17663920,
35.72335479,
35.84444444,
36.03623318,
36.59665828,
35.55797623,
35.30861330,
36.34269154,
36.35175518,
35.93401106,
35.67611191,
36.22801599,
36.18606322,
35.70104958,
34.81831343,
35.20244297,
35.97769848,
36.38950027,
35.76594216,
36.43099447,
36.15819613,
36.01667563,
36.26808204,
35.98783977,
36.86319742,
35.12065256,
37.37137714,
36.11491354,
35.19334303,
35.70892792,
34.70696237,
35.72345455,
35.94850801,
35.03379828,
35.57319699
]
penalty_100_guess=[
29.50417933,
1.22344945,
0.75267437,
1.13464353,
1.08614153,
1.02332847,
1.15135022,
1.13255523,
1.02014120,
3.49820789,
1.07637432,
1.28426501,
1.27505155,
1.50063803,
1.65667230,
2.10797238,
2.49229795,
2.94014085,
3.36892596,
3.49624844,
3.56889932,
3.78941930,
3.87910479,
4.03514310,
4.57831840,
4.85875235,
5.12046931,
5.27399873,
5.70023124,
5.82654757,
6.13146233,
5.42238115,
5.59886887,
6.17286516,
6.46755162,
6.33142615,
6.35857143,
6.77089005,
7.22490470,
7.48215785,
7.62136326,
7.28035228,
7.30361702,
7.53855932,
7.57863874,
7.99914475,
8.27098271,
8.69726316,
8.35211864,
8.97658578,
9.33263202,
9.03434638,
9.16421412,
9.87647059,
11.08275572,
11.27029848,
12.34637588,
12.83673469,
13.50062267,
14.56401674,
14.94607843,
15.10694066,
15.79119258,
15.86199575,
16.29361883,
16.05272991,
16.76428869,
16.90456961,
17.13671875,
17.38062500,
17.64564438,
18.02278481,
17.94679315,
17.86673640,
18.26410256,
18.16972768,
18.18759170,
18.13489736,
18.39682875,
18.72902424,
18.25108047,
18.64890480,
18.96144934,
19.36812264,
18.89331919,
19.09357720,
19.28116656,
19.16133751,
18.86193086,
19.01802375,
18.98377704,
18.67985012,
18.72397611,
18.79231259,
19.20927318,
19.13312303,
19.48020113,
18.91074035,
19.57536842,
19.56521739,
19.18431621,
19.42294807,
19.22913153,
19.69543254,
19.53860601,
19.63589212,
19.70178384,
20.17845762,
19.92748489,
20.14783704,
20.59461166,
20.54669465,
20.77027592,
20.28511706,
20.71045972,
20.99154870,
21.47674915,
21.32629356,
21.57679325,
21.90462307,
21.73862679,
21.97312860,
21.76212569,
22.54166667,
22.09745763,
22.96579392,
22.76663191,
22.81576211,
22.78846154,
22.65538396,
23.44877436,
23.20168421,
23.18796199,
23.41472624,
23.19478624,
23.48477850,
24.28125000,
23.82924820,
24.04847472,
23.63428087,
24.08384372,
23.47174963,
24.07647059,
24.16207627,
24.62218084,
24.66561381,
24.45597815,
24.42048346,
24.54391821,
]
penalty_100_correct_rate=[
0.13757245,
0.22070691,
0.29907239,
0.61505162,
0.87329470,
0.90569559,
0.88720057,
0.97077224,
0.96172481,
0.92807427,
0.96560375,
0.93698818,
0.94918277,
0.93351697,
0.93408320,
0.90740492,
0.90219904,
0.90162322,
0.87831753,
0.88323177,
0.90669821,
0.90580084,
0.89196975,
0.90775231,
0.91618298,
0.91656244,
0.91732303,
0.91940618,
0.91861134,
0.91802211,
0.91777248,
0.93283100,
0.93771301,
0.92839511,
0.93743350,
0.93022563,
0.92496090,
0.94122153,
0.92643793,
0.93103427,
0.92845866,
0.93866879,
0.93329447,
0.93110663,
0.92859257,
0.94114582,
0.94611516,
0.94032850,
0.93971612,
0.94585564,
0.93393985,
0.93055444,
0.94348841,
0.93450596,
0.93681816,
0.93938515,
0.93995660,
0.93742645,
0.94090190,
0.94634112,
0.94466373,
0.94722140,
0.94918749,
0.94615934,
0.94665908,
0.95625397,
0.95736304,
0.94756137,
0.95296479,
0.95371451,
0.96250719,
0.95489270,
0.96019256,
0.96098124,
0.96428483,
0.95641637,
0.96215396,
0.95164828,
0.96128709,
0.96173180,
0.96050836,
0.96684761,
0.95735910,
0.96188616,
0.96332256,
0.95761918,
0.95727710,
0.96268587,
0.95846854,
0.96538839,
0.95921116,
0.95781530,
0.96320020,
0.96043409,
0.95988230,
0.96421904,
0.95105871,
0.96649123,
0.95428144,
0.95959083,
0.95786789,
0.95899402,
0.96298041,
0.95859683,
0.95841701,
0.95960966,
0.95874214,
0.95174934,
0.95451041,
0.95000610,
0.95768994,
0.95972187,
0.95544440,
0.94972269,
0.95216930,
0.95615790,
0.94646905,
0.94872856,
0.94970367,
0.95551362,
0.95996366,
0.95006704,
0.95082290,
0.95863353,
0.95562813,
0.95886373,
0.94392633,
0.94478859,
0.95116343,
0.95064296,
0.95480776,
0.95378959,
0.95051417,
0.95379620,
0.94546511,
0.96155770,
0.95195943,
0.95762728,
0.95516956,
0.95966776,
0.95482917,
0.95403618,
0.95376471,
0.95781742,
0.95715434,
0.95797879,
0.95463553,
0.94413048,
0.95308607,
]
penalty_100_attack=[
0.05547112,
0.00807137,
0.00000000,
0.00104537,
0.00021061,
0.00062487,
0.00000000,
0.00020842,
0.00062292,
0.00021084,
0.00272765,
0.00144928,
0.00103093,
0.00233943,
0.00168919,
0.00355723,
0.00506436,
0.00600663,
0.00959333,
0.00771155,
0.00810580,
0.01467245,
0.01338632,
0.00462963,
0.00874947,
0.00897142,
0.00817096,
0.01165501,
0.00988018,
0.00727046,
0.01139481,
0.01135886,
0.00544617,
0.00851860,
0.01264223,
0.01652893,
0.01102041,
0.00942408,
0.01164761,
0.01154492,
0.01475478,
0.01488782,
0.01617021,
0.01334746,
0.01654450,
0.01282874,
0.01138760,
0.01536842,
0.01588983,
0.01170711,
0.02251210,
0.02019151,
0.01364897,
0.01239496,
0.01512287,
0.01607180,
0.01368399,
0.02374011,
0.00996264,
0.02238494,
0.01960784,
0.01698469,
0.01938475,
0.01528662,
0.02098792,
0.01764584,
0.02085941,
0.02529224,
0.02097039,
0.02354167,
0.02193630,
0.02426160,
0.02309755,
0.02154812,
0.02799145,
0.02448807,
0.02578076,
0.01968999,
0.01902748,
0.02838202,
0.02490224,
0.01790227,
0.02254055,
0.02603948,
0.02672322,
0.02637176,
0.01910043,
0.02549634,
0.02255481,
0.02311281,
0.03223794,
0.02435470,
0.03114334,
0.02229773,
0.02882206,
0.02018927,
0.03079824,
0.02398332,
0.02989474,
0.02596032,
0.03677869,
0.02617253,
0.02487352,
0.02694170,
0.02337229,
0.02282158,
0.02896118,
0.03016784,
0.03313190,
0.02036959,
0.02189013,
0.02518363,
0.03010033,
0.02445652,
0.02889076,
0.01922671,
0.02303754,
0.02703273,
0.01898734,
0.03019575,
0.02464195,
0.02729793,
0.02066639,
0.03287270,
0.02139831,
0.02871622,
0.02377477,
0.01809108,
0.03486898,
0.01882801,
0.02723654,
0.02315789,
0.02492080,
0.02643172,
0.02398332,
0.01658619,
0.03378378,
0.02677652,
0.03113247,
0.02291048,
0.03041183,
0.02121403,
0.02436975,
0.02161017,
0.02876060,
0.03305959,
0.02500525,
0.02353690,
0.02482787
] | AutoCAT-main | src/cyclone_data/plot.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import argparse
import math
import json
import re
from datetime import datetime
from typing import Any, Dict, Optional, Union
import matplotlib.pyplot as plt
import numpy as np
JSON_REGEX = re.compile("{.+}")
def parse_json(line: str) -> Optional[Dict[str, Any]]:
m = JSON_REGEX.search(line)
return None if m is None else json.loads(m.group())
def get_value(val: Union[float, Dict[str, float]]) -> float:
return val["mean"] if isinstance(val, dict) else val
def plot(log_file: str,
phase: str,
xkey: str,
ykey: str,
fig_file: Optional[str] = None) -> None:
x = []
y = []
with open(log_file, "r") as f:
line = f.readline()
# cfg = parse_json(line)
for line in f:
stats = parse_json(line)
if stats is None:
continue
cur_phase = stats.get("phase", None)
if cur_phase == phase:
x.append(get_value(stats[xkey]))
y.append(get_value(stats[ykey]))
# y.append(math.log(get_value(stats[ykey])))
# y.append(get_value(stats["gap"]) / get_value(stats["episode_length"]))
x = np.array(x)
y = np.array(y)
plt.plot(x, y, label=ykey)
plt.xlabel(xkey)
plt.ylabel(ykey)
plt.legend()
if fig_file is not None:
plt.savefig(fig_file)
else:
plt.show()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--log_file", type=str, help="log file to plot")
parser.add_argument("--phase",
default="Eval",
type=str,
help="phase to plot.")
parser.add_argument("--xkey",
default="epoch",
type=str,
help="x values to plot.")
parser.add_argument("--ykey",
default="episode_return",
type=str,
help="y values to plot.")
parser.add_argument("--fig_file",
default=None,
type=str,
help="figure file to save.")
flags = parser.parse_intermixed_args()
plot(flags.log_file, flags.phase, flags.xkey, flags.ykey, flags.fig_file)
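# Illustrative invocation (log/figure file names are placeholders):
#   python draw_figure.py --log_file ppo_attack.log --phase Eval \
#       --xkey epoch --ykey episode_return --fig_file return_curve.png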
if __name__ == "__main__":
main()
| AutoCAT-main | src/cyclone_data/draw_figure.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
from typing import Any, Dict, Optional
import torch
import torch.nn as nn
from cache_ppo_mlp_model import CachePPOMlpModel
from cache_ppo_lstm_model import CachePPOLstmModel
from cache_ppo_transformer_model import CachePPOTransformerModel
def get_model(cfg: Dict[str, Any],
window_size: int,
output_dim: int,
checkpoint: Optional[str] = None) -> nn.Module:
cfg.args.step_dim = window_size
if "window_size" in cfg.args:
cfg.args.window_size = window_size
cfg.args.output_dim = output_dim
model = None
if cfg.type == "mlp":
model = CachePPOMlpModel(**cfg.args)
elif cfg.type == "lstm":
model = CachePPOLstmModel(**cfg.args)
elif cfg.type == "transformer":
model = CachePPOTransformerModel(**cfg.args)
if model is not None and checkpoint is not None:
params = torch.load(checkpoint)
model.load_state_dict(params)
return model
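# Minimal usage sketch (illustrative values; the entries under "args" must match the
# constructor of the selected model class, here CachePPOTransformerModel):
#
#   from omegaconf import OmegaConf
#
#   model_cfg = OmegaConf.create({
#       "type": "transformer",
#       "args": {
#           "latency_dim": 3, "victim_acc_dim": 2, "action_dim": 18, "step_dim": 64,
#           "action_embed_dim": 8, "step_embed_dim": 4, "hidden_dim": 128,
#           "output_dim": 18, "num_layers": 1,
#       },
#   })
#   model = get_model(model_cfg, window_size=64, output_dim=env.action_space.n)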
| AutoCAT-main | src/rlmeta/model_utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import logging
from typing import Dict, Optional, Sequence
import hydra
from omegaconf import DictConfig, OmegaConf
import numpy as np
import torch
import torch.nn
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.types import Action, TimeStep
from rlmeta.envs.env import Env
from rlmeta.utils.stats_dict import StatsDict
import model_utils
from cache_env_wrapper import CacheEnvCycloneWrapperFactory
def batch_obs(timestep: TimeStep) -> TimeStep:
obs, reward, terminated, truncated, info = timestep
return TimeStep(obs.unsqueeze(0), reward, terminated, truncated, info)
def unbatch_action(action: Action) -> Action:
act, info = action
act.squeeze_(0)
info = nested_utils.map_nested(lambda x: x.squeeze(0), info)
return Action(act, info)
def run_loop(env: Env,
agent: PPOAgent,
victim_addr: int = -1) -> Dict[str, float]:
episode_length = 0
episode_return = 0.0
num_guess = 0
num_correct = 0
cyclone_attack = 0
if victim_addr == -1:
timestep = env.reset()
else:
timestep = env.reset(victim_address=victim_addr)
agent.observe_init(timestep)
while not (timestep.terminated or timestep.truncated):
# Model server requires a batch_dim, so unsqueeze here for local runs.
timestep = batch_obs(timestep)
action = agent.act(timestep)
# Unbatch the action.
action = unbatch_action(action)
timestep = env.step(action)
agent.observe(action, timestep)
episode_length += 1
episode_return += timestep.reward
if "guess_correct" in timestep.info:
num_guess += 1
if timestep.info["guess_correct"]:
num_correct += 1
cyclone_attack += timestep.info.get("cyclone_attack", 0)
metrics = {
"episode_length": episode_length,
"episode_return": episode_return,
"num_guess": num_guess,
"num_correct": num_correct,
"correct_rate": num_correct / num_guess,
"bandwith": num_guess / episode_length,
"cyclone_attack": cyclone_attack,
}
return metrics
def run_loops(env: Env,
agent: PPOAgent,
num_episodes: int = -1,
seed: int = 0,
reset_cache_state: bool = False) -> StatsDict:
# env.seed(seed)
env.reset(seed=seed)
metrics = StatsDict()
num_guess = 0
num_correct = 0
tot_length = 0
if num_episodes == -1:
start = env.env.victim_address_min
stop = env.env.victim_address_max + 1 + int(
env.env._env.allow_empty_victim_access)
for victim_addr in range(start, stop):
cur_metrics = run_loop(env, agent, victim_addr=victim_addr)
num_guess += cur_metrics["num_guess"]
num_correct += cur_metrics["num_correct"]
tot_length += cur_metrics["episode_length"]
metrics.extend(cur_metrics)
else:
for _ in range(num_episodes):
cur_metrics = run_loop(env, agent, victim_addr=-1)
num_guess += cur_metrics["num_guess"]
num_correct += cur_metrics["num_correct"]
tot_length += cur_metrics["episode_length"]
metrics.extend(cur_metrics)
metrics.add("overall_correct_rate", num_correct / num_guess)
metrics.add("overall_bandwith", num_guess / tot_length)
return metrics
@hydra.main(config_path="./config", config_name="sample_cchunter")
def main(cfg):
# Create env
cfg.env_config.verbose = 1
env_fac = CacheEnvCycloneWrapperFactory(
OmegaConf.to_container(cfg.env_config))
env = env_fac(index=0)
# Load model
model = model_utils.get_model(cfg.model_config, cfg.env_config.window_size,
env.action_space.n, cfg.checkpoint)
model.eval()
# Create agent
agent = PPOAgent(model, deterministic_policy=cfg.deterministic_policy)
# Run loops
metrics = run_loops(env, agent, cfg.num_episodes, cfg.seed)
logging.info("\n\n" + metrics.table(info="sample") + "\n")
if __name__ == "__main__":
main()
| AutoCAT-main | src/rlmeta/sample_cyclone.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import copy
import logging
import os
import time
import hydra
from omegaconf import DictConfig, OmegaConf
import torch
import torch.multiprocessing as mp
import rlmeta.utils.hydra_utils as hydra_utils
import rlmeta.utils.random_utils as random_utils
import rlmeta.utils.remote_utils as remote_utils
from rlmeta.agents.agent import AgentFactory
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.controller import Phase, Controller
from rlmeta.core.loop import LoopList, ParallelLoop
from rlmeta.core.model import ModelVersion, RemotableModelPool
from rlmeta.core.model import make_remote_model, wrap_downstream_model
from rlmeta.core.replay_buffer import ReplayBuffer, make_remote_replay_buffer
from rlmeta.core.server import Server, ServerList
from rlmeta.core.callbacks import EpisodeCallbacks
from rlmeta.core.types import Action, TimeStep
from rlmeta.samplers import UniformSampler
from rlmeta.storage import TensorCircularBuffer
from rlmeta.utils.optimizer_utils import make_optimizer
import model_utils
from cache_env_wrapper import CacheEnvWrapperFactory
from metric_callbacks import MetricCallbacks
@hydra.main(config_path="./config", config_name="ppo_attack")
def main(cfg):
if cfg.seed is not None:
random_utils.manual_seed(cfg.seed)
    print(f"working_dir = {os.getcwd()}")
my_callbacks = MetricCallbacks()
logging.info(hydra_utils.config_to_json(cfg))
env_fac = CacheEnvWrapperFactory(OmegaConf.to_container(cfg.env_config))
env = env_fac(index=0)
train_model = model_utils.get_model(
cfg.model_config, cfg.env_config.window_size,
env.action_space.n).to(cfg.train_device)
infer_model = copy.deepcopy(train_model).to(cfg.infer_device)
infer_model.eval()
optimizer = make_optimizer(train_model.parameters(), **cfg.optimizer)
ctrl = Controller()
rb = ReplayBuffer(TensorCircularBuffer(cfg.replay_buffer_size),
UniformSampler())
m_server = Server(cfg.m_server_name, cfg.m_server_addr)
r_server = Server(cfg.r_server_name, cfg.r_server_addr)
c_server = Server(cfg.c_server_name, cfg.c_server_addr)
m_server.add_service(RemotableModelPool(infer_model, seed=cfg.seed))
r_server.add_service(rb)
c_server.add_service(ctrl)
servers = ServerList([m_server, r_server, c_server])
a_model = wrap_downstream_model(train_model, m_server)
t_model = make_remote_model(infer_model, m_server)
e_model = make_remote_model(infer_model, m_server)
a_ctrl = remote_utils.make_remote(ctrl, c_server)
t_ctrl = remote_utils.make_remote(ctrl, c_server)
e_ctrl = remote_utils.make_remote(ctrl, c_server)
a_rb = make_remote_replay_buffer(rb, r_server, prefetch=cfg.prefetch)
t_rb = make_remote_replay_buffer(rb, r_server)
agent = PPOAgent(a_model,
replay_buffer=a_rb,
controller=a_ctrl,
optimizer=optimizer,
batch_size=cfg.batch_size,
learning_starts=cfg.get("learning_starts", None),
entropy_coeff=cfg.get("entropy_coeff", 0.01),
model_push_period=cfg.model_push_period)
t_agent_fac = AgentFactory(PPOAgent, t_model, replay_buffer=t_rb)
e_agent_fac = AgentFactory(PPOAgent, e_model, deterministic_policy=True)
t_loop = ParallelLoop(env_fac,
t_agent_fac,
t_ctrl,
running_phase=Phase.TRAIN,
should_update=True,
num_rollouts=cfg.num_train_rollouts,
num_workers=cfg.num_train_workers,
seed=cfg.seed,
episode_callbacks=my_callbacks)
e_loop = ParallelLoop(env_fac,
e_agent_fac,
e_ctrl,
running_phase=Phase.EVAL,
should_update=False,
num_rollouts=cfg.num_eval_rollouts,
num_workers=cfg.num_eval_workers,
seed=(None if cfg.seed is None else cfg.seed +
cfg.num_train_rollouts),
episode_callbacks=my_callbacks)
loops = LoopList([t_loop, e_loop])
servers.start()
loops.start()
agent.connect()
start_time = time.perf_counter()
for epoch in range(cfg.num_epochs):
stats = agent.train(cfg.steps_per_epoch)
cur_time = time.perf_counter() - start_time
info = f"T Epoch {epoch}"
if cfg.table_view:
logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
else:
logging.info(
stats.json(info, phase="Train", epoch=epoch, time=cur_time))
time.sleep(1)
stats = agent.eval(cfg.num_eval_episodes, keep_training_loops=True)
cur_time = time.perf_counter() - start_time
info = f"E Epoch {epoch}"
if cfg.table_view:
logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
else:
logging.info(
stats.json(info, phase="Eval", epoch=epoch, time=cur_time))
torch.save(train_model.state_dict(), f"ppo_agent-{epoch}.pth")
time.sleep(1)
loops.terminate()
servers.terminate()
if __name__ == "__main__":
mp.set_start_method("spawn")
main()
| AutoCAT-main | src/rlmeta/train_ppo_attack.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# Author: Mulong Luo
# date: 2022.6.28
# usage: trains the SVM classifier of Cyclone by feeding
# the traces from the TextbookAgent as malicious traces
# and SPEC traces as benign traces
import logging
from typing import Dict
import hydra
import torch
import torch.nn
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# sys.path.append("/home/mulong/RL_SCA/src/CacheSimulator/src")
import rlmeta.utils.nested_utils as nested_utils
import numpy as np
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.types import Action
from rlmeta.envs.env import Env
from rlmeta.utils.stats_dict import StatsDict
from textbook_attacker import TextbookAgent
# from cache_guessing_game_env_impl import CacheGuessingGameEnv
# from cchunter_wrapper import CCHunterWrapper
from cache_env_wrapper import CacheEnvWrapperFactory, CacheEnvCycloneWrapperFactory
from cyclone_wrapper import CycloneWrapper
def signal_handler(sig, frame):
print('You pressed Ctrl+C!')
checkpoint = trainer.save()
print("checkpoint saved at", checkpoint)
sys.exit(0)
class SpecAgent():
def __init__(self, env_config, trace_file):
self.local_step = 0
self.lat = []
self.no_prime = False # set to true after first prime
if "cache_configs" in env_config:
#self.logger.info('Load config from JSON')
self.configs = env_config["cache_configs"]
self.num_ways = self.configs['cache_1']['associativity']
self.cache_size = self.configs['cache_1']['blocks']
attacker_addr_s = env_config["attacker_addr_s"] if "attacker_addr_s" in env_config else 4
attacker_addr_e = env_config["attacker_addr_e"] if "attacker_addr_e" in env_config else 7
victim_addr_s = env_config["victim_addr_s"] if "victim_addr_s" in env_config else 0
victim_addr_e = env_config["victim_addr_e"] if "victim_addr_e" in env_config else 3
flush_inst = env_config["flush_inst"] if "flush_inst" in env_config else False
self.allow_empty_victim_access = env_config["allow_empty_victim_access"] if "allow_empty_victim_access" in env_config else False
assert(self.num_ways == 1) # currently only support direct-map cache
assert(flush_inst == False) # do not allow flush instruction
        assert(attacker_addr_e - attacker_addr_s == victim_addr_e - victim_addr_s) # attacker and victim ranges must have the same size
        # the attacker and victim address ranges must not overlap (they must be adjacent)
        assert((attacker_addr_e + 1 == victim_addr_s) or (victim_addr_e + 1 == attacker_addr_s))
assert(self.allow_empty_victim_access == False)
self.trace_file = trace_file
        # load the benign SPEC traces
self.fp = open(self.trace_file)
line = self.fp.readline().split()
self.domain_id_0 = line[0]
self.domain_id_1 = line[0]
line = self.fp.readline().split()
        while line: # readline() at EOF returns '' and split() gives [], which ends the loop
self.domain_id_1 = line[0]
if self.domain_id_1 != self.domain_id_0:
break
line = self.fp.readline().split()
self.fp.close()
self.fp = open(self.trace_file)
def act(self, timestep):
info = {}
line = self.fp.readline().split()
if len(line) == 0:
action = self.cache_size
addr = 0#addr % self.cache_size
info={"file_done" : True}
return action, info
domain_id = line[0]
cache_line_size = 8
addr = int( int(line[3], 16) / cache_line_size )
print(addr)
if domain_id == self.domain_id_0: # attacker access
action = addr % self.cache_size
info ={}
else: # domain_id = self.domain_id_1: # victim access
action = self.cache_size
addr = addr % self.cache_size
info={"reset_victim_addr": True, "victim_addr": addr}
return action, info
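    # Illustrative trace line assumed by act() above (the column layout is inferred from the
    # parsing code: column 0 = domain id, column 3 = address in hex; other columns are unused here):
    #   "0 0 0 0x1a40"
    # With an 8-byte cache line, 0x1a40 (= 6720) maps to cache-line index 6720 // 8 = 840, which
    # is then folded into the simulated cache by taking it modulo self.cache_size.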
@hydra.main(config_path="./config", config_name="sample_cyclone")
def main(cfg):
repeat = 80000
trace_file = '/home/mulong/remix3.txt'
svm_data_path = 'autocat.svm.txt' #trace_file + '.svm.txt'
#create env
cfg.env_config['verbose'] = 1
# generate dataset for malicious traces
cfg.env_config['cyclone_collect_data'] = True
cfg.env_config['cyclone_malicious_trace'] = True
env_fac = CacheEnvCycloneWrapperFactory(cfg.env_config)
env = env_fac(index=0)
env.svm_data_path = svm_data_path
fp = open(svm_data_path,'w')
fp.close()
agent = TextbookAgent(cfg.env_config)
episode_length = 0
episode_return = 0.0
for i in range(repeat):
timestep = env.reset()
num_guess = 0
num_correct = 0
while not timestep.done:
# Model server requires a batch_dim, so unsqueeze here for local runs.
timestep.observation.unsqueeze_(0)
action, info = agent.act(timestep)
action = Action(action, info)
# unbatch the action
victim_addr = env._env.victim_address
timestep = env.step(action)
obs, reward, done, info = timestep
if "guess_correct" in info:
num_guess += 1
if info["guess_correct"]:
print(f"victim_address! {victim_addr} correct guess! {info['guess_correct']}")
num_correct += 1
else:
correct = False
agent.observe(action, timestep)
episode_length += 1
episode_return += timestep.reward
env.reset(save_data=True) # save data to file
# generate benign traces
'''
cfg.env_config['cyclone_collect_data'] = True
cfg.env_config['cyclone_malicious_trace'] = False
env_fac = CacheEnvCycloneWrapperFactory(cfg.env_config)
env = env_fac(index=0)
print("mix.txt opened!")
agent = SpecAgent(cfg.env_config, trace_file)
episode_length = 0
episode_return = 0.0
file_done = False
# generate dataset for benign traces
iter = 0
while not file_done:
#for i in range(repeat):
timestep = env.reset()
num_guess = 0
num_correct = 0
done = False
count = 0
iter += 1
while not done:
# Model server requires a batch_dim, so unsqueeze here for local runs.
timestep.observation.unsqueeze_(0)
action, info = agent.act(timestep)
if "file_done" in info:
file_done = True
break
if "victim_addr" in info:
print(info["victim_addr"])
#env.set_victim(info["victim_addr"])
env._env.set_victim(info["victim_addr"])
action = Action(action, info)
else:
action = Action(action, info)
# unbatch the action
victim_addr = env._env.victim_address
timestep = env.step(action)
obs, reward, done, info = timestep
count += 1
#if count % 10 == 0:
#action = Action(agent.cache_size * 2, {})
#timestep = env.step(action)
#obs, reward, done, info = timestep
if count == 160:
action = Action(agent.cache_size * 2, {})
timestep = env.step(action)
obs, reward, done, info = timestep
done = True
count = 0
#if "guess_correct" in info:
# num_guess += 1
# if info["guess_correct"]:
# print(f"victim_address! {victim_addr} correct guess! {info['guess_correct']}")
# num_correct += 1
# else:
# correct = False
#agent.observe(action, timestep)
episode_length += 1
episode_return += timestep.reward
env.reset(save_data=True) # save data to file
'''
#cfg.env_config['cyclone_malicious_trace'] = False
#env_fac = CacheEnvCCHunterWrapperFactory(cfg.env_config)
#env = env_fac(index=0)
if __name__ == "__main__":
main()
| AutoCAT-main | src/rlmeta/cyclone_svm_trainer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import logging
import os
import sys
from typing import Dict, Optional, Sequence, Union
import hydra
from omegaconf import DictConfig, OmegaConf
import numpy as np
import torch
import torch.nn
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.types import Action, TimeStep
from rlmeta.envs.env import Env
from rlmeta.utils.stats_dict import StatsDict
import model_utils
from cache_env_wrapper import CacheEnvCCHunterWrapperFactory
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from autocorrelation import autocorrelation
def batch_obs(timestep: TimeStep) -> TimeStep:
obs, reward, terminated, truncated, info = timestep
return TimeStep(obs.unsqueeze(0), reward, terminated, truncated, info)
def unbatch_action(action: Action) -> Action:
act, info = action
act.squeeze_(0)
info = nested_utils.map_nested(lambda x: x.squeeze(0), info)
return Action(act, info)
def max_autocorr(data: Sequence[int], n: int) -> float:
n = min(len(data), n)
x = np.asarray(data)
corr = [autocorrelation(x, i) for i in range(n)]
corr = np.asarray(corr[1:])
corr = np.nan_to_num(corr)
return corr.max()
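# Illustrative check (assumes `autocorrelation` returns a normalized coefficient in [-1, 1]):
# a strictly periodic trace such as [1, 0] * 32 autocorrelates to ~1.0 at lag 2, so
# max_autocorr([1, 0] * 32, 16) should come out close to 1.0, whereas an i.i.d. random
# trace keeps every non-zero lag near 0 and thus yields a small maximum.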
def run_loop(
env: Env,
agent: PPOAgent,
victim_addr: int = -1,
threshold: Union[float, Sequence[float]] = 0.75) -> Dict[str, float]:
episode_length = 0
episode_return = 0.0
num_guess = 0
num_correct = 0
if victim_addr == -1:
timestep = env.reset()
else:
timestep = env.reset(victim_address=victim_addr)
agent.observe_init(timestep)
while not (timestep.terminated or timestep.truncated):
# Model server requires a batch_dim, so unsqueeze here for local runs.
timestep = batch_obs(timestep)
action = agent.act(timestep)
# Unbatch the action.
action = unbatch_action(action)
timestep = env.step(action)
agent.observe(action, timestep)
episode_length += 1
episode_return += timestep.reward
if "guess_correct" in timestep.info:
num_guess += 1
if timestep.info["guess_correct"]:
num_correct += 1
autocorr_n = (env.env.env._env.cache_size *
env.env.env.cc_hunter_check_length)
max_ac = max_autocorr(env.env.cc_hunter_history, autocorr_n)
if isinstance(threshold, float):
threshold = (threshold, )
detect = [max_ac >= t for t in threshold]
metrics = {
"episode_length": episode_length,
"episode_return": episode_return,
"num_guess": num_guess,
"num_correct": num_correct,
"correct_rate": num_correct / num_guess,
"bandwith": num_guess / episode_length,
"max_autocorr": max_ac,
}
for t, d in zip(threshold, detect):
metrics[f"detect_rate-{t}"] = d
return metrics
def run_loops(env: Env,
agent: PPOAgent,
num_episodes: int = -1,
seed: int = 0,
reset_cache_state: bool = False,
threshold: Union[float, Sequence[float]] = 0.75) -> StatsDict:
# env.seed(seed)
env.reset(seed=seed)
metrics = StatsDict()
num_guess = 0
num_correct = 0
tot_length = 0
if num_episodes == -1:
start = env.env.victim_address_min
stop = env.env.victim_address_max + 1 + int(
env.env._env.allow_empty_victim_access)
for victim_addr in range(start, stop):
cur_metrics = run_loop(env,
agent,
victim_addr=victim_addr,
threshold=threshold)
num_guess += cur_metrics["num_guess"]
num_correct += cur_metrics["num_correct"]
tot_length += cur_metrics["episode_length"]
metrics.extend(cur_metrics)
else:
for _ in range(num_episodes):
cur_metrics = run_loop(env,
agent,
victim_addr=-1,
threshold=threshold)
num_guess += cur_metrics["num_guess"]
num_correct += cur_metrics["num_correct"]
tot_length += cur_metrics["episode_length"]
metrics.extend(cur_metrics)
metrics.add("overall_correct_rate", num_correct / num_guess)
metrics.add("overall_bandwith", num_guess / tot_length)
return metrics
@hydra.main(config_path="./config", config_name="sample_cchunter")
def main(cfg):
# Create env
cfg.env_config.verbose = 1
env_fac = CacheEnvCCHunterWrapperFactory(
OmegaConf.to_container(cfg.env_config))
env = env_fac(index=0)
# Load model
model = model_utils.get_model(cfg.model_config, cfg.env_config.window_size,
env.action_space.n, cfg.checkpoint)
model.eval()
# Create agent
agent = PPOAgent(model, deterministic_policy=cfg.deterministic_policy)
# Run loops
metrics = run_loops(env,
agent,
cfg.num_episodes,
cfg.seed,
threshold=cfg.threshold)
logging.info("\n\n" + metrics.table(info="sample") + "\n")
if __name__ == "__main__":
main()
| AutoCAT-main | src/rlmeta/sample_cchunter.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
from rlmeta.core.callbacks import EpisodeCallbacks
from rlmeta.core.types import Action, TimeStep
class MetricCallbacks(EpisodeCallbacks):
    """Per-episode callback that records guess correctness as a custom metric."""
def __init__(self):
super().__init__()
def on_episode_step(self, index: int, step: int, action: Action,
timestep: TimeStep) -> None:
info = timestep.info
if info["is_guess"]:
self._custom_metrics["correct_rate"] = float(info["guess_correct"])
class CCHunterMetricCallbacks(EpisodeCallbacks):
    """Per-episode callback that tracks guess count, guess correctness, and the
    CC-Hunter detection flag."""
def __init__(self):
super().__init__()
def on_episode_start(self, index: int) -> None:
self.tot_guess = 0
self.acc_guess = 0
def on_episode_step(self, index: int, step: int, action: Action,
timestep: TimeStep) -> None:
info = timestep.info
if info["is_guess"]:
self.tot_guess += 1
self.acc_guess += int(info["guess_correct"])
if timestep.terminated or timestep.truncated:
self._custom_metrics["total_guess"] = self.tot_guess
if self.tot_guess > 0:
self._custom_metrics[
"correct_rate"] = self.acc_guess / self.tot_guess
if "cc_hunter_attack" in info:
self._custom_metrics["cc_hunter_attack"] = float(
info["cc_hunter_attack"])
class CycloneMetricCallbacks(EpisodeCallbacks):
    """Per-episode callback that tracks guess count, guess correctness, and the
    Cyclone detection flag."""
def __init__(self):
super().__init__()
def on_episode_start(self, index: int) -> None:
self.tot_guess = 0
self.acc_guess = 0
def on_episode_step(self, index: int, step: int, action: Action,
timestep: TimeStep) -> None:
info = timestep.info
if info["is_guess"]:
self.tot_guess += 1
self.acc_guess += int(info["guess_correct"])
if timestep.terminated or timestep.truncated:
self._custom_metrics["total_guess"] = self.tot_guess
if self.tot_guess > 0:
self._custom_metrics[
"correct_rate"] = self.acc_guess / self.tot_guess
if "cyclone_attack" in info:
self._custom_metrics["cyclone_attack"] = float(
info["cyclone_attack"])
| AutoCAT-main | src/rlmeta/metric_callbacks.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import os
import sys
from typing import Dict, List, Tuple
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import rlmeta.core.remote as remote
from rlmeta.agents.ppo.ppo_model import PPOModel
class CachePPOTransformerModel(PPOModel):
    """Transformer-based PPO policy/value model over the attacker's observation
    window of (latency, victim_access, action, step) entries."""
def __init__(self,
latency_dim: int,
victim_acc_dim: int,
action_dim: int,
step_dim: int,
action_embed_dim: int,
step_embed_dim: int,
hidden_dim: int,
output_dim: int,
num_layers: int = 1) -> None:
super().__init__()
self.latency_dim = latency_dim
self.victim_acc_dim = victim_acc_dim
self.action_dim = action_dim
self.step_dim = step_dim
# self.window_size = window_size
self.action_embed_dim = action_embed_dim
self.step_embed_dim = step_embed_dim
self.input_dim = (self.latency_dim + self.victim_acc_dim +
self.action_embed_dim + self.step_embed_dim)
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.num_layers = num_layers
self.action_embed = nn.Embedding(self.action_dim,
self.action_embed_dim)
self.step_embed = nn.Embedding(self.step_dim, self.step_embed_dim)
self.linear_i = nn.Linear(self.input_dim, self.hidden_dim)
# self.linear_o = nn.Linear(self.hidden_dim * self.window_size,
# self.hidden_dim)
encoder_layer = nn.TransformerEncoderLayer(d_model=self.hidden_dim,
nhead=8,
dropout=0.0)
self.encoder = nn.TransformerEncoder(encoder_layer, self.num_layers)
self.linear_a = nn.Linear(self.hidden_dim, self.output_dim)
self.linear_v = nn.Linear(self.hidden_dim, 1)
self._device = None
def make_one_hot(self, src: torch.Tensor, num_classes: int,
mask: torch.Tensor) -> torch.Tensor:
# mask = (src == -1)
src = src.masked_fill(mask, 0)
ret = F.one_hot(src, num_classes)
return ret.masked_fill(mask.unsqueeze(-1), 0.0)
def make_embedding(self, src: torch.Tensor, embed: nn.Embedding,
mask: torch.Tensor) -> torch.Tensor:
# mask = (src == -1)
src = src.masked_fill(mask, 0)
ret = embed(src)
return ret.masked_fill(mask.unsqueeze(-1), 0.0)
def forward(self, obs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
obs = obs.to(torch.int64)
assert obs.dim() == 3
# batch_size = obs.size(0)
l, v, act, stp = torch.unbind(obs, dim=-1)
mask = (stp == -1)
l = self.make_one_hot(l, self.latency_dim, mask)
v = self.make_one_hot(v, self.victim_acc_dim, mask)
act = self.make_embedding(act, self.action_embed, mask)
stp = self.make_embedding(stp, self.step_embed, mask)
x = torch.cat((l, v, act, stp), dim=-1)
x = self.linear_i(x)
x = x.transpose(0, 1).contiguous()
h = self.encoder(x)
h = h.mean(dim=0)
p = self.linear_a(h)
logpi = F.log_softmax(p, dim=-1)
v = self.linear_v(h)
return logpi, v
@remote.remote_method(batch_size=128)
def act(
self, obs: torch.Tensor, deterministic_policy: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
with torch.no_grad():
logpi, v = self.forward(obs)
greedy_action = logpi.argmax(-1, keepdim=True)
sample_action = logpi.exp().multinomial(1, replacement=True)
action = torch.where(deterministic_policy, greedy_action,
sample_action)
logpi = logpi.gather(dim=-1, index=action)
return action, logpi, v
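# Shape sketch (illustrative only; these dimension values are made up and are
# not the ones used by the repository's training configs).  Each observation
# row is (latency, victim_access, action, step); unused window slots are padded
# with -1, and the -1 step entries drive the padding mask in forward().
if __name__ == "__main__":
    model = CachePPOTransformerModel(latency_dim=3,
                                     victim_acc_dim=2,
                                     action_dim=16,
                                     step_dim=64,
                                     action_embed_dim=8,
                                     step_embed_dim=8,
                                     hidden_dim=32,
                                     output_dim=16)
    obs = torch.full((2, 64, 4), -1, dtype=torch.int64)  # (batch, window, features)
    obs[:, 0] = torch.tensor([1, 0, 3, 0])  # one real step per sequence
    logpi, v = model(obs)
    print(logpi.shape, v.shape)  # torch.Size([2, 16]) torch.Size([2, 1])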
| AutoCAT-main | src/rlmeta/cache_ppo_transformer_model.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
fontaxes = {
'family': 'Arial',
'color': 'black',
'weight': 'bold',
'size': 8,
}
fontaxes_title = {
'family': 'Arial',
'color': 'black',
'weight': 'bold',
'size': 9,
}
lsmarkersize = 2.5
lslinewidth = 0.6
plt.figure(num=None, figsize=(3.5, 1.5), dpi=200, facecolor='w')
plt.subplots_adjust(right = 0.99, top =0.90, bottom=0.24, left=0.15, wspace=0.2, hspace=0.2)
# Without CCHunter, generated with python sample_cchunter.py checkpoint=/media/research/yl3469/RLSCA/CacheSimulator/data/guess97_cchunter100/ppo_agent-53.pth
# trace4 = [0, 1, 0, 2, 1, 0, 0, 0, 0, 0, 1, 2, 1, 2]
# trace3 = [0, 0, 2, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 1, 2, 0, 0, 0, 1, 2, 2, 0, 0, 0, 0, 0, 2]
# trace2 = [1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 2]
# trace1 = [1, 2, 1, 2, 0, 0, 0, 1, 2, 0, 1, 2]
# trace4= [1, 1, 2, 0, 1, 2, 0, 2, 1, 2, 0, 2, 0, 0, 1, 2, 1, 2, 0, 2, 0, 1, 2, 0, 0, 2, 1, 2, 0, 0, 1, 2, 1, 2, 0, 2, 0, 1, 2, 1, 2, 0, 2, 1, 2, 1, 2, 0, 0, 0, 0, 2, 0, 2, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 1, 2, 1, 2, 1, 2, 0, 2, 0, 1, 2, 0, 0, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]
# trace3= [1, 0, 2, 0, 0, 0, 2, 0, 2, 0, 1, 2, 1, 2, 0, 2, 1, 2, 1, 2, 0, 0, 1, 2, 0, 2, 0, 2, 0, 1, 2, 0, 0, 2, 1, 2, 0, 1, 2, 1, 2, 0, 2, 0, 0, 0, 2, 1, 0, 2, 0, 0, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 0, 2, 0, 0, 1, 2, 0, 2, 1, 2, 0, 1, 2, 2, 0, 0, 2]
# trace2= [1, 0, 2, 1, 2, 0, 2, 1, 2, 0, 2, 0, 1, 2, 0, 1, 2, 1, 2, 1, 2, 1, 2, 0, 2, 0, 1, 2, 1, 2, 1, 2, 0, 0, 0, 2, 0, 2, 0, 0, 0, 1, 2, 1, 2, 0, 2, 0, 0, 0, 1, 2, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 0, 2, 0, 0, 2, 0, 0, 1, 2, 1, 2, 1, 2, 0, 1, 2, 2, 0, 1, 2]
# trace1= [1, 0, 2, 0, 0, 1, 2, 1, 2, 1, 2, 0, 1, 2, 1, 2, 1, 2, 0, 0, 1, 2, 1, 2, 0, 2, 1, 2, 0, 2, 0, 1, 2, 0, 0, 2, 1, 2, 0, 0, 0, 1, 2, 1, 2, 1, 2, 0, 0, 0, 1, 2, 0, 2, 1, 2, 0, 1, 2, 0, 1, 2, 0, 2, 0, 0, 2, 1, 2, 0, 2, 0, 0, 0, 1, 2, 0, 1, 2, 1, 2]
# Without CCHUnter, generated with python sample_cchunter.py c
# heckpoint=/media/research/yl3469/RLSCA/CacheSimulator/data/guess99_cchunter100/ppo_agent-338.pth
trace4 = [1, 0, 2, 0, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 1, 2, 0, 2, 1, 2, 1, 2, 0, 1, 2, 1, 2, 0, 2, 0, 0, 1, 2, 1, 2, 0, 2, 0, 1, 2, 2, 1, 2, 0, 2, 0, 1, 2, 2, 1, 2, 1, 2, 1, 2, 0, 2, 1, 2, 1, 2, 0, 1, 2, 1, 2, 0, 2, 1, 2, 0, 2, 0, 1, 2, 0, 1, 2, 2, 0, 2]
trace3 = [1, 0, 2, 0, 0, 0, 2, 1, 2, 1, 2, 1, 2, 0, 1, 2, 1, 2, 0, 2, 0, 1, 2, 0, 1, 2, 1, 2, 0, 2, 1, 2, 0, 2, 0, 1, 2, 1, 2, 1, 2, 0, 1, 2, 1, 2, 0, 2, 1, 2, 1, 0, 2, 1, 2, 0, 2, 0, 1, 2, 0, 1, 2, 0, 2, 0, 2, 0, 1, 2, 1, 2, 1, 2, 0, 1, 2, 1, 2, 1, 2]
trace2 = [1, 1, 2, 1, 2, 0, 2, 0, 1, 2, 1, 2, 0, 2, 0, 1, 2, 0, 1, 2, 0, 2, 0, 2, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 1, 2, 0, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 0, 2, 0, 0, 1, 2, 1, 2, 0, 2, 1, 2, 0, 2, 0, 1, 2, 2, 0, 1, 2, 1, 2, 1, 0, 2, 1, 2, 0, 2, 1, 2]
trace1 = [0, 1, 2, 0, 0, 1, 2, 1, 2, 0, 2, 0, 0, 1, 2, 1, 2, 0, 2, 0, 0, 1, 2, 1, 2, 0, 2, 1, 2, 0, 2, 1, 2, 1, 2, 1, 2, 0, 2, 1, 2, 0, 2, 0, 1, 2, 1, 2, 0, 2, 0, 0, 1, 2, 1, 2, 0, 2, 1, 2, 0, 2, 0, 0, 1, 2, 0, 2, 1, 2, 1, 2, 0, 2, 0, 0, 1, 2, 2, 0, 2]
# With CCHUnter, generated with python sample_cchunter.py checkpoint=/media/research/yl3469/RLSCA/CacheSimulator/data/guess95_cchunter0/ppo_agent-699.pth
# trace4= [1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2]
# trace3= [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2]
# trace2= [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2]
# trace1= [1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2]
# With CCHUnter, nondeterministic, generated with python sample_cchunter.py checkpoint=/media/research/yl3469/RLSCA/CacheSimulator/data/guess95_cchunter0/ppo_agent-699.pth
# trace4 = [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2]
# trace3 = [1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2]
# trace2 = [0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2]
# trace1 = [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2]
# trace = trace1 + trace2 + trace3 + trace4
trace = trace1
# With CCHunter
# ctrace4 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]
# ctrace3 = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2]
# ctrace2 = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 2]
# ctrace1 = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 2]
# With CCHunter
# ctrace4 = [1, 2, 0, 1, 2, 0, 1, 2, 1, 2, 0, 2, 0, 0, 1, 2, 0, 2, 1, 2, 0, 1, 2, 1, 2, 1, 2, 1, 2, 2, 1, 2, 1, 2, 1, 2, 1, 2, 0, 2, 0, 1, 2, 1, 2, 1, 2, 0, 0, 1, 2, 1, 2, 2, 0, 1, 2, 1, 2, 2, 0, 0, 1, 2, 1, 2, 2, 1, 2, 2, 0, 1, 2, 2, 1, 0, 1, 2, 2, 0, 0, 0, 2, 2, 2, 2, 1, 0, 0, 2]
# ctrace3 = [1, 2, 1, 2, 0, 2, 0, 1, 2, 1, 2, 1, 2, 1, 2, 0, 2, 0, 0, 1, 2, 0, 2, 0, 2, 0, 1, 2, 1, 2, 2, 0, 0, 0, 1, 2, 1, 2, 0, 2, 0, 0, 1, 2, 0, 2, 2, 1, 2, 1, 2, 2, 1, 2, 1, 2, 2, 0, 0, 1, 2, 0, 0, 2, 2, 0, 0, 1, 2, 2, 1, 0, 2, 2, 1, 0, 0, 2, 2, 1, 0, 0, 2, 2, 2, 2, 1, 0, 2, 2]
# ctrace2 = [1, 2, 0, 0, 0, 2, 0, 2, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 0, 2, 0, 2, 0, 0, 1, 2, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 0, 2, 1, 2, 0, 1, 2, 0, 0, 2, 2, 1, 2, 1, 2, 0, 2, 1, 2, 2, 1, 2, 0, 1, 2, 1, 2, 2, 0, 1, 2, 1, 2, 2, 0, 1, 2, 2, 0, 0, 1, 2, 2, 1, 0, 0, 2, 2, 2, 2, 0, 1, 2, 2]
# ctrace1 = [1, 2, 0, 1, 2, 1, 2, 0, 2, 0, 0, 0, 2, 0, 2, 0, 0, 1, 2, 0, 2, 1, 2, 0, 0, 1, 2, 0, 2, 2, 2, 1, 2, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 1, 2, 0, 2, 0, 0, 1, 2, 1, 2, 2, 0, 0, 1, 2, 2, 1, 2, 2, 1, 2, 1, 2, 2, 0, 1, 2, 1, 0, 2, 2, 0, 0, 1, 2, 2, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 2, 2]
# [0, 0, 2, 0, 0, 1, 2, 1, 2, 0, 2, 0, 0, 1, 2, 0, 2, 1, 2, 1, 2, 1, 2, 0, 0, 1, 2, 1, 2, 1, 2, 0, 0, 0, 2, 0, 2, 0, 1, 2, 2, 0, 1, 2, 1, 2, 1, 2, 1, 2, 0, 0, 2, 0, 0, 0, 1, 2, 1, 2, 0, 2, 0, 1, 2, 2, 0, 1, 2, 1, 2, 0, 0, 2, 0, 0, 1, 2, 2, 0, 2]
# ctrace = ctrace1 + ctrace2 + ctrace3 + ctrace4
# With CCHunter, deterministic, 0.13333333333333333: python sample_cchunter.py checkpoint=/media/research/yl3469/RLSCA/CacheSimulator/data/guess99_cchunter11/ppo_agent-458.pth
ctrace4 = [1, 2, 0, 1, 2, 0, 1, 2, 1, 2, 0, 2, 1, 2, 1, 2, 0, 1, 2, 1, 2, 1, 2, 0, 1, 2, 1, 2, 0, 2, 0, 1, 2, 0, 1, 2, 0, 2, 0, 2, 0, 1, 2, 1, 2, 1, 2, 1, 2, 2, 0, 1, 2, 1, 2, 2, 0, 1, 2, 2, 1, 2, 2, 1, 2, 1, 2, 2, 0, 1, 2, 2, 1, 2, 1, 0, 1, 2, 2, 1, 0, 0, 2, 2, 2, 2, 1, 0, 0, 2]
ctrace3 = [0, 2, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 0, 1, 2, 1, 2, 1, 2, 0, 0, 0, 0, 2, 2, 1, 2, 1, 2, 0, 2, 1, 2, 1, 2, 0, 0, 0, 2, 2, 1, 2, 0, 1, 2, 1, 2, 2, 1, 2, 0, 1, 2, 1, 2, 2, 1, 2, 1, 2, 2, 2, 1, 1, 2, 2, 1, 0, 0, 2, 2, 1, 0, 0, 2, 2, 2, 2, 1, 0, 2, 2]
ctrace2 = [0, 2, 0, 0, 1, 2, 0, 2, 0, 2, 0, 1, 2, 1, 2, 0, 2, 0, 1, 2, 1, 2, 0, 2, 0, 1, 2, 1, 2, 2, 1, 2, 1, 2, 1, 2, 0, 0, 0, 2, 1, 2, 0, 1, 2, 0, 0, 2, 2, 1, 2, 0, 1, 2, 0, 1, 2, 1, 2, 0, 2, 1, 2, 2, 0, 1, 2, 0, 1, 2, 1, 0, 2, 2, 1, 0, 0, 2, 2, 0, 0, 0, 2, 2, 2, 2, 1, 0, 0, 2]
ctrace1 = [0, 2, 0, 1, 2, 1, 2, 0, 2, 0, 1, 2, 0, 1, 2, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 0, 2, 0, 2, 2, 0, 0, 1, 2, 1, 2, 0, 2, 1, 2, 1, 2, 1, 2, 1, 2, 0, 1, 2, 0, 0, 2, 2, 1, 2, 1, 2, 2, 0, 1, 2, 0, 0, 2, 2, 0, 0, 1, 2, 2, 1, 0, 2, 2, 1, 0, 1, 2, 2, 1, 0, 0, 2, 2, 2, 2, 0, 0, 1, 2]
ctrace = ctrace1
mask = [i != 2 for i in trace]
trace_lean = [i for i, v in zip(trace, mask) if v]
mask = [i != 2 for i in ctrace]
ctrace_lean = [i for i, v in zip(ctrace, mask) if v]
def calculate_autocorrelation_coefficients(x, lags):
"""
Calculate the autocorrelation coefficients for the given data and lags.
"""
# n = len(x)
series = pd.Series([i[0] for i in x])
# print("Series is:\n", series)
# print("series correlation:\n",series.autocorr())
# data = np.asarray(x)
# print(data)
# x_mean = np.mean(data)
# y_mean = np.mean(data)
# rho = np.zeros(lags)
# for lag in range(0, lags):
# x_m = data[:-lag]
# y_m = data[lag:]
# x_m -= x_mean
# y_m -= y_mean
# rho[lag] = np.sum(x_m * y_m) / (n - lag)
return series.autocorr(lags)
def autocorrelation_plot_forked(series, ax=None, n_lags=None, change_deno=False, change_core=False, **kwds):
"""
Autocorrelation plot for time series.
Parameters:
-----------
series: Time series
ax: Matplotlib axis object, optional
n_lags: maximum number of lags to show. Default is len(series)
kwds : keywords
Options to pass to matplotlib plotting method
Returns:
-----------
class:`matplotlib.axis.Axes`
"""
import matplotlib.pyplot as plt
n_full = len(series)
if n_full <= 2:
raise ValueError("""len(series) = %i but should be > 2
to maintain at least 2 points of intersection when autocorrelating
with lags"""%n_full)
# Calculate the maximum number of lags permissible
# Subtract 2 to keep at least 2 points of intersection,
# otherwise pandas.Series.autocorr will throw a warning about insufficient
# degrees of freedom
n_maxlags = n_full - 2
# calculate the actual number of lags
if n_lags is None:
# Choosing a reasonable number of lags varies between datasets,
# but if the data longer than 200 points, limit this to 100 lags as a
# reasonable default for plotting when n_lags is not specified
n_lags = min(n_maxlags, 100)
else:
if n_lags > n_maxlags:
raise ValueError("n_lags should be < %i (i.e. len(series)-2)"%n_maxlags)
if ax is None:
        ax = plt.gca()
        ax.set_xlim(1, n_lags)
        ax.set_ylim(-1.0, 1.0)
if not change_core:
data = np.asarray(series)
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n_full)
def r(h):
deno = n_full if not change_deno else (n_full - h)
return ((data[:n_full - h] - mean) *
(data[h:] - mean)).sum() / float(deno) / c0
else:
def r(h):
return series.autocorr(lag=h)
x = np.arange(n_lags) + 1
# y = lmap(r, x)
y = np.array([r(xi) for xi in x])
z95 = 1.959963984540054
z99 = 2.5758293035489004
ax.axhline(y=0.8, linestyle='--', color='grey')
# ax.axhline(y=z95 / np.sqrt(n_full), color='grey')
ax.axhline(y=0.0, color='black')
# ax.axhline(y=-z95 / np.sqrt(n_full), color='grey')
# ax.axhline(y=-z99 / np.sqrt(n_full), linestyle='--', color='grey')
ax.plot(x, y, **kwds)
if 'label' in kwds:
ax.legend()
ax.grid()
return ax
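# Worked form of the r(h) estimator above (illustrative helper, not called by
# the plotting code): `change_deno` only switches the denominator of the lag-h
# covariance between n and n - h; both variants are normalized by the lag-0
# variance c0.
def _acf_both_denominators(x, h):
    data = np.asarray(x, dtype=float)
    n_full = len(data)
    mean = data.mean()
    c0 = np.sum((data - mean) ** 2) / float(n_full)
    num = np.sum((data[:n_full - h] - mean) * (data[h:] - mean))
    return num / (n_full * c0), num / ((n_full - h) * c0)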
data = pd.Series(trace)
cdata = pd.Series(ctrace)
# data = pd.Series(trace_lean)
# cdata = pd.Series(ctrace_lean)
ax = autocorrelation_plot_forked(data, n_lags=len(data)-2, change_deno=True, label='Baseline')
autocorrelation_plot_forked(cdata, ax = ax, n_lags=len(cdata)-2, change_deno=True, label='With Autocorrelation Based Detection Penalty')
plt.tick_params(labelsize=6)
ax.set_xlabel("Lag (p)",fontdict = fontaxes)
ax.set_ylabel("Autocorrelation",fontdict = fontaxes)
ax.legend(prop={'size': 6})
plt.savefig('cchunter_hit_trace_{}_acf.pdf'.format(0)) | AutoCAT-main | src/rlmeta/cchunter_plot.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import logging
import os
import sys
from typing import Dict, Optional, Sequence, Union
import hydra
from omegaconf import DictConfig, OmegaConf
import numpy as np
import torch
import torch.nn
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.types import Action, TimeStep
from rlmeta.envs.env import Env
from rlmeta.utils.stats_dict import StatsDict
import model_utils
from cache_env_wrapper import CacheEnvCCHunterWrapperFactory
from cache_env_wrapper import CacheEnvCycloneWrapperFactory
from textbook_attacker import TextbookAgent
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from autocorrelation import autocorrelation
def batch_obs(timestep: TimeStep) -> TimeStep:
obs, reward, terminated, truncated, info = timestep
return TimeStep(obs.unsqueeze(0), reward, terminated, truncated, info)
def unbatch_action(action: Action) -> Action:
act, info = action
# act.squeeze_(0)
info = nested_utils.map_nested(lambda x: x.squeeze(0), info)
return Action(act, info)
def max_autocorr(data: Sequence[int], n: int) -> float:
n = min(len(data), n)
x = np.asarray(data)
corr = [autocorrelation(x, i) for i in range(n)]
corr = np.asarray(corr[1:])
corr = np.nan_to_num(corr)
return corr.max()
def run_loop(
env: Env,
agent: PPOAgent,
victim_addr: int = -1,
threshold: Union[float, Sequence[float]] = 0.75) -> Dict[str, float]:
episode_length = 0
episode_return = 0.0
num_guess = 0
num_correct = 0
if victim_addr == -1:
timestep = env.reset()
else:
timestep = env.reset(victim_address=victim_addr)
agent.observe_init(timestep)
while not (timestep.terminated or timestep.truncated):
# Model server requires a batch_dim, so unsqueeze here for local runs.
timestep = batch_obs(timestep)
action = agent.act(timestep)
# Unbatch the action.
action = unbatch_action(action)
timestep = env.step(action)
agent.observe(action, timestep)
episode_length += 1
episode_return += timestep.reward
if "guess_correct" in timestep.info:
num_guess += 1
if timestep.info["guess_correct"]:
num_correct += 1
autocorr_n = (env.env.env._env.cache_size *
env.env.env.cc_hunter_check_length)
max_ac = max_autocorr(env.env.cc_hunter_history, autocorr_n)
if isinstance(threshold, float):
threshold = (threshold, )
detect = [max_ac >= t for t in threshold]
metrics = {
"episode_length": episode_length,
"episode_return": episode_return,
"num_guess": num_guess,
"num_correct": num_correct,
"correct_rate": num_correct / num_guess,
"bandwith": num_guess / episode_length,
"max_autocorr": max_ac,
}
for t, d in zip(threshold, detect):
metrics[f"detect_rate-{t}"] = d
return metrics
def run_loops(env: Env,
agent: PPOAgent,
num_episodes: int = -1,
seed: int = 0,
reset_cache_state: bool = False,
threshold: Union[float, Sequence[float]] = 0.75) -> StatsDict:
# env.seed(seed)
env.reset(seed=seed)
metrics = StatsDict()
num_guess = 0
num_correct = 0
tot_length = 0
if num_episodes == -1:
start = env.env.victim_address_min
stop = env.env.victim_address_max + 1 + int(
env.env._env.allow_empty_victim_access)
for victim_addr in range(start, stop):
cur_metrics = run_loop(env,
agent,
victim_addr=victim_addr,
threshold=threshold)
num_guess += cur_metrics["num_guess"]
num_correct += cur_metrics["num_correct"]
tot_length += cur_metrics["episode_length"]
metrics.extend(cur_metrics)
else:
for _ in range(num_episodes):
cur_metrics = run_loop(env,
agent,
victim_addr=-1,
threshold=threshold)
num_guess += cur_metrics["num_guess"]
num_correct += cur_metrics["num_correct"]
tot_length += cur_metrics["episode_length"]
metrics.extend(cur_metrics)
metrics.add("overall_correct_rate", num_correct / num_guess)
metrics.add("overall_bandwith", num_guess / tot_length)
return metrics
@hydra.main(config_path="./config", config_name="sample_cchunter")
def main(cfg):
# Create env
cfg.env_config.verbose = 1
env_fac = CacheEnvCCHunterWrapperFactory(
OmegaConf.to_container(cfg.env_config))
env = env_fac(index=0)
    # Model loading is disabled here: this script evaluates the hand-crafted
    # textbook attacker instead of a trained PPO policy.
    # model = model_utils.get_model(cfg.model_config, cfg.env_config.window_size,
    #                               env.action_space.n, cfg.checkpoint)
    # model.eval()
    # Create agent
    # agent = PPOAgent(model, deterministic_policy=cfg.deterministic_policy)
agent = TextbookAgent(cfg.env_config)
# Run loops
metrics = run_loops(env, agent, cfg.num_episodes, cfg.seed)
logging.info("\n\n" + metrics.table(info="sample") + "\n")
if __name__ == "__main__":
main()
| AutoCAT-main | src/rlmeta/sample_cchunter_textbook.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# script for plotting figure on paper
import logging
from typing import Dict
#import hydra
#import torch
#import torch.nn
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# sys.path.append("/home/mulong/RL_SCA/src/CacheSimulator/src")
#import rlmeta.utils.nested_utils as nested_utils
import numpy as np
#from rlmeta.agents.ppo.ppo_agent import PPOAgent
#from rlmeta.core.types import Action
#from rlmeta.envs.env import Env
#from rlmeta.utils.stats_dict import StatsDict
#from cchunter_wrapper import CCHunterWrapper
#from cache_env_wrapper import CacheEnvWrapperFactory
#from cache_ppo_model import CachePPOModel
#from cache_ppo_transformer_model import CachePPOTransformerModel
#from textbook_attacker import TextbookAgent
# from cache_guessing_game_env_impl import CacheGuessingGameEnv
# from cchunter_wrapper import CCHunterWrapper
#from cache_ppo_transformer_periodic_model import CachePPOTransformerPeriodicModel
import matplotlib.pyplot as plt
import pandas as pd
#from cache_env_wrapper import CacheEnvCCHunterWrapperFactory
import matplotlib.font_manager as font_manager
from autocorrelation import autocorrelation
fontaxes = {
'family': 'Arial',
# 'color': 'black',
'weight': 'bold',
#'size': 6,
}
fontaxes_title = {
'family': 'Arial',
# 'color': 'black',
'weight': 'bold',
# 'size': 9,
}
font = font_manager.FontProperties(family='Arial',
weight='bold',
style='normal')
def autocorrelation_plot_forked(series, ax=None, n_lags=None, change_deno=False, change_core=False, **kwds):
"""
Autocorrelation plot for time series.
Parameters:
-----------
series: Time series
ax: Matplotlib axis object, optional
n_lags: maximum number of lags to show. Default is len(series)
kwds : keywords
Options to pass to matplotlib plotting method
Returns:
-----------
class:`matplotlib.axis.Axes`
"""
import matplotlib.pyplot as plt
n_full = len(series)
if n_full <= 2:
raise ValueError("""len(series) = %i but should be > 2
to maintain at least 2 points of intersection when autocorrelating
with lags"""%n_full)
# Calculate the maximum number of lags permissible
# Subtract 2 to keep at least 2 points of intersection,
# otherwise pandas.Series.autocorr will throw a warning about insufficient
# degrees of freedom
n_maxlags = n_full #- 2
# calculate the actual number of lags
if n_lags is None:
# Choosing a reasonable number of lags varies between datasets,
# but if the data longer than 200 points, limit this to 100 lags as a
# reasonable default for plotting when n_lags is not specified
n_lags = min(n_maxlags, 100)
else:
if n_lags > n_maxlags:
raise ValueError("n_lags should be < %i (i.e. len(series)-2)"%n_maxlags)
if ax is None:
        ax = plt.gca()
        ax.set_xlim(0, n_lags)
        ax.set_ylim(-1.1, 1.6)
if not change_core:
data = np.asarray(series)
def r(h: int) -> float:
return autocorrelation(data, h)
else:
def r(h):
return series.autocorr(lag=h)
# x = np.arange(n_lags) + 1
x = np.arange(n_lags)
# y = lmap(r, x)
y = np.array([r(xi) for xi in x])
print(y)
print(f"y = {y}")
print(f"y_max = {np.max(y[1:])}")
z95 = 1.959963984540054
z99 = 2.5758293035489004
# ax.axhline(y=-z95 / np.sqrt(n_full), color='grey')
# ax.axhline(y=-z99 / np.sqrt(n_full), linestyle='--', color='grey')
ax.set_xlabel("Lag (p)", fontdict = fontaxes)
ax.set_ylabel("Autocorrelation \n Coefficient", fontdict = fontaxes)
ax.plot(x, y, **kwds)
if 'label' in kwds:
ax.legend()
ax.grid()
return ax
def main():
plt.figure(num=None, figsize=(5, 2), dpi=300, facecolor='w')
series_human = [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
#series_baseline = [1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# sampled from python sample_cchunter.py checkpoint=/home/ml2558/CacheSimulator/src/rlmeta/data/table8/hpca_ae_exp_8_baseline_new/exp1/ppo_agent-499.pth num_episodes=1
series_baseline = [0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
#series_l2 = [0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1]
# sampled from python sample_cchunter.py checkpoint=/home/ml2558/CacheSimulator/src/rlmeta/data/table8/hpca_ae_exp_8_autocor_new/exp1/ppo_agent-499.pth num_episodes=1
series_l2 = [0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0]
#series_l2 = [0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1]
for i in range(0, len(series_baseline)):
series_baseline[i] += 1.2
for i in range(0, len(series_l2)):
series_l2[i] += 2.4
series_human = series_human[0:50]
series_baseline = series_baseline[0:50]
series_l2 = series_l2[0:50]
ax = plt.subplot(121)
ax.set_xlim([0, 48] )
ax.set_ylim([-0.1, 3.7])
ax.set_yticks([])
plt.tick_params(left=False)
text_x = -10
ax.text(text_x, 0.15, 'A->V', fontproperties=font)
ax.text(text_x, 0.85, 'V->A', fontproperties=font)
ax.text(text_x, 0.15+1.2, 'A->V',fontproperties=font)
ax.text(text_x, 0.85+1.2, 'V->A',fontproperties=font)
ax.text(text_x, 0.15+2.4, 'A->V', fontproperties=font)
ax.text(text_x, 0.85+2.4, 'V->A',fontproperties=font)
#ax.set_xlim([0, 60])
ax.plot(series_human)#, linewidth=4 )
ax.plot(series_baseline)
ax.plot(series_l2)
ax.set_xlabel("Number of cache conflicts", fontdict = fontaxes)
ax.legend(prop={'size': 6, 'family': 'Arial', 'weight':'bold'})
ax.legend(['textbook', 'RL_baseline', 'RL_autocor'], ncol=3,bbox_to_anchor=(2.2,1.28), prop=font)
data_human = pd.Series(series_human)
data_baseline = pd.Series(series_baseline)
data_l2 = pd.Series(series_l2)
cache_size = 4
#plt.figure(num=None, figsize=(5.2, 2), dpi=300, facecolor='w')
#plt.subplots_adjust(right = 0.98, top =0.97, bottom=0.24,left=0.13,wspace=0, hspace=0.2)
ax = plt.subplot(122)
autocorrelation_plot_forked(data_human,ax=ax, n_lags= 8 * cache_size, change_deno=True) #consider removing -2
autocorrelation_plot_forked(data_baseline, ax=ax,n_lags= 8 * cache_size, change_deno=True) #consider removing -2
autocorrelation_plot_forked(data_l2, ax=ax, n_lags= 8 * cache_size, change_deno=True) #consider removing -2
#plt.legend(['textbook', 'RL_baseline', 'RL_autocor'], ncol=3, prop=font)
plt.plot([0,40],[0.75,0.75], linestyle='--', color='grey')
# ax.axhline(y=z95 / np.sqrt(n_full), color='grey')
plt.plot([0,40],[0,0], color='black')
ax.set_xlim([0, 32] )
ax.yaxis.set_label_coords(-0.09, .5)
#plt.savefig('cchunter_compare.pdf')
#plt.savefig('cchunter_compare.png')
plt.subplots_adjust(right = 0.999, top =0.85, bottom=0.22,left=0.085,wspace=0.28, hspace=0.2)
plt.savefig('event_train.pdf')
plt.savefig('event_train.png')
if __name__ == "__main__":
main()
'''
human
Reset...(also the cache state)
victim address 3
Step...
acceee 4 miss
Step...
access 5 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
victim access 3
Step...
access 4 hit
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:0.79
Reset...(cache state the same)
victim address 2
victim_address! 3 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 5 hit
Step...
acceee 6 miss
Step...
access 7 hit
Step...
correct guess 2
correct rate:0.8
Reset...(cache state the same)
victim address 1
victim_address! 2 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:0.81
Reset...(cache state the same)
victim address 3
victim_address! 1 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:0.82
Reset...(cache state the same)
victim address 1
victim_address! 3 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:0.83
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
access 5 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 0
correct rate:0.84
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 5 hit
Step...
acceee 6 miss
Step...
access 7 hit
Step...
correct guess 2
correct rate:0.85
Reset...(cache state the same)
victim address 1
victim_address! 2 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:0.86
Reset...(cache state the same)
victim address 2
victim_address! 1 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 5 hit
Step...
acceee 6 miss
Step...
access 7 hit
Step...
correct guess 2
correct rate:0.87
Reset...(cache state the same)
victim address 1
victim_address! 2 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:0.88
Reset...(cache state the same)
victim address 1
victim_address! 1 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:0.89
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
access 5 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 0
correct rate:0.9
Reset...(cache state the same)
victim address 0
victim_address! 0 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
access 5 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 0
correct rate:0.91
Reset...(cache state the same)
victim address 3
victim_address! 0 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:0.92
Reset...(cache state the same)
victim address 2
victim_address! 3 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 5 hit
Step...
acceee 6 miss
Step...
access 7 hit
Step...
correct guess 2
correct rate:0.93
Reset...(cache state the same)
victim address 1
victim_address! 2 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:0.94
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
access 5 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 0
correct rate:0.95
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 5 hit
Step...
acceee 6 miss
Step...
access 7 hit
Step...
correct guess 2
correct rate:0.96
Reset...(cache state the same)
victim address 1
victim_address! 2 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:0.97
Reset...(cache state the same)
victim address 1
victim_address! 1 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:0.98
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
access 5 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 0
correct rate:0.99
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 1 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 5 hit
Step...
acceee 6 miss
Step...
access 7 hit
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 2 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 3 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 5 hit
Step...
acceee 6 miss
Step...
access 7 hit
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 2 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
access 5 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Episode number of guess: 26
Episode number of corrects: 26
correct rate: 1.0
bandwidth rate: 0.1625
[1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
/home/mulong/RL_SCA/src/CacheSimulator/src/rlmeta/sample_cchunter.py:75: MatplotlibDeprecationWarning: Calling gca() with keyword arguments was deprecated in Matplotlib 3.4. Starting two minor releases later, gca() will take no keyword arguments. The gca() function should only be used to get the current axes, or if no axes exist, create new axes with default keyword arguments. To create a new axes with non-default arguments, use plt.axes() or plt.subplot().
ax = plt.gca(xlim=(1, n_lags), ylim=(-1.0, 1.0))
y = [ 1. -0.98113208 0.96223727 -0.94339623 0.92447455 -0.90566038
0.88671182 -0.86792453 0.84894909 -0.83018868 0.81118637 -0.79245283
0.77342364 -0.75471698 0.73566091 -0.71698113 0.69789819 -0.67924528
0.66013546 -0.64150943 0.62237274 -0.60377358 0.58461001 -0.56603774
0.54684728 -0.52830189 0.50908456 -0.49056604 0.47132183 -0.45283019
0.4335591 -0.41509434]
y_max = 0.9622372735580283
Figure saved as 'cchunter_hit_trace_3_acf.png
Total number of guess: 104
Total number of corrects: 104
Episode total: 640
correct rate: 1.0
bandwidth rate: 0.1625
'''
'''
l2
Reset...(also the cache state)
victim address 3
Step...
victim access 3
Step...
acceee 5 miss
Step...
acceee 4 miss
Step...
acceee 6 miss
Step...
victim access 3
Step...
access 5 hit
Step...
access 4 hit
Step...
access 6 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Step...
victim access 2
Step...
access 5 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 2 correct guess! True
Step...
victim access 3
Step...
access 5 hit
Step...
access 4 hit
Step...
access 6 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 0 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 0 correct guess! True
Step...
victim access 3
Step...
access 5 hit
Step...
access 6 hit
Step...
access 4 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 3 correct guess! True
Step...
victim access 3
Step...
access 5 hit
Step...
access 6 hit
Step...
access 4 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 0 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
victim access 1
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 1 correct guess! True
Step...
victim access 1
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 1 correct guess! True
Step...
victim access 3
Step...
access 5 hit
Step...
access 6 hit
Step...
access 4 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 3 correct guess! True
Step...
victim access 3
Step...
access 5 hit
Step...
access 6 hit
Step...
access 4 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 0 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
acceee 7 miss
Step...
victim access 1
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 1 correct guess! True
Step...
victim access 3
Step...
access 5 hit
Step...
access 4 hit
Step...
access 6 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
victim access 1
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
victim access 1
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
acceee 7 miss
Step...
victim access 1
Step...
access 6 hit
Step...
access 4 hit
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 1 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
access 6 hit
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 1 correct guess! True
Step...
victim access 3
Step...
access 6 hit
Step...
access 4 hit
Step...
access 5 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
access 6 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
victim access 1
Step...
access 6 hit
Step...
access 4 hit
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
access 6 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
victim access 1
Step...
acceee 7 miss
Step...
victim access 1
Step...
victim access 1
Step...
access 6 hit
Step...
access 4 hit
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 1 correct guess! True
Step...
victim access 3
Step...
access 6 hit
Step...
access 4 hit
Step...
access 5 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
acceee 7 miss
Step...
victim access 0
Step...
victim access 0
Step...
access 6 hit
Step...
acceee 4 miss
Step...
victim access 0
Step...
victim access 0
Step...
victim access 0
Step...
acceee 4 miss
Step...
access 4 hit
Step...
access 4 hit
Step...
access 4 hit
Step...
access 4 hit
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 0 correct guess! True
Episode number of guess: 32
Episode number of corrects: 32
correct rate: 1.0
bandwidth rate: 0.19753086419753085
[0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1]
/home/mulong/RL_SCA/src/CacheSimulator/src/rlmeta/sample_cchunter.py:75: MatplotlibDeprecationWarning: Calling gca() with keyword arguments was deprecated in Matplotlib 3.4. Starting two minor releases later, gca() will take no keyword arguments. The gca() function should only be used to get the current axes, or if no axes exist, create new axes with default keyword arguments. To create a new axes with non-default arguments, use plt.axes() or plt.subplot().
ax = plt.gca(xlim=(1, n_lags), ylim=(-1.0, 1.0))
y = [ 1. -0.6823596 0.42214715 -0.34085761 0.25558463 -0.17101461
0.0498516 -0.0011716 -0.01648051 -0.03524565 0.08539014 -0.13729204
0.1872608 -0.30731079 0.42507615 -0.40935718 0.35528782 -0.30748653
0.25324143 -0.13764352 0.01525033 0.03219948 0.01689057 -0.00187456
-0.01718347 -0.06820667 0.08468718 -0.10228072 0.11858549 -0.1040967
0.08451144 -0.13817074]
y_max = 0.42507615402640003
Figure saved as 'cchunter_hit_trace_3_acf.png
Total number of guess: 134
Total number of corrects: 134
Episode total: 648
correct rate: 1.0
bandwidth rate: 0.20679012345679013
'''
'''
baseline
Reset...(also the cache state)
victim address 3
Step...
acceee 4 miss
Step...
acceee 7 miss
Step...
acceee 6 miss
Step...
victim access 3
Step...
access 4 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 3 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 1 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 2 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 0 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 3 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 2 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 2 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 3 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 0 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 0 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 7 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 2 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
access 7 hit
Step...
access 6 hit
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 7 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 2 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
access 7 hit
Step...
access 6 hit
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 0 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 7 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 2 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 7 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 2 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 0 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 3 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 7 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 2 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 7 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 2 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 0 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 3 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 3 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 7 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 2 correct guess! True
Episode number of guess: 38
Episode number of corrects: 38
correct rate: 1.0
bandwidth rate: 0.2360248447204969
[1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
/home/mulong/RL_SCA/src/CacheSimulator/src/rlmeta/sample_cchunter.py:75: MatplotlibDeprecationWarning: Calling gca() with keyword arguments was deprecated in Matplotlib 3.4. Starting two minor releases later, gca() will take no keyword arguments. The gca() function should only be used to get the current axes, or if no axes exist, create new axes with default keyword arguments. To create a new axes with non-default arguments, use plt.axes() or plt.subplot().
ax = plt.gca(xlim=(1, n_lags), ylim=(-1.0, 1.0))
y = [ 1. -0.92995169 0.94312692 -0.92874396 0.91403162 -0.89975845
0.88493632 -0.87077295 0.85584102 -0.84178744 0.82674572 -0.81280193
0.79765042 -0.78381643 0.76855512 -0.75483092 0.73945982 -0.72584541
0.71036451 -0.6968599 0.68126921 -0.6678744 0.65217391 -0.63888889
0.62307861 -0.60990338 0.59398331 -0.58091787 0.56488801 -0.55193237
0.53579271 -0.52294686]
y_max = 0.9431269213877909
Figure saved as 'cchunter_hit_trace_3_acf.png
Total number of guess: 147
Total number of corrects: 147
Episode total: 643
correct rate: 1.0
bandwidth rate: 0.2286158631415241
'''
| AutoCAT-main | src/rlmeta/plot_cchunter.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import logging
from typing import Dict, Optional
import hydra
from omegaconf import DictConfig, OmegaConf
import torch
import torch.nn
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.types import Action, TimeStep
from rlmeta.envs.env import Env
from rlmeta.utils.stats_dict import StatsDict
import model_utils
from cache_env_wrapper import CacheEnvWrapperFactory
def batch_obs(timestep: TimeStep) -> TimeStep:
obs, reward, terminated, truncated, info = timestep
return TimeStep(obs.unsqueeze(0), reward, terminated, truncated, info)
def unbatch_action(action: Action) -> Action:
act, info = action
act.squeeze_(0)
info = nested_utils.map_nested(lambda x: x.squeeze(0), info)
return Action(act, info)
def run_loop(env: Env,
agent: PPOAgent,
victim_addr: int = -1,
reset_cache_state: bool = False) -> Dict[str, float]:
episode_length = 0
episode_return = 0.0
if victim_addr == -1:
timestep = env.reset(reset_cache_state=reset_cache_state)
else:
timestep = env.reset(victim_address=victim_addr,
reset_cache_state=reset_cache_state)
agent.observe_init(timestep)
    while not (timestep.terminated or timestep.truncated):
# Model server requires a batch_dim, so unsqueeze here for local runs.
timestep = batch_obs(timestep)
action = agent.act(timestep)
# Unbatch the action.
action = unbatch_action(action)
timestep = env.step(action)
agent.observe(action, timestep)
episode_length += 1
episode_return += timestep.reward
# Only correct guess has positive reward.
correct_rate = float(episode_return > 0.0)
metrics = {
"episode_length": episode_length,
"episode_return": episode_return,
"correct_rate": correct_rate,
}
return metrics
def run_loops(env: Env,
agent: PPOAgent,
num_episodes: int = -1,
seed: int = 0,
reset_cache_state: bool = False) -> StatsDict:
# env.seed(seed)
env.reset(seed=seed)
metrics = StatsDict()
if num_episodes == -1:
start = env.env.victim_address_min
stop = env.env.victim_address_max + 1 + int(
env.env.allow_empty_victim_access)
for victim_addr in range(start, stop):
cur_metrics = run_loop(env,
agent,
victim_addr=victim_addr,
reset_cache_state=reset_cache_state)
metrics.extend(cur_metrics)
else:
for _ in range(num_episodes):
cur_metrics = run_loop(env,
agent,
victim_addr=-1,
reset_cache_state=reset_cache_state)
metrics.extend(cur_metrics)
return metrics
@hydra.main(config_path="./config", config_name="sample_attack")
def main(cfg):
# Create env
cfg.env_config.verbose = 1
env_fac = CacheEnvWrapperFactory(OmegaConf.to_container(cfg.env_config))
env = env_fac(index=0)
# Load model
model = model_utils.get_model(cfg.model_config, cfg.env_config.window_size,
env.action_space.n, cfg.checkpoint)
model.eval()
# Create agent
agent = PPOAgent(model, deterministic_policy=cfg.deterministic_policy)
# Run loops
metrics = run_loops(env, agent, cfg.num_episodes, cfg.seed,
cfg.reset_cache_state)
logging.info("\n\n" + metrics.table(info="sample") + "\n")
if __name__ == "__main__":
main()
| AutoCAT-main | src/rlmeta/sample_attack.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import copy
import logging
import os
import time
import hydra
from omegaconf import DictConfig, OmegaConf
import torch
import torch.multiprocessing as mp
import rlmeta.utils.hydra_utils as hydra_utils
import rlmeta.utils.random_utils as random_utils
import rlmeta.utils.remote_utils as remote_utils
from rlmeta.agents.agent import AgentFactory
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.controller import Phase, Controller
from rlmeta.core.loop import LoopList, ParallelLoop
from rlmeta.core.model import ModelVersion, RemotableModelPool
from rlmeta.core.model import make_remote_model, wrap_downstream_model
from rlmeta.core.replay_buffer import ReplayBuffer, make_remote_replay_buffer
from rlmeta.core.server import Server, ServerList
from rlmeta.core.callbacks import EpisodeCallbacks
from rlmeta.core.types import Action, TimeStep
from rlmeta.samplers import UniformSampler
from rlmeta.storage import TensorCircularBuffer
from rlmeta.utils.optimizer_utils import make_optimizer
import model_utils
from cache_env_wrapper import CacheEnvCCHunterWrapperFactory
from metric_callbacks import CCHunterMetricCallbacks
@hydra.main(config_path="./config", config_name="ppo_cchunter")
def main(cfg):
if cfg.seed is not None:
random_utils.manual_seed(cfg.seed)
print(f"workding_dir = {os.getcwd()}")
my_callbacks = CCHunterMetricCallbacks()
logging.info(hydra_utils.config_to_json(cfg))
env_fac = CacheEnvCCHunterWrapperFactory(
OmegaConf.to_container(cfg.env_config))
env = env_fac(index=0)
train_model = model_utils.get_model(
cfg.model_config, cfg.env_config.window_size,
env.action_space.n).to(cfg.train_device)
infer_model = copy.deepcopy(train_model).to(cfg.infer_device)
infer_model.eval()
optimizer = make_optimizer(train_model.parameters(), **cfg.optimizer)
ctrl = Controller()
rb = ReplayBuffer(TensorCircularBuffer(cfg.replay_buffer_size),
UniformSampler())
m_server = Server(cfg.m_server_name, cfg.m_server_addr)
r_server = Server(cfg.r_server_name, cfg.r_server_addr)
c_server = Server(cfg.c_server_name, cfg.c_server_addr)
m_server.add_service(RemotableModelPool(infer_model, seed=cfg.seed))
r_server.add_service(rb)
c_server.add_service(ctrl)
servers = ServerList([m_server, r_server, c_server])
a_model = wrap_downstream_model(train_model, m_server)
t_model = make_remote_model(infer_model, m_server)
e_model = make_remote_model(infer_model, m_server)
a_ctrl = remote_utils.make_remote(ctrl, c_server)
t_ctrl = remote_utils.make_remote(ctrl, c_server)
e_ctrl = remote_utils.make_remote(ctrl, c_server)
a_rb = make_remote_replay_buffer(rb, r_server, prefetch=cfg.prefetch)
t_rb = make_remote_replay_buffer(rb, r_server)
agent = PPOAgent(a_model,
replay_buffer=a_rb,
controller=a_ctrl,
optimizer=optimizer,
batch_size=cfg.batch_size,
learning_starts=cfg.get("learning_starts", None),
entropy_coeff=cfg.get("entropy_coeff", 0.01),
model_push_period=cfg.model_push_period)
t_agent_fac = AgentFactory(PPOAgent, t_model, replay_buffer=t_rb)
e_agent_fac = AgentFactory(PPOAgent, e_model, deterministic_policy=True)
t_loop = ParallelLoop(env_fac,
t_agent_fac,
t_ctrl,
running_phase=Phase.TRAIN,
should_update=True,
num_rollouts=cfg.num_train_rollouts,
num_workers=cfg.num_train_workers,
seed=cfg.seed,
episode_callbacks=my_callbacks)
e_loop = ParallelLoop(env_fac,
e_agent_fac,
e_ctrl,
running_phase=Phase.EVAL,
should_update=False,
num_rollouts=cfg.num_eval_rollouts,
num_workers=cfg.num_eval_workers,
seed=(None if cfg.seed is None else cfg.seed +
cfg.num_train_rollouts),
episode_callbacks=my_callbacks)
loops = LoopList([t_loop, e_loop])
servers.start()
loops.start()
agent.connect()
start_time = time.perf_counter()
for epoch in range(cfg.num_epochs):
stats = agent.train(cfg.steps_per_epoch)
cur_time = time.perf_counter() - start_time
info = f"T Epoch {epoch}"
if cfg.table_view:
logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
else:
logging.info(
stats.json(info, phase="Train", epoch=epoch, time=cur_time))
time.sleep(1)
stats = agent.eval(cfg.num_eval_episodes)
cur_time = time.perf_counter() - start_time
info = f"E Epoch {epoch}"
if cfg.table_view:
logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
else:
logging.info(
stats.json(info, phase="Eval", epoch=epoch, time=cur_time))
torch.save(train_model.state_dict(), f"ppo_agent-{epoch}.pth")
time.sleep(1)
loops.terminate()
servers.terminate()
if __name__ == "__main__":
mp.set_start_method("spawn")
main()
| AutoCAT-main | src/rlmeta/train_ppo_cchunter.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import argparse
import math
import json
import re
from datetime import datetime
from typing import Any, Dict, Optional, Union
import matplotlib.pyplot as plt
import numpy as np
JSON_REGEX = re.compile("{.+}")
def parse_json(line: str) -> Optional[Dict[str, Any]]:
m = JSON_REGEX.search(line)
return None if m is None else json.loads(m.group())
def get_value(val: Union[float, Dict[str, float]]) -> float:
return val["mean"] if isinstance(val, dict) else val
def plot(log_file: str,
phase: str,
xkey: str,
ykey: str,
fig_file: Optional[str] = None) -> None:
x = []
y = []
with open(log_file, "r") as f:
line = f.readline()
# cfg = parse_json(line)
for line in f:
stats = parse_json(line)
if stats is None:
continue
cur_phase = stats.get("phase", None)
if cur_phase == phase:
x.append(get_value(stats[xkey]))
y.append(get_value(stats[ykey]))
# y.append(math.log(get_value(stats[ykey])))
# y.append(get_value(stats["gap"]) / get_value(stats["episode_length"]))
x = np.array(x)
y = np.array(y)
plt.plot(x, y, label=ykey)
plt.xlabel(xkey)
plt.ylabel(ykey)
plt.legend()
if fig_file is not None:
plt.savefig(fig_file)
else:
plt.show()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--log_file", type=str, help="log file to plot")
parser.add_argument("--phase",
default="Eval",
type=str,
help="phase to plot.")
parser.add_argument("--xkey",
default="epoch",
type=str,
help="x values to plot.")
parser.add_argument("--ykey",
default="episode_return",
type=str,
help="y values to plot.")
parser.add_argument("--fig_file",
default=None,
type=str,
help="figure file to save.")
flags = parser.parse_intermixed_args()
plot(flags.log_file, flags.phase, flags.xkey, flags.ykey, flags.fig_file)
if __name__ == "__main__":
main()
| AutoCAT-main | src/rlmeta/plot_figure_remap.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import os
import sys
from typing import Dict, List, Tuple
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import rlmeta.core.remote as remote
from rlmeta.agents.ppo.ppo_model import PPOModel
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from models.backbone import CacheBackbone
class CachePPOMlpModel(PPOModel):
def __init__(self,
latency_dim: int,
victim_acc_dim: int,
action_dim: int,
step_dim: int,
window_size: int,
action_embed_dim: int,
step_embed_dim: int,
hidden_dim: int,
output_dim: int,
num_layers: int = 1) -> None:
super().__init__()
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.backbone = CacheBackbone(latency_dim, victim_acc_dim, action_dim,
step_dim, window_size, action_embed_dim,
step_embed_dim, hidden_dim, num_layers)
self.linear_a = nn.Linear(self.hidden_dim, self.output_dim)
self.linear_v = nn.Linear(self.hidden_dim, 1)
self._device = None
def forward(self, obs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
h = self.backbone(obs)
p = self.linear_a(h)
logpi = F.log_softmax(p, dim=-1)
v = self.linear_v(h)
return logpi, v
@remote.remote_method(batch_size=128)
def act(
self, obs: torch.Tensor, deterministic_policy: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
if self._device is None:
self._device = next(self.parameters()).device
with torch.no_grad():
logpi, v = self.forward(obs)
greedy_action = logpi.argmax(-1, keepdim=True)
sample_action = logpi.exp().multinomial(1, replacement=True)
action = torch.where(deterministic_policy, greedy_action,
sample_action)
logpi = logpi.gather(dim=-1, index=action)
return action, logpi, v
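# Illustration (not part of the original file): how act() above mixes greedy and
# sampled actions per batch element. deterministic_policy is a boolean tensor,
# so torch.where picks the argmax where it is True and a multinomial sample
# elsewhere. A minimal sketch with made-up log-probabilities:
if __name__ == "__main__":
    logpi = torch.log(torch.tensor([[0.7, 0.2, 0.1],
                                    [0.1, 0.1, 0.8]]))
    deterministic = torch.tensor([[True], [False]])
    greedy = logpi.argmax(-1, keepdim=True)                 # per-row argmax
    sampled = logpi.exp().multinomial(1, replacement=True)  # per-row random draw
    action = torch.where(deterministic, greedy, sampled)
    print(action)  # first row is the argmax (0); second row is a sampled index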
| AutoCAT-main | src/rlmeta/cache_ppo_mlp_model.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import logging
import os
import sys
from typing import Dict, Optional, Sequence, Union
import hydra
from omegaconf import DictConfig, OmegaConf
import numpy as np
import torch
import torch.nn
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.types import Action, TimeStep
from rlmeta.envs.env import Env
from rlmeta.utils.stats_dict import StatsDict
import model_utils
from cache_env_wrapper import CacheEnvCycloneWrapperFactory
from textbook_attacker import TextbookAgent
def batch_obs(timestep: TimeStep) -> TimeStep:
obs, reward, terminated, truncated, info = timestep
return TimeStep(obs.unsqueeze(0), reward, terminated, truncated, info)
def unbatch_action(action: Action) -> Action:
act, info = action
# act.squeeze_(0)
info = nested_utils.map_nested(lambda x: x.squeeze(0), info)
return Action(act, info)
def max_autocorr(data: Sequence[int], n: int) -> float:
n = min(len(data), n)
x = np.asarray(data)
corr = [autocorrelation(x, i) for i in range(n)]
corr = np.asarray(corr[1:])
corr = np.nan_to_num(corr)
return corr.max()
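# Illustration (not part of the original file): max_autocorr above assumes an
# `autocorrelation(x, lag)` helper that is neither defined nor imported in this
# script. A minimal sketch of such a helper -- the standard sample
# autocorrelation at a given lag -- is given below; the original project may
# compute it differently.
def autocorrelation(x, lag: int) -> float:
    x = np.asarray(x, dtype=np.float64)
    n = len(x)
    if lag <= 0 or lag >= n:
        # lag 0 is 1.0 by definition; out-of-range lags carry no information
        return 1.0 if lag == 0 else 0.0
    mean = x.mean()
    var = np.sum((x - mean) ** 2)
    if var == 0.0:
        return 0.0
    return float(np.sum((x[:n - lag] - mean) * (x[lag:] - mean)) / var)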
def run_loop(env: Env,
agent: PPOAgent,
victim_addr: int = -1) -> Dict[str, float]:
episode_length = 0
episode_return = 0.0
num_guess = 0
num_correct = 0
cyclone_attack = 0
if victim_addr == -1:
timestep = env.reset()
else:
timestep = env.reset(victim_address=victim_addr)
agent.observe_init(timestep)
while not (timestep.terminated or timestep.truncated):
# Model server requires a batch_dim, so unsqueeze here for local runs.
timestep = batch_obs(timestep)
action = agent.act(timestep)
# Unbatch the action.
action = unbatch_action(action)
timestep = env.step(action)
agent.observe(action, timestep)
episode_length += 1
episode_return += timestep.reward
if "guess_correct" in timestep.info:
num_guess += 1
if timestep.info["guess_correct"]:
num_correct += 1
cyclone_attack += timestep.info.get("cyclone_attack", 0)
metrics = {
"episode_length": episode_length,
"episode_return": episode_return,
"num_guess": num_guess,
"num_correct": num_correct,
"correct_rate": num_correct / num_guess,
"bandwith": num_guess / episode_length,
"cyclone_attack": cyclone_attack,
}
return metrics
def run_loops(env: Env,
agent: PPOAgent,
num_episodes: int = -1,
seed: int = 0,
reset_cache_state: bool = False) -> StatsDict:
# env.seed(seed)
env.reset(seed=seed)
metrics = StatsDict()
num_guess = 0
num_correct = 0
tot_length = 0
if num_episodes == -1:
start = env.env.victim_address_min
stop = env.env.victim_address_max + 1 + int(
env.env._env.allow_empty_victim_access)
for victim_addr in range(start, stop):
cur_metrics = run_loop(env, agent, victim_addr=victim_addr)
num_guess += cur_metrics["num_guess"]
num_correct += cur_metrics["num_correct"]
tot_length += cur_metrics["episode_length"]
metrics.extend(cur_metrics)
else:
for _ in range(num_episodes):
cur_metrics = run_loop(env, agent, victim_addr=-1)
num_guess += cur_metrics["num_guess"]
num_correct += cur_metrics["num_correct"]
tot_length += cur_metrics["episode_length"]
metrics.extend(cur_metrics)
metrics.add("overall_correct_rate", num_correct / num_guess)
metrics.add("overall_bandwith", num_guess / tot_length)
return metrics
@hydra.main(config_path="./config", config_name="sample_cchunter")
def main(cfg):
# Create env
cfg.env_config.verbose = 1
env_fac = CacheEnvCycloneWrapperFactory(
OmegaConf.to_container(cfg.env_config))
env = env_fac(index=0)
# Load model
#model = model_utils.get_model(cfg.model_config, cfg.env_config.window_size,
# env.action_space.n, cfg.checkpoint)
#model.eval()
# Create agent
agent = TextbookAgent(
cfg.env_config
) #PPOAgent(model, deterministic_policy=cfg.deterministic_policy)
# Run loops
metrics = run_loops(env, agent, cfg.num_episodes, cfg.seed)
logging.info("\n\n" + metrics.table(info="sample") + "\n")
if __name__ == "__main__":
main()
| AutoCAT-main | src/rlmeta/sample_cyclone_textbook.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import os
import sys
from typing import Any, Dict
from rlmeta.envs.env import Env, EnvFactory
from rlmeta.envs.gym_wrapper import GymWrapper
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from cache_guessing_game_env_impl import CacheGuessingGameEnv
from cchunter_wrapper import CCHunterWrapper
from cyclone_wrapper import CycloneWrapper
class CacheEnvWrapperFactory(EnvFactory):
def __init__(self, env_config: Dict[str, Any]) -> None:
self._env_config = env_config
@property
def env_config(self) -> Dict[str, Any]:
return self._env_config
def __call__(self, index: int, *args, **kwargs) -> Env:
env = CacheGuessingGameEnv(self.env_config)
env = GymWrapper(env, old_step_api=True)
return env
class CacheEnvCCHunterWrapperFactory(EnvFactory):
def __init__(self, env_config: Dict[str, Any]) -> None:
self._env_config = env_config
@property
def env_config(self) -> Dict[str, Any]:
return self._env_config
def __call__(self, index: int, *args, **kwargs) -> Env:
# env = CacheGuessingGameEnv(self.env_config)
env = CCHunterWrapper(self.env_config)
env = GymWrapper(env, old_step_api=True)
return env
class CacheEnvCycloneWrapperFactory(EnvFactory):
def __init__(self, env_config: Dict[str, Any]) -> None:
self._env_config = env_config
@property
def env_config(self) -> Dict[str, Any]:
return self._env_config
def __call__(self, index: int, *args, **kwargs) -> Env:
# env = CacheGuessingGameEnv(self.env_config)
env = CycloneWrapper(self.env_config)
env = GymWrapper(env, old_step_api=True)
return env
| AutoCAT-main | src/rlmeta/cache_env_wrapper.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import os
import sys
from typing import Dict, List, Tuple
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import rlmeta.core.remote as remote
from rlmeta.agents.ppo.ppo_model import PPOModel
class CachePPOLstmModel(PPOModel):
def __init__(self,
latency_dim: int,
victim_acc_dim: int,
action_dim: int,
step_dim: int,
action_embed_dim: int,
step_embed_dim: int,
hidden_dim: int,
output_dim: int,
num_layers: int = 1) -> None:
super().__init__()
self.latency_dim = latency_dim
self.victim_acc_dim = victim_acc_dim
self.action_dim = action_dim
self.step_dim = step_dim
# self.window_size = window_size
self.action_embed_dim = action_embed_dim
# self.step_embed_dim = step_embed_dim
# self.input_dim = (self.latency_dim + self.victim_acc_dim +
# self.action_embed_dim + self.step_embed_dim)
self.input_dim = (self.latency_dim + self.victim_acc_dim +
self.action_embed_dim)
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.num_layers = num_layers
self.action_embed = nn.Embedding(self.action_dim,
self.action_embed_dim)
# self.step_embed = nn.Embedding(self.step_dim, self.step_embed_dim)
self.linear_i = nn.Linear(self.input_dim, self.hidden_dim)
self.encoder = nn.LSTM(
self.hidden_dim,
self.hidden_dim,
self.num_layers,
bias=False, # Disable bias for pre-padding sequence
bidirectional=False)
self.linear_a = nn.Linear(2 * self.hidden_dim, self.output_dim)
self.linear_v = nn.Linear(2 * self.hidden_dim, 1)
self._device = None
def make_one_hot(self, src: torch.Tensor,
num_classes: int) -> torch.Tensor:
mask = (src == -1)
src = src.masked_fill(mask, 0)
ret = F.one_hot(src, num_classes)
return ret.masked_fill(mask.unsqueeze(-1), 0.0)
def make_embedding(self, src: torch.Tensor,
embed: nn.Embedding) -> torch.Tensor:
mask = (src == -1)
src = src.masked_fill(mask, 0)
ret = embed(src)
return ret.masked_fill(mask.unsqueeze(-1), 0.0)
def forward(self, obs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
obs = obs.to(torch.int64)
assert obs.dim() == 3
# batch_size = obs.size(0)
obs = torch.flip(obs, dims=(1, )) # Reverse input to pre-padding
l, v, act, _ = torch.unbind(obs, dim=-1)
l = self.make_one_hot(l, self.latency_dim)
v = self.make_one_hot(v, self.victim_acc_dim)
act = self.make_embedding(act, self.action_embed)
# stp = self.make_embedding(stp, self.step_embed)
x = torch.cat((l, v, act), dim=-1)
x = self.linear_i(x)
x = x.transpose(0, 1).contiguous()
_, (h, c) = self.encoder(x)
h = h.mean(dim=0)
c = c.mean(dim=0)
h = torch.cat((h, c), dim=-1)
p = self.linear_a(h)
logpi = F.log_softmax(p, dim=-1)
v = self.linear_v(h)
return logpi, v
@remote.remote_method(batch_size=128)
def act(
self, obs: torch.Tensor, deterministic_policy: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
with torch.no_grad():
logpi, v = self.forward(obs)
greedy_action = logpi.argmax(-1, keepdim=True)
sample_action = logpi.exp().multinomial(1, replacement=True)
action = torch.where(deterministic_policy, greedy_action,
sample_action)
logpi = logpi.gather(dim=-1, index=action)
return action, logpi, v
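# Illustration (not part of the original file): the padding convention handled by
# make_one_hot/make_embedding above. Observation entries equal to -1 (e.g. unfilled
# history slots) are mapped to all-zero feature vectors, so the LSTM sees no
# spurious class for padding. A minimal, self-contained sketch:
if __name__ == "__main__":
    src = torch.tensor([0, 1, -1, 2])          # -1 marks a padded slot
    mask = (src == -1)
    one_hot = F.one_hot(src.masked_fill(mask, 0), num_classes=3)
    one_hot = one_hot.masked_fill(mask.unsqueeze(-1), 0)
    print(one_hot)
    # tensor([[1, 0, 0],
    #         [0, 1, 0],
    #         [0, 0, 0],   <- padded entry becomes a zero vector, not class 0
    #         [0, 0, 1]])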
| AutoCAT-main | src/rlmeta/cache_ppo_lstm_model.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# a textbook prime+probe attacker that serves as the agent
# which can achieve a high reward in the cache guessing game
# used to generate an attack sequence that can be detected by CCHunter
# currently it only works for a direct-mapped cache (associativity = 1)
class TextbookAgent():
    # the config is the same as the config for cache_guessing_game_env_impl
def __init__(self, env_config):
self.local_step = 0
self.lat = []
self.no_prime = False # set to true after first prime
if "cache_configs" in env_config:
#self.logger.info('Load config from JSON')
self.configs = env_config["cache_configs"]
self.num_ways = self.configs['cache_1']['associativity']
self.cache_size = self.configs['cache_1']['blocks']
attacker_addr_s = env_config["attacker_addr_s"] if "attacker_addr_s" in env_config else 4
attacker_addr_e = env_config["attacker_addr_e"] if "attacker_addr_e" in env_config else 7
victim_addr_s = env_config["victim_addr_s"] if "victim_addr_s" in env_config else 0
victim_addr_e = env_config["victim_addr_e"] if "victim_addr_e" in env_config else 3
flush_inst = env_config["flush_inst"] if "flush_inst" in env_config else False
self.allow_empty_victim_access = env_config["allow_empty_victim_access"] if "allow_empty_victim_access" in env_config else False
assert(self.num_ways == 1) # currently only support direct-map cache
assert(flush_inst == False) # do not allow flush instruction
            assert(attacker_addr_e - attacker_addr_s == victim_addr_e - victim_addr_s) # attacker and victim address ranges must have the same size
            # the attacker and victim address ranges must not overlap (they must be adjacent)
assert( ( attacker_addr_e + 1 == victim_addr_s ) or ( victim_addr_e + 1 == attacker_addr_s ) )
assert(self.allow_empty_victim_access == False)
# initialize the agent with an observation
def observe_init(self, timestep):
# initialization doing nothing
self.local_step = 0
self.lat = []
self.no_prime = False
return
# returns an action
def act(self, timestep):
info = {}
# do prime
if self.local_step < self.cache_size - ( self.cache_size if self.no_prime else 0 ):#- 1:
action = self.local_step # do prime
self.local_step += 1
return action, info
elif self.local_step == self.cache_size - (self.cache_size if self.no_prime else 0 ):#- 1: # do victim trigger
action = self.cache_size # do victim access
self.local_step += 1
return action, info
elif self.local_step < 2 * self.cache_size + 1 -(self.cache_size if self.no_prime else 0 ):#- 1 - 1:# do probe
action = self.local_step - ( self.cache_size + 1 - (self.cache_size if self.no_prime else 0 ) )#- 1 )
self.local_step += 1
            # timestep.state[0] is [latency, victim_accessed, original_action, step_count]
#self.lat.append(timestep.observation[0][0][0])
#print(timestep.observation)
return action, info
elif self.local_step == 2 * self.cache_size + 1 - (self.cache_size if self.no_prime else 0 ):# - 1 - 1: # do guess and terminate
# timestep is the observation from last step
# first timestep not useful
action = 2 * self.cache_size # default assume that last is miss
for addr in range(1, len(self.lat)):
if self.lat[addr].int() == 1: # miss
action = addr + self.cache_size
break
self.local_step = 0
self.lat=[]
self.no_prime = True
return action, info
else:
assert(False)
# is it useful for non-ML agent or not???
def observe(self, action, timestep):
if self.local_step < 2 * self.cache_size + 1 + 1 - (self.cache_size if self.no_prime else 0 ) and self.local_step > self.cache_size - (self.cache_size if self.no_prime else 0 ):#- 1:
## self.local_step += 1
self.lat.append(timestep.observation[0][0])
return
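# Illustration (not part of the original file): a minimal env_config sketch that
# satisfies the assertions in __init__ above -- a direct-mapped 4-block cache,
# no flush instruction, and equal-sized, non-overlapping attacker/victim address
# ranges. The values mirror the defaults read in __init__; any other keys the
# real environment needs are omitted here.
EXAMPLE_ENV_CONFIG = {
    "cache_configs": {
        "cache_1": {"associativity": 1, "blocks": 4},
    },
    "attacker_addr_s": 4,
    "attacker_addr_e": 7,
    "victim_addr_s": 0,
    "victim_addr_e": 3,
    "flush_inst": False,
    "allow_empty_victim_access": False,
}
# With this config, TextbookAgent(EXAMPLE_ENV_CONFIG).act(...) walks through the
# classic prime+probe phases: prime (actions 0..3), victim trigger (action 4),
# probe (actions 0..3 again), then a guess derived from the first observed probe miss.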
| AutoCAT-main | src/rlmeta/textbook_attacker.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import seaborn as sns
data=[[2, 6, 1, 1, 1, 2, 1, 1, 9, 5, 1, 4, 1, 8, 0, 2, 2, 0, 6, 1, 2, 2, 0, 0, 1, 2, 1, 1, 2, 4, 3, 3, 1, 0, 1, 2, 0, 3, 2, 1], [2, 2, 1, 2, 2, 1, 1, 1, 0, 1, 3, 2, 1, 0, 5, 1, 1, 0, 1, 1, 0, 3, 1, 5, 2, 5, 0, 3, 1, 0, 1, 1, 2, 4, 4, 1, 3, 0, 1, 2], [1, 0, 4, 1, 2, 0, 6, 4, 2, 1, 4, 1, 3, 1, 7, 3, 1, 7, 2, 4, 5, 1, 3, 2, 1, 3, 4, 1, 1, 1, 6, 5, 3, 1, 4, 2, 2, 2, 1, 1], [1, 1, 1, 4, 2, 4, 1, 2, 0, 1, 1, 0, 1, 1, 0, 1, 2, 2, 0, 3, 2, 0, 6, 1, 3, 0, 3, 2, 2, 2, 0, 1, 1, 3, 0, 3, 3, 6, 3, 4]]
# `self.cyclone_heatmap` was undefined at module level; plot the `data` list defined above
p = sns.heatmap(data, vmin=0, vmax=20)
p.set_xlabel('Time intervals (40 cycles)')
p.set_ylabel('Set index')
fig= p.get_figure()
fig.set_size_inches(3, 3)
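# The original snippet builds the figure but never saves or shows it; a minimal
# completion (the output filename is an assumption, not from the original script):
fig.savefig('cyclone_heatmap.png', bbox_inches='tight')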
| AutoCAT-main | src/rlmeta/plot_heatmap.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import copy
import logging
import os
import time
import hydra
from omegaconf import DictConfig, OmegaConf
import torch
import torch.multiprocessing as mp
import rlmeta.utils.hydra_utils as hydra_utils
import rlmeta.utils.random_utils as random_utils
import rlmeta.utils.remote_utils as remote_utils
from rlmeta.agents.agent import AgentFactory
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.controller import Phase, Controller
from rlmeta.core.loop import LoopList, ParallelLoop
from rlmeta.core.model import ModelVersion, RemotableModelPool
from rlmeta.core.model import make_remote_model, wrap_downstream_model
from rlmeta.core.replay_buffer import ReplayBuffer, make_remote_replay_buffer
from rlmeta.core.server import Server, ServerList
from rlmeta.core.callbacks import EpisodeCallbacks
from rlmeta.core.types import Action, TimeStep
from rlmeta.samplers import UniformSampler
from rlmeta.storage import TensorCircularBuffer
from rlmeta.utils.optimizer_utils import make_optimizer
import model_utils
from cache_env_wrapper import CacheEnvCycloneWrapperFactory
from metric_callbacks import CycloneMetricCallbacks
@hydra.main(config_path="./config", config_name="ppo_cyclone")
def main(cfg):
if cfg.seed is not None:
random_utils.manual_seed(cfg.seed)
print(f"workding_dir = {os.getcwd()}")
my_callbacks = CycloneMetricCallbacks()
logging.info(hydra_utils.config_to_json(cfg))
env_fac = CacheEnvCycloneWrapperFactory(
OmegaConf.to_container(cfg.env_config))
env = env_fac(index=0)
train_model = model_utils.get_model(
cfg.model_config, cfg.env_config.window_size,
env.action_space.n).to(cfg.train_device)
infer_model = copy.deepcopy(train_model).to(cfg.infer_device)
infer_model.eval()
optimizer = make_optimizer(train_model.parameters(), **cfg.optimizer)
ctrl = Controller()
rb = ReplayBuffer(TensorCircularBuffer(cfg.replay_buffer_size),
UniformSampler())
m_server = Server(cfg.m_server_name, cfg.m_server_addr)
r_server = Server(cfg.r_server_name, cfg.r_server_addr)
c_server = Server(cfg.c_server_name, cfg.c_server_addr)
m_server.add_service(RemotableModelPool(infer_model, seed=cfg.seed))
r_server.add_service(rb)
c_server.add_service(ctrl)
servers = ServerList([m_server, r_server, c_server])
a_model = wrap_downstream_model(train_model, m_server)
t_model = make_remote_model(infer_model, m_server)
e_model = make_remote_model(infer_model, m_server)
a_ctrl = remote_utils.make_remote(ctrl, c_server)
t_ctrl = remote_utils.make_remote(ctrl, c_server)
e_ctrl = remote_utils.make_remote(ctrl, c_server)
a_rb = make_remote_replay_buffer(rb, r_server, prefetch=cfg.prefetch)
t_rb = make_remote_replay_buffer(rb, r_server)
agent = PPOAgent(a_model,
replay_buffer=a_rb,
controller=a_ctrl,
optimizer=optimizer,
batch_size=cfg.batch_size,
learning_starts=cfg.get("learning_starts", None),
entropy_coeff=cfg.get("entropy_coeff", 0.01),
model_push_period=cfg.model_push_period)
t_agent_fac = AgentFactory(PPOAgent, t_model, replay_buffer=t_rb)
e_agent_fac = AgentFactory(PPOAgent, e_model, deterministic_policy=True)
t_loop = ParallelLoop(env_fac,
t_agent_fac,
t_ctrl,
running_phase=Phase.TRAIN,
should_update=True,
num_rollouts=cfg.num_train_rollouts,
num_workers=cfg.num_train_workers,
seed=cfg.seed,
episode_callbacks=my_callbacks)
e_loop = ParallelLoop(env_fac,
e_agent_fac,
e_ctrl,
running_phase=Phase.EVAL,
should_update=False,
num_rollouts=cfg.num_eval_rollouts,
num_workers=cfg.num_eval_workers,
seed=(None if cfg.seed is None else cfg.seed +
cfg.num_train_rollouts),
episode_callbacks=my_callbacks)
loops = LoopList([t_loop, e_loop])
servers.start()
loops.start()
agent.connect()
start_time = time.perf_counter()
for epoch in range(cfg.num_epochs):
stats = agent.train(cfg.steps_per_epoch)
cur_time = time.perf_counter() - start_time
info = f"T Epoch {epoch}"
if cfg.table_view:
logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
else:
logging.info(
stats.json(info, phase="Train", epoch=epoch, time=cur_time))
time.sleep(1)
stats = agent.eval(cfg.num_eval_episodes)
cur_time = time.perf_counter() - start_time
info = f"E Epoch {epoch}"
if cfg.table_view:
logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
else:
logging.info(
stats.json(info, phase="Eval", epoch=epoch, time=cur_time))
torch.save(train_model.state_dict(), f"ppo_agent-{epoch}.pth")
time.sleep(1)
loops.terminate()
servers.terminate()
if __name__ == "__main__":
mp.set_start_method("spawn")
main()
| AutoCAT-main | src/rlmeta/train_ppo_cyclone.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import argparse
import json
import re
from tabulate import tabulate
from typing import Any, Dict, Optional, Union
from rlmeta.utils.stats_dict import StatsItem, StatsDict
JSON_REGEX = re.compile("{.+}")
def parse_json(line: str) -> Optional[Dict[str, Any]]:
m = JSON_REGEX.search(line)
return None if m is None else json.loads(m.group())
def show_table(stats: Dict[str, Any], info: Optional[str] = None) -> tabulate:
if info is None:
head = ["key", "mean", "std", "min", "max", "count"]
else:
head = ["info", "key", "mean", "std", "min", "max", "count"]
data = []
for k, v in stats.items():
if isinstance(v, dict):
row = [k, v["mean"], v["std"], v["min"], v["max"], v["count"]]
else:
row = [k, v, 0.0, v, v, 1]
if info is not None:
row = [info] + row
data.append(row)
return tabulate(data,
head,
numalign="right",
stralign="right",
floatfmt=".8f")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--log_file", type=str, help="log file to plot")
flags = parser.parse_intermixed_args()
with open(flags.log_file, "r") as f:
line = f.readline()
exp_cfg = parse_json(line)
print(f"Experiment Configs = {exp_cfg}")
for line in f:
stats = parse_json(line)
info = stats.pop("info")
stats.pop("phase")
stats.pop("epoch")
print("\n" + show_table(stats, info) + "\n")
if __name__ == "__main__":
main()
| AutoCAT-main | src/rlmeta/data/show_log.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
#!/usr/bin/env python
import matplotlib.pyplot as plt
fontaxes = {
'family': 'Arial',
'color': 'black',
'weight': 'bold',
'size': 11,
}
fontaxes_title = {
'family': 'Arial',
'color': 'black',
# 'weight': 'bold',
'size': 10,
}
lsmarkersize = 2.5
lslinewidth = 0.6
Error_all = [[[[ 0 for i in range(5)] for j in range(5)] for k in range(2)] for l in range(4)]
Error_stat_all = [[[[ 0 for i in range(5)] for j in range(3)] for k in range(2)] for l in range(4)]
# machine order
# 0 fukushima
# 1 cornell
# 2 potato
# 3 cat
# channel order
# 0 LRU
# 1 SS
path_all = [
["../covert_channel_LRU_1thread_8way/test", #measurement_fukushima",
"../covert_channel_stream_1thread_2bits_8way/test"], #measurement_8way_fukushima"],
## ["../covert_channel_LRU_1thread_8way/measurement_core",
## "../covert_channel_stream_1thread_2bits_8way/measurement_8way_core"],
## ["../covert_channel_LRU_1thread/measurement_202206",
## "../covert_channel_stream_1thread_2bits/measurement_202206"],
## ["../covert_channel_LRU_1thread_ubuntu/measurement_Xeon", #TO DO
## "../covert_channel_stream_1thread_2bits_ubuntu/measurement"]
]
bit_rate_ch = [[6.20606,7.6704]] #,[7.314, 8.904],[6.8267,11.378],[4.26666,7.31428]]
bit_rate_all = [[[0 for j in range(5)] for k in range(2)] for l in range(4)]
for machine_idx in range(1):
for channel_idx in range(2):
# read from file
path = path_all[machine_idx][channel_idx]
for test_idx in range(5):
for bandwidth_idx in range (1,6):
filename = "{}/Error_rate_{}_{}.txt".format(path,bandwidth_idx,test_idx)
f = open(filename, "r")
for line in f:
pass
last_line = line
error = float(line)
Error_all[machine_idx][channel_idx][bandwidth_idx-1][test_idx] = error
# process each bandwitch
for i in range(5):
max_tmp = 0
min_tmp = 1
avg_tmp = 0
for j in range (5):
print(Error_all[machine_idx][channel_idx][i][j], end=" ")
if Error_all[machine_idx][channel_idx][i][j] > max_tmp:
max_tmp = Error_all[machine_idx][channel_idx][i][j]
if Error_all[machine_idx][channel_idx][i][j] < min_tmp:
min_tmp = Error_all[machine_idx][channel_idx][i][j]
avg_tmp = avg_tmp + Error_all[machine_idx][channel_idx][i][j]
avg_tmp = avg_tmp / 5
print(avg_tmp,min_tmp,max_tmp)
Error_stat_all[machine_idx][channel_idx][0][i] = avg_tmp
Error_stat_all[machine_idx][channel_idx][1][i] = max_tmp
Error_stat_all[machine_idx][channel_idx][2][i] = min_tmp
bit_rate_all[machine_idx][channel_idx][i] = bit_rate_ch[machine_idx][channel_idx]/(i+1)
for machine_idx in range(1):
for channel_idx in range(2):
print(bit_rate_all[machine_idx][channel_idx])
#Error_rate_stram=[[0.2177733333, 0.04370133333, 0.01709, 0.007975, 0.005696666667], [0.227539, 0.046631,0.022217,0.009277,0.006592],[0.210693, 0.041016,0.013916,0.007324,0.00415]]
#Error_rate_LRU=[[0.1423338333,0.02587883333,0.003662,0.004801666667,0.0013835],[0.583496, 0.054199, 0.006836, 0.008789, 0.005371],[0.01416, 0.009766, 0.001465, 0.001465,0]]
for machine_idx in range(1):
for channel_idx in range(2):
for i in range(3):
for j in range(5):
Error_stat_all[machine_idx][channel_idx][i][j] = Error_stat_all[machine_idx][channel_idx][i][j]*100
#bit_rate_stream=[113.78, 56.89, 37.92666667, 28.445, 22.756]
#bit_rate_LRU=[68.267,34.1335,22.75566667,17.06675,13.6534]
plt.figure(num=None, figsize=(3.5, 2.5), dpi=300, facecolor='w')
fig,axs = plt.subplots(1, 1)
plt.subplots_adjust(right = 0.98, top =0.88, bottom=0.1,left=0.1,wspace=0.3, hspace=0.5)
#fig,axs = plt.subplots(2, 2)
labels=["LRU addr_based","Stealthy Streamline"]
titles=["Xeon E5-2687W v2"] #,"Core i7-6700", "Core i5-11600K", "Xeon W-1350P"]
colors = ['b.-', 'go-']
colors_error_bar = ['b-', 'g-']
for machine_idx in range(1):
ax=axs#[int(machine_idx/2), machine_idx%2]
for channel_idx in range(2):
ax.plot(Error_stat_all[machine_idx][channel_idx][0], bit_rate_all[machine_idx][channel_idx],colors[channel_idx], linewidth=1, markersize=lsmarkersize, markeredgewidth=0, label=labels[channel_idx])
#error bar
bar_len_y=0.2
for i in range(5):
ax.plot([Error_stat_all[machine_idx][channel_idx][2][i],Error_stat_all[machine_idx][channel_idx][1][i]],[bit_rate_all[machine_idx][channel_idx][i], bit_rate_all[machine_idx][channel_idx][i]], colors_error_bar[channel_idx], linewidth=0.5)
ax.plot([Error_stat_all[machine_idx][channel_idx][2][i],Error_stat_all[machine_idx][channel_idx][2][i]],[bit_rate_all[machine_idx][channel_idx][i]-bar_len_y, bit_rate_all[machine_idx][channel_idx][i]+bar_len_y], colors_error_bar[channel_idx], linewidth=0.5)
ax.plot([Error_stat_all[machine_idx][channel_idx][1][i],Error_stat_all[machine_idx][channel_idx][1][i]],[bit_rate_all[machine_idx][channel_idx][i]-bar_len_y, bit_rate_all[machine_idx][channel_idx][i]+bar_len_y], colors_error_bar[channel_idx], linewidth=0.5)
ax.set_title(titles[machine_idx],fontdict = fontaxes_title) #plt.title('Hor. symmetric')
ax.set_xlim([0,25])
ax.set_ylim([0,12])
ax.set_xlabel("Error rate (%)",fontdict = fontaxes)
ax.set_ylabel('Bit Rate (Mbps)',fontdict = fontaxes)
#plt.tick_params(labelsize=6)
#plt.tight_layout()
if machine_idx ==0:
ax.legend(ncol=2, bbox_to_anchor=(2,1.4),prop={'size': 12})
#plt.show()
#plt.savefig('stealthy_streamline_error.pdf')
plt.savefig('stealthy_streamline_error.png')
| AutoCAT-main | src/stealthy_streamline/plot/plot_error_rate.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
#!/usr/bin/env python
Error_all = [[ 0 for i in range(5)] for j in range(5)]
for test_idx in range(5):
for bandwidth_idx in range (1,6):
filename = "Error_rate_{}_{}.txt".format(bandwidth_idx,test_idx)
f = open(filename, "r")
for line in f:
pass
last_line = line
error = float(line)
Error_all[bandwidth_idx-1][test_idx] = error
for i in range(5):
for j in range (5):
print(Error_all[i][j], end=" ")
print()
| AutoCAT-main | src/stealthy_streamline/process_error_rate_1thread/collect_stat.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
import argparse
import json
import os
import random
import signal
import sys
import time
import urllib
from torch import nn, optim
from torchvision import models, datasets, transforms
import torch
import torchvision
parser = argparse.ArgumentParser(description='Evaluate resnet50 features on ImageNet')
parser.add_argument('data', type=Path, metavar='DIR',
help='path to dataset')
parser.add_argument('pretrained', type=Path, metavar='FILE',
help='path to pretrained model')
parser.add_argument('--weights', default='freeze', type=str,
choices=('finetune', 'freeze'),
help='finetune or freeze resnet weights')
parser.add_argument('--train-percent', default=100, type=int,
choices=(100, 10, 1),
help='size of traing set in percent')
parser.add_argument('--workers', default=8, type=int, metavar='N',
help='number of data loader workers')
parser.add_argument('--epochs', default=100, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--batch-size', default=256, type=int, metavar='N',
help='mini-batch size')
parser.add_argument('--lr-backbone', default=0.0, type=float, metavar='LR',
help='backbone base learning rate')
parser.add_argument('--lr-classifier', default=0.3, type=float, metavar='LR',
help='classifier base learning rate')
parser.add_argument('--weight-decay', default=1e-6, type=float, metavar='W',
help='weight decay')
parser.add_argument('--print-freq', default=100, type=int, metavar='N',
help='print frequency')
parser.add_argument('--checkpoint-dir', default='./checkpoint/lincls/', type=Path,
metavar='DIR', help='path to checkpoint directory')
def main():
args = parser.parse_args()
if args.train_percent in {1, 10}:
args.train_files = urllib.request.urlopen(f'https://raw.githubusercontent.com/google-research/simclr/master/imagenet_subsets/{args.train_percent}percent.txt').readlines()
args.ngpus_per_node = torch.cuda.device_count()
if 'SLURM_JOB_ID' in os.environ:
signal.signal(signal.SIGUSR1, handle_sigusr1)
signal.signal(signal.SIGTERM, handle_sigterm)
# single-node distributed training
args.rank = 0
args.dist_url = f'tcp://localhost:{random.randrange(49152, 65535)}'
args.world_size = args.ngpus_per_node
torch.multiprocessing.spawn(main_worker, (args,), args.ngpus_per_node)
def main_worker(gpu, args):
args.rank += gpu
torch.distributed.init_process_group(
backend='nccl', init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
if args.rank == 0:
args.checkpoint_dir.mkdir(parents=True, exist_ok=True)
stats_file = open(args.checkpoint_dir / 'stats.txt', 'a', buffering=1)
print(' '.join(sys.argv))
print(' '.join(sys.argv), file=stats_file)
torch.cuda.set_device(gpu)
torch.backends.cudnn.benchmark = True
model = models.resnet50().cuda(gpu)
state_dict = torch.load(args.pretrained, map_location='cpu')
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
assert missing_keys == ['fc.weight', 'fc.bias'] and unexpected_keys == []
model.fc.weight.data.normal_(mean=0.0, std=0.01)
model.fc.bias.data.zero_()
if args.weights == 'freeze':
model.requires_grad_(False)
model.fc.requires_grad_(True)
classifier_parameters, model_parameters = [], []
for name, param in model.named_parameters():
if name in {'fc.weight', 'fc.bias'}:
classifier_parameters.append(param)
else:
model_parameters.append(param)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[gpu])
criterion = nn.CrossEntropyLoss().cuda(gpu)
param_groups = [dict(params=classifier_parameters, lr=args.lr_classifier)]
if args.weights == 'finetune':
param_groups.append(dict(params=model_parameters, lr=args.lr_backbone))
optimizer = optim.SGD(param_groups, 0, momentum=0.9, weight_decay=args.weight_decay)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs)
# automatically resume from checkpoint if it exists
if (args.checkpoint_dir / 'checkpoint.pth').is_file():
ckpt = torch.load(args.checkpoint_dir / 'checkpoint.pth',
map_location='cpu')
start_epoch = ckpt['epoch']
best_acc = ckpt['best_acc']
model.load_state_dict(ckpt['model'])
optimizer.load_state_dict(ckpt['optimizer'])
scheduler.load_state_dict(ckpt['scheduler'])
else:
start_epoch = 0
best_acc = argparse.Namespace(top1=0, top5=0)
# Data loading code
traindir = args.data / 'train'
valdir = args.data / 'val'
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(traindir, transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
val_dataset = datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]))
if args.train_percent in {1, 10}:
train_dataset.samples = []
for fname in args.train_files:
fname = fname.decode().strip()
cls = fname.split('_')[0]
train_dataset.samples.append(
(traindir / cls / fname, train_dataset.class_to_idx[cls]))
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
kwargs = dict(batch_size=args.batch_size // args.world_size, num_workers=args.workers, pin_memory=True)
train_loader = torch.utils.data.DataLoader(train_dataset, sampler=train_sampler, **kwargs)
val_loader = torch.utils.data.DataLoader(val_dataset, **kwargs)
start_time = time.time()
for epoch in range(start_epoch, args.epochs):
# train
if args.weights == 'finetune':
model.train()
elif args.weights == 'freeze':
model.eval()
else:
assert False
train_sampler.set_epoch(epoch)
for step, (images, target) in enumerate(train_loader, start=epoch * len(train_loader)):
output = model(images.cuda(gpu, non_blocking=True))
loss = criterion(output, target.cuda(gpu, non_blocking=True))
optimizer.zero_grad()
loss.backward()
optimizer.step()
if step % args.print_freq == 0:
torch.distributed.reduce(loss.div_(args.world_size), 0)
if args.rank == 0:
pg = optimizer.param_groups
lr_classifier = pg[0]['lr']
lr_backbone = pg[1]['lr'] if len(pg) == 2 else 0
stats = dict(epoch=epoch, step=step, lr_backbone=lr_backbone,
lr_classifier=lr_classifier, loss=loss.item(),
time=int(time.time() - start_time))
print(json.dumps(stats))
print(json.dumps(stats), file=stats_file)
# evaluate
model.eval()
if args.rank == 0:
top1 = AverageMeter('Acc@1')
top5 = AverageMeter('Acc@5')
with torch.no_grad():
for images, target in val_loader:
output = model(images.cuda(gpu, non_blocking=True))
acc1, acc5 = accuracy(output, target.cuda(gpu, non_blocking=True), topk=(1, 5))
top1.update(acc1[0].item(), images.size(0))
top5.update(acc5[0].item(), images.size(0))
best_acc.top1 = max(best_acc.top1, top1.avg)
best_acc.top5 = max(best_acc.top5, top5.avg)
stats = dict(epoch=epoch, acc1=top1.avg, acc5=top5.avg, best_acc1=best_acc.top1, best_acc5=best_acc.top5)
print(json.dumps(stats))
print(json.dumps(stats), file=stats_file)
# sanity check
if args.weights == 'freeze':
reference_state_dict = torch.load(args.pretrained, map_location='cpu')
model_state_dict = model.module.state_dict()
for k in reference_state_dict:
assert torch.equal(model_state_dict[k].cpu(), reference_state_dict[k]), k
scheduler.step()
if args.rank == 0:
state = dict(
epoch=epoch + 1, best_acc=best_acc, model=model.state_dict(),
optimizer=optimizer.state_dict(), scheduler=scheduler.state_dict())
torch.save(state, args.checkpoint_dir / 'checkpoint.pth')
def handle_sigusr1(signum, frame):
os.system(f'scontrol requeue {os.getenv("SLURM_JOB_ID")}')
exit()
def handle_sigterm(signum, frame):
pass
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
| barlowtwins-main | evaluate.py |