# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import argparse
import math
import json
import re
from datetime import datetime
from typing import Any, Dict, Optional, Union
import matplotlib.pyplot as plt
import numpy as np
JSON_REGEX = re.compile("{.+}")
def parse_json(line: str) -> Optional[Dict[str, Any]]:
m = JSON_REGEX.search(line)
return None if m is None else json.loads(m.group())
def get_value(val: Union[float, Dict[str, float]]) -> float:
return val["mean"] if isinstance(val, dict) else val
def plot(log_file: str,
phase: str,
xkey: str,
ykey: str,
fig_file: Optional[str] = None) -> None:
x = []
y = []
with open(log_file, "r") as f:
line = f.readline()
# cfg = parse_json(line)
for line in f:
stats = parse_json(line)
if stats is None:
continue
cur_phase = stats.get("phase", None)
if cur_phase == phase:
x.append(get_value(stats[xkey]))
y.append(get_value(stats[ykey]))
# y.append(math.log(get_value(stats[ykey])))
# y.append(get_value(stats["gap"]) / get_value(stats["episode_length"]))
x = np.array(x)
y = np.array(y)
plt.plot(x, y, label=ykey)
plt.xlabel(xkey)
plt.ylabel(ykey)
plt.legend()
if fig_file is not None:
plt.savefig(fig_file)
else:
plt.show()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--log_file", type=str, help="log file to plot")
parser.add_argument("--phase",
default="Eval",
type=str,
help="phase to plot.")
parser.add_argument("--xkey",
default="epoch",
type=str,
help="x values to plot.")
parser.add_argument("--ykey",
default="episode_return",
type=str,
help="y values to plot.")
parser.add_argument("--fig_file",
default=None,
type=str,
help="figure file to save.")
flags = parser.parse_intermixed_args()
plot(flags.log_file, flags.phase, flags.xkey, flags.ykey, flags.fig_file)
if __name__ == "__main__":
main()
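# Example invocation (hypothetical file names):
#   python draw_figure.py --log_file ppo_attack.log --phase Eval \
#       --xkey epoch --ykey episode_return --fig_file return.png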
| AutoCAT-main | src/cyclone_data/draw_figure.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
from typing import Any, Dict, Optional
import torch
import torch.nn as nn
from cache_ppo_mlp_model import CachePPOMlpModel
from cache_ppo_lstm_model import CachePPOLstmModel
from cache_ppo_transformer_model import CachePPOTransformerModel
def get_model(cfg: Dict[str, Any],
window_size: int,
output_dim: int,
checkpoint: Optional[str] = None) -> nn.Module:
cfg.args.step_dim = window_size
if "window_size" in cfg.args:
cfg.args.window_size = window_size
cfg.args.output_dim = output_dim
model = None
if cfg.type == "mlp":
model = CachePPOMlpModel(**cfg.args)
elif cfg.type == "lstm":
model = CachePPOLstmModel(**cfg.args)
elif cfg.type == "transformer":
model = CachePPOTransformerModel(**cfg.args)
if model is not None and checkpoint is not None:
params = torch.load(checkpoint)
model.load_state_dict(params)
return model
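# Illustrative sketch (not part of the original file): get_model() expects an
# OmegaConf-style config with a `type` field and an `args` sub-config whose
# keys match the chosen model's constructor (here mirroring
# CachePPOTransformerModel). The concrete values below are hypothetical;
# step_dim and output_dim are overwritten by get_model() itself.
def _example_get_model() -> nn.Module:
    from omegaconf import OmegaConf

    cfg = OmegaConf.create({
        "type": "transformer",
        "args": {
            "latency_dim": 3,
            "victim_acc_dim": 2,
            "action_dim": 16,
            "step_dim": 0,       # overwritten with window_size
            "action_embed_dim": 8,
            "step_embed_dim": 8,
            "hidden_dim": 128,
            "output_dim": 0,     # overwritten with the env's action count
        },
    })
    return get_model(cfg, window_size=64, output_dim=16)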
| AutoCAT-main | src/rlmeta/model_utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import logging
from typing import Dict, Optional, Sequence
import hydra
from omegaconf import DictConfig, OmegaConf
import numpy as np
import torch
import torch.nn
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.types import Action, TimeStep
from rlmeta.envs.env import Env
from rlmeta.utils.stats_dict import StatsDict
import model_utils
from cache_env_wrapper import CacheEnvCycloneWrapperFactory
def batch_obs(timestep: TimeStep) -> TimeStep:
obs, reward, terminated, truncated, info = timestep
return TimeStep(obs.unsqueeze(0), reward, terminated, truncated, info)
def unbatch_action(action: Action) -> Action:
act, info = action
act.squeeze_(0)
info = nested_utils.map_nested(lambda x: x.squeeze(0), info)
return Action(act, info)
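# Illustrative sketch (not part of the original file): the local environment
# returns a single unbatched observation, while the agent's model expects a
# leading batch dimension, so batch_obs() adds it and unbatch_action() strips
# it again. The 5-field TimeStep layout follows this file; the concrete sizes
# below are hypothetical.
def _example_batching() -> None:
    obs = torch.zeros(64, 4)  # hypothetical (window_size, obs_dim) observation
    ts = TimeStep(obs, 0.0, False, False, {})
    batched_obs, *_ = batch_obs(ts)
    assert batched_obs.shape == (1, 64, 4)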
def run_loop(env: Env,
agent: PPOAgent,
victim_addr: int = -1) -> Dict[str, float]:
episode_length = 0
episode_return = 0.0
num_guess = 0
num_correct = 0
cyclone_attack = 0
if victim_addr == -1:
timestep = env.reset()
else:
timestep = env.reset(victim_address=victim_addr)
agent.observe_init(timestep)
while not (timestep.terminated or timestep.truncated):
# Model server requires a batch_dim, so unsqueeze here for local runs.
timestep = batch_obs(timestep)
action = agent.act(timestep)
# Unbatch the action.
action = unbatch_action(action)
timestep = env.step(action)
agent.observe(action, timestep)
episode_length += 1
episode_return += timestep.reward
if "guess_correct" in timestep.info:
num_guess += 1
if timestep.info["guess_correct"]:
num_correct += 1
cyclone_attack += timestep.info.get("cyclone_attack", 0)
metrics = {
"episode_length": episode_length,
"episode_return": episode_return,
"num_guess": num_guess,
"num_correct": num_correct,
"correct_rate": num_correct / num_guess,
"bandwith": num_guess / episode_length,
"cyclone_attack": cyclone_attack,
}
return metrics
def run_loops(env: Env,
agent: PPOAgent,
num_episodes: int = -1,
seed: int = 0,
reset_cache_state: bool = False) -> StatsDict:
# env.seed(seed)
env.reset(seed=seed)
metrics = StatsDict()
num_guess = 0
num_correct = 0
tot_length = 0
if num_episodes == -1:
start = env.env.victim_address_min
stop = env.env.victim_address_max + 1 + int(
env.env._env.allow_empty_victim_access)
for victim_addr in range(start, stop):
cur_metrics = run_loop(env, agent, victim_addr=victim_addr)
num_guess += cur_metrics["num_guess"]
num_correct += cur_metrics["num_correct"]
tot_length += cur_metrics["episode_length"]
metrics.extend(cur_metrics)
else:
for _ in range(num_episodes):
cur_metrics = run_loop(env, agent, victim_addr=-1)
num_guess += cur_metrics["num_guess"]
num_correct += cur_metrics["num_correct"]
tot_length += cur_metrics["episode_length"]
metrics.extend(cur_metrics)
metrics.add("overall_correct_rate", num_correct / num_guess)
metrics.add("overall_bandwith", num_guess / tot_length)
return metrics
@hydra.main(config_path="./config", config_name="sample_cchunter")
def main(cfg):
# Create env
cfg.env_config.verbose = 1
env_fac = CacheEnvCycloneWrapperFactory(
OmegaConf.to_container(cfg.env_config))
env = env_fac(index=0)
# Load model
model = model_utils.get_model(cfg.model_config, cfg.env_config.window_size,
env.action_space.n, cfg.checkpoint)
model.eval()
# Create agent
agent = PPOAgent(model, deterministic_policy=cfg.deterministic_policy)
# Run loops
metrics = run_loops(env, agent, cfg.num_episodes, cfg.seed)
logging.info("\n\n" + metrics.table(info="sample") + "\n")
if __name__ == "__main__":
main()
| AutoCAT-main | src/rlmeta/sample_cyclone.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import copy
import logging
import os
import time
import hydra
from omegaconf import DictConfig, OmegaConf
import torch
import torch.multiprocessing as mp
import rlmeta.utils.hydra_utils as hydra_utils
import rlmeta.utils.random_utils as random_utils
import rlmeta.utils.remote_utils as remote_utils
from rlmeta.agents.agent import AgentFactory
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.controller import Phase, Controller
from rlmeta.core.loop import LoopList, ParallelLoop
from rlmeta.core.model import ModelVersion, RemotableModelPool
from rlmeta.core.model import make_remote_model, wrap_downstream_model
from rlmeta.core.replay_buffer import ReplayBuffer, make_remote_replay_buffer
from rlmeta.core.server import Server, ServerList
from rlmeta.core.callbacks import EpisodeCallbacks
from rlmeta.core.types import Action, TimeStep
from rlmeta.samplers import UniformSampler
from rlmeta.storage import TensorCircularBuffer
from rlmeta.utils.optimizer_utils import make_optimizer
import model_utils
from cache_env_wrapper import CacheEnvWrapperFactory
from metric_callbacks import MetricCallbacks
@hydra.main(config_path="./config", config_name="ppo_attack")
def main(cfg):
if cfg.seed is not None:
random_utils.manual_seed(cfg.seed)
print(f"workding_dir = {os.getcwd()}")
my_callbacks = MetricCallbacks()
logging.info(hydra_utils.config_to_json(cfg))
env_fac = CacheEnvWrapperFactory(OmegaConf.to_container(cfg.env_config))
env = env_fac(index=0)
train_model = model_utils.get_model(
cfg.model_config, cfg.env_config.window_size,
env.action_space.n).to(cfg.train_device)
infer_model = copy.deepcopy(train_model).to(cfg.infer_device)
infer_model.eval()
optimizer = make_optimizer(train_model.parameters(), **cfg.optimizer)
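# The training job is split across three rlmeta servers: the model server
# publishes the latest inference weights, the replay-buffer server collects
# rollouts from the training loop, and the controller server switches the
# loops between the TRAIN and EVAL phases. The training agent below holds
# downstream/remote handles to all three, while the rollout loops only get
# remote handles.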
ctrl = Controller()
rb = ReplayBuffer(TensorCircularBuffer(cfg.replay_buffer_size),
UniformSampler())
m_server = Server(cfg.m_server_name, cfg.m_server_addr)
r_server = Server(cfg.r_server_name, cfg.r_server_addr)
c_server = Server(cfg.c_server_name, cfg.c_server_addr)
m_server.add_service(RemotableModelPool(infer_model, seed=cfg.seed))
r_server.add_service(rb)
c_server.add_service(ctrl)
servers = ServerList([m_server, r_server, c_server])
a_model = wrap_downstream_model(train_model, m_server)
t_model = make_remote_model(infer_model, m_server)
e_model = make_remote_model(infer_model, m_server)
a_ctrl = remote_utils.make_remote(ctrl, c_server)
t_ctrl = remote_utils.make_remote(ctrl, c_server)
e_ctrl = remote_utils.make_remote(ctrl, c_server)
a_rb = make_remote_replay_buffer(rb, r_server, prefetch=cfg.prefetch)
t_rb = make_remote_replay_buffer(rb, r_server)
agent = PPOAgent(a_model,
replay_buffer=a_rb,
controller=a_ctrl,
optimizer=optimizer,
batch_size=cfg.batch_size,
learning_starts=cfg.get("learning_starts", None),
entropy_coeff=cfg.get("entropy_coeff", 0.01),
model_push_period=cfg.model_push_period)
t_agent_fac = AgentFactory(PPOAgent, t_model, replay_buffer=t_rb)
e_agent_fac = AgentFactory(PPOAgent, e_model, deterministic_policy=True)
t_loop = ParallelLoop(env_fac,
t_agent_fac,
t_ctrl,
running_phase=Phase.TRAIN,
should_update=True,
num_rollouts=cfg.num_train_rollouts,
num_workers=cfg.num_train_workers,
seed=cfg.seed,
episode_callbacks=my_callbacks)
e_loop = ParallelLoop(env_fac,
e_agent_fac,
e_ctrl,
running_phase=Phase.EVAL,
should_update=False,
num_rollouts=cfg.num_eval_rollouts,
num_workers=cfg.num_eval_workers,
seed=(None if cfg.seed is None else cfg.seed +
cfg.num_train_rollouts),
episode_callbacks=my_callbacks)
loops = LoopList([t_loop, e_loop])
servers.start()
loops.start()
agent.connect()
start_time = time.perf_counter()
for epoch in range(cfg.num_epochs):
stats = agent.train(cfg.steps_per_epoch)
cur_time = time.perf_counter() - start_time
info = f"T Epoch {epoch}"
if cfg.table_view:
logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
else:
logging.info(
stats.json(info, phase="Train", epoch=epoch, time=cur_time))
time.sleep(1)
stats = agent.eval(cfg.num_eval_episodes, keep_training_loops=True)
cur_time = time.perf_counter() - start_time
info = f"E Epoch {epoch}"
if cfg.table_view:
logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
else:
logging.info(
stats.json(info, phase="Eval", epoch=epoch, time=cur_time))
torch.save(train_model.state_dict(), f"ppo_agent-{epoch}.pth")
time.sleep(1)
loops.terminate()
servers.terminate()
if __name__ == "__main__":
mp.set_start_method("spawn")
main()
| AutoCAT-main | src/rlmeta/train_ppo_attack.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# Author: Mulong Luo
# date: 2022.6.28
# usage: train the SVM classifier of Cyclone by feeding
# the data from TextbookAgent as malicious traces
# and SPEC traces as benign traces
import logging
from typing import Dict
import hydra
import torch
import torch.nn
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# sys.path.append("/home/mulong/RL_SCA/src/CacheSimulator/src")
import rlmeta.utils.nested_utils as nested_utils
import numpy as np
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.types import Action
from rlmeta.envs.env import Env
from rlmeta.utils.stats_dict import StatsDict
from textbook_attacker import TextbookAgent
# from cache_guessing_game_env_impl import CacheGuessingGameEnv
# from cchunter_wrapper import CCHunterWrapper
from cache_env_wrapper import CacheEnvWrapperFactory, CacheEnvCycloneWrapperFactory
from cyclone_wrapper import CycloneWrapper
def signal_handler(sig, frame):
print('You pressed Ctrl+C!')
checkpoint = trainer.save()
print("checkpoint saved at", checkpoint)
sys.exit(0)
class SpecAgent():
def __init__(self, env_config, trace_file):
self.local_step = 0
self.lat = []
self.no_prime = False # set to true after first prime
if "cache_configs" in env_config:
#self.logger.info('Load config from JSON')
self.configs = env_config["cache_configs"]
self.num_ways = self.configs['cache_1']['associativity']
self.cache_size = self.configs['cache_1']['blocks']
attacker_addr_s = env_config["attacker_addr_s"] if "attacker_addr_s" in env_config else 4
attacker_addr_e = env_config["attacker_addr_e"] if "attacker_addr_e" in env_config else 7
victim_addr_s = env_config["victim_addr_s"] if "victim_addr_s" in env_config else 0
victim_addr_e = env_config["victim_addr_e"] if "victim_addr_e" in env_config else 3
flush_inst = env_config["flush_inst"] if "flush_inst" in env_config else False
self.allow_empty_victim_access = env_config["allow_empty_victim_access"] if "allow_empty_victim_access" in env_config else False
assert(self.num_ways == 1) # currently only supports direct-mapped caches
assert(flush_inst == False) # flush instructions are not allowed
assert(attacker_addr_e - attacker_addr_s == victim_addr_e - victim_addr_s) # attacker and victim address ranges must have the same size
# the attacker and victim address ranges must be adjacent and must not overlap
assert( ( attacker_addr_e + 1 == victim_addr_s ) or ( victim_addr_e + 1 == attacker_addr_s ) )
assert(self.allow_empty_victim_access == False)
self.trace_file = trace_file
# load the benign SPEC traces
self.fp = open(self.trace_file)
line = self.fp.readline().split()
self.domain_id_0 = line[0]
self.domain_id_1 = line[0]
line = self.fp.readline().split()
while len(line) > 0:
self.domain_id_1 = line[0]
if self.domain_id_1 != self.domain_id_0:
break
line = self.fp.readline().split()
self.fp.close()
self.fp = open(self.trace_file)
def act(self, timestep):
info = {}
line = self.fp.readline().split()
if len(line) == 0:
action = self.cache_size
addr = 0#addr % self.cache_size
info={"file_done" : True}
return action, info
domain_id = line[0]
cache_line_size = 8
addr = int( int(line[3], 16) / cache_line_size )
print(addr)
if domain_id == self.domain_id_0: # attacker access
action = addr % self.cache_size
info ={}
else: # domain_id = self.domain_id_1: # victim access
action = self.cache_size
addr = addr % self.cache_size
info={"reset_victim_addr": True, "victim_addr": addr}
return action, info
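# Illustrative sketch (not part of the original file): SpecAgent.act() assumes
# each SPEC trace line is whitespace-separated with the domain/core id in
# field 0 and a hexadecimal byte address in field 3, which is mapped to a
# cache line with the cache_line_size of 8 used above. The trace line below is
# hypothetical (the middle fields are placeholders).
def _example_parse_trace_line() -> None:
    line = "0 R 12 0x1a3f8".split()
    domain_id = line[0]                 # "0"
    cache_line = int(line[3], 16) // 8  # byte address -> cache line index
    assert domain_id == "0" and cache_line == 0x1a3f8 // 8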
@hydra.main(config_path="./config", config_name="sample_cyclone")
def main(cfg):
repeat = 80000
trace_file = '/home/mulong/remix3.txt'
svm_data_path = 'autocat.svm.txt' #trace_file + '.svm.txt'
#create env
cfg.env_config['verbose'] = 1
# generate dataset for malicious traces
cfg.env_config['cyclone_collect_data'] = True
cfg.env_config['cyclone_malicious_trace'] = True
env_fac = CacheEnvCycloneWrapperFactory(cfg.env_config)
env = env_fac(index=0)
env.svm_data_path = svm_data_path
fp = open(svm_data_path,'w')
fp.close()
agent = TextbookAgent(cfg.env_config)
episode_length = 0
episode_return = 0.0
for i in range(repeat):
timestep = env.reset()
num_guess = 0
num_correct = 0
while not timestep.done:
# Model server requires a batch_dim, so unsqueeze here for local runs.
timestep.observation.unsqueeze_(0)
action, info = agent.act(timestep)
action = Action(action, info)
# unbatch the action
victim_addr = env._env.victim_address
timestep = env.step(action)
obs, reward, done, info = timestep
if "guess_correct" in info:
num_guess += 1
if info["guess_correct"]:
print(f"victim_address! {victim_addr} correct guess! {info['guess_correct']}")
num_correct += 1
else:
correct = False
agent.observe(action, timestep)
episode_length += 1
episode_return += timestep.reward
env.reset(save_data=True) # save data to file
# generate benign traces
'''
cfg.env_config['cyclone_collect_data'] = True
cfg.env_config['cyclone_malicious_trace'] = False
env_fac = CacheEnvCycloneWrapperFactory(cfg.env_config)
env = env_fac(index=0)
print("mix.txt opened!")
agent = SpecAgent(cfg.env_config, trace_file)
episode_length = 0
episode_return = 0.0
file_done = False
# generate dataset for benign traces
iter = 0
while not file_done:
#for i in range(repeat):
timestep = env.reset()
num_guess = 0
num_correct = 0
done = False
count = 0
iter += 1
while not done:
# Model server requires a batch_dim, so unsqueeze here for local runs.
timestep.observation.unsqueeze_(0)
action, info = agent.act(timestep)
if "file_done" in info:
file_done = True
break
if "victim_addr" in info:
print(info["victim_addr"])
#env.set_victim(info["victim_addr"])
env._env.set_victim(info["victim_addr"])
action = Action(action, info)
else:
action = Action(action, info)
# unbatch the action
victim_addr = env._env.victim_address
timestep = env.step(action)
obs, reward, done, info = timestep
count += 1
#if count % 10 == 0:
#action = Action(agent.cache_size * 2, {})
#timestep = env.step(action)
#obs, reward, done, info = timestep
if count == 160:
action = Action(agent.cache_size * 2, {})
timestep = env.step(action)
obs, reward, done, info = timestep
done = True
count = 0
#if "guess_correct" in info:
# num_guess += 1
# if info["guess_correct"]:
# print(f"victim_address! {victim_addr} correct guess! {info['guess_correct']}")
# num_correct += 1
# else:
# correct = False
#agent.observe(action, timestep)
episode_length += 1
episode_return += timestep.reward
env.reset(save_data=True) # save data to file
'''
#cfg.env_config['cyclone_malicious_trace'] = False
#env_fac = CacheEnvCCHunterWrapperFactory(cfg.env_config)
#env = env_fac(index=0)
if __name__ == "__main__":
main()
| AutoCAT-main | src/rlmeta/cyclone_svm_trainer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import logging
import os
import sys
from typing import Dict, Optional, Sequence, Union
import hydra
from omegaconf import DictConfig, OmegaConf
import numpy as np
import torch
import torch.nn
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.types import Action, TimeStep
from rlmeta.envs.env import Env
from rlmeta.utils.stats_dict import StatsDict
import model_utils
from cache_env_wrapper import CacheEnvCCHunterWrapperFactory
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from autocorrelation import autocorrelation
def batch_obs(timestep: TimeStep) -> TimeStep:
obs, reward, terminated, truncated, info = timestep
return TimeStep(obs.unsqueeze(0), reward, terminated, truncated, info)
def unbatch_action(action: Action) -> Action:
act, info = action
act.squeeze_(0)
info = nested_utils.map_nested(lambda x: x.squeeze(0), info)
return Action(act, info)
def max_autocorr(data: Sequence[int], n: int) -> float:
n = min(len(data), n)
x = np.asarray(data)
corr = [autocorrelation(x, i) for i in range(n)]
corr = np.asarray(corr[1:])
corr = np.nan_to_num(corr)
return corr.max()
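# Illustrative sketch (not part of the original file): run_loop() below reduces
# the wrapper's cc_hunter_history to its largest autocorrelation peak and
# compares it against one or more detection thresholds. The history values and
# thresholds here are hypothetical.
def _example_detection() -> None:
    history = [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]  # strongly periodic conflicts
    peak = max_autocorr(history, n=8)
    detected = [peak >= t for t in (0.5, 0.75, 0.9)]
    print(f"max autocorrelation = {peak:.3f}, detected = {detected}")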
def run_loop(
env: Env,
agent: PPOAgent,
victim_addr: int = -1,
threshold: Union[float, Sequence[float]] = 0.75) -> Dict[str, float]:
episode_length = 0
episode_return = 0.0
num_guess = 0
num_correct = 0
if victim_addr == -1:
timestep = env.reset()
else:
timestep = env.reset(victim_address=victim_addr)
agent.observe_init(timestep)
while not (timestep.terminated or timestep.truncated):
# Model server requires a batch_dim, so unsqueeze here for local runs.
timestep = batch_obs(timestep)
action = agent.act(timestep)
# Unbatch the action.
action = unbatch_action(action)
timestep = env.step(action)
agent.observe(action, timestep)
episode_length += 1
episode_return += timestep.reward
if "guess_correct" in timestep.info:
num_guess += 1
if timestep.info["guess_correct"]:
num_correct += 1
autocorr_n = (env.env.env._env.cache_size *
env.env.env.cc_hunter_check_length)
max_ac = max_autocorr(env.env.cc_hunter_history, autocorr_n)
if isinstance(threshold, float):
threshold = (threshold, )
detect = [max_ac >= t for t in threshold]
metrics = {
"episode_length": episode_length,
"episode_return": episode_return,
"num_guess": num_guess,
"num_correct": num_correct,
"correct_rate": num_correct / num_guess,
"bandwith": num_guess / episode_length,
"max_autocorr": max_ac,
}
for t, d in zip(threshold, detect):
metrics[f"detect_rate-{t}"] = d
return metrics
def run_loops(env: Env,
agent: PPOAgent,
num_episodes: int = -1,
seed: int = 0,
reset_cache_state: bool = False,
threshold: Union[float, Sequence[float]] = 0.75) -> StatsDict:
# env.seed(seed)
env.reset(seed=seed)
metrics = StatsDict()
num_guess = 0
num_correct = 0
tot_length = 0
if num_episodes == -1:
start = env.env.victim_address_min
stop = env.env.victim_address_max + 1 + int(
env.env._env.allow_empty_victim_access)
for victim_addr in range(start, stop):
cur_metrics = run_loop(env,
agent,
victim_addr=victim_addr,
threshold=threshold)
num_guess += cur_metrics["num_guess"]
num_correct += cur_metrics["num_correct"]
tot_length += cur_metrics["episode_length"]
metrics.extend(cur_metrics)
else:
for _ in range(num_episodes):
cur_metrics = run_loop(env,
agent,
victim_addr=-1,
threshold=threshold)
num_guess += cur_metrics["num_guess"]
num_correct += cur_metrics["num_correct"]
tot_length += cur_metrics["episode_length"]
metrics.extend(cur_metrics)
metrics.add("overall_correct_rate", num_correct / num_guess)
metrics.add("overall_bandwith", num_guess / tot_length)
return metrics
@hydra.main(config_path="./config", config_name="sample_cchunter")
def main(cfg):
# Create env
cfg.env_config.verbose = 1
env_fac = CacheEnvCCHunterWrapperFactory(
OmegaConf.to_container(cfg.env_config))
env = env_fac(index=0)
# Load model
model = model_utils.get_model(cfg.model_config, cfg.env_config.window_size,
env.action_space.n, cfg.checkpoint)
model.eval()
# Create agent
agent = PPOAgent(model, deterministic_policy=cfg.deterministic_policy)
# Run loops
metrics = run_loops(env,
agent,
cfg.num_episodes,
cfg.seed,
threshold=cfg.threshold)
logging.info("\n\n" + metrics.table(info="sample") + "\n")
if __name__ == "__main__":
main()
| AutoCAT-main | src/rlmeta/sample_cchunter.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
from rlmeta.core.callbacks import EpisodeCallbacks
from rlmeta.core.types import Action, TimeStep
class MetricCallbacks(EpisodeCallbacks):
def __init__(self):
super().__init__()
def on_episode_step(self, index: int, step: int, action: Action,
timestep: TimeStep) -> None:
info = timestep.info
if info["is_guess"]:
self._custom_metrics["correct_rate"] = float(info["guess_correct"])
class CCHunterMetricCallbacks(EpisodeCallbacks):
def __init__(self):
super().__init__()
def on_episode_start(self, index: int) -> None:
self.tot_guess = 0
self.acc_guess = 0
def on_episode_step(self, index: int, step: int, action: Action,
timestep: TimeStep) -> None:
info = timestep.info
if info["is_guess"]:
self.tot_guess += 1
self.acc_guess += int(info["guess_correct"])
if timestep.terminated or timestep.truncated:
self._custom_metrics["total_guess"] = self.tot_guess
if self.tot_guess > 0:
self._custom_metrics[
"correct_rate"] = self.acc_guess / self.tot_guess
if "cc_hunter_attack" in info:
self._custom_metrics["cc_hunter_attack"] = float(
info["cc_hunter_attack"])
class CycloneMetricCallbacks(EpisodeCallbacks):
def __init__(self):
super().__init__()
def on_episode_start(self, index: int) -> None:
self.tot_guess = 0
self.acc_guess = 0
def on_episode_step(self, index: int, step: int, action: Action,
timestep: TimeStep) -> None:
info = timestep.info
if info["is_guess"]:
self.tot_guess += 1
self.acc_guess += int(info["guess_correct"])
if timestep.terminated or timestep.truncated:
self._custom_metrics["total_guess"] = self.tot_guess
if self.tot_guess > 0:
self._custom_metrics[
"correct_rate"] = self.acc_guess / self.tot_guess
if "cyclone_attack" in info:
self._custom_metrics["cyclone_attack"] = float(
info["cyclone_attack"])
| AutoCAT-main | src/rlmeta/metric_callbacks.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import os
import sys
from typing import Dict, List, Tuple
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import rlmeta.core.remote as remote
from rlmeta.agents.ppo.ppo_model import PPOModel
class CachePPOTransformerModel(PPOModel):
def __init__(self,
latency_dim: int,
victim_acc_dim: int,
action_dim: int,
step_dim: int,
action_embed_dim: int,
step_embed_dim: int,
hidden_dim: int,
output_dim: int,
num_layers: int = 1) -> None:
super().__init__()
self.latency_dim = latency_dim
self.victim_acc_dim = victim_acc_dim
self.action_dim = action_dim
self.step_dim = step_dim
# self.window_size = window_size
self.action_embed_dim = action_embed_dim
self.step_embed_dim = step_embed_dim
self.input_dim = (self.latency_dim + self.victim_acc_dim +
self.action_embed_dim + self.step_embed_dim)
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.num_layers = num_layers
self.action_embed = nn.Embedding(self.action_dim,
self.action_embed_dim)
self.step_embed = nn.Embedding(self.step_dim, self.step_embed_dim)
self.linear_i = nn.Linear(self.input_dim, self.hidden_dim)
# self.linear_o = nn.Linear(self.hidden_dim * self.window_size,
# self.hidden_dim)
encoder_layer = nn.TransformerEncoderLayer(d_model=self.hidden_dim,
nhead=8,
dropout=0.0)
self.encoder = nn.TransformerEncoder(encoder_layer, self.num_layers)
self.linear_a = nn.Linear(self.hidden_dim, self.output_dim)
self.linear_v = nn.Linear(self.hidden_dim, 1)
self._device = None
def make_one_hot(self, src: torch.Tensor, num_classes: int,
mask: torch.Tensor) -> torch.Tensor:
# mask = (src == -1)
src = src.masked_fill(mask, 0)
ret = F.one_hot(src, num_classes)
return ret.masked_fill(mask.unsqueeze(-1), 0.0)
def make_embedding(self, src: torch.Tensor, embed: nn.Embedding,
mask: torch.Tensor) -> torch.Tensor:
# mask = (src == -1)
src = src.masked_fill(mask, 0)
ret = embed(src)
return ret.masked_fill(mask.unsqueeze(-1), 0.0)
def forward(self, obs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
obs = obs.to(torch.int64)
assert obs.dim() == 3
# batch_size = obs.size(0)
l, v, act, stp = torch.unbind(obs, dim=-1)
mask = (stp == -1)
l = self.make_one_hot(l, self.latency_dim, mask)
v = self.make_one_hot(v, self.victim_acc_dim, mask)
act = self.make_embedding(act, self.action_embed, mask)
stp = self.make_embedding(stp, self.step_embed, mask)
x = torch.cat((l, v, act, stp), dim=-1)
x = self.linear_i(x)
x = x.transpose(0, 1).contiguous()
h = self.encoder(x)
h = h.mean(dim=0)
p = self.linear_a(h)
logpi = F.log_softmax(p, dim=-1)
v = self.linear_v(h)
return logpi, v
@remote.remote_method(batch_size=128)
def act(
self, obs: torch.Tensor, deterministic_policy: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
with torch.no_grad():
logpi, v = self.forward(obs)
greedy_action = logpi.argmax(-1, keepdim=True)
sample_action = logpi.exp().multinomial(1, replacement=True)
action = torch.where(deterministic_policy, greedy_action,
sample_action)
logpi = logpi.gather(dim=-1, index=action)
return action, logpi, v
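# Illustrative sketch (not part of the original file): forward() expects an
# integer observation tensor of shape (batch, window_size, 4) whose last axis
# holds (latency, victim_access, action, step), with -1 marking empty history
# slots. The dimensions below are hypothetical.
def _example_forward() -> None:
    model = CachePPOTransformerModel(latency_dim=3,
                                     victim_acc_dim=2,
                                     action_dim=16,
                                     step_dim=64,
                                     action_embed_dim=8,
                                     step_embed_dim=8,
                                     hidden_dim=128,
                                     output_dim=16)
    obs = torch.full((2, 64, 4), -1, dtype=torch.int64)  # all-padding history
    obs[:, 0] = torch.tensor([1, 0, 5, 0])  # one real entry per sample
    logpi, v = model(obs)
    assert logpi.shape == (2, 16) and v.shape == (2, 1)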
| AutoCAT-main | src/rlmeta/cache_ppo_transformer_model.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
fontaxes = {
'family': 'Arial',
'color': 'black',
'weight': 'bold',
'size': 8,
}
fontaxes_title = {
'family': 'Arial',
'color': 'black',
'weight': 'bold',
'size': 9,
}
lsmarkersize = 2.5
lslinewidth = 0.6
plt.figure(num=None, figsize=(3.5, 1.5), dpi=200, facecolor='w')
plt.subplots_adjust(right = 0.99, top =0.90, bottom=0.24, left=0.15, wspace=0.2, hspace=0.2)
# Without CCHunter, generated with python sample_cchunter.py checkpoint=/media/research/yl3469/RLSCA/CacheSimulator/data/guess97_cchunter100/ppo_agent-53.pth
# trace4 = [0, 1, 0, 2, 1, 0, 0, 0, 0, 0, 1, 2, 1, 2]
# trace3 = [0, 0, 2, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 1, 2, 0, 0, 0, 1, 2, 2, 0, 0, 0, 0, 0, 2]
# trace2 = [1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 2]
# trace1 = [1, 2, 1, 2, 0, 0, 0, 1, 2, 0, 1, 2]
# trace4= [1, 1, 2, 0, 1, 2, 0, 2, 1, 2, 0, 2, 0, 0, 1, 2, 1, 2, 0, 2, 0, 1, 2, 0, 0, 2, 1, 2, 0, 0, 1, 2, 1, 2, 0, 2, 0, 1, 2, 1, 2, 0, 2, 1, 2, 1, 2, 0, 0, 0, 0, 2, 0, 2, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 1, 2, 1, 2, 1, 2, 0, 2, 0, 1, 2, 0, 0, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]
# trace3= [1, 0, 2, 0, 0, 0, 2, 0, 2, 0, 1, 2, 1, 2, 0, 2, 1, 2, 1, 2, 0, 0, 1, 2, 0, 2, 0, 2, 0, 1, 2, 0, 0, 2, 1, 2, 0, 1, 2, 1, 2, 0, 2, 0, 0, 0, 2, 1, 0, 2, 0, 0, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 0, 2, 0, 0, 1, 2, 0, 2, 1, 2, 0, 1, 2, 2, 0, 0, 2]
# trace2= [1, 0, 2, 1, 2, 0, 2, 1, 2, 0, 2, 0, 1, 2, 0, 1, 2, 1, 2, 1, 2, 1, 2, 0, 2, 0, 1, 2, 1, 2, 1, 2, 0, 0, 0, 2, 0, 2, 0, 0, 0, 1, 2, 1, 2, 0, 2, 0, 0, 0, 1, 2, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 0, 2, 0, 0, 2, 0, 0, 1, 2, 1, 2, 1, 2, 0, 1, 2, 2, 0, 1, 2]
# trace1= [1, 0, 2, 0, 0, 1, 2, 1, 2, 1, 2, 0, 1, 2, 1, 2, 1, 2, 0, 0, 1, 2, 1, 2, 0, 2, 1, 2, 0, 2, 0, 1, 2, 0, 0, 2, 1, 2, 0, 0, 0, 1, 2, 1, 2, 1, 2, 0, 0, 0, 1, 2, 0, 2, 1, 2, 0, 1, 2, 0, 1, 2, 0, 2, 0, 0, 2, 1, 2, 0, 2, 0, 0, 0, 1, 2, 0, 1, 2, 1, 2]
# Without CCHunter, generated with python sample_cchunter.py checkpoint=/media/research/yl3469/RLSCA/CacheSimulator/data/guess99_cchunter100/ppo_agent-338.pth
trace4 = [1, 0, 2, 0, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 1, 2, 0, 2, 1, 2, 1, 2, 0, 1, 2, 1, 2, 0, 2, 0, 0, 1, 2, 1, 2, 0, 2, 0, 1, 2, 2, 1, 2, 0, 2, 0, 1, 2, 2, 1, 2, 1, 2, 1, 2, 0, 2, 1, 2, 1, 2, 0, 1, 2, 1, 2, 0, 2, 1, 2, 0, 2, 0, 1, 2, 0, 1, 2, 2, 0, 2]
trace3 = [1, 0, 2, 0, 0, 0, 2, 1, 2, 1, 2, 1, 2, 0, 1, 2, 1, 2, 0, 2, 0, 1, 2, 0, 1, 2, 1, 2, 0, 2, 1, 2, 0, 2, 0, 1, 2, 1, 2, 1, 2, 0, 1, 2, 1, 2, 0, 2, 1, 2, 1, 0, 2, 1, 2, 0, 2, 0, 1, 2, 0, 1, 2, 0, 2, 0, 2, 0, 1, 2, 1, 2, 1, 2, 0, 1, 2, 1, 2, 1, 2]
trace2 = [1, 1, 2, 1, 2, 0, 2, 0, 1, 2, 1, 2, 0, 2, 0, 1, 2, 0, 1, 2, 0, 2, 0, 2, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 1, 2, 0, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 0, 2, 0, 0, 1, 2, 1, 2, 0, 2, 1, 2, 0, 2, 0, 1, 2, 2, 0, 1, 2, 1, 2, 1, 0, 2, 1, 2, 0, 2, 1, 2]
trace1 = [0, 1, 2, 0, 0, 1, 2, 1, 2, 0, 2, 0, 0, 1, 2, 1, 2, 0, 2, 0, 0, 1, 2, 1, 2, 0, 2, 1, 2, 0, 2, 1, 2, 1, 2, 1, 2, 0, 2, 1, 2, 0, 2, 0, 1, 2, 1, 2, 0, 2, 0, 0, 1, 2, 1, 2, 0, 2, 1, 2, 0, 2, 0, 0, 1, 2, 0, 2, 1, 2, 1, 2, 0, 2, 0, 0, 1, 2, 2, 0, 2]
# With CCHunter, generated with python sample_cchunter.py checkpoint=/media/research/yl3469/RLSCA/CacheSimulator/data/guess95_cchunter0/ppo_agent-699.pth
# trace4= [1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2]
# trace3= [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2]
# trace2= [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2]
# trace1= [1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2]
# With CCHunter, nondeterministic, generated with python sample_cchunter.py checkpoint=/media/research/yl3469/RLSCA/CacheSimulator/data/guess95_cchunter0/ppo_agent-699.pth
# trace4 = [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2]
# trace3 = [1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2]
# trace2 = [0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2]
# trace1 = [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2]
# trace = trace1 + trace2 + trace3 + trace4
trace = trace1
# With CCHunter
# ctrace4 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]
# ctrace3 = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2]
# ctrace2 = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 2]
# ctrace1 = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 2]
# With CCHunter
# ctrace4 = [1, 2, 0, 1, 2, 0, 1, 2, 1, 2, 0, 2, 0, 0, 1, 2, 0, 2, 1, 2, 0, 1, 2, 1, 2, 1, 2, 1, 2, 2, 1, 2, 1, 2, 1, 2, 1, 2, 0, 2, 0, 1, 2, 1, 2, 1, 2, 0, 0, 1, 2, 1, 2, 2, 0, 1, 2, 1, 2, 2, 0, 0, 1, 2, 1, 2, 2, 1, 2, 2, 0, 1, 2, 2, 1, 0, 1, 2, 2, 0, 0, 0, 2, 2, 2, 2, 1, 0, 0, 2]
# ctrace3 = [1, 2, 1, 2, 0, 2, 0, 1, 2, 1, 2, 1, 2, 1, 2, 0, 2, 0, 0, 1, 2, 0, 2, 0, 2, 0, 1, 2, 1, 2, 2, 0, 0, 0, 1, 2, 1, 2, 0, 2, 0, 0, 1, 2, 0, 2, 2, 1, 2, 1, 2, 2, 1, 2, 1, 2, 2, 0, 0, 1, 2, 0, 0, 2, 2, 0, 0, 1, 2, 2, 1, 0, 2, 2, 1, 0, 0, 2, 2, 1, 0, 0, 2, 2, 2, 2, 1, 0, 2, 2]
# ctrace2 = [1, 2, 0, 0, 0, 2, 0, 2, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 0, 2, 0, 2, 0, 0, 1, 2, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 0, 2, 1, 2, 0, 1, 2, 0, 0, 2, 2, 1, 2, 1, 2, 0, 2, 1, 2, 2, 1, 2, 0, 1, 2, 1, 2, 2, 0, 1, 2, 1, 2, 2, 0, 1, 2, 2, 0, 0, 1, 2, 2, 1, 0, 0, 2, 2, 2, 2, 0, 1, 2, 2]
# ctrace1 = [1, 2, 0, 1, 2, 1, 2, 0, 2, 0, 0, 0, 2, 0, 2, 0, 0, 1, 2, 0, 2, 1, 2, 0, 0, 1, 2, 0, 2, 2, 2, 1, 2, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 1, 2, 0, 2, 0, 0, 1, 2, 1, 2, 2, 0, 0, 1, 2, 2, 1, 2, 2, 1, 2, 1, 2, 2, 0, 1, 2, 1, 0, 2, 2, 0, 0, 1, 2, 2, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 2, 2]
# [0, 0, 2, 0, 0, 1, 2, 1, 2, 0, 2, 0, 0, 1, 2, 0, 2, 1, 2, 1, 2, 1, 2, 0, 0, 1, 2, 1, 2, 1, 2, 0, 0, 0, 2, 0, 2, 0, 1, 2, 2, 0, 1, 2, 1, 2, 1, 2, 1, 2, 0, 0, 2, 0, 0, 0, 1, 2, 1, 2, 0, 2, 0, 1, 2, 2, 0, 1, 2, 1, 2, 0, 0, 2, 0, 0, 1, 2, 2, 0, 2]
# ctrace = ctrace1 + ctrace2 + ctrace3 + ctrace4
# With CCHunter, deterministic, 0.13333333333333333: python sample_cchunter.py checkpoint=/media/research/yl3469/RLSCA/CacheSimulator/data/guess99_cchunter11/ppo_agent-458.pth
ctrace4 = [1, 2, 0, 1, 2, 0, 1, 2, 1, 2, 0, 2, 1, 2, 1, 2, 0, 1, 2, 1, 2, 1, 2, 0, 1, 2, 1, 2, 0, 2, 0, 1, 2, 0, 1, 2, 0, 2, 0, 2, 0, 1, 2, 1, 2, 1, 2, 1, 2, 2, 0, 1, 2, 1, 2, 2, 0, 1, 2, 2, 1, 2, 2, 1, 2, 1, 2, 2, 0, 1, 2, 2, 1, 2, 1, 0, 1, 2, 2, 1, 0, 0, 2, 2, 2, 2, 1, 0, 0, 2]
ctrace3 = [0, 2, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 0, 1, 2, 1, 2, 1, 2, 0, 0, 0, 0, 2, 2, 1, 2, 1, 2, 0, 2, 1, 2, 1, 2, 0, 0, 0, 2, 2, 1, 2, 0, 1, 2, 1, 2, 2, 1, 2, 0, 1, 2, 1, 2, 2, 1, 2, 1, 2, 2, 2, 1, 1, 2, 2, 1, 0, 0, 2, 2, 1, 0, 0, 2, 2, 2, 2, 1, 0, 2, 2]
ctrace2 = [0, 2, 0, 0, 1, 2, 0, 2, 0, 2, 0, 1, 2, 1, 2, 0, 2, 0, 1, 2, 1, 2, 0, 2, 0, 1, 2, 1, 2, 2, 1, 2, 1, 2, 1, 2, 0, 0, 0, 2, 1, 2, 0, 1, 2, 0, 0, 2, 2, 1, 2, 0, 1, 2, 0, 1, 2, 1, 2, 0, 2, 1, 2, 2, 0, 1, 2, 0, 1, 2, 1, 0, 2, 2, 1, 0, 0, 2, 2, 0, 0, 0, 2, 2, 2, 2, 1, 0, 0, 2]
ctrace1 = [0, 2, 0, 1, 2, 1, 2, 0, 2, 0, 1, 2, 0, 1, 2, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 0, 2, 0, 2, 2, 0, 0, 1, 2, 1, 2, 0, 2, 1, 2, 1, 2, 1, 2, 1, 2, 0, 1, 2, 0, 0, 2, 2, 1, 2, 1, 2, 2, 0, 1, 2, 0, 0, 2, 2, 0, 0, 1, 2, 2, 1, 0, 2, 2, 1, 0, 1, 2, 2, 1, 0, 0, 2, 2, 2, 2, 0, 0, 1, 2]
ctrace = ctrace1
mask = [i != 2 for i in trace]
trace_lean = [i for i, v in zip(trace, mask) if v]
mask = [i != 2 for i in ctrace]
ctrace_lean = [i for i, v in zip(ctrace, mask) if v]
def calculate_autocorrelation_coefficients(x, lags):
"""
Calculate the autocorrelation coefficient of the given data at lag `lags`.
"""
# n = len(x)
series = pd.Series([i[0] for i in x])
# print("Series is:\n", series)
# print("series correlation:\n",series.autocorr())
# data = np.asarray(x)
# print(data)
# x_mean = np.mean(data)
# y_mean = np.mean(data)
# rho = np.zeros(lags)
# for lag in range(0, lags):
# x_m = data[:-lag]
# y_m = data[lag:]
# x_m -= x_mean
# y_m -= y_mean
# rho[lag] = np.sum(x_m * y_m) / (n - lag)
return series.autocorr(lags)
def autocorrelation_plot_forked(series, ax=None, n_lags=None, change_deno=False, change_core=False, **kwds):
"""
Autocorrelation plot for time series.
Parameters:
-----------
series: Time series
ax: Matplotlib axis object, optional
n_lags: maximum number of lags to show. Defaults to min(len(series) - 2, 100)
kwds : keywords
Options to pass to matplotlib plotting method
Returns:
-----------
class:`matplotlib.axis.Axes`
"""
import matplotlib.pyplot as plt
n_full = len(series)
if n_full <= 2:
raise ValueError("""len(series) = %i but should be > 2
to maintain at least 2 points of intersection when autocorrelating
with lags"""%n_full)
# Calculate the maximum number of lags permissible
# Subtract 2 to keep at least 2 points of intersection,
# otherwise pandas.Series.autocorr will throw a warning about insufficient
# degrees of freedom
n_maxlags = n_full - 2
# calculate the actual number of lags
if n_lags is None:
# Choosing a reasonable number of lags varies between datasets,
# but if the data longer than 200 points, limit this to 100 lags as a
# reasonable default for plotting when n_lags is not specified
n_lags = min(n_maxlags, 100)
else:
if n_lags > n_maxlags:
raise ValueError("n_lags should be < %i (i.e. len(series)-2)"%n_maxlags)
if ax is None:
ax = plt.axes(xlim=(1, n_lags), ylim=(-1.0, 1.0))
if not change_core:
data = np.asarray(series)
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n_full)
def r(h):
deno = n_full if not change_deno else (n_full - h)
return ((data[:n_full - h] - mean) *
(data[h:] - mean)).sum() / float(deno) / c0
else:
def r(h):
return series.autocorr(lag=h)
x = np.arange(n_lags) + 1
# y = lmap(r, x)
y = np.array([r(xi) for xi in x])
z95 = 1.959963984540054
z99 = 2.5758293035489004
ax.axhline(y=0.8, linestyle='--', color='grey')
# ax.axhline(y=z95 / np.sqrt(n_full), color='grey')
ax.axhline(y=0.0, color='black')
# ax.axhline(y=-z95 / np.sqrt(n_full), color='grey')
# ax.axhline(y=-z99 / np.sqrt(n_full), linestyle='--', color='grey')
ax.plot(x, y, **kwds)
if 'label' in kwds:
ax.legend()
ax.grid()
return ax
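# Illustrative note (not part of the original file): with mean m and
# c0 = sum((x - m)^2) / n, the coefficient plotted above at lag h is
#     r(h) = sum_{t < n-h} (x[t] - m) * (x[t+h] - m) / (n * c0),
# or divided by ((n - h) * c0) when change_deno=True, which removes the
# shrinking bias at large lags. A minimal hypothetical usage:
def _example_autocorrelation_plot() -> None:
    series = pd.Series([1, 2, 1, 2, 1, 2, 1, 2])
    ax = autocorrelation_plot_forked(series, n_lags=4, change_deno=True,
                                     label='example')
    ax.figure.savefig('example_acf.png')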
data = pd.Series(trace)
cdata = pd.Series(ctrace)
# data = pd.Series(trace_lean)
# cdata = pd.Series(ctrace_lean)
ax = autocorrelation_plot_forked(data, n_lags=len(data)-2, change_deno=True, label='Baseline')
autocorrelation_plot_forked(cdata, ax = ax, n_lags=len(cdata)-2, change_deno=True, label='With Autocorrelation Based Detection Penalty')
plt.tick_params(labelsize=6)
ax.set_xlabel("Lag (p)",fontdict = fontaxes)
ax.set_ylabel("Autocorrelation",fontdict = fontaxes)
ax.legend(prop={'size': 6})
plt.savefig('cchunter_hit_trace_{}_acf.pdf'.format(0))
| AutoCAT-main | src/rlmeta/cchunter_plot.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import logging
import os
import sys
from typing import Dict, Optional, Sequence, Union
import hydra
from omegaconf import DictConfig, OmegaConf
import numpy as np
import torch
import torch.nn
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.types import Action, TimeStep
from rlmeta.envs.env import Env
from rlmeta.utils.stats_dict import StatsDict
import model_utils
from cache_env_wrapper import CacheEnvCCHunterWrapperFactory
from cache_env_wrapper import CacheEnvCycloneWrapperFactory
from textbook_attacker import TextbookAgent
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from autocorrelation import autocorrelation
def batch_obs(timestep: TimeStep) -> TimeStep:
obs, reward, terminated, truncated, info = timestep
return TimeStep(obs.unsqueeze(0), reward, terminated, truncated, info)
def unbatch_action(action: Action) -> Action:
act, info = action
# act.squeeze_(0)
info = nested_utils.map_nested(lambda x: x.squeeze(0), info)
return Action(act, info)
def max_autocorr(data: Sequence[int], n: int) -> float:
n = min(len(data), n)
x = np.asarray(data)
corr = [autocorrelation(x, i) for i in range(n)]
corr = np.asarray(corr[1:])
corr = np.nan_to_num(corr)
return corr.max()
def run_loop(
env: Env,
agent: PPOAgent,
victim_addr: int = -1,
threshold: Union[float, Sequence[float]] = 0.75) -> Dict[str, float]:
episode_length = 0
episode_return = 0.0
num_guess = 0
num_correct = 0
if victim_addr == -1:
timestep = env.reset()
else:
timestep = env.reset(victim_address=victim_addr)
agent.observe_init(timestep)
while not (timestep.terminated or timestep.truncated):
# Model server requires a batch_dim, so unsqueeze here for local runs.
timestep = batch_obs(timestep)
action = agent.act(timestep)
# Unbatch the action.
action = unbatch_action(action)
timestep = env.step(action)
agent.observe(action, timestep)
episode_length += 1
episode_return += timestep.reward
if "guess_correct" in timestep.info:
num_guess += 1
if timestep.info["guess_correct"]:
num_correct += 1
autocorr_n = (env.env.env._env.cache_size *
env.env.env.cc_hunter_check_length)
max_ac = max_autocorr(env.env.cc_hunter_history, autocorr_n)
if isinstance(threshold, float):
threshold = (threshold, )
detect = [max_ac >= t for t in threshold]
metrics = {
"episode_length": episode_length,
"episode_return": episode_return,
"num_guess": num_guess,
"num_correct": num_correct,
"correct_rate": num_correct / num_guess,
"bandwith": num_guess / episode_length,
"max_autocorr": max_ac,
}
for t, d in zip(threshold, detect):
metrics[f"detect_rate-{t}"] = d
return metrics
def run_loops(env: Env,
agent: PPOAgent,
num_episodes: int = -1,
seed: int = 0,
reset_cache_state: bool = False,
threshold: Union[float, Sequence[float]] = 0.75) -> StatsDict:
# env.seed(seed)
env.reset(seed=seed)
metrics = StatsDict()
num_guess = 0
num_correct = 0
tot_length = 0
if num_episodes == -1:
start = env.env.victim_address_min
stop = env.env.victim_address_max + 1 + int(
env.env._env.allow_empty_victim_access)
for victim_addr in range(start, stop):
cur_metrics = run_loop(env,
agent,
victim_addr=victim_addr,
threshold=threshold)
num_guess += cur_metrics["num_guess"]
num_correct += cur_metrics["num_correct"]
tot_length += cur_metrics["episode_length"]
metrics.extend(cur_metrics)
else:
for _ in range(num_episodes):
cur_metrics = run_loop(env,
agent,
victim_addr=-1,
threshold=threshold)
num_guess += cur_metrics["num_guess"]
num_correct += cur_metrics["num_correct"]
tot_length += cur_metrics["episode_length"]
metrics.extend(cur_metrics)
metrics.add("overall_correct_rate", num_correct / num_guess)
metrics.add("overall_bandwith", num_guess / tot_length)
return metrics
@hydra.main(config_path="./config", config_name="sample_cchunter")
def main(cfg):
# Create env
cfg.env_config.verbose = 1
env_fac = CacheEnvCCHunterWrapperFactory(
OmegaConf.to_container(cfg.env_config))
env = env_fac(index=0)
### Load model
##model = model_utils.get_model(cfg.model_config, cfg.env_config.window_size,
## env.action_space.n, cfg.checkpoint)
##model.eval()
# Create agent
#agent = PPOAgent(model, deterministic_policy=cfg.deterministic_policy)
agent = TextbookAgent(cfg.env_config)
# Run loops
metrics = run_loops(env, agent, cfg.num_episodes, cfg.seed)
logging.info("\n\n" + metrics.table(info="sample") + "\n")
if __name__ == "__main__":
main()
| AutoCAT-main | src/rlmeta/sample_cchunter_textbook.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# script for plotting figure on paper
import logging
from typing import Dict
#import hydra
#import torch
#import torch.nn
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# sys.path.append("/home/mulong/RL_SCA/src/CacheSimulator/src")
#import rlmeta.utils.nested_utils as nested_utils
import numpy as np
#from rlmeta.agents.ppo.ppo_agent import PPOAgent
#from rlmeta.core.types import Action
#from rlmeta.envs.env import Env
#from rlmeta.utils.stats_dict import StatsDict
#from cchunter_wrapper import CCHunterWrapper
#from cache_env_wrapper import CacheEnvWrapperFactory
#from cache_ppo_model import CachePPOModel
#from cache_ppo_transformer_model import CachePPOTransformerModel
#from textbook_attacker import TextbookAgent
# from cache_guessing_game_env_impl import CacheGuessingGameEnv
# from cchunter_wrapper import CCHunterWrapper
#from cache_env_wrapper import CacheEnvWrapperFactory
#from cache_ppo_model import CachePPOModel
#from cache_ppo_transformer_model import CachePPOTransformerModel
#from cache_ppo_transformer_periodic_model import CachePPOTransformerPeriodicModel
import matplotlib.pyplot as plt
import pandas as pd
#from cache_env_wrapper import CacheEnvCCHunterWrapperFactory
import matplotlib.font_manager as font_manager
from autocorrelation import autocorrelation
fontaxes = {
'family': 'Arial',
# 'color': 'black',
'weight': 'bold',
#'size': 6,
}
fontaxes_title = {
'family': 'Arial',
# 'color': 'black',
'weight': 'bold',
# 'size': 9,
}
font = font_manager.FontProperties(family='Arial',
weight='bold',
style='normal')
def autocorrelation_plot_forked(series, ax=None, n_lags=None, change_deno=False, change_core=False, **kwds):
"""
Autocorrelation plot for time series.
Parameters:
-----------
series: Time series
ax: Matplotlib axis object, optional
n_lags: maximum number of lags to show. Defaults to min(len(series), 100)
kwds : keywords
Options to pass to matplotlib plotting method
Returns:
-----------
class:`matplotlib.axis.Axes`
"""
import matplotlib.pyplot as plt
n_full = len(series)
if n_full <= 2:
raise ValueError("""len(series) = %i but should be > 2
to maintain at least 2 points of intersection when autocorrelating
with lags"""%n_full)
# Calculate the maximum number of lags permissible
# Subtract 2 to keep at least 2 points of intersection,
# otherwise pandas.Series.autocorr will throw a warning about insufficient
# degrees of freedom
n_maxlags = n_full #- 2
# calculate the actual number of lags
if n_lags is None:
# Choosing a reasonable number of lags varies between datasets,
# but if the data longer than 200 points, limit this to 100 lags as a
# reasonable default for plotting when n_lags is not specified
n_lags = min(n_maxlags, 100)
else:
if n_lags > n_maxlags:
raise ValueError("n_lags should be < %i (i.e. len(series)-2)"%n_maxlags)
if ax is None:
ax = plt.axes(xlim=(0, n_lags), ylim=(-1.1, 1.6))
if not change_core:
data = np.asarray(series)
def r(h: int) -> float:
return autocorrelation(data, h)
else:
def r(h):
return series.autocorr(lag=h)
# x = np.arange(n_lags) + 1
x = np.arange(n_lags)
# y = lmap(r, x)
y = np.array([r(xi) for xi in x])
print(y)
print(f"y = {y}")
print(f"y_max = {np.max(y[1:])}")
z95 = 1.959963984540054
z99 = 2.5758293035489004
# ax.axhline(y=-z95 / np.sqrt(n_full), color='grey')
# ax.axhline(y=-z99 / np.sqrt(n_full), linestyle='--', color='grey')
ax.set_xlabel("Lag (p)", fontdict = fontaxes)
ax.set_ylabel("Autocorrelation \n Coefficient", fontdict = fontaxes)
ax.plot(x, y, **kwds)
if 'label' in kwds:
ax.legend()
ax.grid()
return ax
def main():
plt.figure(num=None, figsize=(5, 2), dpi=300, facecolor='w')
series_human = [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
#series_baseline = [1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# sampled from python sample_cchunter.py checkpoint=/home/ml2558/CacheSimulator/src/rlmeta/data/table8/hpca_ae_exp_8_baseline_new/exp1/ppo_agent-499.pth num_episodes=1
series_baseline = [0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
#series_l2 = [0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1]
# sampled from python sample_cchunter.py checkpoint=/home/ml2558/CacheSimulator/src/rlmeta/data/table8/hpca_ae_exp_8_autocor_new/exp1/ppo_agent-499.pth num_episodes=1
series_l2 = [0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0]
#series_l2 = [0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1]
for i in range(0, len(series_baseline)):
series_baseline[i] += 1.2
for i in range(0, len(series_l2)):
series_l2[i] += 2.4
series_human = series_human[0:50]
series_baseline = series_baseline[0:50]
series_l2 = series_l2[0:50]
ax = plt.subplot(121)
ax.set_xlim([0, 48] )
ax.set_ylim([-0.1, 3.7])
ax.set_yticks([])
plt.tick_params(left=False)
text_x = -10
ax.text(text_x, 0.15, 'A->V', fontproperties=font)
ax.text(text_x, 0.85, 'V->A', fontproperties=font)
ax.text(text_x, 0.15+1.2, 'A->V',fontproperties=font)
ax.text(text_x, 0.85+1.2, 'V->A',fontproperties=font)
ax.text(text_x, 0.15+2.4, 'A->V', fontproperties=font)
ax.text(text_x, 0.85+2.4, 'V->A',fontproperties=font)
#ax.set_xlim([0, 60])
ax.plot(series_human)#, linewidth=4 )
ax.plot(series_baseline)
ax.plot(series_l2)
ax.set_xlabel("Number of cache conflicts", fontdict = fontaxes)
ax.legend(prop={'size': 6, 'family': 'Arial', 'weight':'bold'})
ax.legend(['textbook', 'RL_baseline', 'RL_autocor'], ncol=3,bbox_to_anchor=(2.2,1.28), prop=font)
data_human = pd.Series(series_human)
data_baseline = pd.Series(series_baseline)
data_l2 = pd.Series(series_l2)
cache_size = 4
#plt.figure(num=None, figsize=(5.2, 2), dpi=300, facecolor='w')
#plt.subplots_adjust(right = 0.98, top =0.97, bottom=0.24,left=0.13,wspace=0, hspace=0.2)
ax = plt.subplot(122)
autocorrelation_plot_forked(data_human,ax=ax, n_lags= 8 * cache_size, change_deno=True) #consider removing -2
autocorrelation_plot_forked(data_baseline, ax=ax,n_lags= 8 * cache_size, change_deno=True) #consider removing -2
autocorrelation_plot_forked(data_l2, ax=ax, n_lags= 8 * cache_size, change_deno=True) #consider removing -2
#plt.legend(['textbook', 'RL_baseline', 'RL_autocor'], ncol=3, prop=font)
plt.plot([0,40],[0.75,0.75], linestyle='--', color='grey')
# ax.axhline(y=z95 / np.sqrt(n_full), color='grey')
plt.plot([0,40],[0,0], color='black')
ax.set_xlim([0, 32] )
ax.yaxis.set_label_coords(-0.09, .5)
#plt.savefig('cchunter_compare.pdf')
#plt.savefig('cchunter_compare.png')
plt.subplots_adjust(right = 0.999, top =0.85, bottom=0.22,left=0.085,wspace=0.28, hspace=0.2)
plt.savefig('event_train.pdf')
plt.savefig('event_train.png')
if __name__ == "__main__":
main()
'''
human
Reset...(also the cache state)
victim address 3
Step...
acceee 4 miss
Step...
access 5 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
victim access 3
Step...
access 4 hit
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:0.79
Reset...(cache state the same)
victim address 2
victim_address! 3 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 5 hit
Step...
acceee 6 miss
Step...
access 7 hit
Step...
correct guess 2
correct rate:0.8
Reset...(cache state the same)
victim address 1
victim_address! 2 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:0.81
Reset...(cache state the same)
victim address 3
victim_address! 1 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:0.82
Reset...(cache state the same)
victim address 1
victim_address! 3 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:0.83
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
access 5 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 0
correct rate:0.84
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 5 hit
Step...
acceee 6 miss
Step...
access 7 hit
Step...
correct guess 2
correct rate:0.85
Reset...(cache state the same)
victim address 1
victim_address! 2 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:0.86
Reset...(cache state the same)
victim address 2
victim_address! 1 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 5 hit
Step...
acceee 6 miss
Step...
access 7 hit
Step...
correct guess 2
correct rate:0.87
Reset...(cache state the same)
victim address 1
victim_address! 2 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:0.88
Reset...(cache state the same)
victim address 1
victim_address! 1 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:0.89
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
access 5 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 0
correct rate:0.9
Reset...(cache state the same)
victim address 0
victim_address! 0 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
access 5 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 0
correct rate:0.91
Reset...(cache state the same)
victim address 3
victim_address! 0 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:0.92
Reset...(cache state the same)
victim address 2
victim_address! 3 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 5 hit
Step...
acceee 6 miss
Step...
access 7 hit
Step...
correct guess 2
correct rate:0.93
Reset...(cache state the same)
victim address 1
victim_address! 2 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:0.94
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
access 5 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 0
correct rate:0.95
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 5 hit
Step...
acceee 6 miss
Step...
access 7 hit
Step...
correct guess 2
correct rate:0.96
Reset...(cache state the same)
victim address 1
victim_address! 2 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:0.97
Reset...(cache state the same)
victim address 1
victim_address! 1 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:0.98
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
access 5 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 0
correct rate:0.99
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 1 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 5 hit
Step...
acceee 6 miss
Step...
access 7 hit
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 2 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 3 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 5 hit
Step...
acceee 6 miss
Step...
access 7 hit
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 2 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
access 5 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Episode number of guess: 26
Episode number of corrects: 26
correct rate: 1.0
bandwidth rate: 0.1625
[1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
/home/mulong/RL_SCA/src/CacheSimulator/src/rlmeta/sample_cchunter.py:75: MatplotlibDeprecationWarning: Calling gca() with keyword arguments was deprecated in Matplotlib 3.4. Starting two minor releases later, gca() will take no keyword arguments. The gca() function should only be used to get the current axes, or if no axes exist, create new axes with default keyword arguments. To create a new axes with non-default arguments, use plt.axes() or plt.subplot().
ax = plt.gca(xlim=(1, n_lags), ylim=(-1.0, 1.0))
y = [ 1. -0.98113208 0.96223727 -0.94339623 0.92447455 -0.90566038
0.88671182 -0.86792453 0.84894909 -0.83018868 0.81118637 -0.79245283
0.77342364 -0.75471698 0.73566091 -0.71698113 0.69789819 -0.67924528
0.66013546 -0.64150943 0.62237274 -0.60377358 0.58461001 -0.56603774
0.54684728 -0.52830189 0.50908456 -0.49056604 0.47132183 -0.45283019
0.4335591 -0.41509434]
y_max = 0.9622372735580283
Figure saved as 'cchunter_hit_trace_3_acf.png
Total number of guess: 104
Total number of corrects: 104
Episode total: 640
correct rate: 1.0
bandwidth rate: 0.1625
'''
'''
l2
Reset...(also the cache state)
victim address 3
Step...
victim access 3
Step...
acceee 5 miss
Step...
acceee 4 miss
Step...
acceee 6 miss
Step...
victim access 3
Step...
access 5 hit
Step...
access 4 hit
Step...
access 6 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Step...
victim access 2
Step...
access 5 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 2 correct guess! True
Step...
victim access 3
Step...
access 5 hit
Step...
access 4 hit
Step...
access 6 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 0 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 0 correct guess! True
Step...
victim access 3
Step...
access 5 hit
Step...
access 6 hit
Step...
access 4 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 3 correct guess! True
Step...
victim access 3
Step...
access 5 hit
Step...
access 6 hit
Step...
access 4 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 0 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
victim access 1
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 1 correct guess! True
Step...
victim access 1
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 1 correct guess! True
Step...
victim access 3
Step...
access 5 hit
Step...
access 6 hit
Step...
access 4 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 3 correct guess! True
Step...
victim access 3
Step...
access 5 hit
Step...
access 6 hit
Step...
access 4 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 0 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
acceee 7 miss
Step...
victim access 1
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 1 correct guess! True
Step...
victim access 3
Step...
access 5 hit
Step...
access 4 hit
Step...
access 6 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
victim access 1
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
victim access 1
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
acceee 7 miss
Step...
victim access 1
Step...
access 6 hit
Step...
access 4 hit
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 1 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
access 6 hit
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 1 correct guess! True
Step...
victim access 3
Step...
access 6 hit
Step...
access 4 hit
Step...
access 5 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
access 6 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
victim access 1
Step...
access 6 hit
Step...
access 4 hit
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
access 6 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
victim access 1
Step...
acceee 7 miss
Step...
victim access 1
Step...
victim access 1
Step...
access 6 hit
Step...
access 4 hit
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 1 correct guess! True
Step...
victim access 3
Step...
access 6 hit
Step...
access 4 hit
Step...
access 5 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
acceee 7 miss
Step...
victim access 0
Step...
victim access 0
Step...
access 6 hit
Step...
acceee 4 miss
Step...
victim access 0
Step...
victim access 0
Step...
victim access 0
Step...
acceee 4 miss
Step...
access 4 hit
Step...
access 4 hit
Step...
access 4 hit
Step...
access 4 hit
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 0 correct guess! True
Episode number of guess: 32
Episode number of corrects: 32
correct rate: 1.0
bandwidth rate: 0.19753086419753085
[0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1]
/home/mulong/RL_SCA/src/CacheSimulator/src/rlmeta/sample_cchunter.py:75: MatplotlibDeprecationWarning: Calling gca() with keyword arguments was deprecated in Matplotlib 3.4. Starting two minor releases later, gca() will take no keyword arguments. The gca() function should only be used to get the current axes, or if no axes exist, create new axes with default keyword arguments. To create a new axes with non-default arguments, use plt.axes() or plt.subplot().
ax = plt.gca(xlim=(1, n_lags), ylim=(-1.0, 1.0))
y = [ 1. -0.6823596 0.42214715 -0.34085761 0.25558463 -0.17101461
0.0498516 -0.0011716 -0.01648051 -0.03524565 0.08539014 -0.13729204
0.1872608 -0.30731079 0.42507615 -0.40935718 0.35528782 -0.30748653
0.25324143 -0.13764352 0.01525033 0.03219948 0.01689057 -0.00187456
-0.01718347 -0.06820667 0.08468718 -0.10228072 0.11858549 -0.1040967
0.08451144 -0.13817074]
y_max = 0.42507615402640003
Figure saved as 'cchunter_hit_trace_3_acf.png
Total number of guess: 134
Total number of corrects: 134
Episode total: 648
correct rate: 1.0
bandwidth rate: 0.20679012345679013
'''
'''
baseline
Reset...(also the cache state)
victim address 3
Step...
acceee 4 miss
Step...
acceee 7 miss
Step...
acceee 6 miss
Step...
victim access 3
Step...
access 4 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 3 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 1 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 2 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 0 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 3 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 2 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 2 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 3 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 0 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 0 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 7 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 2 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
access 7 hit
Step...
access 6 hit
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 7 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 2 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
access 7 hit
Step...
access 6 hit
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 0 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 7 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 2 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 7 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 2 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 0 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 3 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 7 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 2 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 7 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 2 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 0 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 3 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 3 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 7 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 2 correct guess! True
Episode number of guess: 38
Episode number of corrects: 38
correct rate: 1.0
bandwidth rate: 0.2360248447204969
[1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
/home/mulong/RL_SCA/src/CacheSimulator/src/rlmeta/sample_cchunter.py:75: MatplotlibDeprecationWarning: Calling gca() with keyword arguments was deprecated in Matplotlib 3.4. Starting two minor releases later, gca() will take no keyword arguments. The gca() function should only be used to get the current axes, or if no axes exist, create new axes with default keyword arguments. To create a new axes with non-default arguments, use plt.axes() or plt.subplot().
ax = plt.gca(xlim=(1, n_lags), ylim=(-1.0, 1.0))
y = [ 1. -0.92995169 0.94312692 -0.92874396 0.91403162 -0.89975845
0.88493632 -0.87077295 0.85584102 -0.84178744 0.82674572 -0.81280193
0.79765042 -0.78381643 0.76855512 -0.75483092 0.73945982 -0.72584541
0.71036451 -0.6968599 0.68126921 -0.6678744 0.65217391 -0.63888889
0.62307861 -0.60990338 0.59398331 -0.58091787 0.56488801 -0.55193237
0.53579271 -0.52294686]
y_max = 0.9431269213877909
Figure saved as 'cchunter_hit_trace_3_acf.png
Total number of guess: 147
Total number of corrects: 147
Episode total: 643
correct rate: 1.0
bandwidth rate: 0.2286158631415241
'''
| AutoCAT-main | src/rlmeta/plot_cchunter.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import logging
from typing import Dict, Optional
import hydra
from omegaconf import DictConfig, OmegaConf
import torch
import torch.nn
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.types import Action, TimeStep
from rlmeta.envs.env import Env
from rlmeta.utils.stats_dict import StatsDict
import model_utils
from cache_env_wrapper import CacheEnvWrapperFactory
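# Add a leading batch dimension to a single TimeStep's observation so the
# batched model interface can be used directly from this local sampling script.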
def batch_obs(timestep: TimeStep) -> TimeStep:
obs, reward, terminated, truncated, info = timestep
return TimeStep(obs.unsqueeze(0), reward, terminated, truncated, info)
def unbatch_action(action: Action) -> Action:
act, info = action
act.squeeze_(0)
info = nested_utils.map_nested(lambda x: x.squeeze(0), info)
return Action(act, info)
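# Roll out one episode with the given agent and return its length, return, and
# whether the attacker's guess was correct (inferred from a positive return).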
def run_loop(env: Env,
agent: PPOAgent,
victim_addr: int = -1,
reset_cache_state: bool = False) -> Dict[str, float]:
episode_length = 0
episode_return = 0.0
if victim_addr == -1:
timestep = env.reset(reset_cache_state=reset_cache_state)
else:
timestep = env.reset(victim_address=victim_addr,
reset_cache_state=reset_cache_state)
agent.observe_init(timestep)
    while not (timestep.terminated or timestep.truncated):
# Model server requires a batch_dim, so unsqueeze here for local runs.
timestep = batch_obs(timestep)
action = agent.act(timestep)
# Unbatch the action.
action = unbatch_action(action)
timestep = env.step(action)
agent.observe(action, timestep)
episode_length += 1
episode_return += timestep.reward
# Only correct guess has positive reward.
correct_rate = float(episode_return > 0.0)
metrics = {
"episode_length": episode_length,
"episode_return": episode_return,
"correct_rate": correct_rate,
}
return metrics
def run_loops(env: Env,
agent: PPOAgent,
num_episodes: int = -1,
seed: int = 0,
reset_cache_state: bool = False) -> StatsDict:
# env.seed(seed)
env.reset(seed=seed)
metrics = StatsDict()
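    # num_episodes == -1 sweeps every victim address exactly once (including
    # the optional empty access); otherwise run the requested number of
    # episodes with randomly chosen victim addresses.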
if num_episodes == -1:
start = env.env.victim_address_min
stop = env.env.victim_address_max + 1 + int(
env.env.allow_empty_victim_access)
for victim_addr in range(start, stop):
cur_metrics = run_loop(env,
agent,
victim_addr=victim_addr,
reset_cache_state=reset_cache_state)
metrics.extend(cur_metrics)
else:
for _ in range(num_episodes):
cur_metrics = run_loop(env,
agent,
victim_addr=-1,
reset_cache_state=reset_cache_state)
metrics.extend(cur_metrics)
return metrics
@hydra.main(config_path="./config", config_name="sample_attack")
def main(cfg):
# Create env
cfg.env_config.verbose = 1
env_fac = CacheEnvWrapperFactory(OmegaConf.to_container(cfg.env_config))
env = env_fac(index=0)
# Load model
model = model_utils.get_model(cfg.model_config, cfg.env_config.window_size,
env.action_space.n, cfg.checkpoint)
model.eval()
# Create agent
agent = PPOAgent(model, deterministic_policy=cfg.deterministic_policy)
# Run loops
metrics = run_loops(env, agent, cfg.num_episodes, cfg.seed,
cfg.reset_cache_state)
logging.info("\n\n" + metrics.table(info="sample") + "\n")
if __name__ == "__main__":
main()
| AutoCAT-main | src/rlmeta/sample_attack.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import copy
import logging
import os
import time
import hydra
from omegaconf import DictConfig, OmegaConf
import torch
import torch.multiprocessing as mp
import rlmeta.utils.hydra_utils as hydra_utils
import rlmeta.utils.random_utils as random_utils
import rlmeta.utils.remote_utils as remote_utils
from rlmeta.agents.agent import AgentFactory
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.controller import Phase, Controller
from rlmeta.core.loop import LoopList, ParallelLoop
from rlmeta.core.model import ModelVersion, RemotableModelPool
from rlmeta.core.model import make_remote_model, wrap_downstream_model
from rlmeta.core.replay_buffer import ReplayBuffer, make_remote_replay_buffer
from rlmeta.core.server import Server, ServerList
from rlmeta.core.callbacks import EpisodeCallbacks
from rlmeta.core.types import Action, TimeStep
from rlmeta.samplers import UniformSampler
from rlmeta.storage import TensorCircularBuffer
from rlmeta.utils.optimizer_utils import make_optimizer
import model_utils
from cache_env_wrapper import CacheEnvCCHunterWrapperFactory
from metric_callbacks import CCHunterMetricCallbacks
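# Distributed PPO training against the CC-Hunter detection setting: the model
# pool, replay buffer, and controller each run on their own server, parallel
# train/eval loops generate rollouts, and the learner agent below pulls batches
# from the replay buffer and periodically pushes updated weights.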
@hydra.main(config_path="./config", config_name="ppo_cchunter")
def main(cfg):
if cfg.seed is not None:
random_utils.manual_seed(cfg.seed)
    print(f"working_dir = {os.getcwd()}")
my_callbacks = CCHunterMetricCallbacks()
logging.info(hydra_utils.config_to_json(cfg))
env_fac = CacheEnvCCHunterWrapperFactory(
OmegaConf.to_container(cfg.env_config))
env = env_fac(index=0)
train_model = model_utils.get_model(
cfg.model_config, cfg.env_config.window_size,
env.action_space.n).to(cfg.train_device)
infer_model = copy.deepcopy(train_model).to(cfg.infer_device)
infer_model.eval()
optimizer = make_optimizer(train_model.parameters(), **cfg.optimizer)
ctrl = Controller()
rb = ReplayBuffer(TensorCircularBuffer(cfg.replay_buffer_size),
UniformSampler())
m_server = Server(cfg.m_server_name, cfg.m_server_addr)
r_server = Server(cfg.r_server_name, cfg.r_server_addr)
c_server = Server(cfg.c_server_name, cfg.c_server_addr)
m_server.add_service(RemotableModelPool(infer_model, seed=cfg.seed))
r_server.add_service(rb)
c_server.add_service(ctrl)
servers = ServerList([m_server, r_server, c_server])
a_model = wrap_downstream_model(train_model, m_server)
t_model = make_remote_model(infer_model, m_server)
e_model = make_remote_model(infer_model, m_server)
a_ctrl = remote_utils.make_remote(ctrl, c_server)
t_ctrl = remote_utils.make_remote(ctrl, c_server)
e_ctrl = remote_utils.make_remote(ctrl, c_server)
a_rb = make_remote_replay_buffer(rb, r_server, prefetch=cfg.prefetch)
t_rb = make_remote_replay_buffer(rb, r_server)
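    # Prefix convention: "a_" handles are used by the learner agent, "t_" by
    # the training rollout loop, and "e_" by the evaluation loop.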
agent = PPOAgent(a_model,
replay_buffer=a_rb,
controller=a_ctrl,
optimizer=optimizer,
batch_size=cfg.batch_size,
learning_starts=cfg.get("learning_starts", None),
entropy_coeff=cfg.get("entropy_coeff", 0.01),
model_push_period=cfg.model_push_period)
t_agent_fac = AgentFactory(PPOAgent, t_model, replay_buffer=t_rb)
e_agent_fac = AgentFactory(PPOAgent, e_model, deterministic_policy=True)
t_loop = ParallelLoop(env_fac,
t_agent_fac,
t_ctrl,
running_phase=Phase.TRAIN,
should_update=True,
num_rollouts=cfg.num_train_rollouts,
num_workers=cfg.num_train_workers,
seed=cfg.seed,
episode_callbacks=my_callbacks)
e_loop = ParallelLoop(env_fac,
e_agent_fac,
e_ctrl,
running_phase=Phase.EVAL,
should_update=False,
num_rollouts=cfg.num_eval_rollouts,
num_workers=cfg.num_eval_workers,
seed=(None if cfg.seed is None else cfg.seed +
cfg.num_train_rollouts),
episode_callbacks=my_callbacks)
loops = LoopList([t_loop, e_loop])
servers.start()
loops.start()
agent.connect()
start_time = time.perf_counter()
for epoch in range(cfg.num_epochs):
stats = agent.train(cfg.steps_per_epoch)
cur_time = time.perf_counter() - start_time
info = f"T Epoch {epoch}"
if cfg.table_view:
logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
else:
logging.info(
stats.json(info, phase="Train", epoch=epoch, time=cur_time))
time.sleep(1)
stats = agent.eval(cfg.num_eval_episodes)
cur_time = time.perf_counter() - start_time
info = f"E Epoch {epoch}"
if cfg.table_view:
logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
else:
logging.info(
stats.json(info, phase="Eval", epoch=epoch, time=cur_time))
torch.save(train_model.state_dict(), f"ppo_agent-{epoch}.pth")
time.sleep(1)
loops.terminate()
servers.terminate()
if __name__ == "__main__":
mp.set_start_method("spawn")
main()
| AutoCAT-main | src/rlmeta/train_ppo_cchunter.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import argparse
import math
import json
import re
from datetime import datetime
from typing import Any, Dict, Optional, Union
import matplotlib.pyplot as plt
import numpy as np
JSON_REGEX = re.compile("{.+}")
def parse_json(line: str) -> Optional[Dict[str, Any]]:
m = JSON_REGEX.search(line)
return None if m is None else json.loads(m.group())
def get_value(val: Union[float, Dict[str, float]]) -> float:
return val["mean"] if isinstance(val, dict) else val
def plot(log_file: str,
phase: str,
xkey: str,
ykey: str,
fig_file: Optional[str] = None) -> None:
x = []
y = []
with open(log_file, "r") as f:
line = f.readline()
# cfg = parse_json(line)
for line in f:
stats = parse_json(line)
if stats is None:
continue
cur_phase = stats.get("phase", None)
if cur_phase == phase:
x.append(get_value(stats[xkey]))
y.append(get_value(stats[ykey]))
# y.append(math.log(get_value(stats[ykey])))
# y.append(get_value(stats["gap"]) / get_value(stats["episode_length"]))
x = np.array(x)
y = np.array(y)
plt.plot(x, y, label=ykey)
plt.xlabel(xkey)
plt.ylabel(ykey)
plt.legend()
if fig_file is not None:
plt.savefig(fig_file)
else:
plt.show()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--log_file", type=str, help="log file to plot")
parser.add_argument("--phase",
default="Eval",
type=str,
help="phase to plot.")
parser.add_argument("--xkey",
default="epoch",
type=str,
help="x values to plot.")
parser.add_argument("--ykey",
default="episode_return",
type=str,
help="y values to plot.")
parser.add_argument("--fig_file",
default=None,
type=str,
help="figure file to save.")
flags = parser.parse_intermixed_args()
plot(flags.log_file, flags.phase, flags.xkey, flags.ykey, flags.fig_file)
if __name__ == "__main__":
main()
| AutoCAT-main | src/rlmeta/plot_figure_remap.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import os
import sys
from typing import Dict, List, Tuple
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import rlmeta.core.remote as remote
from rlmeta.agents.ppo.ppo_model import PPOModel
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from models.backbone import CacheBackbone
class CachePPOMlpModel(PPOModel):
def __init__(self,
latency_dim: int,
victim_acc_dim: int,
action_dim: int,
step_dim: int,
window_size: int,
action_embed_dim: int,
step_embed_dim: int,
hidden_dim: int,
output_dim: int,
num_layers: int = 1) -> None:
super().__init__()
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.backbone = CacheBackbone(latency_dim, victim_acc_dim, action_dim,
step_dim, window_size, action_embed_dim,
step_embed_dim, hidden_dim, num_layers)
self.linear_a = nn.Linear(self.hidden_dim, self.output_dim)
self.linear_v = nn.Linear(self.hidden_dim, 1)
self._device = None
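    # The CacheBackbone encodes the windowed cache-access observation; linear_a
    # produces policy logits (log-softmax below) and linear_v the value estimate.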
def forward(self, obs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
h = self.backbone(obs)
p = self.linear_a(h)
logpi = F.log_softmax(p, dim=-1)
v = self.linear_v(h)
return logpi, v
@remote.remote_method(batch_size=128)
def act(
self, obs: torch.Tensor, deterministic_policy: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
if self._device is None:
self._device = next(self.parameters()).device
with torch.no_grad():
logpi, v = self.forward(obs)
greedy_action = logpi.argmax(-1, keepdim=True)
sample_action = logpi.exp().multinomial(1, replacement=True)
action = torch.where(deterministic_policy, greedy_action,
sample_action)
logpi = logpi.gather(dim=-1, index=action)
return action, logpi, v
| AutoCAT-main | src/rlmeta/cache_ppo_mlp_model.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import logging
import os
import sys
from typing import Dict, Optional, Sequence, Union
import hydra
from omegaconf import DictConfig, OmegaConf
import numpy as np
import torch
import torch.nn
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.types import Action, TimeStep
from rlmeta.envs.env import Env
from rlmeta.utils.stats_dict import StatsDict
import model_utils
from cache_env_wrapper import CacheEnvCycloneWrapperFactory
from textbook_attacker import TextbookAgent
def batch_obs(timestep: TimeStep) -> TimeStep:
obs, reward, terminated, truncated, info = timestep
return TimeStep(obs.unsqueeze(0), reward, terminated, truncated, info)
def unbatch_action(action: Action) -> Action:
act, info = action
# act.squeeze_(0)
info = nested_utils.map_nested(lambda x: x.squeeze(0), info)
return Action(act, info)
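# `autocorrelation` is called by `max_autocorr` below but is neither defined
# nor imported in this file; the helper here is a minimal assumed sketch of a
# lag-k Pearson autocorrelation, not the project's original implementation.
def autocorrelation(x: np.ndarray, lag: int) -> float:
    if lag <= 0:
        return 1.0
    if lag >= len(x):
        return 0.0
    a, b = x[:-lag], x[lag:]
    if a.std() == 0 or b.std() == 0:
        return 0.0
    return float(np.corrcoef(a, b)[0, 1])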
def max_autocorr(data: Sequence[int], n: int) -> float:
n = min(len(data), n)
x = np.asarray(data)
corr = [autocorrelation(x, i) for i in range(n)]
corr = np.asarray(corr[1:])
corr = np.nan_to_num(corr)
return corr.max()
def run_loop(env: Env,
agent: PPOAgent,
victim_addr: int = -1) -> Dict[str, float]:
episode_length = 0
episode_return = 0.0
num_guess = 0
num_correct = 0
cyclone_attack = 0
if victim_addr == -1:
timestep = env.reset()
else:
timestep = env.reset(victim_address=victim_addr)
agent.observe_init(timestep)
while not (timestep.terminated or timestep.truncated):
# Model server requires a batch_dim, so unsqueeze here for local runs.
timestep = batch_obs(timestep)
action = agent.act(timestep)
# Unbatch the action.
action = unbatch_action(action)
timestep = env.step(action)
agent.observe(action, timestep)
episode_length += 1
episode_return += timestep.reward
if "guess_correct" in timestep.info:
num_guess += 1
if timestep.info["guess_correct"]:
num_correct += 1
cyclone_attack += timestep.info.get("cyclone_attack", 0)
    metrics = {
        "episode_length": episode_length,
        "episode_return": episode_return,
        "num_guess": num_guess,
        "num_correct": num_correct,
        "correct_rate": num_correct / max(num_guess, 1),
        "bandwidth": num_guess / episode_length,
        "cyclone_attack": cyclone_attack,
    }
return metrics
def run_loops(env: Env,
agent: PPOAgent,
num_episodes: int = -1,
seed: int = 0,
reset_cache_state: bool = False) -> StatsDict:
# env.seed(seed)
env.reset(seed=seed)
metrics = StatsDict()
num_guess = 0
num_correct = 0
tot_length = 0
if num_episodes == -1:
start = env.env.victim_address_min
stop = env.env.victim_address_max + 1 + int(
env.env._env.allow_empty_victim_access)
for victim_addr in range(start, stop):
cur_metrics = run_loop(env, agent, victim_addr=victim_addr)
num_guess += cur_metrics["num_guess"]
num_correct += cur_metrics["num_correct"]
tot_length += cur_metrics["episode_length"]
metrics.extend(cur_metrics)
else:
for _ in range(num_episodes):
cur_metrics = run_loop(env, agent, victim_addr=-1)
num_guess += cur_metrics["num_guess"]
num_correct += cur_metrics["num_correct"]
tot_length += cur_metrics["episode_length"]
metrics.extend(cur_metrics)
    metrics.add("overall_correct_rate", num_correct / max(num_guess, 1))
    metrics.add("overall_bandwidth", num_guess / tot_length)
return metrics
@hydra.main(config_path="./config", config_name="sample_cchunter")
def main(cfg):
# Create env
cfg.env_config.verbose = 1
env_fac = CacheEnvCycloneWrapperFactory(
OmegaConf.to_container(cfg.env_config))
env = env_fac(index=0)
# Load model
#model = model_utils.get_model(cfg.model_config, cfg.env_config.window_size,
# env.action_space.n, cfg.checkpoint)
#model.eval()
    # Create agent: the hand-written textbook prime+probe agent is used here
    # instead of a trained PPO policy
    # (PPOAgent(model, deterministic_policy=cfg.deterministic_policy)).
    agent = TextbookAgent(cfg.env_config)
# Run loops
metrics = run_loops(env, agent, cfg.num_episodes, cfg.seed)
logging.info("\n\n" + metrics.table(info="sample") + "\n")
if __name__ == "__main__":
main()
| AutoCAT-main | src/rlmeta/sample_cyclone_textbook.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import os
import sys
from typing import Any, Dict
from rlmeta.envs.env import Env, EnvFactory
from rlmeta.envs.gym_wrapper import GymWrapper
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from cache_guessing_game_env_impl import CacheGuessingGameEnv
from cchunter_wrapper import CCHunterWrapper
from cyclone_wrapper import CycloneWrapper
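# Each factory below builds the cache-guessing environment (plain, or wrapped
# with the CC-Hunter / Cyclone detector) and adapts it to rlmeta's Env
# interface through GymWrapper.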
class CacheEnvWrapperFactory(EnvFactory):
def __init__(self, env_config: Dict[str, Any]) -> None:
self._env_config = env_config
@property
def env_config(self) -> Dict[str, Any]:
return self._env_config
def __call__(self, index: int, *args, **kwargs) -> Env:
env = CacheGuessingGameEnv(self.env_config)
env = GymWrapper(env, old_step_api=True)
return env
class CacheEnvCCHunterWrapperFactory(EnvFactory):
def __init__(self, env_config: Dict[str, Any]) -> None:
self._env_config = env_config
@property
def env_config(self) -> Dict[str, Any]:
return self._env_config
def __call__(self, index: int, *args, **kwargs) -> Env:
# env = CacheGuessingGameEnv(self.env_config)
env = CCHunterWrapper(self.env_config)
env = GymWrapper(env, old_step_api=True)
return env
class CacheEnvCycloneWrapperFactory(EnvFactory):
def __init__(self, env_config: Dict[str, Any]) -> None:
self._env_config = env_config
@property
def env_config(self) -> Dict[str, Any]:
return self._env_config
def __call__(self, index: int, *args, **kwargs) -> Env:
# env = CacheGuessingGameEnv(self.env_config)
env = CycloneWrapper(self.env_config)
env = GymWrapper(env, old_step_api=True)
return env
| AutoCAT-main | src/rlmeta/cache_env_wrapper.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import os
import sys
from typing import Dict, List, Tuple
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import rlmeta.core.remote as remote
from rlmeta.agents.ppo.ppo_model import PPOModel
class CachePPOLstmModel(PPOModel):
def __init__(self,
latency_dim: int,
victim_acc_dim: int,
action_dim: int,
step_dim: int,
action_embed_dim: int,
step_embed_dim: int,
hidden_dim: int,
output_dim: int,
num_layers: int = 1) -> None:
super().__init__()
self.latency_dim = latency_dim
self.victim_acc_dim = victim_acc_dim
self.action_dim = action_dim
self.step_dim = step_dim
# self.window_size = window_size
self.action_embed_dim = action_embed_dim
# self.step_embed_dim = step_embed_dim
# self.input_dim = (self.latency_dim + self.victim_acc_dim +
# self.action_embed_dim + self.step_embed_dim)
self.input_dim = (self.latency_dim + self.victim_acc_dim +
self.action_embed_dim)
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.num_layers = num_layers
self.action_embed = nn.Embedding(self.action_dim,
self.action_embed_dim)
# self.step_embed = nn.Embedding(self.step_dim, self.step_embed_dim)
self.linear_i = nn.Linear(self.input_dim, self.hidden_dim)
self.encoder = nn.LSTM(
self.hidden_dim,
self.hidden_dim,
self.num_layers,
bias=False, # Disable bias for pre-padding sequence
bidirectional=False)
self.linear_a = nn.Linear(2 * self.hidden_dim, self.output_dim)
self.linear_v = nn.Linear(2 * self.hidden_dim, 1)
self._device = None
def make_one_hot(self, src: torch.Tensor,
num_classes: int) -> torch.Tensor:
mask = (src == -1)
src = src.masked_fill(mask, 0)
ret = F.one_hot(src, num_classes)
return ret.masked_fill(mask.unsqueeze(-1), 0.0)
def make_embedding(self, src: torch.Tensor,
embed: nn.Embedding) -> torch.Tensor:
mask = (src == -1)
src = src.masked_fill(mask, 0)
ret = embed(src)
return ret.masked_fill(mask.unsqueeze(-1), 0.0)
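    # Observations are padded with -1; the helpers above zero out padded slots,
    # and forward() reverses the window so the padding sits in front of the LSTM.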
def forward(self, obs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
obs = obs.to(torch.int64)
assert obs.dim() == 3
# batch_size = obs.size(0)
obs = torch.flip(obs, dims=(1, )) # Reverse input to pre-padding
l, v, act, _ = torch.unbind(obs, dim=-1)
l = self.make_one_hot(l, self.latency_dim)
v = self.make_one_hot(v, self.victim_acc_dim)
act = self.make_embedding(act, self.action_embed)
# stp = self.make_embedding(stp, self.step_embed)
x = torch.cat((l, v, act), dim=-1)
x = self.linear_i(x)
x = x.transpose(0, 1).contiguous()
_, (h, c) = self.encoder(x)
h = h.mean(dim=0)
c = c.mean(dim=0)
h = torch.cat((h, c), dim=-1)
p = self.linear_a(h)
logpi = F.log_softmax(p, dim=-1)
v = self.linear_v(h)
return logpi, v
@remote.remote_method(batch_size=128)
def act(
self, obs: torch.Tensor, deterministic_policy: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
with torch.no_grad():
logpi, v = self.forward(obs)
greedy_action = logpi.argmax(-1, keepdim=True)
sample_action = logpi.exp().multinomial(1, replacement=True)
action = torch.where(deterministic_policy, greedy_action,
sample_action)
logpi = logpi.gather(dim=-1, index=action)
return action, logpi, v
| AutoCAT-main | src/rlmeta/cache_ppo_lstm_model.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# A textbook prime+probe attacker that serves as the agent and achieves a high
# reward in the cache guessing game. It is used to generate attack sequences
# that can be detected by CC-Hunter. Currently it only works for direct-mapped
# caches (associativity = 1).
class TextbookAgent():
    # The config is the same as the one for cache_guessing_game_env_impl.
def __init__(self, env_config):
self.local_step = 0
self.lat = []
self.no_prime = False # set to true after first prime
if "cache_configs" in env_config:
#self.logger.info('Load config from JSON')
self.configs = env_config["cache_configs"]
self.num_ways = self.configs['cache_1']['associativity']
self.cache_size = self.configs['cache_1']['blocks']
attacker_addr_s = env_config["attacker_addr_s"] if "attacker_addr_s" in env_config else 4
attacker_addr_e = env_config["attacker_addr_e"] if "attacker_addr_e" in env_config else 7
victim_addr_s = env_config["victim_addr_s"] if "victim_addr_s" in env_config else 0
victim_addr_e = env_config["victim_addr_e"] if "victim_addr_e" in env_config else 3
flush_inst = env_config["flush_inst"] if "flush_inst" in env_config else False
self.allow_empty_victim_access = env_config["allow_empty_victim_access"] if "allow_empty_victim_access" in env_config else False
        assert(self.num_ways == 1)  # currently only supports direct-mapped caches
        assert(flush_inst == False)  # flush instructions are not allowed
        # attacker and victim address ranges must have the same size
        assert(attacker_addr_e - attacker_addr_s == victim_addr_e - victim_addr_s)
        # attacker and victim address ranges must not overlap
        assert((attacker_addr_e + 1 == victim_addr_s) or (victim_addr_e + 1 == attacker_addr_s))
        assert(self.allow_empty_victim_access == False)
# initialize the agent with an observation
def observe_init(self, timestep):
# initialization doing nothing
self.local_step = 0
self.lat = []
self.no_prime = False
return
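    # A full attack round consists of priming every attacker line, triggering
    # the victim, probing every line while observe() records the latencies, and
    # finally guessing the victim address from the probe access that missed.
    # After the first round, self.no_prime is set so the probe of one round
    # doubles as the prime of the next.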
    # Returns an action for the current local step.
    def act(self, timestep):
        info = {}
        # Prime phase: touch every attacker line (skipped once self.no_prime is set).
        if self.local_step < self.cache_size - (self.cache_size if self.no_prime else 0):
            action = self.local_step  # prime access
            self.local_step += 1
            return action, info
        # Trigger phase: let the victim perform its access.
        elif self.local_step == self.cache_size - (self.cache_size if self.no_prime else 0):
            action = self.cache_size  # victim access
            self.local_step += 1
            return action, info
        # Probe phase: re-access every attacker line; latencies are recorded in observe().
        elif self.local_step < 2 * self.cache_size + 1 - (self.cache_size if self.no_prime else 0):
            action = self.local_step - (self.cache_size + 1 - (self.cache_size if self.no_prime else 0))
            self.local_step += 1
            # timestep.state[0] is [r, victim_accessed, original_action, step_count]
            return action, info
        # Guess phase: pick the set whose probe missed and terminate the round.
        elif self.local_step == 2 * self.cache_size + 1 - (self.cache_size if self.no_prime else 0):
            action = 2 * self.cache_size  # default: assume the last access missed
            for addr in range(1, len(self.lat)):
                if self.lat[addr].int() == 1:  # miss
                    action = addr + self.cache_size
                    break
            self.local_step = 0
            self.lat = []
            self.no_prime = True
            return action, info
        else:
            assert(False)
    # Record the latency returned after the victim-trigger and probe steps;
    # act() uses these latencies to locate the miss when it makes its guess.
    def observe(self, action, timestep):
        if self.local_step < 2 * self.cache_size + 1 + 1 - (self.cache_size if self.no_prime else 0) and self.local_step > self.cache_size - (self.cache_size if self.no_prime else 0):
            self.lat.append(timestep.observation[0][0])
        return
| AutoCAT-main | src/rlmeta/textbook_attacker.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import seaborn as sns
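# `data` appears to be four rows (one per cache set) of event counts over
# 40 time intervals, matching the axis labels used below.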
data=[[2, 6, 1, 1, 1, 2, 1, 1, 9, 5, 1, 4, 1, 8, 0, 2, 2, 0, 6, 1, 2, 2, 0, 0, 1, 2, 1, 1, 2, 4, 3, 3, 1, 0, 1, 2, 0, 3, 2, 1], [2, 2, 1, 2, 2, 1, 1, 1, 0, 1, 3, 2, 1, 0, 5, 1, 1, 0, 1, 1, 0, 3, 1, 5, 2, 5, 0, 3, 1, 0, 1, 1, 2, 4, 4, 1, 3, 0, 1, 2], [1, 0, 4, 1, 2, 0, 6, 4, 2, 1, 4, 1, 3, 1, 7, 3, 1, 7, 2, 4, 5, 1, 3, 2, 1, 3, 4, 1, 1, 1, 6, 5, 3, 1, 4, 2, 2, 2, 1, 1], [1, 1, 1, 4, 2, 4, 1, 2, 0, 1, 1, 0, 1, 1, 0, 1, 2, 2, 0, 3, 2, 0, 6, 1, 3, 0, 3, 2, 2, 2, 0, 1, 1, 3, 0, 3, 3, 6, 3, 4]]
p = sns.heatmap(data, vmin=0, vmax=20)  # the original referenced self.cyclone_heatmap, which is undefined in this standalone script
p.set_xlabel('Time intervals (40 cycles)')
p.set_ylabel('Set index')
fig = p.get_figure()
fig.set_size_inches(3, 3)
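# The original script never writes the figure out; the call below is an
# assumed addition (the output filename is a placeholder) so the plot is saved.
fig.savefig('cyclone_heatmap.png', bbox_inches='tight')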
| AutoCAT-main | src/rlmeta/plot_heatmap.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import copy
import logging
import os
import time
import hydra
from omegaconf import DictConfig, OmegaConf
import torch
import torch.multiprocessing as mp
import rlmeta.utils.hydra_utils as hydra_utils
import rlmeta.utils.random_utils as random_utils
import rlmeta.utils.remote_utils as remote_utils
from rlmeta.agents.agent import AgentFactory
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.controller import Phase, Controller
from rlmeta.core.loop import LoopList, ParallelLoop
from rlmeta.core.model import ModelVersion, RemotableModelPool
from rlmeta.core.model import make_remote_model, wrap_downstream_model
from rlmeta.core.replay_buffer import ReplayBuffer, make_remote_replay_buffer
from rlmeta.core.server import Server, ServerList
from rlmeta.core.callbacks import EpisodeCallbacks
from rlmeta.core.types import Action, TimeStep
from rlmeta.samplers import UniformSampler
from rlmeta.storage import TensorCircularBuffer
from rlmeta.utils.optimizer_utils import make_optimizer
import model_utils
from cache_env_wrapper import CacheEnvCycloneWrapperFactory
from metric_callbacks import CycloneMetricCallbacks
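# Distributed PPO training against the Cyclone detector; the server/loop
# structure mirrors train_ppo_cchunter.py, with the Cyclone environment
# wrapper and metric callbacks swapped in.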
@hydra.main(config_path="./config", config_name="ppo_cyclone")
def main(cfg):
if cfg.seed is not None:
random_utils.manual_seed(cfg.seed)
    print(f"working_dir = {os.getcwd()}")
my_callbacks = CycloneMetricCallbacks()
logging.info(hydra_utils.config_to_json(cfg))
env_fac = CacheEnvCycloneWrapperFactory(
OmegaConf.to_container(cfg.env_config))
env = env_fac(index=0)
train_model = model_utils.get_model(
cfg.model_config, cfg.env_config.window_size,
env.action_space.n).to(cfg.train_device)
infer_model = copy.deepcopy(train_model).to(cfg.infer_device)
infer_model.eval()
optimizer = make_optimizer(train_model.parameters(), **cfg.optimizer)
ctrl = Controller()
rb = ReplayBuffer(TensorCircularBuffer(cfg.replay_buffer_size),
UniformSampler())
m_server = Server(cfg.m_server_name, cfg.m_server_addr)
r_server = Server(cfg.r_server_name, cfg.r_server_addr)
c_server = Server(cfg.c_server_name, cfg.c_server_addr)
m_server.add_service(RemotableModelPool(infer_model, seed=cfg.seed))
r_server.add_service(rb)
c_server.add_service(ctrl)
servers = ServerList([m_server, r_server, c_server])
a_model = wrap_downstream_model(train_model, m_server)
t_model = make_remote_model(infer_model, m_server)
e_model = make_remote_model(infer_model, m_server)
a_ctrl = remote_utils.make_remote(ctrl, c_server)
t_ctrl = remote_utils.make_remote(ctrl, c_server)
e_ctrl = remote_utils.make_remote(ctrl, c_server)
a_rb = make_remote_replay_buffer(rb, r_server, prefetch=cfg.prefetch)
t_rb = make_remote_replay_buffer(rb, r_server)
agent = PPOAgent(a_model,
replay_buffer=a_rb,
controller=a_ctrl,
optimizer=optimizer,
batch_size=cfg.batch_size,
learning_starts=cfg.get("learning_starts", None),
entropy_coeff=cfg.get("entropy_coeff", 0.01),
model_push_period=cfg.model_push_period)
t_agent_fac = AgentFactory(PPOAgent, t_model, replay_buffer=t_rb)
e_agent_fac = AgentFactory(PPOAgent, e_model, deterministic_policy=True)
t_loop = ParallelLoop(env_fac,
t_agent_fac,
t_ctrl,
running_phase=Phase.TRAIN,
should_update=True,
num_rollouts=cfg.num_train_rollouts,
num_workers=cfg.num_train_workers,
seed=cfg.seed,
episode_callbacks=my_callbacks)
e_loop = ParallelLoop(env_fac,
e_agent_fac,
e_ctrl,
running_phase=Phase.EVAL,
should_update=False,
num_rollouts=cfg.num_eval_rollouts,
num_workers=cfg.num_eval_workers,
seed=(None if cfg.seed is None else cfg.seed +
cfg.num_train_rollouts),
episode_callbacks=my_callbacks)
loops = LoopList([t_loop, e_loop])
servers.start()
loops.start()
agent.connect()
start_time = time.perf_counter()
for epoch in range(cfg.num_epochs):
stats = agent.train(cfg.steps_per_epoch)
cur_time = time.perf_counter() - start_time
info = f"T Epoch {epoch}"
if cfg.table_view:
logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
else:
logging.info(
stats.json(info, phase="Train", epoch=epoch, time=cur_time))
time.sleep(1)
stats = agent.eval(cfg.num_eval_episodes)
cur_time = time.perf_counter() - start_time
info = f"E Epoch {epoch}"
if cfg.table_view:
logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
else:
logging.info(
stats.json(info, phase="Eval", epoch=epoch, time=cur_time))
torch.save(train_model.state_dict(), f"ppo_agent-{epoch}.pth")
time.sleep(1)
loops.terminate()
servers.terminate()
if __name__ == "__main__":
mp.set_start_method("spawn")
main()
| AutoCAT-main | src/rlmeta/train_ppo_cyclone.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import argparse
import json
import re
from tabulate import tabulate
from typing import Any, Dict, Optional, Union
from rlmeta.utils.stats_dict import StatsItem, StatsDict
JSON_REGEX = re.compile("{.+}")
def parse_json(line: str) -> Optional[Dict[str, Any]]:
m = JSON_REGEX.search(line)
return None if m is None else json.loads(m.group())
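# Render one logged stats dictionary as a table; aggregated entries carry
# mean/std/min/max/count, plain scalars are shown with zero std and a count of 1.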
def show_table(stats: Dict[str, Any], info: Optional[str] = None) -> str:
if info is None:
head = ["key", "mean", "std", "min", "max", "count"]
else:
head = ["info", "key", "mean", "std", "min", "max", "count"]
data = []
for k, v in stats.items():
if isinstance(v, dict):
row = [k, v["mean"], v["std"], v["min"], v["max"], v["count"]]
else:
row = [k, v, 0.0, v, v, 1]
if info is not None:
row = [info] + row
data.append(row)
return tabulate(data,
head,
numalign="right",
stralign="right",
floatfmt=".8f")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--log_file", type=str, help="log file to plot")
flags = parser.parse_intermixed_args()
with open(flags.log_file, "r") as f:
line = f.readline()
exp_cfg = parse_json(line)
print(f"Experiment Configs = {exp_cfg}")
        for line in f:
            stats = parse_json(line)
            if stats is None:
                continue
            info = stats.pop("info", None)
            stats.pop("phase", None)
            stats.pop("epoch", None)
            print("\n" + show_table(stats, info) + "\n")
if __name__ == "__main__":
main()
| AutoCAT-main | src/rlmeta/data/show_log.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
#!/usr/bin/env python
import matplotlib.pyplot as plt
fontaxes = {
'family': 'Arial',
'color': 'black',
'weight': 'bold',
'size': 11,
}
fontaxes_title = {
'family': 'Arial',
'color': 'black',
# 'weight': 'bold',
'size': 10,
}
lsmarkersize = 2.5
lslinewidth = 0.6
Error_all = [[[[ 0 for i in range(5)] for j in range(5)] for k in range(2)] for l in range(4)]
Error_stat_all = [[[[ 0 for i in range(5)] for j in range(3)] for k in range(2)] for l in range(4)]
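# Error_all is indexed [machine][channel][bandwidth][trial];
# Error_stat_all is indexed [machine][channel][avg/max/min][bandwidth].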
# machine order
# 0 fukushima
# 1 cornell
# 2 potato
# 3 cat
# channel order
# 0 LRU
# 1 SS
path_all = [
["../covert_channel_LRU_1thread_8way/test", #measurement_fukushima",
"../covert_channel_stream_1thread_2bits_8way/test"], #measurement_8way_fukushima"],
## ["../covert_channel_LRU_1thread_8way/measurement_core",
## "../covert_channel_stream_1thread_2bits_8way/measurement_8way_core"],
## ["../covert_channel_LRU_1thread/measurement_202206",
## "../covert_channel_stream_1thread_2bits/measurement_202206"],
## ["../covert_channel_LRU_1thread_ubuntu/measurement_Xeon", #TO DO
## "../covert_channel_stream_1thread_2bits_ubuntu/measurement"]
]
bit_rate_ch = [[6.20606,7.6704]] #,[7.314, 8.904],[6.8267,11.378],[4.26666,7.31428]]
bit_rate_all = [[[0 for j in range(5)] for k in range(2)] for l in range(4)]
for machine_idx in range(1):
for channel_idx in range(2):
# read from file
path = path_all[machine_idx][channel_idx]
for test_idx in range(5):
for bandwidth_idx in range (1,6):
filename = "{}/Error_rate_{}_{}.txt".format(path,bandwidth_idx,test_idx)
                # Read only the last line of the file, which holds the error rate.
                with open(filename, "r") as f:
                    for line in f:
                        pass
                    last_line = line
                error = float(last_line)
Error_all[machine_idx][channel_idx][bandwidth_idx-1][test_idx] = error
        # process each bandwidth
for i in range(5):
max_tmp = 0
min_tmp = 1
avg_tmp = 0
for j in range (5):
print(Error_all[machine_idx][channel_idx][i][j], end=" ")
if Error_all[machine_idx][channel_idx][i][j] > max_tmp:
max_tmp = Error_all[machine_idx][channel_idx][i][j]
if Error_all[machine_idx][channel_idx][i][j] < min_tmp:
min_tmp = Error_all[machine_idx][channel_idx][i][j]
avg_tmp = avg_tmp + Error_all[machine_idx][channel_idx][i][j]
avg_tmp = avg_tmp / 5
print(avg_tmp,min_tmp,max_tmp)
Error_stat_all[machine_idx][channel_idx][0][i] = avg_tmp
Error_stat_all[machine_idx][channel_idx][1][i] = max_tmp
Error_stat_all[machine_idx][channel_idx][2][i] = min_tmp
bit_rate_all[machine_idx][channel_idx][i] = bit_rate_ch[machine_idx][channel_idx]/(i+1)
for machine_idx in range(1):
for channel_idx in range(2):
print(bit_rate_all[machine_idx][channel_idx])
#Error_rate_stram=[[0.2177733333, 0.04370133333, 0.01709, 0.007975, 0.005696666667], [0.227539, 0.046631,0.022217,0.009277,0.006592],[0.210693, 0.041016,0.013916,0.007324,0.00415]]
#Error_rate_LRU=[[0.1423338333,0.02587883333,0.003662,0.004801666667,0.0013835],[0.583496, 0.054199, 0.006836, 0.008789, 0.005371],[0.01416, 0.009766, 0.001465, 0.001465,0]]
for machine_idx in range(1):
for channel_idx in range(2):
for i in range(3):
for j in range(5):
Error_stat_all[machine_idx][channel_idx][i][j] = Error_stat_all[machine_idx][channel_idx][i][j]*100
#bit_rate_stream=[113.78, 56.89, 37.92666667, 28.445, 22.756]
#bit_rate_LRU=[68.267,34.1335,22.75566667,17.06675,13.6534]
plt.figure(num=None, figsize=(3.5, 2.5), dpi=300, facecolor='w')
fig,axs = plt.subplots(1, 1)
plt.subplots_adjust(right = 0.98, top =0.88, bottom=0.1,left=0.1,wspace=0.3, hspace=0.5)
#fig,axs = plt.subplots(2, 2)
labels=["LRU addr_based","Stealthy Streamline"]
titles=["Xeon E5-2687W v2"] #,"Core i7-6700", "Core i5-11600K", "Xeon W-1350P"]
colors = ['b.-', 'go-']
colors_error_bar = ['b-', 'g-']
for machine_idx in range(1):
ax=axs#[int(machine_idx/2), machine_idx%2]
for channel_idx in range(2):
ax.plot(Error_stat_all[machine_idx][channel_idx][0], bit_rate_all[machine_idx][channel_idx],colors[channel_idx], linewidth=1, markersize=lsmarkersize, markeredgewidth=0, label=labels[channel_idx])
        # error bars: draw the min-max range across the 5 runs, with small vertical end caps
bar_len_y=0.2
for i in range(5):
ax.plot([Error_stat_all[machine_idx][channel_idx][2][i],Error_stat_all[machine_idx][channel_idx][1][i]],[bit_rate_all[machine_idx][channel_idx][i], bit_rate_all[machine_idx][channel_idx][i]], colors_error_bar[channel_idx], linewidth=0.5)
ax.plot([Error_stat_all[machine_idx][channel_idx][2][i],Error_stat_all[machine_idx][channel_idx][2][i]],[bit_rate_all[machine_idx][channel_idx][i]-bar_len_y, bit_rate_all[machine_idx][channel_idx][i]+bar_len_y], colors_error_bar[channel_idx], linewidth=0.5)
ax.plot([Error_stat_all[machine_idx][channel_idx][1][i],Error_stat_all[machine_idx][channel_idx][1][i]],[bit_rate_all[machine_idx][channel_idx][i]-bar_len_y, bit_rate_all[machine_idx][channel_idx][i]+bar_len_y], colors_error_bar[channel_idx], linewidth=0.5)
ax.set_title(titles[machine_idx],fontdict = fontaxes_title) #plt.title('Hor. symmetric')
ax.set_xlim([0,25])
ax.set_ylim([0,12])
ax.set_xlabel("Error rate (%)",fontdict = fontaxes)
ax.set_ylabel('Bit Rate (Mbps)',fontdict = fontaxes)
#plt.tick_params(labelsize=6)
#plt.tight_layout()
if machine_idx ==0:
ax.legend(ncol=2, bbox_to_anchor=(2,1.4),prop={'size': 12})
#plt.show()
#plt.savefig('stealthy_streamline_error.pdf')
plt.savefig('stealthy_streamline_error.png')
| AutoCAT-main | src/stealthy_streamline/plot/plot_error_rate.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
#!/usr/bin/env python
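# (Added note) Reads the last line (the final error-rate value) of each
# Error_rate_{bandwidth_idx}_{test_idx}.txt in the current directory and
# prints a 5x5 table: rows are bandwidth settings 1-5, columns are test runs.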
Error_all = [[ 0 for i in range(5)] for j in range(5)]
for test_idx in range(5):
for bandwidth_idx in range (1,6):
filename = "Error_rate_{}_{}.txt".format(bandwidth_idx,test_idx)
        with open(filename, "r") as f:
            last_line = ""
            for line in f:
                last_line = line
        error = float(last_line)
Error_all[bandwidth_idx-1][test_idx] = error
for i in range(5):
for j in range (5):
print(Error_all[i][j], end=" ")
print()
| AutoCAT-main | src/stealthy_streamline/process_error_rate_1thread/collect_stat.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
import argparse
import json
import os
import random
import signal
import sys
import time
import urllib
from torch import nn, optim
from torchvision import models, datasets, transforms
import torch
import torchvision
parser = argparse.ArgumentParser(description='Evaluate resnet50 features on ImageNet')
parser.add_argument('data', type=Path, metavar='DIR',
help='path to dataset')
parser.add_argument('pretrained', type=Path, metavar='FILE',
help='path to pretrained model')
parser.add_argument('--weights', default='freeze', type=str,
choices=('finetune', 'freeze'),
help='finetune or freeze resnet weights')
parser.add_argument('--train-percent', default=100, type=int,
choices=(100, 10, 1),
                    help='size of training set in percent')
parser.add_argument('--workers', default=8, type=int, metavar='N',
help='number of data loader workers')
parser.add_argument('--epochs', default=100, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--batch-size', default=256, type=int, metavar='N',
help='mini-batch size')
parser.add_argument('--lr-backbone', default=0.0, type=float, metavar='LR',
help='backbone base learning rate')
parser.add_argument('--lr-classifier', default=0.3, type=float, metavar='LR',
help='classifier base learning rate')
parser.add_argument('--weight-decay', default=1e-6, type=float, metavar='W',
help='weight decay')
parser.add_argument('--print-freq', default=100, type=int, metavar='N',
help='print frequency')
parser.add_argument('--checkpoint-dir', default='./checkpoint/lincls/', type=Path,
metavar='DIR', help='path to checkpoint directory')
def main():
args = parser.parse_args()
if args.train_percent in {1, 10}:
args.train_files = urllib.request.urlopen(f'https://raw.githubusercontent.com/google-research/simclr/master/imagenet_subsets/{args.train_percent}percent.txt').readlines()
args.ngpus_per_node = torch.cuda.device_count()
if 'SLURM_JOB_ID' in os.environ:
signal.signal(signal.SIGUSR1, handle_sigusr1)
signal.signal(signal.SIGTERM, handle_sigterm)
# single-node distributed training
args.rank = 0
args.dist_url = f'tcp://localhost:{random.randrange(49152, 65535)}'
args.world_size = args.ngpus_per_node
torch.multiprocessing.spawn(main_worker, (args,), args.ngpus_per_node)
def main_worker(gpu, args):
args.rank += gpu
torch.distributed.init_process_group(
backend='nccl', init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
if args.rank == 0:
args.checkpoint_dir.mkdir(parents=True, exist_ok=True)
stats_file = open(args.checkpoint_dir / 'stats.txt', 'a', buffering=1)
print(' '.join(sys.argv))
print(' '.join(sys.argv), file=stats_file)
torch.cuda.set_device(gpu)
torch.backends.cudnn.benchmark = True
model = models.resnet50().cuda(gpu)
state_dict = torch.load(args.pretrained, map_location='cpu')
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
assert missing_keys == ['fc.weight', 'fc.bias'] and unexpected_keys == []
model.fc.weight.data.normal_(mean=0.0, std=0.01)
model.fc.bias.data.zero_()
if args.weights == 'freeze':
model.requires_grad_(False)
model.fc.requires_grad_(True)
classifier_parameters, model_parameters = [], []
for name, param in model.named_parameters():
if name in {'fc.weight', 'fc.bias'}:
classifier_parameters.append(param)
else:
model_parameters.append(param)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[gpu])
criterion = nn.CrossEntropyLoss().cuda(gpu)
param_groups = [dict(params=classifier_parameters, lr=args.lr_classifier)]
if args.weights == 'finetune':
param_groups.append(dict(params=model_parameters, lr=args.lr_backbone))
optimizer = optim.SGD(param_groups, 0, momentum=0.9, weight_decay=args.weight_decay)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs)
# automatically resume from checkpoint if it exists
if (args.checkpoint_dir / 'checkpoint.pth').is_file():
ckpt = torch.load(args.checkpoint_dir / 'checkpoint.pth',
map_location='cpu')
start_epoch = ckpt['epoch']
best_acc = ckpt['best_acc']
model.load_state_dict(ckpt['model'])
optimizer.load_state_dict(ckpt['optimizer'])
scheduler.load_state_dict(ckpt['scheduler'])
else:
start_epoch = 0
best_acc = argparse.Namespace(top1=0, top5=0)
# Data loading code
traindir = args.data / 'train'
valdir = args.data / 'val'
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(traindir, transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
val_dataset = datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]))
if args.train_percent in {1, 10}:
train_dataset.samples = []
for fname in args.train_files:
fname = fname.decode().strip()
cls = fname.split('_')[0]
train_dataset.samples.append(
(traindir / cls / fname, train_dataset.class_to_idx[cls]))
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
kwargs = dict(batch_size=args.batch_size // args.world_size, num_workers=args.workers, pin_memory=True)
train_loader = torch.utils.data.DataLoader(train_dataset, sampler=train_sampler, **kwargs)
val_loader = torch.utils.data.DataLoader(val_dataset, **kwargs)
start_time = time.time()
for epoch in range(start_epoch, args.epochs):
# train
if args.weights == 'finetune':
model.train()
elif args.weights == 'freeze':
model.eval()
else:
assert False
train_sampler.set_epoch(epoch)
for step, (images, target) in enumerate(train_loader, start=epoch * len(train_loader)):
output = model(images.cuda(gpu, non_blocking=True))
loss = criterion(output, target.cuda(gpu, non_blocking=True))
optimizer.zero_grad()
loss.backward()
optimizer.step()
if step % args.print_freq == 0:
torch.distributed.reduce(loss.div_(args.world_size), 0)
if args.rank == 0:
pg = optimizer.param_groups
lr_classifier = pg[0]['lr']
lr_backbone = pg[1]['lr'] if len(pg) == 2 else 0
stats = dict(epoch=epoch, step=step, lr_backbone=lr_backbone,
lr_classifier=lr_classifier, loss=loss.item(),
time=int(time.time() - start_time))
print(json.dumps(stats))
print(json.dumps(stats), file=stats_file)
# evaluate
model.eval()
if args.rank == 0:
top1 = AverageMeter('Acc@1')
top5 = AverageMeter('Acc@5')
with torch.no_grad():
for images, target in val_loader:
output = model(images.cuda(gpu, non_blocking=True))
acc1, acc5 = accuracy(output, target.cuda(gpu, non_blocking=True), topk=(1, 5))
top1.update(acc1[0].item(), images.size(0))
top5.update(acc5[0].item(), images.size(0))
best_acc.top1 = max(best_acc.top1, top1.avg)
best_acc.top5 = max(best_acc.top5, top5.avg)
stats = dict(epoch=epoch, acc1=top1.avg, acc5=top5.avg, best_acc1=best_acc.top1, best_acc5=best_acc.top5)
print(json.dumps(stats))
print(json.dumps(stats), file=stats_file)
# sanity check
if args.weights == 'freeze':
reference_state_dict = torch.load(args.pretrained, map_location='cpu')
model_state_dict = model.module.state_dict()
for k in reference_state_dict:
assert torch.equal(model_state_dict[k].cpu(), reference_state_dict[k]), k
scheduler.step()
if args.rank == 0:
state = dict(
epoch=epoch + 1, best_acc=best_acc, model=model.state_dict(),
optimizer=optimizer.state_dict(), scheduler=scheduler.state_dict())
torch.save(state, args.checkpoint_dir / 'checkpoint.pth')
def handle_sigusr1(signum, frame):
os.system(f'scontrol requeue {os.getenv("SLURM_JOB_ID")}')
exit()
def handle_sigterm(signum, frame):
pass
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
| barlowtwins-main | evaluate.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torchvision.models.resnet import resnet50 as _resnet50
dependencies = ['torch', 'torchvision']
def resnet50(pretrained=True, **kwargs):
model = _resnet50(pretrained=False, **kwargs)
if pretrained:
url = 'https://dl.fbaipublicfiles.com/barlowtwins/ep1000_bs2048_lrw0.2_lrb0.0048_lambd0.0051/resnet50.pth'
state_dict = torch.hub.load_state_dict_from_url(url, map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model
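# (Added usage sketch; the hub entrypoint string below is an assumption based
# on the repository name and is not verified here):
#   model = torch.hub.load('facebookresearch/barlowtwins:main', 'resnet50')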
| barlowtwins-main | hubconf.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
import argparse
import json
import math
import os
import random
import signal
import subprocess
import sys
import time
from PIL import Image, ImageOps, ImageFilter
from torch import nn, optim
import torch
import torchvision
import torchvision.transforms as transforms
parser = argparse.ArgumentParser(description='Barlow Twins Training')
parser.add_argument('data', type=Path, metavar='DIR',
help='path to dataset')
parser.add_argument('--workers', default=8, type=int, metavar='N',
help='number of data loader workers')
parser.add_argument('--epochs', default=1000, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--batch-size', default=2048, type=int, metavar='N',
help='mini-batch size')
parser.add_argument('--learning-rate-weights', default=0.2, type=float, metavar='LR',
help='base learning rate for weights')
parser.add_argument('--learning-rate-biases', default=0.0048, type=float, metavar='LR',
help='base learning rate for biases and batch norm parameters')
parser.add_argument('--weight-decay', default=1e-6, type=float, metavar='W',
help='weight decay')
parser.add_argument('--lambd', default=0.0051, type=float, metavar='L',
help='weight on off-diagonal terms')
parser.add_argument('--projector', default='8192-8192-8192', type=str,
metavar='MLP', help='projector MLP')
parser.add_argument('--print-freq', default=100, type=int, metavar='N',
help='print frequency')
parser.add_argument('--checkpoint-dir', default='./checkpoint/', type=Path,
metavar='DIR', help='path to checkpoint directory')
def main():
args = parser.parse_args()
args.ngpus_per_node = torch.cuda.device_count()
if 'SLURM_JOB_ID' in os.environ:
# single-node and multi-node distributed training on SLURM cluster
# requeue job on SLURM preemption
signal.signal(signal.SIGUSR1, handle_sigusr1)
signal.signal(signal.SIGTERM, handle_sigterm)
# find a common host name on all nodes
# assume scontrol returns hosts in the same order on all nodes
cmd = 'scontrol show hostnames ' + os.getenv('SLURM_JOB_NODELIST')
stdout = subprocess.check_output(cmd.split())
host_name = stdout.decode().splitlines()[0]
args.rank = int(os.getenv('SLURM_NODEID')) * args.ngpus_per_node
args.world_size = int(os.getenv('SLURM_NNODES')) * args.ngpus_per_node
args.dist_url = f'tcp://{host_name}:58472'
else:
# single-node distributed training
args.rank = 0
args.dist_url = 'tcp://localhost:58472'
args.world_size = args.ngpus_per_node
torch.multiprocessing.spawn(main_worker, (args,), args.ngpus_per_node)
def main_worker(gpu, args):
args.rank += gpu
torch.distributed.init_process_group(
backend='nccl', init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
if args.rank == 0:
args.checkpoint_dir.mkdir(parents=True, exist_ok=True)
stats_file = open(args.checkpoint_dir / 'stats.txt', 'a', buffering=1)
print(' '.join(sys.argv))
print(' '.join(sys.argv), file=stats_file)
torch.cuda.set_device(gpu)
torch.backends.cudnn.benchmark = True
model = BarlowTwins(args).cuda(gpu)
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
param_weights = []
param_biases = []
for param in model.parameters():
if param.ndim == 1:
param_biases.append(param)
else:
param_weights.append(param)
parameters = [{'params': param_weights}, {'params': param_biases}]
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[gpu])
optimizer = LARS(parameters, lr=0, weight_decay=args.weight_decay,
weight_decay_filter=True,
lars_adaptation_filter=True)
# automatically resume from checkpoint if it exists
if (args.checkpoint_dir / 'checkpoint.pth').is_file():
ckpt = torch.load(args.checkpoint_dir / 'checkpoint.pth',
map_location='cpu')
start_epoch = ckpt['epoch']
model.load_state_dict(ckpt['model'])
optimizer.load_state_dict(ckpt['optimizer'])
else:
start_epoch = 0
dataset = torchvision.datasets.ImageFolder(args.data / 'train', Transform())
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
assert args.batch_size % args.world_size == 0
per_device_batch_size = args.batch_size // args.world_size
loader = torch.utils.data.DataLoader(
dataset, batch_size=per_device_batch_size, num_workers=args.workers,
pin_memory=True, sampler=sampler)
start_time = time.time()
scaler = torch.cuda.amp.GradScaler()
for epoch in range(start_epoch, args.epochs):
sampler.set_epoch(epoch)
for step, ((y1, y2), _) in enumerate(loader, start=epoch * len(loader)):
y1 = y1.cuda(gpu, non_blocking=True)
y2 = y2.cuda(gpu, non_blocking=True)
adjust_learning_rate(args, optimizer, loader, step)
optimizer.zero_grad()
with torch.cuda.amp.autocast():
loss = model.forward(y1, y2)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
if step % args.print_freq == 0:
if args.rank == 0:
stats = dict(epoch=epoch, step=step,
lr_weights=optimizer.param_groups[0]['lr'],
lr_biases=optimizer.param_groups[1]['lr'],
loss=loss.item(),
time=int(time.time() - start_time))
print(json.dumps(stats))
print(json.dumps(stats), file=stats_file)
if args.rank == 0:
# save checkpoint
state = dict(epoch=epoch + 1, model=model.state_dict(),
optimizer=optimizer.state_dict())
torch.save(state, args.checkpoint_dir / 'checkpoint.pth')
if args.rank == 0:
# save final model
torch.save(model.module.backbone.state_dict(),
args.checkpoint_dir / 'resnet50.pth')
def adjust_learning_rate(args, optimizer, loader, step):
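    # (Added note) Schedule: the base LR is scaled linearly with batch size
    # (batch_size / 256), warmed up linearly over the first 10 epochs, then
    # decayed with a cosine schedule down to 0.1% of the scaled base LR;
    # separate multipliers for weights and biases are applied at the end.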
max_steps = args.epochs * len(loader)
warmup_steps = 10 * len(loader)
base_lr = args.batch_size / 256
if step < warmup_steps:
lr = base_lr * step / warmup_steps
else:
step -= warmup_steps
max_steps -= warmup_steps
q = 0.5 * (1 + math.cos(math.pi * step / max_steps))
end_lr = base_lr * 0.001
lr = base_lr * q + end_lr * (1 - q)
optimizer.param_groups[0]['lr'] = lr * args.learning_rate_weights
optimizer.param_groups[1]['lr'] = lr * args.learning_rate_biases
def handle_sigusr1(signum, frame):
os.system(f'scontrol requeue {os.getenv("SLURM_JOB_ID")}')
exit()
def handle_sigterm(signum, frame):
pass
def off_diagonal(x):
# return a flattened view of the off-diagonal elements of a square matrix
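    # (Added note) Why this works: after flatten()[:-1], row k of
    # view(n - 1, n + 1) starts at flat index k * (n + 1), which is exactly the
    # diagonal element x[k, k]; dropping column 0 with [:, 1:] therefore
    # removes every diagonal entry and keeps the n * (n - 1) off-diagonal ones.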
n, m = x.shape
assert n == m
return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
class BarlowTwins(nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
self.backbone = torchvision.models.resnet50(zero_init_residual=True)
self.backbone.fc = nn.Identity()
# projector
sizes = [2048] + list(map(int, args.projector.split('-')))
layers = []
for i in range(len(sizes) - 2):
layers.append(nn.Linear(sizes[i], sizes[i + 1], bias=False))
layers.append(nn.BatchNorm1d(sizes[i + 1]))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Linear(sizes[-2], sizes[-1], bias=False))
self.projector = nn.Sequential(*layers)
# normalization layer for the representations z1 and z2
self.bn = nn.BatchNorm1d(sizes[-1], affine=False)
def forward(self, y1, y2):
z1 = self.projector(self.backbone(y1))
z2 = self.projector(self.backbone(y2))
# empirical cross-correlation matrix
c = self.bn(z1).T @ self.bn(z2)
# sum the cross-correlation matrix between all gpus
c.div_(self.args.batch_size)
torch.distributed.all_reduce(c)
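        # (Added note) on_diag below is the invariance term (diagonal of the
        # cross-correlation matrix pulled toward 1); off_diag is the
        # redundancy-reduction term (off-diagonal entries pulled toward 0),
        # weighted by lambd.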
on_diag = torch.diagonal(c).add_(-1).pow_(2).sum()
off_diag = off_diagonal(c).pow_(2).sum()
loss = on_diag + self.args.lambd * off_diag
return loss
class LARS(optim.Optimizer):
def __init__(self, params, lr, weight_decay=0, momentum=0.9, eta=0.001,
weight_decay_filter=False, lars_adaptation_filter=False):
defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum,
eta=eta, weight_decay_filter=weight_decay_filter,
lars_adaptation_filter=lars_adaptation_filter)
super().__init__(params, defaults)
def exclude_bias_and_norm(self, p):
return p.ndim == 1
@torch.no_grad()
def step(self):
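        # (Added note) LARS: the raw update dp (gradient plus weight decay) is
        # rescaled by a per-parameter trust ratio q = eta * ||p|| / ||dp||,
        # except for biases/norm parameters when the corresponding filters are
        # enabled; heavy-ball momentum is then applied before the update.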
for g in self.param_groups:
for p in g['params']:
dp = p.grad
if dp is None:
continue
if not g['weight_decay_filter'] or not self.exclude_bias_and_norm(p):
dp = dp.add(p, alpha=g['weight_decay'])
if not g['lars_adaptation_filter'] or not self.exclude_bias_and_norm(p):
param_norm = torch.norm(p)
update_norm = torch.norm(dp)
one = torch.ones_like(param_norm)
q = torch.where(param_norm > 0.,
torch.where(update_norm > 0,
(g['eta'] * param_norm / update_norm), one), one)
dp = dp.mul(q)
param_state = self.state[p]
if 'mu' not in param_state:
param_state['mu'] = torch.zeros_like(p)
mu = param_state['mu']
mu.mul_(g['momentum']).add_(dp)
p.add_(mu, alpha=-g['lr'])
class GaussianBlur(object):
def __init__(self, p):
self.p = p
def __call__(self, img):
if random.random() < self.p:
sigma = random.random() * 1.9 + 0.1
return img.filter(ImageFilter.GaussianBlur(sigma))
else:
return img
class Solarization(object):
def __init__(self, p):
self.p = p
def __call__(self, img):
if random.random() < self.p:
return ImageOps.solarize(img)
else:
return img
class Transform:
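    # (Added note) Two asymmetric augmentation pipelines: they are identical
    # except for the probabilities of GaussianBlur (1.0 vs 0.1) and
    # Solarization (0.0 vs 0.2); __call__ returns one view from each pipeline.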
def __init__(self):
self.transform = transforms.Compose([
transforms.RandomResizedCrop(224, interpolation=Image.BICUBIC),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomApply(
[transforms.ColorJitter(brightness=0.4, contrast=0.4,
saturation=0.2, hue=0.1)],
p=0.8
),
transforms.RandomGrayscale(p=0.2),
GaussianBlur(p=1.0),
Solarization(p=0.0),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
self.transform_prime = transforms.Compose([
transforms.RandomResizedCrop(224, interpolation=Image.BICUBIC),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomApply(
[transforms.ColorJitter(brightness=0.4, contrast=0.4,
saturation=0.2, hue=0.1)],
p=0.8
),
transforms.RandomGrayscale(p=0.2),
GaussianBlur(p=0.1),
Solarization(p=0.2),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
def __call__(self, x):
y1 = self.transform(x)
y2 = self.transform_prime(x)
return y1, y2
if __name__ == '__main__':
main()
| barlowtwins-main | main.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import datetime
import logging
import math
import time
import sys
from torch.distributed.distributed_c10d import reduce
from utils.ap_calculator import APCalculator
from utils.misc import SmoothedValue
from utils.dist import (
all_gather_dict,
all_reduce_average,
is_primary,
reduce_dict,
barrier,
)
def compute_learning_rate(args, curr_epoch_normalized):
assert curr_epoch_normalized <= 1.0 and curr_epoch_normalized >= 0.0
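    # (Added note) curr_epoch_normalized is training progress in [0, 1] (the
    # caller passes curr_iter / max_iters): linear warmup from warm_lr toward
    # base_lr over the first warm_lr_epochs, then cosine decay from base_lr
    # down to final_lr for the rest of training.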
if (
curr_epoch_normalized <= (args.warm_lr_epochs / args.max_epoch)
and args.warm_lr_epochs > 0
):
# Linear Warmup
curr_lr = args.warm_lr + curr_epoch_normalized * args.max_epoch * (
(args.base_lr - args.warm_lr) / args.warm_lr_epochs
)
else:
# Cosine Learning Rate Schedule
curr_lr = args.final_lr + 0.5 * (args.base_lr - args.final_lr) * (
1 + math.cos(math.pi * curr_epoch_normalized)
)
return curr_lr
def adjust_learning_rate(args, optimizer, curr_epoch):
curr_lr = compute_learning_rate(args, curr_epoch)
for param_group in optimizer.param_groups:
param_group["lr"] = curr_lr
return curr_lr
def train_one_epoch(
args,
curr_epoch,
model,
optimizer,
criterion,
dataset_config,
dataset_loader,
logger,
):
ap_calculator = APCalculator(
dataset_config=dataset_config,
ap_iou_thresh=[0.25, 0.5],
class2type_map=dataset_config.class2type,
exact_eval=False,
)
curr_iter = curr_epoch * len(dataset_loader)
max_iters = args.max_epoch * len(dataset_loader)
net_device = next(model.parameters()).device
time_delta = SmoothedValue(window_size=10)
loss_avg = SmoothedValue(window_size=10)
model.train()
barrier()
for batch_idx, batch_data_label in enumerate(dataset_loader):
curr_time = time.time()
curr_lr = adjust_learning_rate(args, optimizer, curr_iter / max_iters)
for key in batch_data_label:
batch_data_label[key] = batch_data_label[key].to(net_device)
# Forward pass
optimizer.zero_grad()
inputs = {
"point_clouds": batch_data_label["point_clouds"],
"point_cloud_dims_min": batch_data_label["point_cloud_dims_min"],
"point_cloud_dims_max": batch_data_label["point_cloud_dims_max"],
}
outputs = model(inputs)
# Compute loss
loss, loss_dict = criterion(outputs, batch_data_label)
loss_reduced = all_reduce_average(loss)
loss_dict_reduced = reduce_dict(loss_dict)
if not math.isfinite(loss_reduced.item()):
logging.info(f"Loss in not finite. Training will be stopped.")
sys.exit(1)
loss.backward()
if args.clip_gradient > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_gradient)
optimizer.step()
if curr_iter % args.log_metrics_every == 0:
# This step is slow. AP is computed approximately and locally during training.
# It will gather outputs and ground truth across all ranks.
# It is memory intensive as point_cloud ground truth is a large tensor.
# If GPU memory is not an issue, uncomment the following lines.
# outputs["outputs"] = all_gather_dict(outputs["outputs"])
# batch_data_label = all_gather_dict(batch_data_label)
ap_calculator.step_meter(outputs, batch_data_label)
time_delta.update(time.time() - curr_time)
loss_avg.update(loss_reduced.item())
# logging
if is_primary() and curr_iter % args.log_every == 0:
mem_mb = torch.cuda.max_memory_allocated() / (1024 ** 2)
eta_seconds = (max_iters - curr_iter) * time_delta.avg
eta_str = str(datetime.timedelta(seconds=int(eta_seconds)))
print(
f"Epoch [{curr_epoch}/{args.max_epoch}]; Iter [{curr_iter}/{max_iters}]; Loss {loss_avg.avg:0.2f}; LR {curr_lr:0.2e}; Iter time {time_delta.avg:0.2f}; ETA {eta_str}; Mem {mem_mb:0.2f}MB"
)
logger.log_scalars(loss_dict_reduced, curr_iter, prefix="Train_details/")
train_dict = {}
train_dict["lr"] = curr_lr
train_dict["memory"] = mem_mb
train_dict["loss"] = loss_avg.avg
train_dict["batch_time"] = time_delta.avg
logger.log_scalars(train_dict, curr_iter, prefix="Train/")
curr_iter += 1
barrier()
return ap_calculator
@torch.no_grad()
def evaluate(
args,
curr_epoch,
model,
criterion,
dataset_config,
dataset_loader,
logger,
curr_train_iter,
):
# ap calculator is exact for evaluation. This is slower than the ap calculator used during training.
ap_calculator = APCalculator(
dataset_config=dataset_config,
ap_iou_thresh=[0.25, 0.5],
class2type_map=dataset_config.class2type,
exact_eval=True,
)
curr_iter = 0
net_device = next(model.parameters()).device
num_batches = len(dataset_loader)
time_delta = SmoothedValue(window_size=10)
loss_avg = SmoothedValue(window_size=10)
model.eval()
barrier()
epoch_str = f"[{curr_epoch}/{args.max_epoch}]" if curr_epoch > 0 else ""
for batch_idx, batch_data_label in enumerate(dataset_loader):
curr_time = time.time()
for key in batch_data_label:
batch_data_label[key] = batch_data_label[key].to(net_device)
inputs = {
"point_clouds": batch_data_label["point_clouds"],
"point_cloud_dims_min": batch_data_label["point_cloud_dims_min"],
"point_cloud_dims_max": batch_data_label["point_cloud_dims_max"],
}
outputs = model(inputs)
# Compute loss
loss_str = ""
if criterion is not None:
loss, loss_dict = criterion(outputs, batch_data_label)
loss_reduced = all_reduce_average(loss)
loss_dict_reduced = reduce_dict(loss_dict)
loss_avg.update(loss_reduced.item())
loss_str = f"Loss {loss_avg.avg:0.2f};"
# Memory intensive as it gathers point cloud GT tensor across all ranks
outputs["outputs"] = all_gather_dict(outputs["outputs"])
batch_data_label = all_gather_dict(batch_data_label)
ap_calculator.step_meter(outputs, batch_data_label)
time_delta.update(time.time() - curr_time)
if is_primary() and curr_iter % args.log_every == 0:
mem_mb = torch.cuda.max_memory_allocated() / (1024 ** 2)
print(
f"Evaluate {epoch_str}; Batch [{curr_iter}/{num_batches}]; {loss_str} Iter time {time_delta.avg:0.2f}; Mem {mem_mb:0.2f}MB"
)
test_dict = {}
test_dict["memory"] = mem_mb
test_dict["batch_time"] = time_delta.avg
if criterion is not None:
test_dict["loss"] = loss_avg.avg
curr_iter += 1
barrier()
if is_primary():
if criterion is not None:
logger.log_scalars(
loss_dict_reduced, curr_train_iter, prefix="Test_details/"
)
logger.log_scalars(test_dict, curr_train_iter, prefix="Test/")
return ap_calculator
| 3detr-main | engine.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
def build_optimizer(args, model):
params_with_decay = []
params_without_decay = []
for name, param in model.named_parameters():
if param.requires_grad is False:
continue
if args.filter_biases_wd and (len(param.shape) == 1 or name.endswith("bias")):
params_without_decay.append(param)
else:
params_with_decay.append(param)
if args.filter_biases_wd:
param_groups = [
{"params": params_without_decay, "weight_decay": 0.0},
{"params": params_with_decay, "weight_decay": args.weight_decay},
]
else:
param_groups = [
{"params": params_with_decay, "weight_decay": args.weight_decay},
]
optimizer = torch.optim.AdamW(param_groups, lr=args.base_lr)
return optimizer
| 3detr-main | optimizer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from utils.box_util import generalized_box3d_iou
from utils.dist import all_reduce_average
from utils.misc import huber_loss
from scipy.optimize import linear_sum_assignment
class Matcher(nn.Module):
def __init__(self, cost_class, cost_objectness, cost_giou, cost_center):
"""
Parameters:
cost_class:
Returns:
"""
super().__init__()
self.cost_class = cost_class
self.cost_objectness = cost_objectness
self.cost_giou = cost_giou
self.cost_center = cost_center
@torch.no_grad()
def forward(self, outputs, targets):
batchsize = outputs["sem_cls_prob"].shape[0]
nqueries = outputs["sem_cls_prob"].shape[1]
ngt = targets["gt_box_sem_cls_label"].shape[1]
nactual_gt = targets["nactual_gt"]
# classification cost: batch x nqueries x ngt matrix
pred_cls_prob = outputs["sem_cls_prob"]
gt_box_sem_cls_labels = (
targets["gt_box_sem_cls_label"]
.unsqueeze(1)
.expand(batchsize, nqueries, ngt)
)
class_mat = -torch.gather(pred_cls_prob, 2, gt_box_sem_cls_labels)
# objectness cost: batch x nqueries x 1
objectness_mat = -outputs["objectness_prob"].unsqueeze(-1)
# center cost: batch x nqueries x ngt
center_mat = outputs["center_dist"].detach()
# giou cost: batch x nqueries x ngt
giou_mat = -outputs["gious"].detach()
final_cost = (
self.cost_class * class_mat
+ self.cost_objectness * objectness_mat
+ self.cost_center * center_mat
+ self.cost_giou * giou_mat
)
final_cost = final_cost.detach().cpu().numpy()
assignments = []
# auxiliary variables useful for batched loss computation
batch_size, nprop = final_cost.shape[0], final_cost.shape[1]
per_prop_gt_inds = torch.zeros(
[batch_size, nprop], dtype=torch.int64, device=pred_cls_prob.device
)
proposal_matched_mask = torch.zeros(
[batch_size, nprop], dtype=torch.float32, device=pred_cls_prob.device
)
for b in range(batchsize):
assign = []
if nactual_gt[b] > 0:
assign = linear_sum_assignment(final_cost[b, :, : nactual_gt[b]])
assign = [
torch.from_numpy(x).long().to(device=pred_cls_prob.device)
for x in assign
]
per_prop_gt_inds[b, assign[0]] = assign[1]
proposal_matched_mask[b, assign[0]] = 1
assignments.append(assign)
return {
"assignments": assignments,
"per_prop_gt_inds": per_prop_gt_inds,
"proposal_matched_mask": proposal_matched_mask,
}
class SetCriterion(nn.Module):
def __init__(self, matcher, dataset_config, loss_weight_dict):
super().__init__()
self.dataset_config = dataset_config
self.matcher = matcher
self.loss_weight_dict = loss_weight_dict
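        # (Added note) Class index num_semcls is the extra "no object" /
        # background class; its cross-entropy weight is set to
        # loss_no_object_weight below so unmatched proposals do not dominate
        # the classification loss.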
semcls_percls_weights = torch.ones(dataset_config.num_semcls + 1)
semcls_percls_weights[-1] = loss_weight_dict["loss_no_object_weight"]
del loss_weight_dict["loss_no_object_weight"]
self.register_buffer("semcls_percls_weights", semcls_percls_weights)
self.loss_functions = {
"loss_sem_cls": self.loss_sem_cls,
"loss_angle": self.loss_angle,
"loss_center": self.loss_center,
"loss_size": self.loss_size,
"loss_giou": self.loss_giou,
# this isn't used during training and is logged for debugging.
# thus, this loss does not have a loss_weight associated with it.
"loss_cardinality": self.loss_cardinality,
}
@torch.no_grad()
def loss_cardinality(self, outputs, targets, assignments):
# Count the number of predictions that are objects
# Cardinality is the error between predicted #objects and ground truth objects
pred_logits = outputs["sem_cls_logits"]
# Count the number of predictions that are NOT "no-object" (which is the last class)
pred_objects = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)
card_err = F.l1_loss(pred_objects.float(), targets["nactual_gt"])
return {"loss_cardinality": card_err}
def loss_sem_cls(self, outputs, targets, assignments):
# # Not vectorized version
# pred_logits = outputs["sem_cls_logits"]
# assign = assignments["assignments"]
# sem_cls_targets = torch.ones((pred_logits.shape[0], pred_logits.shape[1]),
# dtype=torch.int64, device=pred_logits.device)
# # initialize to background/no-object class
# sem_cls_targets *= (pred_logits.shape[-1] - 1)
# # use assignments to compute labels for matched boxes
# for b in range(pred_logits.shape[0]):
# if len(assign[b]) > 0:
# sem_cls_targets[b, assign[b][0]] = targets["gt_box_sem_cls_label"][b, assign[b][1]]
# sem_cls_targets = sem_cls_targets.view(-1)
# pred_logits = pred_logits.reshape(sem_cls_targets.shape[0], -1)
# loss = F.cross_entropy(pred_logits, sem_cls_targets, self.semcls_percls_weights, reduction="mean")
pred_logits = outputs["sem_cls_logits"]
gt_box_label = torch.gather(
targets["gt_box_sem_cls_label"], 1, assignments["per_prop_gt_inds"]
)
gt_box_label[assignments["proposal_matched_mask"].int() == 0] = (
pred_logits.shape[-1] - 1
)
loss = F.cross_entropy(
pred_logits.transpose(2, 1),
gt_box_label,
self.semcls_percls_weights,
reduction="mean",
)
return {"loss_sem_cls": loss}
def loss_angle(self, outputs, targets, assignments):
angle_logits = outputs["angle_logits"]
angle_residual = outputs["angle_residual_normalized"]
if targets["num_boxes_replica"] > 0:
gt_angle_label = targets["gt_angle_class_label"]
gt_angle_residual = targets["gt_angle_residual_label"]
gt_angle_residual_normalized = gt_angle_residual / (
np.pi / self.dataset_config.num_angle_bin
)
# # Non vectorized version
# assignments = assignments["assignments"]
# p_angle_logits = []
# p_angle_resid = []
# t_angle_labels = []
# t_angle_resid = []
# for b in range(angle_logits.shape[0]):
# if len(assignments[b]) > 0:
# p_angle_logits.append(angle_logits[b, assignments[b][0]])
# p_angle_resid.append(angle_residual[b, assignments[b][0], gt_angle_label[b][assignments[b][1]]])
# t_angle_labels.append(gt_angle_label[b, assignments[b][1]])
# t_angle_resid.append(gt_angle_residual_normalized[b, assignments[b][1]])
# p_angle_logits = torch.cat(p_angle_logits)
# p_angle_resid = torch.cat(p_angle_resid)
# t_angle_labels = torch.cat(t_angle_labels)
# t_angle_resid = torch.cat(t_angle_resid)
# angle_cls_loss = F.cross_entropy(p_angle_logits, t_angle_labels, reduction="sum")
# angle_reg_loss = huber_loss(p_angle_resid.flatten() - t_angle_resid.flatten()).sum()
gt_angle_label = torch.gather(
gt_angle_label, 1, assignments["per_prop_gt_inds"]
)
angle_cls_loss = F.cross_entropy(
angle_logits.transpose(2, 1), gt_angle_label, reduction="none"
)
angle_cls_loss = (
angle_cls_loss * assignments["proposal_matched_mask"]
).sum()
gt_angle_residual_normalized = torch.gather(
gt_angle_residual_normalized, 1, assignments["per_prop_gt_inds"]
)
gt_angle_label_one_hot = torch.zeros_like(
angle_residual, dtype=torch.float32
)
gt_angle_label_one_hot.scatter_(2, gt_angle_label.unsqueeze(-1), 1)
angle_residual_for_gt_class = torch.sum(
angle_residual * gt_angle_label_one_hot, -1
)
angle_reg_loss = huber_loss(
angle_residual_for_gt_class - gt_angle_residual_normalized, delta=1.0
)
angle_reg_loss = (
angle_reg_loss * assignments["proposal_matched_mask"]
).sum()
angle_cls_loss /= targets["num_boxes"]
angle_reg_loss /= targets["num_boxes"]
else:
angle_cls_loss = torch.zeros(1, device=angle_logits.device).squeeze()
angle_reg_loss = torch.zeros(1, device=angle_logits.device).squeeze()
return {"loss_angle_cls": angle_cls_loss, "loss_angle_reg": angle_reg_loss}
def loss_center(self, outputs, targets, assignments):
center_dist = outputs["center_dist"]
if targets["num_boxes_replica"] > 0:
# # Non vectorized version
# assign = assignments["assignments"]
# center_loss = torch.zeros(1, device=center_dist.device).squeeze()
# for b in range(center_dist.shape[0]):
# if len(assign[b]) > 0:
# center_loss += center_dist[b, assign[b][0], assign[b][1]].sum()
# select appropriate distances by using proposal to gt matching
center_loss = torch.gather(
center_dist, 2, assignments["per_prop_gt_inds"].unsqueeze(-1)
).squeeze(-1)
# zero-out non-matched proposals
center_loss = center_loss * assignments["proposal_matched_mask"]
center_loss = center_loss.sum()
if targets["num_boxes"] > 0:
center_loss /= targets["num_boxes"]
else:
center_loss = torch.zeros(1, device=center_dist.device).squeeze()
return {"loss_center": center_loss}
def loss_giou(self, outputs, targets, assignments):
gious_dist = 1 - outputs["gious"]
# # Non vectorized version
# giou_loss = torch.zeros(1, device=gious_dist.device).squeeze()
# assign = assignments["assignments"]
# for b in range(gious_dist.shape[0]):
# if len(assign[b]) > 0:
# giou_loss += gious_dist[b, assign[b][0], assign[b][1]].sum()
# select appropriate gious by using proposal to gt matching
giou_loss = torch.gather(
gious_dist, 2, assignments["per_prop_gt_inds"].unsqueeze(-1)
).squeeze(-1)
# zero-out non-matched proposals
giou_loss = giou_loss * assignments["proposal_matched_mask"]
giou_loss = giou_loss.sum()
if targets["num_boxes"] > 0:
giou_loss /= targets["num_boxes"]
return {"loss_giou": giou_loss}
def loss_size(self, outputs, targets, assignments):
gt_box_sizes = targets["gt_box_sizes_normalized"]
pred_box_sizes = outputs["size_normalized"]
if targets["num_boxes_replica"] > 0:
# # Non vectorized version
# p_sizes = []
# t_sizes = []
# assign = assignments["assignments"]
# for b in range(pred_box_sizes.shape[0]):
# if len(assign[b]) > 0:
# p_sizes.append(pred_box_sizes[b, assign[b][0]])
# t_sizes.append(gt_box_sizes[b, assign[b][1]])
# p_sizes = torch.cat(p_sizes)
# t_sizes = torch.cat(t_sizes)
# size_loss = F.l1_loss(p_sizes, t_sizes, reduction="sum")
# construct gt_box_sizes as [batch x nprop x 3] matrix by using proposal to gt matching
gt_box_sizes = torch.stack(
[
torch.gather(
gt_box_sizes[:, :, x], 1, assignments["per_prop_gt_inds"]
)
for x in range(gt_box_sizes.shape[-1])
],
dim=-1,
)
size_loss = F.l1_loss(pred_box_sizes, gt_box_sizes, reduction="none").sum(
dim=-1
)
# zero-out non-matched proposals
size_loss *= assignments["proposal_matched_mask"]
size_loss = size_loss.sum()
size_loss /= targets["num_boxes"]
else:
size_loss = torch.zeros(1, device=pred_box_sizes.device).squeeze()
return {"loss_size": size_loss}
def single_output_forward(self, outputs, targets):
gious = generalized_box3d_iou(
outputs["box_corners"],
targets["gt_box_corners"],
targets["nactual_gt"],
rotated_boxes=torch.any(targets["gt_box_angles"] > 0).item(),
needs_grad=(self.loss_weight_dict["loss_giou_weight"] > 0),
)
outputs["gious"] = gious
center_dist = torch.cdist(
outputs["center_normalized"], targets["gt_box_centers_normalized"], p=1
)
outputs["center_dist"] = center_dist
assignments = self.matcher(outputs, targets)
losses = {}
for k in self.loss_functions:
loss_wt_key = k + "_weight"
if (
loss_wt_key in self.loss_weight_dict
and self.loss_weight_dict[loss_wt_key] > 0
) or loss_wt_key not in self.loss_weight_dict:
# only compute losses with loss_wt > 0
# certain losses like cardinality are only logged and have no loss weight
curr_loss = self.loss_functions[k](outputs, targets, assignments)
losses.update(curr_loss)
final_loss = 0
for k in self.loss_weight_dict:
if self.loss_weight_dict[k] > 0:
losses[k.replace("_weight", "")] *= self.loss_weight_dict[k]
final_loss += losses[k.replace("_weight", "")]
return final_loss, losses
def forward(self, outputs, targets):
nactual_gt = targets["gt_box_present"].sum(axis=1).long()
num_boxes = torch.clamp(all_reduce_average(nactual_gt.sum()), min=1).item()
targets["nactual_gt"] = nactual_gt
targets["num_boxes"] = num_boxes
targets[
"num_boxes_replica"
] = nactual_gt.sum().item() # number of boxes on this worker for dist training
loss, loss_dict = self.single_output_forward(outputs["outputs"], targets)
if "aux_outputs" in outputs:
for k in range(len(outputs["aux_outputs"])):
interm_loss, interm_loss_dict = self.single_output_forward(
outputs["aux_outputs"][k], targets
)
loss += interm_loss
for interm_key in interm_loss_dict:
loss_dict[f"{interm_key}_{k}"] = interm_loss_dict[interm_key]
return loss, loss_dict
def build_criterion(args, dataset_config):
matcher = Matcher(
cost_class=args.matcher_cls_cost,
cost_giou=args.matcher_giou_cost,
cost_center=args.matcher_center_cost,
cost_objectness=args.matcher_objectness_cost,
)
loss_weight_dict = {
"loss_giou_weight": args.loss_giou_weight,
"loss_sem_cls_weight": args.loss_sem_cls_weight,
"loss_no_object_weight": args.loss_no_object_weight,
"loss_angle_cls_weight": args.loss_angle_cls_weight,
"loss_angle_reg_weight": args.loss_angle_reg_weight,
"loss_center_weight": args.loss_center_weight,
"loss_size_weight": args.loss_size_weight,
}
criterion = SetCriterion(matcher, dataset_config, loss_weight_dict)
return criterion
| 3detr-main | criterion.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import os
import sys
import pickle
import numpy as np
import torch
from torch.multiprocessing import set_start_method
from torch.utils.data import DataLoader, DistributedSampler
# 3DETR codebase specific imports
from datasets import build_dataset
from engine import evaluate, train_one_epoch
from models import build_model
from optimizer import build_optimizer
from criterion import build_criterion
from utils.dist import init_distributed, is_distributed, is_primary, get_rank, barrier
from utils.misc import my_worker_init_fn
from utils.io import save_checkpoint, resume_if_possible
from utils.logger import Logger
def make_args_parser():
parser = argparse.ArgumentParser("3D Detection Using Transformers", add_help=False)
##### Optimizer #####
parser.add_argument("--base_lr", default=5e-4, type=float)
parser.add_argument("--warm_lr", default=1e-6, type=float)
parser.add_argument("--warm_lr_epochs", default=9, type=int)
parser.add_argument("--final_lr", default=1e-6, type=float)
parser.add_argument("--lr_scheduler", default="cosine", type=str)
parser.add_argument("--weight_decay", default=0.1, type=float)
parser.add_argument("--filter_biases_wd", default=False, action="store_true")
parser.add_argument(
"--clip_gradient", default=0.1, type=float, help="Max L2 norm of the gradient"
)
##### Model #####
parser.add_argument(
"--model_name",
default="3detr",
type=str,
help="Name of the model",
choices=["3detr"],
)
### Encoder
parser.add_argument(
"--enc_type", default="vanilla", choices=["masked", "maskedv2", "vanilla"]
)
# Below options are only valid for vanilla encoder
parser.add_argument("--enc_nlayers", default=3, type=int)
parser.add_argument("--enc_dim", default=256, type=int)
parser.add_argument("--enc_ffn_dim", default=128, type=int)
parser.add_argument("--enc_dropout", default=0.1, type=float)
parser.add_argument("--enc_nhead", default=4, type=int)
parser.add_argument("--enc_pos_embed", default=None, type=str)
parser.add_argument("--enc_activation", default="relu", type=str)
### Decoder
parser.add_argument("--dec_nlayers", default=8, type=int)
parser.add_argument("--dec_dim", default=256, type=int)
parser.add_argument("--dec_ffn_dim", default=256, type=int)
parser.add_argument("--dec_dropout", default=0.1, type=float)
parser.add_argument("--dec_nhead", default=4, type=int)
### MLP heads for predicting bounding boxes
parser.add_argument("--mlp_dropout", default=0.3, type=float)
parser.add_argument(
"--nsemcls",
default=-1,
type=int,
help="Number of semantic object classes. Can be inferred from dataset",
)
### Other model params
parser.add_argument("--preenc_npoints", default=2048, type=int)
parser.add_argument(
"--pos_embed", default="fourier", type=str, choices=["fourier", "sine"]
)
parser.add_argument("--nqueries", default=256, type=int)
parser.add_argument("--use_color", default=False, action="store_true")
##### Set Loss #####
### Matcher
parser.add_argument("--matcher_giou_cost", default=2, type=float)
parser.add_argument("--matcher_cls_cost", default=1, type=float)
parser.add_argument("--matcher_center_cost", default=0, type=float)
parser.add_argument("--matcher_objectness_cost", default=0, type=float)
### Loss Weights
parser.add_argument("--loss_giou_weight", default=0, type=float)
parser.add_argument("--loss_sem_cls_weight", default=1, type=float)
parser.add_argument(
"--loss_no_object_weight", default=0.2, type=float
) # "no object" or "background" class for detection
parser.add_argument("--loss_angle_cls_weight", default=0.1, type=float)
parser.add_argument("--loss_angle_reg_weight", default=0.5, type=float)
parser.add_argument("--loss_center_weight", default=5.0, type=float)
parser.add_argument("--loss_size_weight", default=1.0, type=float)
##### Dataset #####
parser.add_argument(
"--dataset_name", required=True, type=str, choices=["scannet", "sunrgbd"]
)
parser.add_argument(
"--dataset_root_dir",
type=str,
default=None,
help="Root directory containing the dataset files. \
If None, default values from scannet.py/sunrgbd.py are used",
)
parser.add_argument(
"--meta_data_dir",
type=str,
default=None,
help="Root directory containing the metadata files. \
If None, default values from scannet.py/sunrgbd.py are used",
)
parser.add_argument("--dataset_num_workers", default=4, type=int)
parser.add_argument("--batchsize_per_gpu", default=8, type=int)
##### Training #####
parser.add_argument("--start_epoch", default=-1, type=int)
parser.add_argument("--max_epoch", default=720, type=int)
parser.add_argument("--eval_every_epoch", default=10, type=int)
parser.add_argument("--seed", default=0, type=int)
##### Testing #####
parser.add_argument("--test_only", default=False, action="store_true")
parser.add_argument("--test_ckpt", default=None, type=str)
##### I/O #####
parser.add_argument("--checkpoint_dir", default=None, type=str)
parser.add_argument("--log_every", default=10, type=int)
parser.add_argument("--log_metrics_every", default=20, type=int)
parser.add_argument("--save_separate_checkpoint_every_epoch", default=100, type=int)
##### Distributed Training #####
parser.add_argument("--ngpus", default=1, type=int)
parser.add_argument("--dist_url", default="tcp://localhost:12345", type=str)
return parser
def do_train(
args,
model,
model_no_ddp,
optimizer,
criterion,
dataset_config,
dataloaders,
best_val_metrics,
):
"""
Main training loop.
This trains the model for `args.max_epoch` epochs and tests the model after every `args.eval_every_epoch`.
We always evaluate the final checkpoint and report both the final AP and best AP on the val set.
"""
num_iters_per_epoch = len(dataloaders["train"])
num_iters_per_eval_epoch = len(dataloaders["test"])
print(f"Model is {model}")
print(f"Training started at epoch {args.start_epoch} until {args.max_epoch}.")
print(f"One training epoch = {num_iters_per_epoch} iters.")
print(f"One eval epoch = {num_iters_per_eval_epoch} iters.")
final_eval = os.path.join(args.checkpoint_dir, "final_eval.txt")
final_eval_pkl = os.path.join(args.checkpoint_dir, "final_eval.pkl")
if os.path.isfile(final_eval):
print(f"Found final eval file {final_eval}. Skipping training.")
return
logger = Logger(args.checkpoint_dir)
for epoch in range(args.start_epoch, args.max_epoch):
if is_distributed():
dataloaders["train_sampler"].set_epoch(epoch)
aps = train_one_epoch(
args,
epoch,
model,
optimizer,
criterion,
dataset_config,
dataloaders["train"],
logger,
)
# latest checkpoint is always stored in checkpoint.pth
save_checkpoint(
args.checkpoint_dir,
model_no_ddp,
optimizer,
epoch,
args,
best_val_metrics,
filename="checkpoint.pth",
)
metrics = aps.compute_metrics()
metric_str = aps.metrics_to_str(metrics, per_class=False)
metrics_dict = aps.metrics_to_dict(metrics)
curr_iter = epoch * len(dataloaders["train"])
if is_primary():
print("==" * 10)
print(f"Epoch [{epoch}/{args.max_epoch}]; Metrics {metric_str}")
print("==" * 10)
logger.log_scalars(metrics_dict, curr_iter, prefix="Train/")
if (
epoch > 0
and args.save_separate_checkpoint_every_epoch > 0
and epoch % args.save_separate_checkpoint_every_epoch == 0
):
# separate checkpoints are stored as checkpoint_{epoch}.pth
save_checkpoint(
args.checkpoint_dir,
model_no_ddp,
optimizer,
epoch,
args,
best_val_metrics,
)
if epoch % args.eval_every_epoch == 0 or epoch == (args.max_epoch - 1):
ap_calculator = evaluate(
args,
epoch,
model,
criterion,
dataset_config,
dataloaders["test"],
logger,
curr_iter,
)
metrics = ap_calculator.compute_metrics()
ap25 = metrics[0.25]["mAP"]
metric_str = ap_calculator.metrics_to_str(metrics, per_class=True)
metrics_dict = ap_calculator.metrics_to_dict(metrics)
if is_primary():
print("==" * 10)
print(f"Evaluate Epoch [{epoch}/{args.max_epoch}]; Metrics {metric_str}")
print("==" * 10)
logger.log_scalars(metrics_dict, curr_iter, prefix="Test/")
if is_primary() and (
len(best_val_metrics) == 0 or best_val_metrics[0.25]["mAP"] < ap25
):
best_val_metrics = metrics
filename = "checkpoint_best.pth"
save_checkpoint(
args.checkpoint_dir,
model_no_ddp,
optimizer,
epoch,
args,
best_val_metrics,
filename=filename,
)
print(
f"Epoch [{epoch}/{args.max_epoch}] saved current best val checkpoint at {filename}; ap25 {ap25}"
)
# always evaluate last checkpoint
epoch = args.max_epoch - 1
curr_iter = epoch * len(dataloaders["train"])
ap_calculator = evaluate(
args,
epoch,
model,
criterion,
dataset_config,
dataloaders["test"],
logger,
curr_iter,
)
metrics = ap_calculator.compute_metrics()
metric_str = ap_calculator.metrics_to_str(metrics)
if is_primary():
print("==" * 10)
print(f"Evaluate Final [{epoch}/{args.max_epoch}]; Metrics {metric_str}")
print("==" * 10)
with open(final_eval, "w") as fh:
fh.write("Training Finished.\n")
fh.write("==" * 10)
fh.write("Final Eval Numbers.\n")
fh.write(metric_str)
fh.write("\n")
fh.write("==" * 10)
fh.write("Best Eval Numbers.\n")
fh.write(ap_calculator.metrics_to_str(best_val_metrics))
fh.write("\n")
with open(final_eval_pkl, "wb") as fh:
pickle.dump(metrics, fh)
def test_model(args, model, model_no_ddp, criterion, dataset_config, dataloaders):
if args.test_ckpt is None or not os.path.isfile(args.test_ckpt):
f"Please specify a test checkpoint using --test_ckpt. Found invalid value {args.test_ckpt}"
sys.exit(1)
sd = torch.load(args.test_ckpt, map_location=torch.device("cpu"))
model_no_ddp.load_state_dict(sd["model"])
logger = Logger()
criterion = None # do not compute loss for speed-up; Comment out to see test loss
epoch = -1
curr_iter = 0
ap_calculator = evaluate(
args,
epoch,
model,
criterion,
dataset_config,
dataloaders["test"],
logger,
curr_iter,
)
metrics = ap_calculator.compute_metrics()
metric_str = ap_calculator.metrics_to_str(metrics)
if is_primary():
print("==" * 10)
print(f"Test model; Metrics {metric_str}")
print("==" * 10)
def main(local_rank, args):
if args.ngpus > 1:
print(
"Initializing Distributed Training. This is in BETA mode and hasn't been tested thoroughly. Use at your own risk :)"
)
print("To get the maximum speed-up consider reducing evaluations on val set by setting --eval_every_epoch to greater than 50")
init_distributed(
local_rank,
global_rank=local_rank,
world_size=args.ngpus,
dist_url=args.dist_url,
dist_backend="nccl",
)
print(f"Called with args: {args}")
torch.cuda.set_device(local_rank)
np.random.seed(args.seed + get_rank())
torch.manual_seed(args.seed + get_rank())
if torch.cuda.is_available():
torch.cuda.manual_seed_all(args.seed + get_rank())
datasets, dataset_config = build_dataset(args)
model, _ = build_model(args, dataset_config)
model = model.cuda(local_rank)
model_no_ddp = model
if is_distributed():
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[local_rank]
)
criterion = build_criterion(args, dataset_config)
criterion = criterion.cuda(local_rank)
dataloaders = {}
if args.test_only:
dataset_splits = ["test"]
else:
dataset_splits = ["train", "test"]
for split in dataset_splits:
if split == "train":
shuffle = True
else:
shuffle = False
if is_distributed():
sampler = DistributedSampler(datasets[split], shuffle=shuffle)
elif shuffle:
sampler = torch.utils.data.RandomSampler(datasets[split])
else:
sampler = torch.utils.data.SequentialSampler(datasets[split])
dataloaders[split] = DataLoader(
datasets[split],
sampler=sampler,
batch_size=args.batchsize_per_gpu,
num_workers=args.dataset_num_workers,
worker_init_fn=my_worker_init_fn,
)
dataloaders[split + "_sampler"] = sampler
if args.test_only:
criterion = None # faster evaluation
test_model(args, model, model_no_ddp, criterion, dataset_config, dataloaders)
else:
assert (
args.checkpoint_dir is not None
), f"Please specify a checkpoint dir using --checkpoint_dir"
if is_primary() and not os.path.isdir(args.checkpoint_dir):
os.makedirs(args.checkpoint_dir, exist_ok=True)
optimizer = build_optimizer(args, model_no_ddp)
loaded_epoch, best_val_metrics = resume_if_possible(
args.checkpoint_dir, model_no_ddp, optimizer
)
args.start_epoch = loaded_epoch + 1
do_train(
args,
model,
model_no_ddp,
optimizer,
criterion,
dataset_config,
dataloaders,
best_val_metrics,
)
def launch_distributed(args):
world_size = args.ngpus
if world_size == 1:
main(local_rank=0, args=args)
else:
torch.multiprocessing.spawn(main, nprocs=world_size, args=(args,))
if __name__ == "__main__":
parser = make_args_parser()
args = parser.parse_args()
try:
set_start_method("spawn")
except RuntimeError:
pass
launch_distributed(args)
| 3detr-main | main.py |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Modified from https://github.com/facebookresearch/votenet
Dataset for 3D object detection on SUN RGB-D (with support of vote supervision).
A sunrgbd oriented bounding box is parameterized by (cx,cy,cz), (l,w,h) -- (dx,dy,dz) in upright depth coord
(Z is up, Y is forward, X is rightward), heading angle (from +X rotating to -Y) and semantic class
Point clouds are in **upright_depth coordinate (X right, Y forward, Z upward)**
Return heading class, heading residual, size class and size residual for 3D bounding boxes.
Oriented bounding box is parameterized by (cx,cy,cz), (l,w,h), heading_angle and semantic class label.
(cx,cy,cz) is in upright depth coordinate
(l,h,w) are *half length* of the object sizes
The heading angle is a rotation rad from +X rotating towards -Y. (+X is 0, -Y is pi/2)
Author: Charles R. Qi
Date: 2019
"""
import os
import sys
import numpy as np
from torch.utils.data import Dataset
import scipy.io as sio # to load .mat files for depth points
import utils.pc_util as pc_util
from utils.random_cuboid import RandomCuboid
from utils.pc_util import shift_scale_points, scale_points
from utils.box_util import (
flip_axis_to_camera_tensor,
get_3d_box_batch_tensor,
flip_axis_to_camera_np,
get_3d_box_batch_np,
)
MEAN_COLOR_RGB = np.array([0.5, 0.5, 0.5]) # sunrgbd color is in 0~1
DATA_PATH_V1 = "" ## Replace with path to dataset
DATA_PATH_V2 = "" ## Not used in the codebase.
class SunrgbdDatasetConfig(object):
def __init__(self):
self.num_semcls = 10
self.num_angle_bin = 12
self.max_num_obj = 64
self.type2class = {
"bed": 0,
"table": 1,
"sofa": 2,
"chair": 3,
"toilet": 4,
"desk": 5,
"dresser": 6,
"night_stand": 7,
"bookshelf": 8,
"bathtub": 9,
}
self.class2type = {self.type2class[t]: t for t in self.type2class}
self.type2onehotclass = {
"bed": 0,
"table": 1,
"sofa": 2,
"chair": 3,
"toilet": 4,
"desk": 5,
"dresser": 6,
"night_stand": 7,
"bookshelf": 8,
"bathtub": 9,
}
def angle2class(self, angle):
"""Convert continuous angle to discrete class
        [optional] also returns a small regression offset from the
        class center angle to the current angle.
angle is from 0-2pi (or -pi~pi), class center at 0, 1*(2pi/N), 2*(2pi/N) ... (N-1)*(2pi/N)
returns class [0,1,...,N-1] and a residual number such that
class*(2pi/N) + number = angle
"""
num_class = self.num_angle_bin
angle = angle % (2 * np.pi)
assert angle >= 0 and angle <= 2 * np.pi
angle_per_class = 2 * np.pi / float(num_class)
shifted_angle = (angle + angle_per_class / 2) % (2 * np.pi)
class_id = int(shifted_angle / angle_per_class)
residual_angle = shifted_angle - (
class_id * angle_per_class + angle_per_class / 2
)
return class_id, residual_angle
def class2angle(self, pred_cls, residual, to_label_format=True):
"""Inverse function to angle2class"""
num_class = self.num_angle_bin
angle_per_class = 2 * np.pi / float(num_class)
angle_center = pred_cls * angle_per_class
angle = angle_center + residual
if to_label_format and angle > np.pi:
angle = angle - 2 * np.pi
return angle
def class2angle_batch(self, pred_cls, residual, to_label_format=True):
num_class = self.num_angle_bin
angle_per_class = 2 * np.pi / float(num_class)
angle_center = pred_cls * angle_per_class
angle = angle_center + residual
if to_label_format:
mask = angle > np.pi
angle[mask] = angle[mask] - 2 * np.pi
return angle
def class2anglebatch_tensor(self, pred_cls, residual, to_label_format=True):
return self.class2angle_batch(pred_cls, residual, to_label_format)
def box_parametrization_to_corners(self, box_center_unnorm, box_size, box_angle):
box_center_upright = flip_axis_to_camera_tensor(box_center_unnorm)
boxes = get_3d_box_batch_tensor(box_size, box_angle, box_center_upright)
return boxes
def box_parametrization_to_corners_np(self, box_center_unnorm, box_size, box_angle):
box_center_upright = flip_axis_to_camera_np(box_center_unnorm)
boxes = get_3d_box_batch_np(box_size, box_angle, box_center_upright)
return boxes
def my_compute_box_3d(self, center, size, heading_angle):
R = pc_util.rotz(-1 * heading_angle)
l, w, h = size
x_corners = [-l, l, l, -l, -l, l, l, -l]
y_corners = [w, w, -w, -w, w, w, -w, -w]
z_corners = [h, h, h, h, -h, -h, -h, -h]
corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))
corners_3d[0, :] += center[0]
corners_3d[1, :] += center[1]
corners_3d[2, :] += center[2]
return np.transpose(corners_3d)
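# --- Hedged example (not part of the original file) -------------------------
# The docstrings above describe how a SUN RGB-D heading angle is discretized
# into `num_angle_bin` classes plus a residual. The helper below is a
# hypothetical sketch that round-trips an angle through angle2class() /
# class2angle() and builds the 8 box corners with my_compute_box_3d(); it only
# illustrates the conventions and is not used anywhere in the codebase.
def _example_sunrgbd_angle_roundtrip():
    cfg = SunrgbdDatasetConfig()
    angle = 0.3  # heading angle in radians, expected in [0, 2*pi)
    cls_id, residual = cfg.angle2class(angle)
    recovered = cfg.class2angle(cls_id, residual)
    assert abs(recovered - angle) < 1e-6  # class*(2*pi/N) + residual == angle
    # corners of a box centered at the origin with half-sizes (1, 1, 1)
    corners = cfg.my_compute_box_3d(
        center=np.zeros(3), size=np.ones(3), heading_angle=angle
    )
    return corners  # (8, 3) array of XYZ corners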
class SunrgbdDetectionDataset(Dataset):
def __init__(
self,
dataset_config,
split_set="train",
root_dir=None,
num_points=20000,
use_color=False,
use_height=False,
use_v1=True,
augment=False,
use_random_cuboid=True,
random_cuboid_min_points=30000,
):
assert num_points <= 50000
assert split_set in ["train", "val", "trainval"]
self.dataset_config = dataset_config
self.use_v1 = use_v1
if root_dir is None:
root_dir = DATA_PATH_V1 if use_v1 else DATA_PATH_V2
self.data_path = root_dir + "_%s" % (split_set)
if split_set in ["train", "val"]:
self.scan_names = sorted(
list(
set([os.path.basename(x)[0:6] for x in os.listdir(self.data_path)])
)
)
elif split_set in ["trainval"]:
# combine names from both
sub_splits = ["train", "val"]
all_paths = []
for sub_split in sub_splits:
data_path = self.data_path.replace("trainval", sub_split)
basenames = sorted(
list(set([os.path.basename(x)[0:6] for x in os.listdir(data_path)]))
)
basenames = [os.path.join(data_path, x) for x in basenames]
all_paths.extend(basenames)
all_paths.sort()
self.scan_names = all_paths
self.num_points = num_points
self.augment = augment
self.use_color = use_color
self.use_height = use_height
self.use_random_cuboid = use_random_cuboid
self.random_cuboid_augmentor = RandomCuboid(
min_points=random_cuboid_min_points,
aspect=0.75,
min_crop=0.75,
max_crop=1.0,
)
self.center_normalizing_range = [
np.zeros((1, 3), dtype=np.float32),
np.ones((1, 3), dtype=np.float32),
]
self.max_num_obj = 64
def __len__(self):
return len(self.scan_names)
def __getitem__(self, idx):
scan_name = self.scan_names[idx]
if scan_name.startswith("/"):
scan_path = scan_name
else:
scan_path = os.path.join(self.data_path, scan_name)
point_cloud = np.load(scan_path + "_pc.npz")["pc"] # Nx6
bboxes = np.load(scan_path + "_bbox.npy") # K,8
if not self.use_color:
point_cloud = point_cloud[:, 0:3]
else:
assert point_cloud.shape[1] == 6
point_cloud = point_cloud[:, 0:6]
point_cloud[:, 3:] = point_cloud[:, 3:] - MEAN_COLOR_RGB
if self.use_height:
floor_height = np.percentile(point_cloud[:, 2], 0.99)
height = point_cloud[:, 2] - floor_height
point_cloud = np.concatenate(
[point_cloud, np.expand_dims(height, 1)], 1
) # (N,4) or (N,7)
# ------------------------------- DATA AUGMENTATION ------------------------------
if self.augment:
if np.random.random() > 0.5:
# Flipping along the YZ plane
point_cloud[:, 0] = -1 * point_cloud[:, 0]
bboxes[:, 0] = -1 * bboxes[:, 0]
bboxes[:, 6] = np.pi - bboxes[:, 6]
# Rotation along up-axis/Z-axis
rot_angle = (np.random.random() * np.pi / 3) - np.pi / 6 # -30 ~ +30 degree
rot_mat = pc_util.rotz(rot_angle)
point_cloud[:, 0:3] = np.dot(point_cloud[:, 0:3], np.transpose(rot_mat))
bboxes[:, 0:3] = np.dot(bboxes[:, 0:3], np.transpose(rot_mat))
bboxes[:, 6] -= rot_angle
# Augment RGB color
if self.use_color:
rgb_color = point_cloud[:, 3:6] + MEAN_COLOR_RGB
rgb_color *= (
1 + 0.4 * np.random.random(3) - 0.2
) # brightness change for each channel
rgb_color += (
0.1 * np.random.random(3) - 0.05
) # color shift for each channel
rgb_color += np.expand_dims(
(0.05 * np.random.random(point_cloud.shape[0]) - 0.025), -1
) # jittering on each pixel
rgb_color = np.clip(rgb_color, 0, 1)
# randomly drop out 30% of the points' colors
rgb_color *= np.expand_dims(
np.random.random(point_cloud.shape[0]) > 0.3, -1
)
point_cloud[:, 3:6] = rgb_color - MEAN_COLOR_RGB
# Augment point cloud scale: 0.85x-1.15x
scale_ratio = np.random.random() * 0.3 + 0.85
scale_ratio = np.expand_dims(np.tile(scale_ratio, 3), 0)
point_cloud[:, 0:3] *= scale_ratio
bboxes[:, 0:3] *= scale_ratio
bboxes[:, 3:6] *= scale_ratio
if self.use_height:
point_cloud[:, -1] *= scale_ratio[0, 0]
if self.use_random_cuboid:
point_cloud, bboxes, _ = self.random_cuboid_augmentor(
point_cloud, bboxes
)
# ------------------------------- LABELS ------------------------------
angle_classes = np.zeros((self.max_num_obj,), dtype=np.float32)
angle_residuals = np.zeros((self.max_num_obj,), dtype=np.float32)
raw_angles = np.zeros((self.max_num_obj,), dtype=np.float32)
raw_sizes = np.zeros((self.max_num_obj, 3), dtype=np.float32)
label_mask = np.zeros((self.max_num_obj))
label_mask[0 : bboxes.shape[0]] = 1
max_bboxes = np.zeros((self.max_num_obj, 8))
max_bboxes[0 : bboxes.shape[0], :] = bboxes
target_bboxes_mask = label_mask
target_bboxes = np.zeros((self.max_num_obj, 6))
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
semantic_class = bbox[7]
            raw_angles[i] = bbox[6] % (2 * np.pi)  # wrap heading angle into [0, 2*pi)
box3d_size = bbox[3:6] * 2
raw_sizes[i, :] = box3d_size
angle_class, angle_residual = self.dataset_config.angle2class(bbox[6])
angle_classes[i] = angle_class
angle_residuals[i] = angle_residual
corners_3d = self.dataset_config.my_compute_box_3d(
bbox[0:3], bbox[3:6], bbox[6]
)
# compute axis aligned box
xmin = np.min(corners_3d[:, 0])
ymin = np.min(corners_3d[:, 1])
zmin = np.min(corners_3d[:, 2])
xmax = np.max(corners_3d[:, 0])
ymax = np.max(corners_3d[:, 1])
zmax = np.max(corners_3d[:, 2])
target_bbox = np.array(
[
(xmin + xmax) / 2,
(ymin + ymax) / 2,
(zmin + zmax) / 2,
xmax - xmin,
ymax - ymin,
zmax - zmin,
]
)
target_bboxes[i, :] = target_bbox
point_cloud, choices = pc_util.random_sampling(
point_cloud, self.num_points, return_choices=True
)
point_cloud_dims_min = point_cloud.min(axis=0)
point_cloud_dims_max = point_cloud.max(axis=0)
mult_factor = point_cloud_dims_max - point_cloud_dims_min
box_sizes_normalized = scale_points(
raw_sizes.astype(np.float32)[None, ...],
mult_factor=1.0 / mult_factor[None, ...],
)
box_sizes_normalized = box_sizes_normalized.squeeze(0)
box_centers = target_bboxes.astype(np.float32)[:, 0:3]
box_centers_normalized = shift_scale_points(
box_centers[None, ...],
src_range=[
point_cloud_dims_min[None, ...],
point_cloud_dims_max[None, ...],
],
dst_range=self.center_normalizing_range,
)
box_centers_normalized = box_centers_normalized.squeeze(0)
box_centers_normalized = box_centers_normalized * target_bboxes_mask[..., None]
# re-encode angles to be consistent with VoteNet eval
angle_classes = angle_classes.astype(np.int64)
angle_residuals = angle_residuals.astype(np.float32)
raw_angles = self.dataset_config.class2angle_batch(
angle_classes, angle_residuals
)
box_corners = self.dataset_config.box_parametrization_to_corners_np(
box_centers[None, ...],
raw_sizes.astype(np.float32)[None, ...],
raw_angles.astype(np.float32)[None, ...],
)
box_corners = box_corners.squeeze(0)
ret_dict = {}
ret_dict["point_clouds"] = point_cloud.astype(np.float32)
ret_dict["gt_box_corners"] = box_corners.astype(np.float32)
ret_dict["gt_box_centers"] = box_centers.astype(np.float32)
ret_dict["gt_box_centers_normalized"] = box_centers_normalized.astype(
np.float32
)
target_bboxes_semcls = np.zeros((self.max_num_obj))
target_bboxes_semcls[0 : bboxes.shape[0]] = bboxes[:, -1] # from 0 to 9
ret_dict["gt_box_sem_cls_label"] = target_bboxes_semcls.astype(np.int64)
ret_dict["gt_box_present"] = target_bboxes_mask.astype(np.float32)
ret_dict["scan_idx"] = np.array(idx).astype(np.int64)
ret_dict["gt_box_sizes"] = raw_sizes.astype(np.float32)
ret_dict["gt_box_sizes_normalized"] = box_sizes_normalized.astype(np.float32)
ret_dict["gt_box_angles"] = raw_angles.astype(np.float32)
ret_dict["gt_angle_class_label"] = angle_classes
ret_dict["gt_angle_residual_label"] = angle_residuals
ret_dict["point_cloud_dims_min"] = point_cloud_dims_min
ret_dict["point_cloud_dims_max"] = point_cloud_dims_max
return ret_dict
| 3detr-main | datasets/sunrgbd.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from .scannet import ScannetDetectionDataset, ScannetDatasetConfig
from .sunrgbd import SunrgbdDetectionDataset, SunrgbdDatasetConfig
DATASET_FUNCTIONS = {
"scannet": [ScannetDetectionDataset, ScannetDatasetConfig],
"sunrgbd": [SunrgbdDetectionDataset, SunrgbdDatasetConfig],
}
def build_dataset(args):
dataset_builder = DATASET_FUNCTIONS[args.dataset_name][0]
dataset_config = DATASET_FUNCTIONS[args.dataset_name][1]()
dataset_dict = {
"train": dataset_builder(
dataset_config,
split_set="train",
root_dir=args.dataset_root_dir,
meta_data_dir=args.meta_data_dir,
use_color=args.use_color,
augment=True
),
"test": dataset_builder(
dataset_config,
split_set="val",
root_dir=args.dataset_root_dir,
use_color=args.use_color,
augment=False
),
}
return dataset_dict, dataset_config
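# --- Hedged usage sketch (not part of the original file) --------------------
# build_dataset() only needs an argparse-like namespace exposing the fields it
# reads above. The Namespace and paths below are hypothetical placeholders;
# real values come from the main argument parser and point at the
# preprocessed dataset folders.
def _example_build_dataset():
    from argparse import Namespace
    args = Namespace(
        dataset_name="scannet",  # or "sunrgbd"
        dataset_root_dir="/path/to/scannet_detection_data",  # placeholder
        meta_data_dir="/path/to/scannet_meta_data",  # placeholder
        use_color=False,
    )
    datasets, dataset_config = build_dataset(args)
    return datasets["train"], datasets["test"], dataset_config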
| 3detr-main | datasets/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Modified from https://github.com/facebookresearch/votenet
Dataset for object bounding box regression.
An axis aligned bounding box is parameterized by (cx,cy,cz) and (dx,dy,dz)
where (cx,cy,cz) is the center point of the box, dx is the x-axis length of the box.
"""
import os
import sys
import numpy as np
import torch
import utils.pc_util as pc_util
from torch.utils.data import Dataset
from utils.box_util import (flip_axis_to_camera_np, flip_axis_to_camera_tensor,
get_3d_box_batch_np, get_3d_box_batch_tensor)
from utils.pc_util import scale_points, shift_scale_points
from utils.random_cuboid import RandomCuboid
IGNORE_LABEL = -100
MEAN_COLOR_RGB = np.array([109.8, 97.2, 83.8])
DATASET_ROOT_DIR = "" ## Replace with path to dataset
DATASET_METADATA_DIR = "" ## Replace with path to dataset
class ScannetDatasetConfig(object):
def __init__(self):
self.num_semcls = 18
self.num_angle_bin = 1
self.max_num_obj = 64
self.type2class = {
"cabinet": 0,
"bed": 1,
"chair": 2,
"sofa": 3,
"table": 4,
"door": 5,
"window": 6,
"bookshelf": 7,
"picture": 8,
"counter": 9,
"desk": 10,
"curtain": 11,
"refrigerator": 12,
"showercurtrain": 13,
"toilet": 14,
"sink": 15,
"bathtub": 16,
"garbagebin": 17,
}
self.class2type = {self.type2class[t]: t for t in self.type2class}
self.nyu40ids = np.array(
[3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]
)
self.nyu40id2class = {
nyu40id: i for i, nyu40id in enumerate(list(self.nyu40ids))
}
# Semantic Segmentation Classes. Not used in 3DETR
self.num_class_semseg = 20
self.type2class_semseg = {
"wall": 0,
"floor": 1,
"cabinet": 2,
"bed": 3,
"chair": 4,
"sofa": 5,
"table": 6,
"door": 7,
"window": 8,
"bookshelf": 9,
"picture": 10,
"counter": 11,
"desk": 12,
"curtain": 13,
"refrigerator": 14,
"showercurtrain": 15,
"toilet": 16,
"sink": 17,
"bathtub": 18,
"garbagebin": 19,
}
self.class2type_semseg = {
self.type2class_semseg[t]: t for t in self.type2class_semseg
}
self.nyu40ids_semseg = np.array(
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]
)
self.nyu40id2class_semseg = {
nyu40id: i for i, nyu40id in enumerate(list(self.nyu40ids_semseg))
}
def angle2class(self, angle):
raise ValueError("ScanNet does not have rotated bounding boxes.")
def class2anglebatch_tensor(self, pred_cls, residual, to_label_format=True):
zero_angle = torch.zeros(
(pred_cls.shape[0], pred_cls.shape[1]),
dtype=torch.float32,
device=pred_cls.device,
)
return zero_angle
def class2anglebatch(self, pred_cls, residual, to_label_format=True):
zero_angle = np.zeros(pred_cls.shape[0], dtype=np.float32)
return zero_angle
def param2obb(
self,
center,
heading_class,
heading_residual,
size_class,
size_residual,
box_size=None,
):
heading_angle = self.class2angle(heading_class, heading_residual)
if box_size is None:
box_size = self.class2size(int(size_class), size_residual)
obb = np.zeros((7,))
obb[0:3] = center
obb[3:6] = box_size
obb[6] = heading_angle * -1
return obb
def box_parametrization_to_corners(self, box_center_unnorm, box_size, box_angle):
box_center_upright = flip_axis_to_camera_tensor(box_center_unnorm)
boxes = get_3d_box_batch_tensor(box_size, box_angle, box_center_upright)
return boxes
def box_parametrization_to_corners_np(self, box_center_unnorm, box_size, box_angle):
box_center_upright = flip_axis_to_camera_np(box_center_unnorm)
boxes = get_3d_box_batch_np(box_size, box_angle, box_center_upright)
return boxes
@staticmethod
def rotate_aligned_boxes(input_boxes, rot_mat):
centers, lengths = input_boxes[:, 0:3], input_boxes[:, 3:6]
new_centers = np.dot(centers, np.transpose(rot_mat))
dx, dy = lengths[:, 0] / 2.0, lengths[:, 1] / 2.0
new_x = np.zeros((dx.shape[0], 4))
new_y = np.zeros((dx.shape[0], 4))
for i, crnr in enumerate([(-1, -1), (1, -1), (1, 1), (-1, 1)]):
crnrs = np.zeros((dx.shape[0], 3))
crnrs[:, 0] = crnr[0] * dx
crnrs[:, 1] = crnr[1] * dy
crnrs = np.dot(crnrs, np.transpose(rot_mat))
new_x[:, i] = crnrs[:, 0]
new_y[:, i] = crnrs[:, 1]
new_dx = 2.0 * np.max(new_x, 1)
new_dy = 2.0 * np.max(new_y, 1)
new_lengths = np.stack((new_dx, new_dy, lengths[:, 2]), axis=1)
return np.concatenate([new_centers, new_lengths], axis=1)
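# --- Hedged example (not part of the original file) -------------------------
# ScanNet boxes are axis aligned (num_angle_bin == 1), so the angle passed to
# box_parametrization_to_corners_np() is simply zero. This hypothetical helper
# turns one unit box centered at the origin into its corner array, mirroring
# how the dataset below calls the same method with batched inputs.
def _example_scannet_box_corners():
    cfg = ScannetDatasetConfig()
    centers = np.zeros((1, 1, 3), dtype=np.float32)  # (batch, num_boxes, 3)
    sizes = np.ones((1, 1, 3), dtype=np.float32)  # full dx, dy, dz
    angles = np.zeros((1, 1), dtype=np.float32)  # no rotation for ScanNet
    corners = cfg.box_parametrization_to_corners_np(centers, sizes, angles)
    return corners  # expected shape (1, 1, 8, 3)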
class ScannetDetectionDataset(Dataset):
def __init__(
self,
dataset_config,
split_set="train",
root_dir=None,
meta_data_dir=None,
num_points=40000,
use_color=False,
use_height=False,
augment=False,
use_random_cuboid=True,
random_cuboid_min_points=30000,
):
self.dataset_config = dataset_config
assert split_set in ["train", "val"]
if root_dir is None:
root_dir = DATASET_ROOT_DIR
if meta_data_dir is None:
meta_data_dir = DATASET_METADATA_DIR
self.data_path = root_dir
all_scan_names = list(
set(
[
os.path.basename(x)[0:12]
for x in os.listdir(self.data_path)
if x.startswith("scene")
]
)
)
if split_set == "all":
self.scan_names = all_scan_names
elif split_set in ["train", "val", "test"]:
split_filenames = os.path.join(meta_data_dir, f"scannetv2_{split_set}.txt")
with open(split_filenames, "r") as f:
self.scan_names = f.read().splitlines()
            # remove unavailable scans
num_scans = len(self.scan_names)
self.scan_names = [
sname for sname in self.scan_names if sname in all_scan_names
]
print(f"kept {len(self.scan_names)} scans out of {num_scans}")
else:
raise ValueError(f"Unknown split name {split_set}")
self.num_points = num_points
self.use_color = use_color
self.use_height = use_height
self.augment = augment
self.use_random_cuboid = use_random_cuboid
self.random_cuboid_augmentor = RandomCuboid(min_points=random_cuboid_min_points)
self.center_normalizing_range = [
np.zeros((1, 3), dtype=np.float32),
np.ones((1, 3), dtype=np.float32),
]
def __len__(self):
return len(self.scan_names)
def __getitem__(self, idx):
scan_name = self.scan_names[idx]
mesh_vertices = np.load(os.path.join(self.data_path, scan_name) + "_vert.npy")
instance_labels = np.load(
os.path.join(self.data_path, scan_name) + "_ins_label.npy"
)
semantic_labels = np.load(
os.path.join(self.data_path, scan_name) + "_sem_label.npy"
)
instance_bboxes = np.load(os.path.join(self.data_path, scan_name) + "_bbox.npy")
if not self.use_color:
point_cloud = mesh_vertices[:, 0:3] # do not use color for now
pcl_color = mesh_vertices[:, 3:6]
else:
point_cloud = mesh_vertices[:, 0:6]
point_cloud[:, 3:] = (point_cloud[:, 3:] - MEAN_COLOR_RGB) / 256.0
pcl_color = point_cloud[:, 3:]
if self.use_height:
floor_height = np.percentile(point_cloud[:, 2], 0.99)
height = point_cloud[:, 2] - floor_height
point_cloud = np.concatenate([point_cloud, np.expand_dims(height, 1)], 1)
# ------------------------------- LABELS ------------------------------
MAX_NUM_OBJ = self.dataset_config.max_num_obj
target_bboxes = np.zeros((MAX_NUM_OBJ, 6), dtype=np.float32)
target_bboxes_mask = np.zeros((MAX_NUM_OBJ), dtype=np.float32)
angle_classes = np.zeros((MAX_NUM_OBJ,), dtype=np.int64)
angle_residuals = np.zeros((MAX_NUM_OBJ,), dtype=np.float32)
raw_sizes = np.zeros((MAX_NUM_OBJ, 3), dtype=np.float32)
raw_angles = np.zeros((MAX_NUM_OBJ,), dtype=np.float32)
if self.augment and self.use_random_cuboid:
(
point_cloud,
instance_bboxes,
per_point_labels,
) = self.random_cuboid_augmentor(
point_cloud, instance_bboxes, [instance_labels, semantic_labels]
)
instance_labels = per_point_labels[0]
semantic_labels = per_point_labels[1]
point_cloud, choices = pc_util.random_sampling(
point_cloud, self.num_points, return_choices=True
)
instance_labels = instance_labels[choices]
semantic_labels = semantic_labels[choices]
sem_seg_labels = np.ones_like(semantic_labels) * IGNORE_LABEL
for _c in self.dataset_config.nyu40ids_semseg:
sem_seg_labels[
semantic_labels == _c
] = self.dataset_config.nyu40id2class_semseg[_c]
pcl_color = pcl_color[choices]
target_bboxes_mask[0 : instance_bboxes.shape[0]] = 1
target_bboxes[0 : instance_bboxes.shape[0], :] = instance_bboxes[:, 0:6]
# ------------------------------- DATA AUGMENTATION ------------------------------
if self.augment:
if np.random.random() > 0.5:
# Flipping along the YZ plane
point_cloud[:, 0] = -1 * point_cloud[:, 0]
target_bboxes[:, 0] = -1 * target_bboxes[:, 0]
if np.random.random() > 0.5:
# Flipping along the XZ plane
point_cloud[:, 1] = -1 * point_cloud[:, 1]
target_bboxes[:, 1] = -1 * target_bboxes[:, 1]
# Rotation along up-axis/Z-axis
rot_angle = (np.random.random() * np.pi / 18) - np.pi / 36 # -5 ~ +5 degree
rot_mat = pc_util.rotz(rot_angle)
point_cloud[:, 0:3] = np.dot(point_cloud[:, 0:3], np.transpose(rot_mat))
target_bboxes = self.dataset_config.rotate_aligned_boxes(
target_bboxes, rot_mat
)
raw_sizes = target_bboxes[:, 3:6]
point_cloud_dims_min = point_cloud.min(axis=0)[:3]
point_cloud_dims_max = point_cloud.max(axis=0)[:3]
box_centers = target_bboxes.astype(np.float32)[:, 0:3]
box_centers_normalized = shift_scale_points(
box_centers[None, ...],
src_range=[
point_cloud_dims_min[None, ...],
point_cloud_dims_max[None, ...],
],
dst_range=self.center_normalizing_range,
)
box_centers_normalized = box_centers_normalized.squeeze(0)
box_centers_normalized = box_centers_normalized * target_bboxes_mask[..., None]
mult_factor = point_cloud_dims_max - point_cloud_dims_min
box_sizes_normalized = scale_points(
raw_sizes.astype(np.float32)[None, ...],
mult_factor=1.0 / mult_factor[None, ...],
)
box_sizes_normalized = box_sizes_normalized.squeeze(0)
box_corners = self.dataset_config.box_parametrization_to_corners_np(
box_centers[None, ...],
raw_sizes.astype(np.float32)[None, ...],
raw_angles.astype(np.float32)[None, ...],
)
box_corners = box_corners.squeeze(0)
ret_dict = {}
ret_dict["point_clouds"] = point_cloud.astype(np.float32)
ret_dict["gt_box_corners"] = box_corners.astype(np.float32)
ret_dict["gt_box_centers"] = box_centers.astype(np.float32)
ret_dict["gt_box_centers_normalized"] = box_centers_normalized.astype(
np.float32
)
ret_dict["gt_angle_class_label"] = angle_classes.astype(np.int64)
ret_dict["gt_angle_residual_label"] = angle_residuals.astype(np.float32)
target_bboxes_semcls = np.zeros((MAX_NUM_OBJ))
target_bboxes_semcls[0 : instance_bboxes.shape[0]] = [
self.dataset_config.nyu40id2class[int(x)]
for x in instance_bboxes[:, -1][0 : instance_bboxes.shape[0]]
]
ret_dict["gt_box_sem_cls_label"] = target_bboxes_semcls.astype(np.int64)
ret_dict["gt_box_present"] = target_bboxes_mask.astype(np.float32)
ret_dict["scan_idx"] = np.array(idx).astype(np.int64)
ret_dict["pcl_color"] = pcl_color
ret_dict["gt_box_sizes"] = raw_sizes.astype(np.float32)
ret_dict["gt_box_sizes_normalized"] = box_sizes_normalized.astype(np.float32)
ret_dict["gt_box_angles"] = raw_angles.astype(np.float32)
ret_dict["point_cloud_dims_min"] = point_cloud_dims_min.astype(np.float32)
ret_dict["point_cloud_dims_max"] = point_cloud_dims_max.astype(np.float32)
return ret_dict
| 3detr-main | datasets/scannet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import numpy as np
from collections import deque
from typing import List
from utils.dist import is_distributed, barrier, all_reduce_sum
def my_worker_init_fn(worker_id):
np.random.seed(np.random.get_state()[1][0] + worker_id)
@torch.jit.ignore
def to_list_1d(arr) -> List[float]:
arr = arr.detach().cpu().numpy().tolist()
return arr
@torch.jit.ignore
def to_list_3d(arr) -> List[List[List[float]]]:
arr = arr.detach().cpu().numpy().tolist()
return arr
def huber_loss(error, delta=1.0):
"""
Ref: https://github.com/charlesq34/frustum-pointnets/blob/master/models/model_util.py
x = error = pred - gt or dist(pred,gt)
0.5 * |x|^2 if |x|<=d
0.5 * d^2 + d * (|x|-d) if |x|>d
"""
abs_error = torch.abs(error)
quadratic = torch.clamp(abs_error, max=delta)
linear = abs_error - quadratic
loss = 0.5 * quadratic ** 2 + delta * linear
return loss
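# --- Hedged example (not part of the original file) -------------------------
# Quick numerical check of the piecewise definition documented above: for
# |x| <= delta the loss is 0.5 * x**2, beyond that it grows linearly.
def _example_huber_loss():
    err = torch.tensor([0.5, 2.0])
    loss = huber_loss(err, delta=1.0)
    # 0.5 * 0.5**2 = 0.125 and 0.5 * 1.0**2 + 1.0 * (2.0 - 1.0) = 1.5
    assert torch.allclose(loss, torch.tensor([0.125, 1.5]))
    return loss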
# From https://github.com/facebookresearch/detr/blob/master/util/misc.py
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_distributed():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
barrier()
all_reduce_sum(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value,
)
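# --- Hedged usage sketch (not part of the original file) --------------------
# SmoothedValue keeps a sliding window (for `median`/`avg`/`max`/`value`) and
# running global totals (for `global_avg`); synchronize_between_processes()
# only all-reduces the global totals, as the warning above notes.
def _example_smoothed_value():
    meter = SmoothedValue(window_size=3)
    for v in (1.0, 2.0, 3.0, 4.0):
        meter.update(v)
    # the window holds (2.0, 3.0, 4.0); the global average covers all updates
    assert meter.avg == 3.0
    assert meter.global_avg == 2.5
    return str(meter)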
| 3detr-main | utils/misc.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from setuptools import setup, Extension
from Cython.Build import cythonize
import numpy as np
# hacky way to find numpy include path
# replace with actual path if this does not work
np_include_path = np.__file__.replace("__init__.py", "core/include/")
INCLUDE_PATH = [
np_include_path
]
setup(
ext_modules = cythonize(
Extension(
"box_intersection",
sources=["box_intersection.pyx"],
include_dirs=INCLUDE_PATH
)),
)
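# Hedged usage note (not part of the original file): a setup script like this
# is typically run in place with Cython's standard build_ext command, e.g.
#   python cython_compile.py build_ext --inplace
# which should produce a `box_intersection` extension module next to
# box_intersection.pyx so that `from box_intersection import batch_intersect`
# in utils/box_util.py succeeds.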
| 3detr-main | utils/cython_compile.py |
# Copyright (c) Facebook, Inc. and its affiliates.
""" Generic Code for Object Detection Evaluation
Input:
For each class:
For each image:
Predictions: box, score
Groundtruths: box
Output:
For each class:
        precision-recall and average precision
Author: Charles R. Qi
Ref: https://raw.githubusercontent.com/rbgirshick/py-faster-rcnn/master/lib/datasets/voc_eval.py
"""
import numpy as np
from utils.box_util import box3d_iou
def voc_ap(rec, prec, use_07_metric=False):
"""ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.0
for t in np.arange(0.0, 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.0
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.0], rec, [1.0]))
mpre = np.concatenate(([0.0], prec, [0.0]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
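# --- Hedged example (not part of the original file) -------------------------
# Tiny numerical illustration of voc_ap(): with two detections where the first
# is a true positive and the second a false positive against a single GT box,
# recall is [1.0, 1.0] and precision is [1.0, 0.5], so the area under the
# interpolated precision-recall curve is 1.0.
def _example_voc_ap():
    rec = np.array([1.0, 1.0])
    prec = np.array([1.0, 0.5])
    ap = voc_ap(rec, prec, use_07_metric=False)
    assert abs(ap - 1.0) < 1e-8
    return ap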
def get_iou_obb(bb1, bb2):
    iou3d, _ = box3d_iou(bb1, bb2)  # the second return value is the union volume, not a 2D IoU
return iou3d
def get_iou_main(get_iou_func, args):
return get_iou_func(*args)
def eval_det_cls(
pred, gt, ovthresh=0.25, use_07_metric=False, get_iou_func=get_iou_obb
):
"""Generic functions to compute precision/recall for object detection
for a single class.
Input:
pred: map of {img_id: [(bbox, score)]} where bbox is numpy array
gt: map of {img_id: [bbox]}
ovthresh: scalar, iou threshold
use_07_metric: bool, if True use VOC07 11 point method
Output:
rec: numpy array of length nd
prec: numpy array of length nd
ap: scalar, average precision
"""
# construct gt objects
class_recs = {} # {img_id: {'bbox': bbox list, 'det': matched list}}
npos = 0
for img_id in gt.keys():
bbox = np.array(gt[img_id])
det = [False] * len(bbox)
npos += len(bbox)
class_recs[img_id] = {"bbox": bbox, "det": det}
# pad empty list to all other imgids
for img_id in pred.keys():
if img_id not in gt:
class_recs[img_id] = {"bbox": np.array([]), "det": []}
# construct dets
image_ids = []
confidence = []
BB = []
for img_id in pred.keys():
for box, score in pred[img_id]:
image_ids.append(img_id)
confidence.append(score)
BB.append(box)
confidence = np.array(confidence)
BB = np.array(BB) # (nd,4 or 8,3 or 6)
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, ...]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
# if d%100==0: print(d)
R = class_recs[image_ids[d]]
bb = BB[d, ...].astype(float)
ovmax = -np.inf
BBGT = R["bbox"].astype(float)
if BBGT.size > 0:
# compute overlaps
for j in range(BBGT.shape[0]):
iou = get_iou_main(get_iou_func, (bb, BBGT[j, ...]))
if iou > ovmax:
ovmax = iou
jmax = j
# print d, ovmax
if ovmax > ovthresh:
if not R["det"][jmax]:
tp[d] = 1.0
R["det"][jmax] = 1
else:
fp[d] = 1.0
else:
fp[d] = 1.0
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
if npos == 0:
rec = np.zeros_like(tp)
else:
rec = tp / float(npos)
# print('NPOS: ', npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap
def eval_det_cls_wrapper(arguments):
pred, gt, ovthresh, use_07_metric, get_iou_func = arguments
rec, prec, ap = eval_det_cls(pred, gt, ovthresh, use_07_metric, get_iou_func)
return (rec, prec, ap)
def eval_det(pred_all, gt_all, ovthresh=0.25, use_07_metric=False, get_iou_func=get_iou_obb):
"""Generic functions to compute precision/recall for object detection
for multiple classes.
Input:
pred_all: map of {img_id: [(classname, bbox, score)]}
gt_all: map of {img_id: [(classname, bbox)]}
ovthresh: scalar, iou threshold
use_07_metric: bool, if true use VOC07 11 point method
Output:
rec: {classname: rec}
prec: {classname: prec_all}
ap: {classname: scalar}
"""
pred = {} # map {classname: pred}
gt = {} # map {classname: gt}
for img_id in pred_all.keys():
for classname, bbox, score in pred_all[img_id]:
if classname not in pred:
pred[classname] = {}
if img_id not in pred[classname]:
pred[classname][img_id] = []
if classname not in gt:
gt[classname] = {}
if img_id not in gt[classname]:
gt[classname][img_id] = []
pred[classname][img_id].append((bbox, score))
for img_id in gt_all.keys():
for classname, bbox in gt_all[img_id]:
if classname not in gt:
gt[classname] = {}
if img_id not in gt[classname]:
gt[classname][img_id] = []
gt[classname][img_id].append(bbox)
rec = {}
prec = {}
ap = {}
for classname in gt.keys():
# print('Computing AP for class: ', classname)
rec[classname], prec[classname], ap[classname] = eval_det_cls(
pred[classname], gt[classname], ovthresh, use_07_metric, get_iou_func
)
# print(classname, ap[classname])
return rec, prec, ap
from multiprocessing import Pool
def eval_det_multiprocessing(
pred_all, gt_all, ovthresh=0.25, use_07_metric=False, get_iou_func=get_iou_obb
):
"""Generic functions to compute precision/recall for object detection
for multiple classes.
Input:
pred_all: map of {img_id: [(classname, bbox, score)]}
gt_all: map of {img_id: [(classname, bbox)]}
ovthresh: scalar, iou threshold
use_07_metric: bool, if true use VOC07 11 point method
Output:
rec: {classname: rec}
prec: {classname: prec_all}
ap: {classname: scalar}
"""
pred = {} # map {classname: pred}
gt = {} # map {classname: gt}
for img_id in pred_all.keys():
for classname, bbox, score in pred_all[img_id]:
if classname not in pred:
pred[classname] = {}
if img_id not in pred[classname]:
pred[classname][img_id] = []
if classname not in gt:
gt[classname] = {}
if img_id not in gt[classname]:
gt[classname][img_id] = []
pred[classname][img_id].append((bbox, score))
for img_id in gt_all.keys():
for classname, bbox in gt_all[img_id]:
if classname not in gt:
gt[classname] = {}
if img_id not in gt[classname]:
gt[classname][img_id] = []
gt[classname][img_id].append(bbox)
rec = {}
prec = {}
ap = {}
p = Pool(processes=10)
ret_values = p.map(
eval_det_cls_wrapper,
[
(pred[classname], gt[classname], ovthresh, use_07_metric, get_iou_func)
for classname in gt.keys()
if classname in pred
],
)
p.close()
for i, classname in enumerate(gt.keys()):
if classname in pred:
rec[classname], prec[classname], ap[classname] = ret_values[i]
else:
rec[classname] = 0
prec[classname] = 0
ap[classname] = 0
# print(classname, ap[classname])
return rec, prec, ap
| 3detr-main | utils/eval_det.py |
# Copyright (c) Facebook, Inc. and its affiliates.
""" Utility functions for processing point clouds.
Author: Charles R. Qi and Or Litany
"""
import os
import sys
import torch
# Point cloud IO
import numpy as np
from plyfile import PlyData, PlyElement
# Mesh IO
import trimesh
# ----------------------------------------
# Point Cloud Sampling
# ----------------------------------------
def random_sampling(pc, num_sample, replace=None, return_choices=False):
"""Input is NxC, output is num_samplexC"""
if replace is None:
replace = pc.shape[0] < num_sample
choices = np.random.choice(pc.shape[0], num_sample, replace=replace)
if return_choices:
return pc[choices], choices
else:
return pc[choices]
# ----------------------------------------
# Simple Point manipulations
# ----------------------------------------
def shift_scale_points(pred_xyz, src_range, dst_range=None):
"""
pred_xyz: B x N x 3
src_range: [[B x 3], [B x 3]] - min and max XYZ coords
dst_range: [[B x 3], [B x 3]] - min and max XYZ coords
"""
if dst_range is None:
dst_range = [
torch.zeros((src_range[0].shape[0], 3), device=src_range[0].device),
torch.ones((src_range[0].shape[0], 3), device=src_range[0].device),
]
if pred_xyz.ndim == 4:
src_range = [x[:, None] for x in src_range]
dst_range = [x[:, None] for x in dst_range]
assert src_range[0].shape[0] == pred_xyz.shape[0]
assert dst_range[0].shape[0] == pred_xyz.shape[0]
assert src_range[0].shape[-1] == pred_xyz.shape[-1]
assert src_range[0].shape == src_range[1].shape
assert dst_range[0].shape == dst_range[1].shape
assert src_range[0].shape == dst_range[1].shape
src_diff = src_range[1][:, None, :] - src_range[0][:, None, :]
dst_diff = dst_range[1][:, None, :] - dst_range[0][:, None, :]
prop_xyz = (
((pred_xyz - src_range[0][:, None, :]) * dst_diff) / src_diff
) + dst_range[0][:, None, :]
return prop_xyz
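# --- Hedged example (not part of the original file) -------------------------
# shift_scale_points() rescales coordinates from a per-batch source range into
# a destination range (the unit cube by default). A minimal sketch:
def _example_shift_scale_points():
    pred_xyz = torch.tensor([[[0.0, 0.0, 0.0], [2.0, 4.0, 6.0]]])  # B=1, N=2
    src_range = [
        torch.zeros(1, 3),  # per-batch min XYZ
        torch.tensor([[2.0, 4.0, 6.0]]),  # per-batch max XYZ
    ]
    normalized = shift_scale_points(pred_xyz, src_range)
    expected = torch.tensor([[[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]])
    assert torch.allclose(normalized, expected)
    return normalized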
def scale_points(pred_xyz, mult_factor):
if pred_xyz.ndim == 4:
mult_factor = mult_factor[:, None]
scaled_xyz = pred_xyz * mult_factor[:, None, :]
return scaled_xyz
def rotate_point_cloud(points, rotation_matrix=None):
"""Input: (n,3), Output: (n,3)"""
# Rotate in-place around Z axis.
if rotation_matrix is None:
rotation_angle = np.random.uniform() * 2 * np.pi
sinval, cosval = np.sin(rotation_angle), np.cos(rotation_angle)
rotation_matrix = np.array(
[[cosval, sinval, 0], [-sinval, cosval, 0], [0, 0, 1]]
)
ctr = points.mean(axis=0)
rotated_data = np.dot(points - ctr, rotation_matrix) + ctr
return rotated_data, rotation_matrix
def rotate_pc_along_y(pc, rot_angle):
"""Input ps is NxC points with first 3 channels as XYZ
z is facing forward, x is left ward, y is downward
"""
cosval = np.cos(rot_angle)
sinval = np.sin(rot_angle)
rotmat = np.array([[cosval, -sinval], [sinval, cosval]])
pc[:, [0, 2]] = np.dot(pc[:, [0, 2]], np.transpose(rotmat))
return pc
def roty(t):
"""Rotation about the y-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
def roty_batch(t):
"""Rotation about the y-axis.
t: (x1,x2,...xn)
return: (x1,x2,...,xn,3,3)
"""
input_shape = t.shape
output = np.zeros(tuple(list(input_shape) + [3, 3]))
c = np.cos(t)
s = np.sin(t)
output[..., 0, 0] = c
output[..., 0, 2] = s
output[..., 1, 1] = 1
output[..., 2, 0] = -s
output[..., 2, 2] = c
return output
def rotz(t):
"""Rotation about the z-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])
def point_cloud_to_bbox(points):
"""Extract the axis aligned box from a pcl or batch of pcls
Args:
points: Nx3 points or BxNx3
output is 6 dim: xyz pos of center and 3 lengths
"""
which_dim = len(points.shape) - 2 # first dim if a single cloud and second if batch
mn, mx = points.min(which_dim), points.max(which_dim)
lengths = mx - mn
cntr = 0.5 * (mn + mx)
return np.concatenate([cntr, lengths], axis=which_dim)
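# --- Hedged example (not part of the original file) -------------------------
# point_cloud_to_bbox() returns (cx, cy, cz, dx, dy, dz) for a single cloud:
def _example_point_cloud_to_bbox():
    pts = np.array([[0.0, 0.0, 0.0], [2.0, 4.0, 6.0]])
    bbox = point_cloud_to_bbox(pts)
    assert np.allclose(bbox, [1.0, 2.0, 3.0, 2.0, 4.0, 6.0])
    return bbox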
def write_bbox(scene_bbox, out_filename):
"""Export scene bbox to meshes
Args:
scene_bbox: (N x 6 numpy array): xyz pos of center and 3 lengths
out_filename: (string) filename
Note:
To visualize the boxes in MeshLab.
1. Select the objects (the boxes)
2. Filters -> Polygon and Quad Mesh -> Turn into Quad-Dominant Mesh
3. Select Wireframe view.
"""
def convert_box_to_trimesh_fmt(box):
ctr = box[:3]
lengths = box[3:]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3, 3] = 1.0
box_trimesh_fmt = trimesh.creation.box(lengths, trns)
return box_trimesh_fmt
scene = trimesh.scene.Scene()
for box in scene_bbox:
scene.add_geometry(convert_box_to_trimesh_fmt(box))
mesh_list = trimesh.util.concatenate(scene.dump())
# save to ply file
trimesh.io.export.export_mesh(mesh_list, out_filename, file_type="ply")
return
def write_oriented_bbox(scene_bbox, out_filename, colors=None):
"""Export oriented (around Z axis) scene bbox to meshes
Args:
scene_bbox: (N x 7 numpy array): xyz pos of center and 3 lengths (dx,dy,dz)
and heading angle around Z axis.
Y forward, X right, Z upward. heading angle of positive X is 0,
heading angle of positive Y is 90 degrees.
out_filename: (string) filename
"""
def heading2rotmat(heading_angle):
rotmat = np.zeros((3, 3))
rotmat[2, 2] = 1
cosval = np.cos(heading_angle)
sinval = np.sin(heading_angle)
rotmat[0:2, 0:2] = np.array([[cosval, -sinval], [sinval, cosval]])
return rotmat
def convert_oriented_box_to_trimesh_fmt(box):
ctr = box[:3]
lengths = box[3:6]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3, 3] = 1.0
trns[0:3, 0:3] = heading2rotmat(box[6])
box_trimesh_fmt = trimesh.creation.box(lengths, trns)
return box_trimesh_fmt
if colors is not None:
if colors.shape[0] != len(scene_bbox):
colors = [colors for _ in range(len(scene_bbox))]
colors = np.array(colors).astype(np.uint8)
assert colors.shape[0] == len(scene_bbox)
assert colors.shape[1] == 4
scene = trimesh.scene.Scene()
for idx, box in enumerate(scene_bbox):
box_tr = convert_oriented_box_to_trimesh_fmt(box)
if colors is not None:
box_tr.visual.main_color[:] = colors[idx]
box_tr.visual.vertex_colors[:] = colors[idx]
for facet in box_tr.facets:
box_tr.visual.face_colors[facet] = colors[idx]
scene.add_geometry(box_tr)
mesh_list = trimesh.util.concatenate(scene.dump())
# save to ply file
trimesh.io.export.export_mesh(mesh_list, out_filename, file_type="ply")
return
def write_oriented_bbox_camera_coord(scene_bbox, out_filename):
"""Export oriented (around Y axis) scene bbox to meshes
Args:
scene_bbox: (N x 7 numpy array): xyz pos of center and 3 lengths (dx,dy,dz)
and heading angle around Y axis.
Z forward, X rightward, Y downward. heading angle of positive X is 0,
heading angle of negative Z is 90 degrees.
out_filename: (string) filename
"""
def heading2rotmat(heading_angle):
rotmat = np.zeros((3, 3))
rotmat[1, 1] = 1
cosval = np.cos(heading_angle)
sinval = np.sin(heading_angle)
rotmat[0, :] = np.array([cosval, 0, sinval])
rotmat[2, :] = np.array([-sinval, 0, cosval])
return rotmat
def convert_oriented_box_to_trimesh_fmt(box):
ctr = box[:3]
lengths = box[3:6]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3, 3] = 1.0
trns[0:3, 0:3] = heading2rotmat(box[6])
box_trimesh_fmt = trimesh.creation.box(lengths, trns)
return box_trimesh_fmt
scene = trimesh.scene.Scene()
for box in scene_bbox:
scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))
mesh_list = trimesh.util.concatenate(scene.dump())
# save to ply file
trimesh.io.export.export_mesh(mesh_list, out_filename, file_type="ply")
return
def write_lines_as_cylinders(pcl, filename, rad=0.005, res=64):
"""Create lines represented as cylinders connecting pairs of 3D points
Args:
pcl: (N x 2 x 3 numpy array): N pairs of xyz pos
filename: (string) filename for the output mesh (ply) file
rad: radius for the cylinder
res: number of sections used to create the cylinder
"""
scene = trimesh.scene.Scene()
for src, tgt in pcl:
# compute line
vec = tgt - src
M = trimesh.geometry.align_vectors([0, 0, 1], vec, False)
vec = tgt - src # compute again since align_vectors modifies vec in-place!
M[:3, 3] = 0.5 * src + 0.5 * tgt
height = np.sqrt(np.dot(vec, vec))
scene.add_geometry(
trimesh.creation.cylinder(
radius=rad, height=height, sections=res, transform=M
)
)
mesh_list = trimesh.util.concatenate(scene.dump())
trimesh.io.export.export_mesh(mesh_list, "%s.ply" % (filename), file_type="ply")
| 3detr-main | utils/pc_util.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import os
from utils.dist import is_primary
def save_checkpoint(
checkpoint_dir,
model_no_ddp,
optimizer,
epoch,
args,
best_val_metrics,
filename=None,
):
if not is_primary():
return
if filename is None:
filename = f"checkpoint_{epoch:04d}.pth"
checkpoint_name = os.path.join(checkpoint_dir, filename)
sd = {
"model": model_no_ddp.state_dict(),
"optimizer": optimizer.state_dict(),
"epoch": epoch,
"args": args,
"best_val_metrics": best_val_metrics,
}
torch.save(sd, checkpoint_name)
def resume_if_possible(checkpoint_dir, model_no_ddp, optimizer):
"""
Resume if checkpoint is available.
Return
- epoch of loaded checkpoint.
"""
epoch = -1
best_val_metrics = {}
if not os.path.isdir(checkpoint_dir):
return epoch, best_val_metrics
last_checkpoint = os.path.join(checkpoint_dir, "checkpoint.pth")
if not os.path.isfile(last_checkpoint):
return epoch, best_val_metrics
sd = torch.load(last_checkpoint, map_location=torch.device("cpu"))
epoch = sd["epoch"]
best_val_metrics = sd["best_val_metrics"]
print(f"Found checkpoint at {epoch}. Resuming.")
model_no_ddp.load_state_dict(sd["model"])
optimizer.load_state_dict(sd["optimizer"])
print(
f"Loaded model and optimizer state at {epoch}. Loaded best val metrics so far."
)
return epoch, best_val_metrics
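# --- Hedged usage sketch (not part of the original file) --------------------
# resume_if_possible() is a no-op when the checkpoint directory (or its
# "checkpoint.pth") does not exist, returning epoch -1 and empty metrics, so a
# fresh run and a resumed run share the same code path. The tiny model and
# optimizer below are hypothetical stand-ins for the real detector.
def _example_resume_if_possible():
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    epoch, best_val_metrics = resume_if_possible(
        "/tmp/nonexistent_3detr_ckpts", model, optimizer  # placeholder path
    )
    assert epoch == -1 and best_val_metrics == {}
    return epoch, best_val_metrics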
| 3detr-main | utils/io.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from urllib import request
import torch
import pickle
## Define the weights you want and where to store them
dataset = "scannet"
encoder = "_masked" # or ""
epoch = 1080
base_url = "https://dl.fbaipublicfiles.com/3detr/checkpoints"
local_dir = "/tmp/"
### Downloading the weights
weights_file = f"{dataset}{encoder}_ep{epoch}.pth"
metrics_file = f"{dataset}{encoder}_ep{epoch}_metrics.pkl"
local_weights = os.path.join(local_dir, weights_file)
local_metrics = os.path.join(local_dir, metrics_file)
url = os.path.join(base_url, weights_file)
request.urlretrieve(url, local_weights)
print(f"Downloaded weights from {url} to {local_weights}")
url = os.path.join(base_url, metrics_file)
request.urlretrieve(url, local_metrics)
print(f"Downloaded metrics from {url} to {local_metrics}")
# weights can be simply loaded with pytorch
weights = torch.load(local_weights, map_location=torch.device("cpu"))
print("Weights loaded successfully.")
# metrics can be loaded with pickle
with open(local_metrics, "rb") as fh:
metrics = pickle.load(fh)
print("Metrics loaded successfully.") | 3detr-main | utils/download_weights.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
# boxes are axis aligned 2D boxes of shape (n,5) in FLOAT numbers with (x1,y1,x2,y2,score)
""" Ref: https://www.pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/
Ref: https://github.com/vickyboy47/nms-python/blob/master/nms.py
"""
def nms_2d(boxes, overlap_threshold):
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
score = boxes[:, 4]
area = (x2 - x1) * (y2 - y1)
I = np.argsort(score)
pick = []
while I.size != 0:
last = I.size
i = I[-1]
pick.append(i)
suppress = [last - 1]
for pos in range(last - 1):
j = I[pos]
xx1 = max(x1[i], x1[j])
yy1 = max(y1[i], y1[j])
xx2 = min(x2[i], x2[j])
yy2 = min(y2[i], y2[j])
w = xx2 - xx1
h = yy2 - yy1
if w > 0 and h > 0:
o = w * h / area[j]
print("Overlap is", o)
if o > overlap_threshold:
suppress.append(pos)
I = np.delete(I, suppress)
return pick
def nms_2d_faster(boxes, overlap_threshold, old_type=False):
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
score = boxes[:, 4]
area = (x2 - x1) * (y2 - y1)
I = np.argsort(score)
pick = []
while I.size != 0:
last = I.size
i = I[-1]
pick.append(i)
xx1 = np.maximum(x1[i], x1[I[: last - 1]])
yy1 = np.maximum(y1[i], y1[I[: last - 1]])
xx2 = np.minimum(x2[i], x2[I[: last - 1]])
yy2 = np.minimum(y2[i], y2[I[: last - 1]])
w = np.maximum(0, xx2 - xx1)
h = np.maximum(0, yy2 - yy1)
if old_type:
o = (w * h) / area[I[: last - 1]]
else:
inter = w * h
o = inter / (area[i] + area[I[: last - 1]] - inter)
I = np.delete(
I, np.concatenate(([last - 1], np.where(o > overlap_threshold)[0]))
)
return pick
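# --- Hedged example (not part of the original file) -------------------------
# Two heavily overlapping 2D boxes: with an IoU threshold of 0.5 only the
# higher-scoring one survives, while a third disjoint box is always kept.
def _example_nms_2d_faster():
    boxes = np.array([
        [0.0, 0.0, 2.0, 2.0, 0.9],  # kept (highest score)
        [0.1, 0.1, 2.1, 2.1, 0.8],  # suppressed, IoU with the first ~0.82
        [5.0, 5.0, 6.0, 6.0, 0.7],  # kept, no overlap with the others
    ])
    pick = nms_2d_faster(boxes, overlap_threshold=0.5)
    assert sorted(pick) == [0, 2]
    return pick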
def nms_3d_faster(boxes, overlap_threshold, old_type=False):
x1 = boxes[:, 0]
y1 = boxes[:, 1]
z1 = boxes[:, 2]
x2 = boxes[:, 3]
y2 = boxes[:, 4]
z2 = boxes[:, 5]
score = boxes[:, 6]
area = (x2 - x1) * (y2 - y1) * (z2 - z1)
I = np.argsort(score)
pick = []
while I.size != 0:
last = I.size
i = I[-1]
pick.append(i)
xx1 = np.maximum(x1[i], x1[I[: last - 1]])
yy1 = np.maximum(y1[i], y1[I[: last - 1]])
zz1 = np.maximum(z1[i], z1[I[: last - 1]])
xx2 = np.minimum(x2[i], x2[I[: last - 1]])
yy2 = np.minimum(y2[i], y2[I[: last - 1]])
zz2 = np.minimum(z2[i], z2[I[: last - 1]])
l = np.maximum(0, xx2 - xx1)
w = np.maximum(0, yy2 - yy1)
h = np.maximum(0, zz2 - zz1)
if old_type:
o = (l * w * h) / area[I[: last - 1]]
else:
inter = l * w * h
o = inter / (area[i] + area[I[: last - 1]] - inter)
I = np.delete(
I, np.concatenate(([last - 1], np.where(o > overlap_threshold)[0]))
)
return pick
def nms_3d_faster_samecls(boxes, overlap_threshold, old_type=False):
x1 = boxes[:, 0]
y1 = boxes[:, 1]
z1 = boxes[:, 2]
x2 = boxes[:, 3]
y2 = boxes[:, 4]
z2 = boxes[:, 5]
score = boxes[:, 6]
cls = boxes[:, 7]
area = (x2 - x1) * (y2 - y1) * (z2 - z1)
I = np.argsort(score)
pick = []
while I.size != 0:
last = I.size
i = I[-1]
pick.append(i)
xx1 = np.maximum(x1[i], x1[I[: last - 1]])
yy1 = np.maximum(y1[i], y1[I[: last - 1]])
zz1 = np.maximum(z1[i], z1[I[: last - 1]])
xx2 = np.minimum(x2[i], x2[I[: last - 1]])
yy2 = np.minimum(y2[i], y2[I[: last - 1]])
zz2 = np.minimum(z2[i], z2[I[: last - 1]])
cls1 = cls[i]
cls2 = cls[I[: last - 1]]
l = np.maximum(0, xx2 - xx1)
w = np.maximum(0, yy2 - yy1)
h = np.maximum(0, zz2 - zz1)
if old_type:
o = (l * w * h) / area[I[: last - 1]]
else:
inter = l * w * h
o = inter / (area[i] + area[I[: last - 1]] - inter)
o = o * (cls1 == cls2)
I = np.delete(
I, np.concatenate(([last - 1], np.where(o > overlap_threshold)[0]))
)
return pick
| 3detr-main | utils/nms.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
try:
from tensorboardX import SummaryWriter
except ImportError:
print("Cannot import tensorboard. Will log to txt files only.")
SummaryWriter = None
from utils.dist import is_primary
class Logger(object):
def __init__(self, log_dir=None) -> None:
self.log_dir = log_dir
if SummaryWriter is not None and is_primary():
self.writer = SummaryWriter(self.log_dir)
else:
self.writer = None
def log_scalars(self, scalar_dict, step, prefix=None):
if self.writer is None:
return
for k in scalar_dict:
v = scalar_dict[k]
if isinstance(v, torch.Tensor):
v = v.detach().cpu().item()
if prefix is not None:
k = prefix + k
self.writer.add_scalar(k, v, step)
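# --- Hedged usage sketch (not part of the original file) --------------------
# Logger degrades gracefully: without tensorboardX (or on non-primary ranks)
# log_scalars() silently does nothing, so callers never have to guard it.
def _example_logger(log_dir="/tmp/3detr_example_logs"):  # placeholder dir
    logger = Logger(log_dir)
    logger.log_scalars(
        {"loss": torch.tensor(0.25), "lr": 1e-4}, step=0, prefix="Train/"
    )
    return logger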
| 3detr-main | utils/logger.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
def check_aspect(crop_range, aspect_min):
xy_aspect = np.min(crop_range[:2]) / np.max(crop_range[:2])
xz_aspect = np.min(crop_range[[0, 2]]) / np.max(crop_range[[0, 2]])
yz_aspect = np.min(crop_range[1:]) / np.max(crop_range[1:])
return (
(xy_aspect >= aspect_min)
or (xz_aspect >= aspect_min)
or (yz_aspect >= aspect_min)
)
class RandomCuboid(object):
"""
RandomCuboid augmentation from DepthContrast [https://arxiv.org/abs/2101.02691]
We slightly modify this operation to account for object detection.
This augmentation randomly crops a cuboid from the input and
ensures that the cropped cuboid contains at least one bounding box
"""
def __init__(
self,
min_points,
aspect=0.8,
min_crop=0.5,
max_crop=1.0,
box_filter_policy="center",
):
self.aspect = aspect
self.min_crop = min_crop
self.max_crop = max_crop
self.min_points = min_points
self.box_filter_policy = box_filter_policy
def __call__(self, point_cloud, target_boxes, per_point_labels=None):
range_xyz = np.max(point_cloud[:, 0:3], axis=0) - np.min(
point_cloud[:, 0:3], axis=0
)
for _ in range(100):
crop_range = self.min_crop + np.random.rand(3) * (
self.max_crop - self.min_crop
)
if not check_aspect(crop_range, self.aspect):
continue
sample_center = point_cloud[np.random.choice(len(point_cloud)), 0:3]
new_range = range_xyz * crop_range / 2.0
max_xyz = sample_center + new_range
min_xyz = sample_center - new_range
upper_idx = (
np.sum((point_cloud[:, 0:3] <= max_xyz).astype(np.int32), 1) == 3
)
lower_idx = (
np.sum((point_cloud[:, 0:3] >= min_xyz).astype(np.int32), 1) == 3
)
new_pointidx = (upper_idx) & (lower_idx)
if np.sum(new_pointidx) < self.min_points:
continue
new_point_cloud = point_cloud[new_pointidx, :]
# filtering policy is the only modification from DepthContrast
if self.box_filter_policy == "center":
# remove boxes whose center does not lie within the new_point_cloud
new_boxes = target_boxes
if (
target_boxes.sum() > 0
): # ground truth contains no bounding boxes. Common in SUNRGBD.
box_centers = target_boxes[:, 0:3]
new_pc_min_max = np.min(new_point_cloud[:, 0:3], axis=0), np.max(
new_point_cloud[:, 0:3], axis=0
)
keep_boxes = np.logical_and(
np.all(box_centers >= new_pc_min_max[0], axis=1),
np.all(box_centers <= new_pc_min_max[1], axis=1),
)
if keep_boxes.sum() == 0:
# current data augmentation removes all boxes in the pointcloud. fail!
continue
new_boxes = target_boxes[keep_boxes]
if per_point_labels is not None:
new_per_point_labels = [x[new_pointidx] for x in per_point_labels]
else:
new_per_point_labels = None
# if we are here, all conditions are met. return boxes
return new_point_cloud, new_boxes, new_per_point_labels
# fallback
return point_cloud, target_boxes, per_point_labels
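# --- Hedged usage sketch (not part of the original file) --------------------
# A minimal run on synthetic data: the augmentor crops a random cuboid that
# still contains at least `min_points` points and keeps only boxes whose
# centers fall inside the crop, or returns the inputs unchanged if no valid
# crop is found within 100 attempts.
def _example_random_cuboid():
    rng = np.random.RandomState(0)
    point_cloud = rng.rand(5000, 3).astype(np.float32)  # N x 3 points
    target_boxes = np.array(
        [[0.5, 0.5, 0.5, 0.2, 0.2, 0.2]], dtype=np.float32  # cx,cy,cz,dx,dy,dz
    )
    augmentor = RandomCuboid(min_points=1000)
    new_pc, new_boxes, _ = augmentor(point_cloud, target_boxes)
    assert new_pc.shape[1] == 3 and new_boxes.shape[1] == 6
    return new_pc, new_boxes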
| 3detr-main | utils/random_cuboid.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Utilities for bounding box manipulation and GIoU.
"""
import torch
from torchvision.ops.boxes import box_area
from typing import List
try:
from box_intersection import batch_intersect
except ImportError:
print("Could not import cythonized batch_intersection")
batch_intersect = None
import numpy as np
from scipy.spatial import ConvexHull
def polygon_clip(subjectPolygon, clipPolygon):
""" Clip a polygon with another polygon.
Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python
Args:
subjectPolygon: a list of (x,y) 2d points, any polygon.
clipPolygon: a list of (x,y) 2d points, has to be *convex*
Note:
**points have to be counter-clockwise ordered**
Return:
a list of (x,y) vertex point for the intersection polygon.
"""
def inside(p):
return(cp2[0]-cp1[0])*(p[1]-cp1[1]) > (cp2[1]-cp1[1])*(p[0]-cp1[0])
# diff_cp = cp2 - cp1
# diff_p = p - cp1
# diff_p = diff_p[[1, 0]]
# mult = diff_cp * diff_p
# return mult[0] > mult[1]
def computeIntersection():
dc = [ cp1[0] - cp2[0], cp1[1] - cp2[1] ]
dp = [ s[0] - e[0], s[1] - e[1] ]
n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
n2 = s[0] * e[1] - s[1] * e[0]
n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])
return [(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3]
# dc = cp1 - cp2
# dp = s - e
# n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
# n2 = s[0] * e[1] - s[1] * e[0]
# n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])
# return (n1 * dp - n2 * dc) * n3
outputList = subjectPolygon
cp1 = clipPolygon[-1]
for clipVertex in clipPolygon:
cp2 = clipVertex
inputList = outputList
outputList = []
s = inputList[-1]
for subjectVertex in inputList:
e = subjectVertex
if inside(e):
if not inside(s):
outputList.append(computeIntersection())
outputList.append(e)
elif inside(s):
outputList.append(computeIntersection())
s = e
cp1 = cp2
if len(outputList) == 0:
return None
return(outputList)
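# --- Hedged example (not part of the original file) -------------------------
# Sutherland-Hodgman clipping of two unit squares (counter-clockwise ordered)
# offset by (0.5, 0.5): the intersection is the square [0.5, 1] x [0.5, 1]
# with area 0.25. poly_area() is defined further down in this file and is
# resolved at call time.
def _example_polygon_clip():
    subject = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
    clipper = [(0.5, 0.5), (1.5, 0.5), (1.5, 1.5), (0.5, 1.5)]
    inter = np.array(polygon_clip(subject, clipper))
    area = poly_area(inter[:, 0], inter[:, 1])
    assert abs(area - 0.25) < 1e-6
    return inter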
def helper_computeIntersection(cp1: torch.Tensor, cp2: torch.Tensor, s: torch.Tensor, e: torch.Tensor):
dc = [ cp1[0] - cp2[0], cp1[1] - cp2[1] ]
dp = [ s[0] - e[0], s[1] - e[1] ]
n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
n2 = s[0] * e[1] - s[1] * e[0]
n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])
# return [(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3]
return torch.stack([(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3])
def helper_inside(cp1: torch.Tensor, cp2: torch.Tensor, p: torch.Tensor):
ineq = (cp2[0]-cp1[0])*(p[1]-cp1[1]) > (cp2[1]-cp1[1])*(p[0]-cp1[0])
return ineq.item()
def polygon_clip_unnest(subjectPolygon: torch.Tensor, clipPolygon: torch.Tensor):
""" Clip a polygon with another polygon.
Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python
Args:
subjectPolygon: a list of (x,y) 2d points, any polygon.
clipPolygon: a list of (x,y) 2d points, has to be *convex*
Note:
**points have to be counter-clockwise ordered**
Return:
a list of (x,y) vertex point for the intersection polygon.
"""
outputList = [subjectPolygon[x] for x in range(subjectPolygon.shape[0])]
cp1 = clipPolygon[-1]
for clipVertex in clipPolygon:
cp2 = clipVertex
inputList = outputList.copy()
outputList.clear()
s = inputList[-1]
for subjectVertex in inputList:
e = subjectVertex
if helper_inside(cp1, cp2, e):
if not helper_inside(cp1, cp2, s):
outputList.append(helper_computeIntersection(cp1, cp2, s, e))
outputList.append(e)
elif helper_inside(cp1, cp2, s):
outputList.append(helper_computeIntersection(cp1, cp2, s, e))
s = e
cp1 = cp2
if len(outputList) == 0:
# return None
break
return outputList
def poly_area(x,y):
""" Ref: http://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates """
return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))
def poly_area_tensor(x, y):
return 0.5*torch.abs(torch.dot(x,torch.roll(y,1))-torch.dot(y,torch.roll(x,1)))
def box3d_vol_tensor(corners):
EPS = 1e-6
reshape = False
B, K = corners.shape[0], corners.shape[1]
if len(corners.shape) == 4:
# batch x prop x 8 x 3
reshape = True
corners = corners.view(-1, 8, 3)
a = torch.sqrt((corners[:, 0, :] - corners[:, 1, :]).pow(2).sum(dim=1).clamp(min=EPS))
b = torch.sqrt((corners[:, 1, :] - corners[:, 2, :]).pow(2).sum(dim=1).clamp(min=EPS))
c = torch.sqrt((corners[:, 0, :] - corners[:, 4, :]).pow(2).sum(dim=1).clamp(min=EPS))
vols = a * b * c
if reshape:
vols = vols.view(B, K)
return vols
def convex_hull_intersection(p1, p2):
""" Compute area of two convex hull's intersection area.
p1,p2 are a list of (x,y) tuples of hull vertices.
return a list of (x,y) for the intersection and its volume
"""
inter_p = polygon_clip(p1,p2)
if inter_p is not None:
hull_inter = ConvexHull(inter_p)
return inter_p, hull_inter.volume
else:
return None, 0.0
def box3d_vol(corners):
''' corners: (8,3) no assumption on axis direction '''
a = np.sqrt(np.sum((corners[0,:] - corners[1,:])**2))
b = np.sqrt(np.sum((corners[1,:] - corners[2,:])**2))
c = np.sqrt(np.sum((corners[0,:] - corners[4,:])**2))
return a*b*c
def enclosing_box3d_vol(corners1, corners2):
"""
volume of enclosing axis-aligned box
"""
assert len(corners1.shape) == 4
assert len(corners2.shape)== 4
assert corners1.shape[0] == corners2.shape[0]
assert corners1.shape[2] == 8
assert corners1.shape[3] == 3
assert corners2.shape[2] == 8
assert corners2.shape[3] == 3
EPS = 1e-6
corners1 = corners1.clone()
corners2 = corners2.clone()
# flip Y axis, since it is negative
corners1[:, :, :, 1] *= -1
corners2[:, :, :, 1] *= -1
# min_a = torch.min(corners1[:, :, 0, :][:, :, None, :] , corners2[:, :, 0, :][:, None, :, :])
# max_a = torch.max(corners1[:, :, 1, :][:, :, None, :] , corners2[:, :, 1, :][:, None, :, :])
# a = (max_a - min_a).pow(2).sum(dim=3).clamp(min=EPS).sqrt()
# min_b = torch.min(corners1[:, :, 1, :][:, :, None, :] , corners2[:, :, 1, :][:, None, :, :])
# max_b = torch.max(corners1[:, :, 2, :][:, :, None, :] , corners2[:, :, 2, :][:, None, :, :])
# b = (max_b - min_b).pow(2).sum(dim=3).clamp(min=EPS).sqrt()
# min_c = torch.min(corners1[:, :, 0, :][:, :, None, :] , corners2[:, :, 0, :][:, None, :, :])
# max_c = torch.max(corners1[:, :, 4, :][:, :, None, :] , corners2[:, :, 4, :][:, None, :, :])
# c = (max_c - min_c).pow(2).sum(dim=3).clamp(min=EPS).sqrt()
# vol = a * b * c
al_xmin = torch.min( torch.min(corners1[:, :, :, 0], dim=2).values[:, :, None], torch.min(corners2[:, :, :, 0], dim=2).values[:, None, :])
al_ymin = torch.max( torch.max(corners1[:, :, :, 1], dim=2).values[:, :, None], torch.max(corners2[:, :, :, 1], dim=2).values[:, None, :])
al_zmin = torch.min( torch.min(corners1[:, :, :, 2], dim=2).values[:, :, None], torch.min(corners2[:, :, :, 2], dim=2).values[:, None, :])
al_xmax = torch.max( torch.max(corners1[:, :, :, 0], dim=2).values[:, :, None], torch.max(corners2[:, :, :, 0], dim=2).values[:, None, :])
al_ymax = torch.min( torch.min(corners1[:, :, :, 1], dim=2).values[:, :, None], torch.min(corners2[:, :, :, 1], dim=2).values[:, None, :])
al_zmax = torch.max( torch.max(corners1[:, :, :, 2], dim=2).values[:, :, None], torch.max(corners2[:, :, :, 2], dim=2).values[:, None, :])
diff_x = torch.abs(al_xmax - al_xmin)
diff_y = torch.abs(al_ymax - al_ymin)
diff_z = torch.abs(al_zmax - al_zmin)
vol = diff_x * diff_y * diff_z
return vol
def is_clockwise(p):
x = p[:,0]
y = p[:,1]
return np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)) > 0
def box3d_iou(corners1, corners2):
''' Compute 3D bounding box IoU.
Input:
corners1: numpy array (8,3), assume up direction is negative Y
corners2: numpy array (8,3), assume up direction is negative Y
Output:
iou: 3D bounding box IoU
        union: union volume of the two boxes (the bird's eye view 2D IoU computation is commented out below)
todo (rqi): add more description on corner points' orders.
'''
rect1 = [(corners1[i,0], corners1[i,2]) for i in range(3,-1,-1)]
rect2 = [(corners2[i,0], corners2[i,2]) for i in range(3,-1,-1)]
inter, inter_area = convex_hull_intersection(rect1, rect2)
# corner points are in counter clockwise order
# area1 = poly_area(np.array(rect1)[:,0], np.array(rect1)[:,1])
# area2 = poly_area(np.array(rect2)[:,0], np.array(rect2)[:,1])
# iou_2d = inter_area/(area1+area2-inter_area)
ymax = min(corners1[0,1], corners2[0,1])
ymin = max(corners1[4,1], corners2[4,1])
inter_vol = inter_area * max(0.0, ymax-ymin)
vol1 = box3d_vol(corners1)
vol2 = box3d_vol(corners2)
union = (vol1 + vol2 - inter_vol)
iou = inter_vol / union
return iou, union
@torch.jit.ignore
def to_list_1d(arr) -> List[float]:
arr = arr.detach().cpu().numpy().tolist()
return arr
@torch.jit.ignore
def to_list_3d(arr) -> List[List[List[float]]]:
arr = arr.detach().cpu().numpy().tolist()
return arr
def generalized_box3d_iou_tensor_non_diff(corners1: torch.Tensor, corners2: torch.Tensor, nums_k2: torch.Tensor, rotated_boxes: bool = True,
return_inter_vols_only: bool = False,
approximate: bool = True):
if batch_intersect is None:
return generalized_box3d_iou_tensor_jit(corners1, corners2, nums_k2, rotated_boxes, return_inter_vols_only)
else:
assert len(corners1.shape) == 4
assert len(corners2.shape)== 4
assert corners1.shape[2] == 8
assert corners1.shape[3] == 3
assert corners1.shape[0] == corners2.shape[0]
assert corners1.shape[2] == corners2.shape[2]
assert corners1.shape[3] == corners2.shape[3]
B, K1 = corners1.shape[0], corners1.shape[1]
_, K2 = corners2.shape[0], corners2.shape[1]
# # box height. Y is negative, so max is torch.min
ymax = torch.min(corners1[:, :, 0,1][:, :, None], corners2[:, :, 0,1][:, None, :])
ymin = torch.max(corners1[:, :, 4,1][:, :, None], corners2[:, :, 4,1][:, None, :])
height = (ymax - ymin).clamp(min=0)
EPS = 1e-8
idx = torch.arange(start=3, end=-1, step=-1, device=corners1.device)
idx2 = torch.tensor([0,2], dtype=torch.int64, device=corners1.device)
rect1 = corners1[:, :, idx, :]
rect2 = corners2[:, :, idx, :]
rect1 = rect1[:, :, :, idx2]
rect2 = rect2[:, :, :, idx2]
lt = torch.max(rect1[:, :, 1][:, :, None, :], rect2[:, :, 1][:, None, : ,:])
rb = torch.min(rect1[:, :, 3][:, :, None, :], rect2[:, :, 3][:, None, : ,:])
wh = (rb - lt).clamp(min=0)
non_rot_inter_areas = wh[:, :, :, 0] * wh[:, :, :, 1]
non_rot_inter_areas = non_rot_inter_areas.view(B, K1, K2)
if nums_k2 is not None:
for b in range(B):
non_rot_inter_areas[b, :, nums_k2[b]:] = 0
enclosing_vols = enclosing_box3d_vol(corners1, corners2)
# vols of boxes
vols1 = box3d_vol_tensor(corners1).clamp(min=EPS)
vols2 = box3d_vol_tensor(corners2).clamp(min=EPS)
sum_vols = vols1[:, :, None] + vols2[:, None, :]
# filter malformed boxes
good_boxes = (enclosing_vols > 2*EPS) * (sum_vols > 4*EPS)
if rotated_boxes:
inter_areas = np.zeros((B, K1, K2), dtype=np.float32)
rect1 = rect1.cpu().detach().numpy()
rect2 = rect2.cpu().detach().numpy()
nums_k2_np = nums_k2.cpu().numpy()
non_rot_inter_areas_np = non_rot_inter_areas.cpu().detach().numpy()
batch_intersect(rect1, rect2, non_rot_inter_areas_np, nums_k2_np, inter_areas, approximate)
inter_areas = torch.from_numpy(inter_areas)
else:
inter_areas = non_rot_inter_areas
inter_areas = inter_areas.to(corners1.device)
### gIOU = iou - (1 - sum_vols/enclose_vol)
inter_vols = inter_areas * height
if return_inter_vols_only:
return inter_vols
union_vols = (sum_vols - inter_vols).clamp(min=EPS)
ious = inter_vols / union_vols
giou_second_term = - (1 - union_vols / enclosing_vols)
gious = ious + giou_second_term
gious *= good_boxes
if nums_k2 is not None:
mask = torch.zeros((B, K1, K2), device=height.device, dtype=torch.float32)
for b in range(B):
mask[b,:,:nums_k2[b]] = 1
gious *= mask
return gious
def generalized_box3d_iou_tensor(corners1: torch.Tensor, corners2: torch.Tensor, nums_k2: torch.Tensor, rotated_boxes: bool = True,
return_inter_vols_only: bool = False, no_grad: bool = False):
"""
Input:
corners1: torch Tensor (B, K1, 8, 3), assume up direction is negative Y
corners2: torch Tensor (B, K2, 8, 3), assume up direction is negative Y
Assumes that the box is only rotated along Z direction
Returns:
B x K1 x K2 matrix of generalized IOU by approximating the boxes to be axis aligned
The return IOU is differentiable
"""
assert len(corners1.shape) == 4
assert len(corners2.shape)== 4
assert corners1.shape[2] == 8
assert corners1.shape[3] == 3
assert corners1.shape[0] == corners2.shape[0]
assert corners1.shape[2] == corners2.shape[2]
assert corners1.shape[3] == corners2.shape[3]
B, K1 = corners1.shape[0], corners1.shape[1]
_, K2 = corners2.shape[0], corners2.shape[1]
# # box height. Y is negative, so max is torch.min
ymax = torch.min(corners1[:, :, 0,1][:, :, None], corners2[:, :, 0,1][:, None, :])
ymin = torch.max(corners1[:, :, 4,1][:, :, None], corners2[:, :, 4,1][:, None, :])
height = (ymax - ymin).clamp(min=0)
EPS = 1e-8
idx = torch.arange(start=3, end=-1, step=-1, device=corners1.device)
idx2 = torch.tensor([0,2], dtype=torch.int64, device=corners1.device)
rect1 = corners1[:, :, idx, :]
rect2 = corners2[:, :, idx, :]
rect1 = rect1[:, :, :, idx2]
rect2 = rect2[:, :, :, idx2]
lt = torch.max(rect1[:, :, 1][:, :, None, :], rect2[:, :, 1][:, None, : ,:])
rb = torch.min(rect1[:, :, 3][:, :, None, :], rect2[:, :, 3][:, None, : ,:])
wh = (rb - lt).clamp(min=0)
non_rot_inter_areas = wh[:, :, :, 0] * wh[:, :, :, 1]
non_rot_inter_areas = non_rot_inter_areas.view(B, K1, K2)
if nums_k2 is not None:
for b in range(B):
non_rot_inter_areas[b, :, nums_k2[b]:] = 0
enclosing_vols = enclosing_box3d_vol(corners1, corners2)
# vols of boxes
vols1 = box3d_vol_tensor(corners1).clamp(min=EPS)
vols2 = box3d_vol_tensor(corners2).clamp(min=EPS)
sum_vols = vols1[:, :, None] + vols2[:, None, :]
# filter malformed boxes
good_boxes = (enclosing_vols > 2*EPS) * (sum_vols > 4*EPS)
if rotated_boxes:
inter_areas = torch.zeros((B, K1, K2), dtype=torch.float32)
rect1 = rect1.cpu()
rect2 = rect2.cpu()
nums_k2_np = to_list_1d(nums_k2)
non_rot_inter_areas_np = to_list_3d(non_rot_inter_areas)
for b in range(B):
for k1 in range(K1):
for k2 in range(K2):
if nums_k2 is not None and k2 >= nums_k2_np[b]:
break
if non_rot_inter_areas_np[b][k1][k2] == 0:
continue
##### compute volume of intersection
# inter = polygon_clip(rect1[b, k1], rect2[b, k2])
inter = polygon_clip_unnest(rect1[b, k1], rect2[b, k2])
# if inter is None:
# if len(inter) == 0:
# # area = torch.zeros(1, dtype=torch.float32, device=inter_areas.device).squeeze()
# # area = 0
# continue
# else:
if len(inter) > 0:
# inter = torch.stack(inter)
# xs = inter[:, 0]
# ys = inter[:, 1]
xs = torch.stack([x[0] for x in inter])
ys = torch.stack([x[1] for x in inter])
# area = poly_area_tensor(xs, ys)
inter_areas[b,k1,k2] = torch.abs(torch.dot(xs,torch.roll(ys,1))-torch.dot(ys,torch.roll(xs,1)))
inter_areas.mul_(0.5)
else:
inter_areas = non_rot_inter_areas
inter_areas = inter_areas.to(corners1.device)
### gIOU = iou - (1 - sum_vols/enclose_vol)
inter_vols = inter_areas * height
if return_inter_vols_only:
return inter_vols
union_vols = (sum_vols - inter_vols).clamp(min=EPS)
ious = inter_vols / union_vols
giou_second_term = - (1 - union_vols / enclosing_vols)
gious = ious + giou_second_term
gious *= good_boxes
if nums_k2 is not None:
mask = torch.zeros((B, K1, K2), device=height.device, dtype=torch.float32)
for b in range(B):
mask[b,:,:nums_k2[b]] = 1
gious *= mask
return gious
generalized_box3d_iou_tensor_jit = torch.jit.script(generalized_box3d_iou_tensor)
def enclosing_box3d_convex_hull(corners1, corners2, nums_k2, mask, enclosing_vols=None):
B, K1 = corners1.shape[0], corners1.shape[1]
_, K2 = corners2.shape[0], corners2.shape[1]
if enclosing_vols is None:
enclosing_vols = np.zeros((B, K1, K2)).astype(np.float32)
for b in range(B):
for k1 in range(K1):
for k2 in range(K2):
if nums_k2 is not None and k2 >= nums_k2[b]:
break
if mask is not None and mask[b,k1,k2] <= 0:
continue
hull = ConvexHull(np.vstack([corners1[b, k1], corners2[b, k2]]))
enclosing_vols[b, k1, k2] = hull.volume
return enclosing_vols
enclosing_box3d_convex_hull_numba = autojit(enclosing_box3d_convex_hull)
# enclosing_box3d_convex_hull_numba = enclosing_box3d_convex_hull
def generalized_box3d_iou_convex_hull_nondiff_tensor(corners1: torch.Tensor, corners2: torch.Tensor, nums_k2: torch.Tensor, rotated_boxes: bool = True):
"""
Input:
corners1: torch Tensor (B, K1, 8, 3), assume up direction is negative Y
corners2: torch Tensor (B, K2, 8, 3), assume up direction is negative Y
Assumes that the box is only rotated along Z direction
Returns:
B x K1 x K2 matrix of generalized IOU; the enclosing volume is computed from the convex hull of the two boxes (via numba on CPU)
The returned IOU is therefore not suitable for backpropagation; use the differentiable variant above for that
"""
assert len(corners1.shape) == 4
assert len(corners2.shape)== 4
assert corners1.shape[2] == 8
assert corners1.shape[3] == 3
assert corners1.shape[0] == corners2.shape[0]
assert corners1.shape[2] == corners2.shape[2]
assert corners1.shape[3] == corners2.shape[3]
B, K1 = corners1.shape[0], corners1.shape[1]
_, K2 = corners2.shape[0], corners2.shape[1]
EPS = 1e-8
# vols of boxes
vols1 = box3d_vol_tensor(corners1).clamp(min=EPS)
vols2 = box3d_vol_tensor(corners2).clamp(min=EPS)
sum_vols = vols1[:, :, None] + vols2[:, None, :]
inter_vols = generalized_box3d_iou_tensor_jit(corners1, corners2, nums_k2, rotated_boxes, return_inter_vols_only=True)
enclosing_vols = enclosing_box3d_vol(corners1, corners2)
if rotated_boxes:
corners1_np = corners1.detach().cpu().numpy()
corners2_np = corners2.detach().cpu().numpy()
mask = inter_vols.detach().cpu().numpy()
nums_k2 = nums_k2.cpu().numpy()
enclosing_vols_np = enclosing_vols.detach().cpu().numpy()
enclosing_vols = enclosing_box3d_convex_hull_numba(corners1_np, corners2_np, nums_k2, mask, enclosing_vols_np)
enclosing_vols = torch.from_numpy(enclosing_vols).to(corners1.device)
union_vols = (sum_vols - inter_vols).clamp(min=EPS)
ious = inter_vols / union_vols
giou_second_term = - (1 - union_vols / enclosing_vols)
gious = ious + giou_second_term
good_boxes = (enclosing_vols > 2*EPS) * (sum_vols > 4*EPS)
gious *= good_boxes
if nums_k2 is not None:
mask = torch.zeros((B, K1, K2), device=corners1.device, dtype=torch.float32)
for b in range(B):
mask[b,:,:nums_k2[b]] = 1
gious *= mask
return gious
def generalized_box3d_iou(corners1, corners2, nums_k2=None):
"""
Input:
corners1: torch Tensor (B, K1, 8, 3), assume up direction is negative Y
corners2: torch Tensor (B, K2, 8, 3), assume up direction is negative Y
nums_k2: (B,) optional number of valid boxes per batch element in corners2
Returns:
B x K1 x K2 matrix of generalized IOU
"""
# GenIOU = IOU - (C - sum_of_vols)/ C
# where C = vol of convex_hull containing all points
# degenerate boxes gives inf / nan results
# so do an early check
#TODO:
assert corners1.ndim == 4
assert corners2.ndim == 4
assert corners1.shape[0] == corners2.shape[0]
B, K1, _ , _ = corners1.shape
_, K2, _, _ = corners2.shape
gious = torch.zeros((B, K1, K2), dtype=torch.float32)
corners1_np = corners1.detach().cpu().numpy()
corners2_np = corners2.detach().cpu().numpy()
for b in range(B):
for i in range(K1):
for j in range(K2):
if nums_k2 is not None and j >= nums_k2[b]:
break
iou, sum_of_vols = box3d_iou(corners1_np[b, i], corners2_np[b, j])
hull = ConvexHull(np.vstack([corners1_np[b, i], corners2_np[b, j]]))
C = hull.volume
giou = iou - (C - sum_of_vols) / C
gious[b, i, j] = giou
return gious
# -----------------------------------------------------------
# Convert from box parameters to 3D corner representations
# -----------------------------------------------------------
def roty(t):
"""Rotation about the y-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s],
[0, 1, 0],
[-s, 0, c]])
def roty_batch(t):
"""Rotation about the y-axis.
t: (x1,x2,...xn)
return: (x1,x2,...,xn,3,3)
"""
input_shape = t.shape
output = np.zeros(tuple(list(input_shape)+[3,3]))
c = np.cos(t)
s = np.sin(t)
output[...,0,0] = c
output[...,0,2] = s
output[...,1,1] = 1
output[...,2,0] = -s
output[...,2,2] = c
return output
def get_3d_box(box_size, heading_angle, center):
''' box_size is array(l,w,h), heading_angle is in radians, measured clockwise from the positive x axis, center is xyz of box center
output (8,3) array for 3D box corners
Similar to utils/compute_orientation_3d
'''
R = roty(heading_angle)
l,w,h = box_size
x_corners = [l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2];
y_corners = [h/2,h/2,h/2,h/2,-h/2,-h/2,-h/2,-h/2];
z_corners = [w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2];
corners_3d = np.dot(R, np.vstack([x_corners,y_corners,z_corners]))
corners_3d[0,:] = corners_3d[0,:] + center[0];
corners_3d[1,:] = corners_3d[1,:] + center[1];
corners_3d[2,:] = corners_3d[2,:] + center[2];
corners_3d = np.transpose(corners_3d)
return corners_3d
def get_3d_box_batch(box_size, heading_angle, center):
''' box_size: [x1,x2,...,xn,3] -- box dimensions without flipping [X, Y, Z] -- l, w, h
heading_angle: [x1,x2,...,xn] -- theta in radians
center: [x1,x2,...,xn,3] -- center point has been flipped to camera axis [X, -Z, Y]
Return:
[x1,x2,...,xn,8,3]
'''
input_shape = heading_angle.shape
R = roty_batch(heading_angle)
l = np.expand_dims(box_size[...,0], -1) # [x1,...,xn,1]
w = np.expand_dims(box_size[...,1], -1)
h = np.expand_dims(box_size[...,2], -1)
corners_3d = np.zeros(tuple(list(input_shape)+[8,3]))
corners_3d[...,:,0] = np.concatenate((l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2), -1)
corners_3d[...,:,1] = np.concatenate((h/2,h/2,h/2,h/2,-h/2,-h/2,-h/2,-h/2), -1)
corners_3d[...,:,2] = np.concatenate((w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2), -1)
tlist = [i for i in range(len(input_shape))]
tlist += [len(input_shape)+1, len(input_shape)]
corners_3d = np.matmul(corners_3d, np.transpose(R, tuple(tlist)))
corners_3d += np.expand_dims(center, -2)
return corners_3d
def roty_batch_tensor(t):
input_shape = t.shape
output = torch.zeros(tuple(list(input_shape)+[3,3]), dtype=torch.float32, device=t.device)
c = torch.cos(t)
s = torch.sin(t)
output[...,0,0] = c
output[...,0,2] = s
output[...,1,1] = 1
output[...,2,0] = -s
output[...,2,2] = c
return output
def flip_axis_to_camera_tensor(pc):
''' Flip X-right,Y-forward,Z-up to X-right,Y-down,Z-forward
Input and output are both (N,3) array
'''
pc2 = torch.clone(pc)
pc2[...,[0,1,2]] = pc2[...,[0,2,1]] # cam X,Y,Z = depth X,-Z,Y
pc2[...,1] *= -1
return pc2
def get_3d_box_batch_tensor(box_size, heading_angle, center):
assert isinstance(box_size, torch.Tensor)
assert isinstance(heading_angle, torch.Tensor)
assert isinstance(center, torch.Tensor)
reshape_final = False
if heading_angle.ndim == 2:
assert box_size.ndim == 3
assert center.ndim == 3
bsize = box_size.shape[0]
nprop = box_size.shape[1]
box_size = box_size.view(-1, box_size.shape[-1])
heading_angle = heading_angle.view(-1)
center = center.reshape(-1, 3)
reshape_final = True
input_shape = heading_angle.shape
R = roty_batch_tensor(heading_angle)
l = torch.unsqueeze(box_size[...,0], -1) # [x1,...,xn,1]
w = torch.unsqueeze(box_size[...,1], -1)
h = torch.unsqueeze(box_size[...,2], -1)
corners_3d = torch.zeros(tuple(list(input_shape)+[8,3]), device=box_size.device, dtype=torch.float32)
corners_3d[...,:,0] = torch.cat((l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2), -1)
corners_3d[...,:,1] = torch.cat((h/2,h/2,h/2,h/2,-h/2,-h/2,-h/2,-h/2), -1)
corners_3d[...,:,2] = torch.cat((w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2), -1)
tlist = [i for i in range(len(input_shape))]
tlist += [len(input_shape)+1, len(input_shape)]
corners_3d = torch.matmul(corners_3d, R.permute(tlist))
corners_3d += torch.unsqueeze(center, -2)
if reshape_final:
corners_3d = corners_3d.reshape(bsize, nprop, 8, 3)
return corners_3d
def box_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(-1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
(x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=-1)
def box_xyxy_to_cxcywh(x):
x0, y0, x1, y1 = x.unbind(-1)
b = [(x0 + x1) / 2, (y0 + y1) / 2,
(x1 - x0), (y1 - y0)]
return torch.stack(b, dim=-1)
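# Editor's note (illustrative sketch, not part of the upstream file): the two
# converters above are inverses of each other along the last dimension.
# >>> b = torch.tensor([1., 1., 2., 2.])            # (cx, cy, w, h)
# >>> box_cxcywh_to_xyxy(b)                         # tensor([0., 0., 2., 2.])
# >>> box_xyxy_to_cxcywh(box_cxcywh_to_xyxy(b))     # tensor([1., 1., 2., 2.])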
if __name__=='__main__':
# Function for polygon ploting
import matplotlib
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import matplotlib.pyplot as plt
def plot_polys(plist,scale=500.0):
fig, ax = plt.subplots()
patches = []
for p in plist:
poly = Polygon(np.array(p)/scale, True)
patches.append(poly)
pc = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.5)
colors = 100*np.random.rand(len(patches))
pc.set_array(np.array(colors))
ax.add_collection(pc)
plt.show()
# Demo on ConvexHull
points = np.random.rand(30, 2) # 30 random points in 2-D
hull = ConvexHull(points)
# **In 2D "volume" is is area, "area" is perimeter
print(('Hull area: ', hull.volume))
for simplex in hull.simplices:
print(simplex)
# Demo on convex hull overlaps
sub_poly = [(0,0),(300,0),(300,300),(0,300)]
clip_poly = [(150,150),(300,300),(150,450),(0,300)]
inter_poly = polygon_clip(sub_poly, clip_poly)
print(poly_area(np.array(inter_poly)[:,0], np.array(inter_poly)[:,1]))
# Test convex hull interaction function
rect1 = [(50,0),(50,300),(300,300),(300,0)]
rect2 = [(150,150),(300,300),(150,450),(0,300)]
plot_polys([rect1, rect2])
inter, area = convex_hull_intersection(rect1, rect2)
print((inter, area))
if inter is not None:
print(poly_area(np.array(inter)[:,0], np.array(inter)[:,1]))
print('------------------')
rect1 = [(0.30026005199835404, 8.9408694211408424), \
(-1.1571105364358421, 9.4686676477075533), \
(0.1777082043006144, 13.154404877812102), \
(1.6350787927348105, 12.626606651245391)]
rect1 = [rect1[0], rect1[3], rect1[2], rect1[1]]
rect2 = [(0.23908745901608636, 8.8551095691132886), \
(-1.2771419487733995, 9.4269062966181956), \
(0.13138836963152717, 13.161896351296868), \
(1.647617777421013, 12.590099623791961)]
rect2 = [rect2[0], rect2[3], rect2[2], rect2[1]]
plot_polys([rect1, rect2])
inter, area = convex_hull_intersection(rect1, rect2)
print((inter, area))
| 3detr-main | utils/box_ops3d.py |
# Copyright (c) Facebook, Inc. and its affiliates.
""" Helper functions for calculating 2D and 3D bounding box IoU.
Collected and written by Charles R. Qi
Last modified: Apr 2021 by Ishan Misra
"""
import torch
import numpy as np
from scipy.spatial import ConvexHull, Delaunay
from utils.misc import to_list_1d, to_list_3d
try:
from utils.box_intersection import box_intersection
except ImportError:
print(
"Could not import cythonized box intersection. Consider compiling box_intersection.pyx for faster training."
)
box_intersection = None
def in_hull(p, hull):
if not isinstance(hull, Delaunay):
hull = Delaunay(hull)
return hull.find_simplex(p) >= 0
def extract_pc_in_box3d(pc, box3d):
"""pc: (N,3), box3d: (8,3)"""
box3d_roi_inds = in_hull(pc[:, 0:3], box3d)
return pc[box3d_roi_inds, :], box3d_roi_inds
def polygon_clip(subjectPolygon, clipPolygon):
"""Clip a polygon with another polygon.
Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python
Args:
subjectPolygon: a list of (x,y) 2d points, any polygon.
clipPolygon: a list of (x,y) 2d points, has to be *convex*
Note:
**points have to be counter-clockwise ordered**
Return:
a list of (x,y) vertex point for the intersection polygon.
"""
def inside(p):
return (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - cp1[1]) * (p[0] - cp1[0])
def computeIntersection():
dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]]
dp = [s[0] - e[0], s[1] - e[1]]
n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
n2 = s[0] * e[1] - s[1] * e[0]
n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])
return [(n1 * dp[0] - n2 * dc[0]) * n3, (n1 * dp[1] - n2 * dc[1]) * n3]
outputList = subjectPolygon
cp1 = clipPolygon[-1]
for clipVertex in clipPolygon:
cp2 = clipVertex
inputList = outputList
outputList = []
s = inputList[-1]
for subjectVertex in inputList:
e = subjectVertex
if inside(e):
if not inside(s):
outputList.append(computeIntersection())
outputList.append(e)
elif inside(s):
outputList.append(computeIntersection())
s = e
cp1 = cp2
if len(outputList) == 0:
return None
return outputList
def poly_area(x, y):
"""Ref: http://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates"""
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
def convex_hull_intersection(p1, p2):
"""Compute area of two convex hull's intersection area.
p1,p2 are a list of (x,y) tuples of hull vertices.
return a list of (x,y) for the intersection and its volume
"""
inter_p = polygon_clip(p1, p2)
if inter_p is not None:
hull_inter = ConvexHull(inter_p)
return inter_p, hull_inter.volume
else:
return None, 0.0
def box3d_vol(corners):
"""corners: (8,3) no assumption on axis direction"""
a = np.sqrt(np.sum((corners[0, :] - corners[1, :]) ** 2))
b = np.sqrt(np.sum((corners[1, :] - corners[2, :]) ** 2))
c = np.sqrt(np.sum((corners[0, :] - corners[4, :]) ** 2))
return a * b * c
def is_clockwise(p):
x = p[:, 0]
y = p[:, 1]
return np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)) > 0
def box3d_iou(corners1, corners2):
"""Compute 3D bounding box IoU.
Input:
corners1: numpy array (8,3), assume up direction is negative Y
corners2: numpy array (8,3), assume up direction is negative Y
Output:
iou: 3D bounding box IoU
iou_2d: bird's eye view 2D bounding box IoU
todo (rqi): add more description on corner points' orders.
"""
# corner points are in counter clockwise order
rect1 = [(corners1[i, 0], corners1[i, 2]) for i in range(3, -1, -1)]
rect2 = [(corners2[i, 0], corners2[i, 2]) for i in range(3, -1, -1)]
area1 = poly_area(np.array(rect1)[:, 0], np.array(rect1)[:, 1])
area2 = poly_area(np.array(rect2)[:, 0], np.array(rect2)[:, 1])
inter, inter_area = convex_hull_intersection(rect1, rect2)
iou_2d = inter_area / (area1 + area2 - inter_area)
ymax = min(corners1[0, 1], corners2[0, 1])
ymin = max(corners1[4, 1], corners2[4, 1])
inter_vol = inter_area * max(0.0, ymax - ymin)
vol1 = box3d_vol(corners1)
vol2 = box3d_vol(corners2)
iou = inter_vol / (vol1 + vol2 - inter_vol)
return iou, iou_2d
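# Editor's note (illustrative sketch, not part of the upstream file): box3d_iou
# expects the (8, 3) corner ordering produced by get_3d_box below. Two
# axis-aligned 2x2x2 boxes with centers 1 unit apart along x overlap in a
# 1x2x2 region, so the 3D IoU is 4/12 and the bird's eye view IoU is 2/6,
# both equal to 1/3.
# >>> c1 = get_3d_box((2, 2, 2), 0, (0, 0, 0))
# >>> c2 = get_3d_box((2, 2, 2), 0, (1, 0, 0))
# >>> iou_3d, iou_bev = box3d_iou(c1, c2)   # both ~0.3333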
def get_iou(bb1, bb2):
"""
Calculate the Intersection over Union (IoU) of two 2D bounding boxes.
Parameters
----------
bb1 : dict
Keys: {'x1', 'x2', 'y1', 'y2'}
The (x1, y1) position is at the top left corner,
the (x2, y2) position is at the bottom right corner
bb2 : dict
Keys: {'x1', 'x2', 'y1', 'y2'}
The (x1, y1) position is at the top left corner,
the (x2, y2) position is at the bottom right corner
Returns
-------
float
in [0, 1]
"""
assert bb1["x1"] < bb1["x2"]
assert bb1["y1"] < bb1["y2"]
assert bb2["x1"] < bb2["x2"]
assert bb2["y1"] < bb2["y2"]
# determine the coordinates of the intersection rectangle
x_left = max(bb1["x1"], bb2["x1"])
y_top = max(bb1["y1"], bb2["y1"])
x_right = min(bb1["x2"], bb2["x2"])
y_bottom = min(bb1["y2"], bb2["y2"])
if x_right < x_left or y_bottom < y_top:
return 0.0
# The intersection of two axis-aligned bounding boxes is always an
# axis-aligned bounding box
intersection_area = (x_right - x_left) * (y_bottom - y_top)
# compute the area of both AABBs
bb1_area = (bb1["x2"] - bb1["x1"]) * (bb1["y2"] - bb1["y1"])
bb2_area = (bb2["x2"] - bb2["x1"]) * (bb2["y2"] - bb2["y1"])
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
areas - the intersection area
iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
assert iou >= 0.0
assert iou <= 1.0
return iou
def box2d_iou(box1, box2):
"""Compute 2D bounding box IoU.
Input:
box1: tuple of (xmin,ymin,xmax,ymax)
box2: tuple of (xmin,ymin,xmax,ymax)
Output:
iou: 2D IoU scalar
"""
return get_iou(
{"x1": box1[0], "y1": box1[1], "x2": box1[2], "y2": box1[3]},
{"x1": box2[0], "y1": box2[1], "x2": box2[2], "y2": box2[3]},
)
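# Editor's note (illustrative sketch, not part of the upstream file): two 2x2
# boxes offset by (1, 1) share a 1x1 intersection, so IoU = 1 / (4 + 4 - 1).
# >>> box2d_iou((0, 0, 2, 2), (1, 1, 3, 3))   # ~0.1429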
# -----------------------------------------------------------
# Convert from box parameters to 3D corner representations
# -----------------------------------------------------------
def roty(t):
"""Rotation about the y-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
def roty_batch(t):
"""Rotation about the y-axis.
t: (x1,x2,...xn)
return: (x1,x2,...,xn,3,3)
"""
input_shape = t.shape
output = np.zeros(tuple(list(input_shape) + [3, 3]))
c = np.cos(t)
s = np.sin(t)
output[..., 0, 0] = c
output[..., 0, 2] = s
output[..., 1, 1] = 1
output[..., 2, 0] = -s
output[..., 2, 2] = c
return output
def get_3d_box(box_size, heading_angle, center):
"""box_size is array(l,w,h), heading_angle is radius clockwise from pos x axis, center is xyz of box center
output (8,3) array for 3D box cornders
Similar to utils/compute_orientation_3d
"""
R = roty(heading_angle)
l, w, h = box_size
x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
y_corners = [h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2]
z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))
corners_3d[0, :] = corners_3d[0, :] + center[0]
corners_3d[1, :] = corners_3d[1, :] + center[1]
corners_3d[2, :] = corners_3d[2, :] + center[2]
corners_3d = np.transpose(corners_3d)
return corners_3d
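# Editor's note (illustrative sketch, not part of the upstream file): with zero
# heading the corners are axis aligned; corner 0 is the (+x, +y, +z) corner and
# corner 4 sits directly below it at -y, which is the ordering box3d_vol and
# box3d_iou above rely on.
# >>> corners = get_3d_box((4, 2, 3), 0, (0, 0, 0))   # l=4, w=2, h=3
# >>> corners.shape                                   # (8, 3)
# >>> corners[0], corners[4]                          # [2., 1.5, 1.], [2., -1.5, 1.]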
def flip_axis_to_camera_np(pc):
"""Flip X-right,Y-forward,Z-up to X-right,Y-down,Z-forward
Input and output are both (N,3) array
"""
pc2 = pc.copy()
pc2[..., [0, 1, 2]] = pc2[..., [0, 2, 1]] # cam X,Y,Z = depth X,-Z,Y
pc2[..., 1] *= -1
return pc2
def get_3d_box_batch_np(box_size, angle, center):
input_shape = angle.shape
R = roty_batch(angle)
l = np.expand_dims(box_size[..., 0], -1) # [x1,...,xn,1]
w = np.expand_dims(box_size[..., 1], -1)
h = np.expand_dims(box_size[..., 2], -1)
corners_3d = np.zeros(tuple(list(input_shape) + [8, 3]))
corners_3d[..., :, 0] = np.concatenate(
(l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2), -1
)
corners_3d[..., :, 1] = np.concatenate(
(h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2), -1
)
corners_3d[..., :, 2] = np.concatenate(
(w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2), -1
)
tlist = [i for i in range(len(input_shape))]
tlist += [len(input_shape) + 1, len(input_shape)]
corners_3d = np.matmul(corners_3d, np.transpose(R, tuple(tlist)))
corners_3d += np.expand_dims(center, -2)
return corners_3d
def flip_axis_to_camera_tensor(pc):
"""Flip X-right,Y-forward,Z-up to X-right,Y-down,Z-forward
Input and output are both (N,3) array
"""
pc2 = torch.clone(pc)
pc2[..., [0, 1, 2]] = pc2[..., [0, 2, 1]] # cam X,Y,Z = depth X,-Z,Y
pc2[..., 1] *= -1
return pc2
def roty_batch_tensor(t):
input_shape = t.shape
output = torch.zeros(
tuple(list(input_shape) + [3, 3]), dtype=torch.float32, device=t.device
)
c = torch.cos(t)
s = torch.sin(t)
output[..., 0, 0] = c
output[..., 0, 2] = s
output[..., 1, 1] = 1
output[..., 2, 0] = -s
output[..., 2, 2] = c
return output
def get_3d_box_batch_tensor(box_size, angle, center):
assert isinstance(box_size, torch.Tensor)
assert isinstance(angle, torch.Tensor)
assert isinstance(center, torch.Tensor)
reshape_final = False
if angle.ndim == 2:
assert box_size.ndim == 3
assert center.ndim == 3
bsize = box_size.shape[0]
nprop = box_size.shape[1]
box_size = box_size.reshape(-1, box_size.shape[-1])
angle = angle.reshape(-1)
center = center.reshape(-1, 3)
reshape_final = True
input_shape = angle.shape
R = roty_batch_tensor(angle)
l = torch.unsqueeze(box_size[..., 0], -1) # [x1,...,xn,1]
w = torch.unsqueeze(box_size[..., 1], -1)
h = torch.unsqueeze(box_size[..., 2], -1)
corners_3d = torch.zeros(
tuple(list(input_shape) + [8, 3]), device=box_size.device, dtype=torch.float32
)
corners_3d[..., :, 0] = torch.cat(
(l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2), -1
)
corners_3d[..., :, 1] = torch.cat(
(h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2), -1
)
corners_3d[..., :, 2] = torch.cat(
(w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2), -1
)
tlist = [i for i in range(len(input_shape))]
tlist += [len(input_shape) + 1, len(input_shape)]
corners_3d = torch.matmul(corners_3d, R.permute(tlist))
corners_3d += torch.unsqueeze(center, -2)
if reshape_final:
corners_3d = corners_3d.reshape(bsize, nprop, 8, 3)
return corners_3d
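# Editor's note (the shapes below are assumptions for illustration): the tensor
# variant accepts flat (N, 3)/(N,) inputs or batched (B, nprop, 3)/(B, nprop)
# inputs and returns (..., 8, 3) corners on the same device as box_size.
# >>> sizes = torch.rand(2, 128, 3)
# >>> angles = torch.rand(2, 128) * 2 * np.pi
# >>> centers = torch.rand(2, 128, 3)
# >>> get_3d_box_batch_tensor(sizes, angles, centers).shape   # (2, 128, 8, 3)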
def get_3d_box_batch(box_size, angle, center):
"""box_size: [x1,x2,...,xn,3]
angle: [x1,x2,...,xn]
center: [x1,x2,...,xn,3]
Return:
[x1,x2,...,xn,8,3]
"""
input_shape = angle.shape
R = roty_batch(angle)
l = np.expand_dims(box_size[..., 0], -1) # [x1,...,xn,1]
w = np.expand_dims(box_size[..., 1], -1)
h = np.expand_dims(box_size[..., 2], -1)
corners_3d = np.zeros(tuple(list(input_shape) + [8, 3]))
corners_3d[..., :, 0] = np.concatenate(
(l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2), -1
)
corners_3d[..., :, 1] = np.concatenate(
(h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2), -1
)
corners_3d[..., :, 2] = np.concatenate(
(w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2), -1
)
tlist = [i for i in range(len(input_shape))]
tlist += [len(input_shape) + 1, len(input_shape)]
corners_3d = np.matmul(corners_3d, np.transpose(R, tuple(tlist)))
corners_3d += np.expand_dims(center, -2)
return corners_3d
####### GIoU related operations. Differentiable #############
def helper_computeIntersection(
cp1: torch.Tensor, cp2: torch.Tensor, s: torch.Tensor, e: torch.Tensor
):
dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]]
dp = [s[0] - e[0], s[1] - e[1]]
n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
n2 = s[0] * e[1] - s[1] * e[0]
n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])
# return [(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3]
return torch.stack([(n1 * dp[0] - n2 * dc[0]) * n3, (n1 * dp[1] - n2 * dc[1]) * n3])
def helper_inside(cp1: torch.Tensor, cp2: torch.Tensor, p: torch.Tensor):
ineq = (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - cp1[1]) * (p[0] - cp1[0])
return ineq.item()
def polygon_clip_unnest(subjectPolygon: torch.Tensor, clipPolygon: torch.Tensor):
"""Clip a polygon with another polygon.
Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python
Args:
subjectPolygon: an (N, 2) tensor of (x,y) 2d points, any polygon.
clipPolygon: an (M, 2) tensor of (x,y) 2d points, has to be *convex*
Note:
**points have to be counter-clockwise ordered**
Return:
a list of (x,y) vertex point for the intersection polygon.
"""
outputList = [subjectPolygon[x] for x in range(subjectPolygon.shape[0])]
cp1 = clipPolygon[-1]
for clipVertex in clipPolygon:
cp2 = clipVertex
inputList = outputList.copy()
outputList.clear()
s = inputList[-1]
for subjectVertex in inputList:
e = subjectVertex
if helper_inside(cp1, cp2, e):
if not helper_inside(cp1, cp2, s):
outputList.append(helper_computeIntersection(cp1, cp2, s, e))
outputList.append(e)
elif helper_inside(cp1, cp2, s):
outputList.append(helper_computeIntersection(cp1, cp2, s, e))
s = e
cp1 = cp2
if len(outputList) == 0:
# return None
break
return outputList
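# Editor's note (illustrative sketch, not part of the upstream file): clipping
# one square against another, both given as (4, 2) tensors with vertices in
# counter-clockwise order; the result is a list of 2D vertex tensors bounding
# the 1x1 overlap region.
# >>> sq1 = torch.tensor([[0., 0.], [2., 0.], [2., 2.], [0., 2.]])
# >>> sq2 = torch.tensor([[1., 1.], [3., 1.], [3., 3.], [1., 3.]])
# >>> verts = polygon_clip_unnest(sq1, sq2)   # 4 vertices of the unit overlap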
def box3d_vol_tensor(corners):
EPS = 1e-6
reshape = False
B, K = corners.shape[0], corners.shape[1]
if len(corners.shape) == 4:
# batch x prop x 8 x 3
reshape = True
corners = corners.view(-1, 8, 3)
a = torch.sqrt(
(corners[:, 0, :] - corners[:, 1, :]).pow(2).sum(dim=1).clamp(min=EPS)
)
b = torch.sqrt(
(corners[:, 1, :] - corners[:, 2, :]).pow(2).sum(dim=1).clamp(min=EPS)
)
c = torch.sqrt(
(corners[:, 0, :] - corners[:, 4, :]).pow(2).sum(dim=1).clamp(min=EPS)
)
vols = a * b * c
if reshape:
vols = vols.view(B, K)
return vols
def enclosing_box3d_vol(corners1, corners2):
"""
volume of enclosing axis-aligned box
"""
assert len(corners1.shape) == 4
assert len(corners2.shape) == 4
assert corners1.shape[0] == corners2.shape[0]
assert corners1.shape[2] == 8
assert corners1.shape[3] == 3
assert corners2.shape[2] == 8
assert corners2.shape[3] == 3
EPS = 1e-6
corners1 = corners1.clone()
corners2 = corners2.clone()
# flip Y axis, since it is negative
corners1[:, :, :, 1] *= -1
corners2[:, :, :, 1] *= -1
al_xmin = torch.min(
torch.min(corners1[:, :, :, 0], dim=2).values[:, :, None],
torch.min(corners2[:, :, :, 0], dim=2).values[:, None, :],
)
al_ymin = torch.max(
torch.max(corners1[:, :, :, 1], dim=2).values[:, :, None],
torch.max(corners2[:, :, :, 1], dim=2).values[:, None, :],
)
al_zmin = torch.min(
torch.min(corners1[:, :, :, 2], dim=2).values[:, :, None],
torch.min(corners2[:, :, :, 2], dim=2).values[:, None, :],
)
al_xmax = torch.max(
torch.max(corners1[:, :, :, 0], dim=2).values[:, :, None],
torch.max(corners2[:, :, :, 0], dim=2).values[:, None, :],
)
al_ymax = torch.min(
torch.min(corners1[:, :, :, 1], dim=2).values[:, :, None],
torch.min(corners2[:, :, :, 1], dim=2).values[:, None, :],
)
al_zmax = torch.max(
torch.max(corners1[:, :, :, 2], dim=2).values[:, :, None],
torch.max(corners2[:, :, :, 2], dim=2).values[:, None, :],
)
diff_x = torch.abs(al_xmax - al_xmin)
diff_y = torch.abs(al_ymax - al_ymin)
diff_z = torch.abs(al_zmax - al_zmin)
vol = diff_x * diff_y * diff_z
return vol
def generalized_box3d_iou_tensor(
corners1: torch.Tensor,
corners2: torch.Tensor,
nums_k2: torch.Tensor,
rotated_boxes: bool = True,
return_inter_vols_only: bool = False,
):
"""
Input:
corners1: torch Tensor (B, K1, 8, 3), assume up direction is negative Y
corners2: torch Tensor (B, K2, 8, 3), assume up direction is negative Y
Assumes that the box is only rotated along Z direction
Returns:
B x K1 x K2 matrix of generalized IOU by approximating the boxes to be axis aligned
"""
assert len(corners1.shape) == 4
assert len(corners2.shape) == 4
assert corners1.shape[2] == 8
assert corners1.shape[3] == 3
assert corners1.shape[0] == corners2.shape[0]
assert corners1.shape[2] == corners2.shape[2]
assert corners1.shape[3] == corners2.shape[3]
B, K1 = corners1.shape[0], corners1.shape[1]
_, K2 = corners2.shape[0], corners2.shape[1]
# # box height. Y is negative, so max is torch.min
ymax = torch.min(corners1[:, :, 0, 1][:, :, None], corners2[:, :, 0, 1][:, None, :])
ymin = torch.max(corners1[:, :, 4, 1][:, :, None], corners2[:, :, 4, 1][:, None, :])
height = (ymax - ymin).clamp(min=0)
EPS = 1e-8
idx = torch.arange(start=3, end=-1, step=-1, device=corners1.device)
idx2 = torch.tensor([0, 2], dtype=torch.int64, device=corners1.device)
rect1 = corners1[:, :, idx, :]
rect2 = corners2[:, :, idx, :]
rect1 = rect1[:, :, :, idx2]
rect2 = rect2[:, :, :, idx2]
lt = torch.max(rect1[:, :, 1][:, :, None, :], rect2[:, :, 1][:, None, :, :])
rb = torch.min(rect1[:, :, 3][:, :, None, :], rect2[:, :, 3][:, None, :, :])
wh = (rb - lt).clamp(min=0)
non_rot_inter_areas = wh[:, :, :, 0] * wh[:, :, :, 1]
non_rot_inter_areas = non_rot_inter_areas.view(B, K1, K2)
if nums_k2 is not None:
for b in range(B):
non_rot_inter_areas[b, :, nums_k2[b] :] = 0
enclosing_vols = enclosing_box3d_vol(corners1, corners2)
# vols of boxes
vols1 = box3d_vol_tensor(corners1).clamp(min=EPS)
vols2 = box3d_vol_tensor(corners2).clamp(min=EPS)
sum_vols = vols1[:, :, None] + vols2[:, None, :]
# filter malformed boxes
good_boxes = (enclosing_vols > 2 * EPS) * (sum_vols > 4 * EPS)
if rotated_boxes:
inter_areas = torch.zeros((B, K1, K2), dtype=torch.float32)
rect1 = rect1.cpu()
rect2 = rect2.cpu()
nums_k2_np = to_list_1d(nums_k2)
non_rot_inter_areas_np = to_list_3d(non_rot_inter_areas)
for b in range(B):
for k1 in range(K1):
for k2 in range(K2):
if nums_k2 is not None and k2 >= nums_k2_np[b]:
break
if non_rot_inter_areas_np[b][k1][k2] == 0:
continue
##### compute volume of intersection
inter = polygon_clip_unnest(rect1[b, k1], rect2[b, k2])
if len(inter) > 0:
xs = torch.stack([x[0] for x in inter])
ys = torch.stack([x[1] for x in inter])
inter_areas[b, k1, k2] = torch.abs(
torch.dot(xs, torch.roll(ys, 1))
- torch.dot(ys, torch.roll(xs, 1))
)
inter_areas.mul_(0.5)
else:
inter_areas = non_rot_inter_areas
inter_areas = inter_areas.to(corners1.device)
### gIOU = iou - (1 - sum_vols/enclose_vol)
inter_vols = inter_areas * height
if return_inter_vols_only:
return inter_vols
union_vols = (sum_vols - inter_vols).clamp(min=EPS)
ious = inter_vols / union_vols
giou_second_term = -(1 - union_vols / enclosing_vols)
gious = ious + giou_second_term
gious *= good_boxes
if nums_k2 is not None:
mask = torch.zeros((B, K1, K2), device=height.device, dtype=torch.float32)
for b in range(B):
mask[b, :, : nums_k2[b]] = 1
gious *= mask
return gious
generalized_box3d_iou_tensor_jit = torch.jit.script(generalized_box3d_iou_tensor)
def generalized_box3d_iou_cython(
corners1: torch.Tensor,
corners2: torch.Tensor,
nums_k2: torch.Tensor,
rotated_boxes: bool = True,
return_inter_vols_only: bool = False,
):
"""
Input:
corners1: torch Tensor (B, K1, 8, 3), assume up direction is negative Y
corners2: torch Tensor (B, K2, 8, 3), assume up direction is negative Y
Assumes that the box is only rotated along Z direction
Returns:
B x K1 x K2 matrix of generalized IOU by approximating the boxes to be axis aligned
"""
assert len(corners1.shape) == 4
assert len(corners2.shape) == 4
assert corners1.shape[2] == 8
assert corners1.shape[3] == 3
assert corners1.shape[0] == corners2.shape[0]
assert corners1.shape[2] == corners2.shape[2]
assert corners1.shape[3] == corners2.shape[3]
B, K1 = corners1.shape[0], corners1.shape[1]
_, K2 = corners2.shape[0], corners2.shape[1]
# # box height. Y is negative, so max is torch.min
ymax = torch.min(corners1[:, :, 0, 1][:, :, None], corners2[:, :, 0, 1][:, None, :])
ymin = torch.max(corners1[:, :, 4, 1][:, :, None], corners2[:, :, 4, 1][:, None, :])
height = (ymax - ymin).clamp(min=0)
EPS = 1e-8
idx = torch.arange(start=3, end=-1, step=-1, device=corners1.device)
idx2 = torch.tensor([0, 2], dtype=torch.int64, device=corners1.device)
rect1 = corners1[:, :, idx, :]
rect2 = corners2[:, :, idx, :]
rect1 = rect1[:, :, :, idx2]
rect2 = rect2[:, :, :, idx2]
lt = torch.max(rect1[:, :, 1][:, :, None, :], rect2[:, :, 1][:, None, :, :])
rb = torch.min(rect1[:, :, 3][:, :, None, :], rect2[:, :, 3][:, None, :, :])
wh = (rb - lt).clamp(min=0)
non_rot_inter_areas = wh[:, :, :, 0] * wh[:, :, :, 1]
non_rot_inter_areas = non_rot_inter_areas.view(B, K1, K2)
if nums_k2 is not None:
for b in range(B):
non_rot_inter_areas[b, :, nums_k2[b] :] = 0
enclosing_vols = enclosing_box3d_vol(corners1, corners2)
# vols of boxes
vols1 = box3d_vol_tensor(corners1).clamp(min=EPS)
vols2 = box3d_vol_tensor(corners2).clamp(min=EPS)
sum_vols = vols1[:, :, None] + vols2[:, None, :]
# filter malformed boxes
good_boxes = (enclosing_vols > 2 * EPS) * (sum_vols > 4 * EPS)
if rotated_boxes:
inter_areas = np.zeros((B, K1, K2), dtype=np.float32)
rect1 = rect1.cpu().numpy().astype(np.float32)
rect2 = rect2.cpu().numpy().astype(np.float32)
nums_k2_np = nums_k2.cpu().detach().numpy().astype(np.int32)
non_rot_inter_areas_np = (
non_rot_inter_areas.cpu().detach().numpy().astype(np.float32)
)
box_intersection(
rect1, rect2, non_rot_inter_areas_np, nums_k2_np, inter_areas, True
)
inter_areas = torch.from_numpy(inter_areas)
else:
inter_areas = non_rot_inter_areas
inter_areas = inter_areas.to(corners1.device)
### gIOU = iou - (1 - sum_vols/enclose_vol)
inter_vols = inter_areas * height
if return_inter_vols_only:
return inter_vols
union_vols = (sum_vols - inter_vols).clamp(min=EPS)
ious = inter_vols / union_vols
giou_second_term = -(1 - union_vols / enclosing_vols)
gious = ious + giou_second_term
gious *= good_boxes
if nums_k2 is not None:
mask = torch.zeros((B, K1, K2), device=height.device, dtype=torch.float32)
for b in range(B):
mask[b, :, : nums_k2[b]] = 1
gious *= mask
return gious
def generalized_box3d_iou(
corners1: torch.Tensor,
corners2: torch.Tensor,
nums_k2: torch.Tensor,
rotated_boxes: bool = True,
return_inter_vols_only: bool = False,
needs_grad: bool = False,
):
if needs_grad is True or box_intersection is None:
context = torch.enable_grad if needs_grad else torch.no_grad
with context():
return generalized_box3d_iou_tensor_jit(
corners1, corners2, nums_k2, rotated_boxes, return_inter_vols_only
)
else:
# Cythonized implementation of GIoU
with torch.no_grad():
return generalized_box3d_iou_cython(
corners1, corners2, nums_k2, rotated_boxes, return_inter_vols_only
)
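# Editor's note (a usage sketch; the random tensors stand in for real box
# corners, which in practice come from get_3d_box_batch_tensor above): nums_k2
# gives the number of valid ground-truth boxes per batch element, and
# needs_grad selects the differentiable pure-PyTorch path instead of the
# cythonized one.
# >>> pred_corners = torch.rand(2, 128, 8, 3)
# >>> gt_corners = torch.rand(2, 64, 8, 3)
# >>> nums_k2 = torch.tensor([64, 30])
# >>> generalized_box3d_iou(pred_corners, gt_corners, nums_k2).shape  # (2, 128, 64)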
| 3detr-main | utils/box_util.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Helper functions and class to calculate Average Precisions for 3D object detection.
"""
import logging
import os
import sys
from collections import OrderedDict
import numpy as np
import scipy.special as scipy_special
import torch
from utils.box_util import (extract_pc_in_box3d, flip_axis_to_camera_np,
get_3d_box, get_3d_box_batch)
from utils.eval_det import eval_det_multiprocessing, get_iou_obb
from utils.nms import nms_2d_faster, nms_3d_faster, nms_3d_faster_samecls
def flip_axis_to_depth(pc):
pc2 = np.copy(pc)
pc2[..., [0, 1, 2]] = pc2[..., [0, 2, 1]] # depth X,Y,Z = cam X,Z,-Y
pc2[..., 2] *= -1
return pc2
def softmax(x):
"""Numpy function for softmax"""
shape = x.shape
probs = np.exp(x - np.max(x, axis=len(shape) - 1, keepdims=True))
probs /= np.sum(probs, axis=len(shape) - 1, keepdims=True)
return probs
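# Editor's note (illustrative sketch, not part of the upstream file): the helper
# normalizes over the last axis of an arbitrary-rank numpy array, mirroring
# torch.softmax(x, dim=-1).
# >>> softmax(np.array([[1.0, 2.0, 3.0]]))   # ~[[0.090, 0.245, 0.665]]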
# This is exactly the same as VoteNet so that we can compare evaluations.
def parse_predictions(
predicted_boxes, sem_cls_probs, objectness_probs, point_cloud, config_dict
):
"""Parse predictions to OBB parameters and suppress overlapping boxes
Args:
predicted_boxes: (B, num_proposal, 8, 3) tensor of predicted box corners
sem_cls_probs: (B, num_proposal, num_semcls) semantic class probabilities
objectness_probs: (B, num_proposal) objectness probabilities
point_cloud: (B, N, 3+) input point clouds
config_dict: dict
{dataset_config, remove_empty_box, use_3d_nms, nms_iou,
use_old_type_nms, conf_thresh, per_class_proposal}
Returns:
batch_pred_map_cls: a list of len == batch size (BS)
[pred_list_i], i = 0, 1, ..., BS-1
where pred_list_i = [(pred_sem_cls, box_params, box_score)_j]
where j = 0, ..., num of valid detections - 1 from sample input i
"""
sem_cls_probs = sem_cls_probs.detach().cpu().numpy() # B,num_proposal,10
pred_sem_cls_prob = np.max(sem_cls_probs, -1) # B,num_proposal
pred_sem_cls = np.argmax(sem_cls_probs, -1)
obj_prob = objectness_probs.detach().cpu().numpy()
pred_corners_3d_upright_camera = predicted_boxes.detach().cpu().numpy()
K = pred_corners_3d_upright_camera.shape[1] # K==num_proposal
bsize = pred_corners_3d_upright_camera.shape[0]
nonempty_box_mask = np.ones((bsize, K))
if config_dict["remove_empty_box"]:
# -------------------------------------
# Remove predicted boxes without any point within them.
batch_pc = point_cloud.cpu().numpy()[:, :, 0:3] # B,N,3
for i in range(bsize):
pc = batch_pc[i, :, :] # (N,3)
for j in range(K):
box3d = pred_corners_3d_upright_camera[i, j, :, :] # (8,3)
box3d = flip_axis_to_depth(box3d)
pc_in_box, inds = extract_pc_in_box3d(pc, box3d)
if len(pc_in_box) < 5:
nonempty_box_mask[i, j] = 0
if nonempty_box_mask[i].sum() == 0:
nonempty_box_mask[i, obj_prob[i].argmax()] = 1
# -------------------------------------
if "no_nms" in config_dict and config_dict["no_nms"]:
# pred_mask = np.ones((bsize, K))
pred_mask = nonempty_box_mask
elif not config_dict["use_3d_nms"]:
# ---------- NMS input: pred_with_prob in (B,K,7) -----------
pred_mask = np.zeros((bsize, K))
for i in range(bsize):
boxes_2d_with_prob = np.zeros((K, 5))
for j in range(K):
boxes_2d_with_prob[j, 0] = np.min(
pred_corners_3d_upright_camera[i, j, :, 0]
)
boxes_2d_with_prob[j, 2] = np.max(
pred_corners_3d_upright_camera[i, j, :, 0]
)
boxes_2d_with_prob[j, 1] = np.min(
pred_corners_3d_upright_camera[i, j, :, 2]
)
boxes_2d_with_prob[j, 3] = np.max(
pred_corners_3d_upright_camera[i, j, :, 2]
)
boxes_2d_with_prob[j, 4] = obj_prob[i, j]
nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]
assert len(nonempty_box_inds) > 0
pick = nms_2d_faster(
boxes_2d_with_prob[nonempty_box_mask[i, :] == 1, :],
config_dict["nms_iou"],
config_dict["use_old_type_nms"],
)
assert len(pick) > 0
pred_mask[i, nonempty_box_inds[pick]] = 1
# ---------- NMS output: pred_mask in (B,K) -----------
elif config_dict["use_3d_nms"] and (not config_dict["cls_nms"]):
# ---------- NMS input: pred_with_prob in (B,K,7) -----------
pred_mask = np.zeros((bsize, K))
for i in range(bsize):
boxes_3d_with_prob = np.zeros((K, 7))
for j in range(K):
boxes_3d_with_prob[j, 0] = np.min(
pred_corners_3d_upright_camera[i, j, :, 0]
)
boxes_3d_with_prob[j, 1] = np.min(
pred_corners_3d_upright_camera[i, j, :, 1]
)
boxes_3d_with_prob[j, 2] = np.min(
pred_corners_3d_upright_camera[i, j, :, 2]
)
boxes_3d_with_prob[j, 3] = np.max(
pred_corners_3d_upright_camera[i, j, :, 0]
)
boxes_3d_with_prob[j, 4] = np.max(
pred_corners_3d_upright_camera[i, j, :, 1]
)
boxes_3d_with_prob[j, 5] = np.max(
pred_corners_3d_upright_camera[i, j, :, 2]
)
boxes_3d_with_prob[j, 6] = obj_prob[i, j]
nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]
assert len(nonempty_box_inds) > 0
pick = nms_3d_faster(
boxes_3d_with_prob[nonempty_box_mask[i, :] == 1, :],
config_dict["nms_iou"],
config_dict["use_old_type_nms"],
)
assert len(pick) > 0
pred_mask[i, nonempty_box_inds[pick]] = 1
# ---------- NMS output: pred_mask in (B,K) -----------
elif config_dict["use_3d_nms"] and config_dict["cls_nms"]:
# ---------- NMS input: pred_with_prob in (B,K,8) -----------
pred_mask = np.zeros((bsize, K))
for i in range(bsize):
boxes_3d_with_prob = np.zeros((K, 8))
for j in range(K):
boxes_3d_with_prob[j, 0] = np.min(
pred_corners_3d_upright_camera[i, j, :, 0]
)
boxes_3d_with_prob[j, 1] = np.min(
pred_corners_3d_upright_camera[i, j, :, 1]
)
boxes_3d_with_prob[j, 2] = np.min(
pred_corners_3d_upright_camera[i, j, :, 2]
)
boxes_3d_with_prob[j, 3] = np.max(
pred_corners_3d_upright_camera[i, j, :, 0]
)
boxes_3d_with_prob[j, 4] = np.max(
pred_corners_3d_upright_camera[i, j, :, 1]
)
boxes_3d_with_prob[j, 5] = np.max(
pred_corners_3d_upright_camera[i, j, :, 2]
)
boxes_3d_with_prob[j, 6] = obj_prob[i, j]
boxes_3d_with_prob[j, 7] = pred_sem_cls[
i, j
] # only suppress if the two boxes are of the same class!!
nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]
assert len(nonempty_box_inds) > 0
pick = nms_3d_faster_samecls(
boxes_3d_with_prob[nonempty_box_mask[i, :] == 1, :],
config_dict["nms_iou"],
config_dict["use_old_type_nms"],
)
assert len(pick) > 0
pred_mask[i, nonempty_box_inds[pick]] = 1
# ---------- NMS output: pred_mask in (B,K) -----------
batch_pred_map_cls = (
[]
) # a list (len: batch_size) of list (len: num of predictions per sample) of tuples of pred_cls, pred_box and conf (0-1)
for i in range(bsize):
if config_dict["per_class_proposal"]:
assert config_dict["use_cls_confidence_only"] is False
cur_list = []
for ii in range(config_dict["dataset_config"].num_semcls):
cur_list += [
(
ii,
pred_corners_3d_upright_camera[i, j],
sem_cls_probs[i, j, ii] * obj_prob[i, j],
)
for j in range(pred_corners_3d_upright_camera.shape[1])
if pred_mask[i, j] == 1
and obj_prob[i, j] > config_dict["conf_thresh"]
]
batch_pred_map_cls.append(cur_list)
elif config_dict["use_cls_confidence_only"]:
batch_pred_map_cls.append(
[
(
pred_sem_cls[i, j].item(),
pred_corners_3d_upright_camera[i, j],
sem_cls_probs[i, j, pred_sem_cls[i, j].item()],
)
for j in range(pred_corners_3d_upright_camera.shape[1])
if pred_mask[i, j] == 1
and obj_prob[i, j] > config_dict["conf_thresh"]
]
)
else:
batch_pred_map_cls.append(
[
(
pred_sem_cls[i, j].item(),
pred_corners_3d_upright_camera[i, j],
obj_prob[i, j],
)
for j in range(pred_corners_3d_upright_camera.shape[1])
if pred_mask[i, j] == 1
and obj_prob[i, j] > config_dict["conf_thresh"]
]
)
return batch_pred_map_cls
def get_ap_config_dict(
remove_empty_box=True,
use_3d_nms=True,
nms_iou=0.25,
use_old_type_nms=False,
cls_nms=True,
per_class_proposal=True,
use_cls_confidence_only=False,
conf_thresh=0.05,
no_nms=False,
dataset_config=None,
):
"""
Default mAP evaluation settings for VoteNet
"""
config_dict = {
"remove_empty_box": remove_empty_box,
"use_3d_nms": use_3d_nms,
"nms_iou": nms_iou,
"use_old_type_nms": use_old_type_nms,
"cls_nms": cls_nms,
"per_class_proposal": per_class_proposal,
"use_cls_confidence_only": use_cls_confidence_only,
"conf_thresh": conf_thresh,
"no_nms": no_nms,
"dataset_config": dataset_config,
}
return config_dict
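# Editor's note (a usage sketch; `dataset_config` stands in for one of the
# repo's dataset config objects, e.g. for ScanNet or SUN RGB-D): the dict built
# here drives the VoteNet-style NMS in parse_predictions above and is also the
# default APCalculator below falls back to when ap_config_dict is None.
# >>> cfg = get_ap_config_dict(dataset_config=dataset_config, nms_iou=0.25)
# >>> calculator = APCalculator(dataset_config, ap_iou_thresh=[0.25, 0.5],
# ...                           ap_config_dict=cfg)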
class APCalculator(object):
"""Calculating Average Precision"""
def __init__(
self,
dataset_config,
ap_iou_thresh=[0.25, 0.5],
class2type_map=None,
exact_eval=True,
ap_config_dict=None,
):
"""
Args:
ap_iou_thresh: List of float between 0 and 1.0
IoU threshold to judge whether a prediction is positive.
class2type_map: [optional] dict {class_int:class_name}
"""
self.ap_iou_thresh = ap_iou_thresh
if ap_config_dict is None:
ap_config_dict = get_ap_config_dict(
dataset_config=dataset_config, remove_empty_box=exact_eval
)
self.ap_config_dict = ap_config_dict
self.class2type_map = class2type_map
self.reset()
def make_gt_list(self, gt_box_corners, gt_box_sem_cls_labels, gt_box_present):
batch_gt_map_cls = []
bsize = gt_box_corners.shape[0]
for i in range(bsize):
batch_gt_map_cls.append(
[
(gt_box_sem_cls_labels[i, j].item(), gt_box_corners[i, j])
for j in range(gt_box_corners.shape[1])
if gt_box_present[i, j] == 1
]
)
return batch_gt_map_cls
def step_meter(self, outputs, targets):
if "outputs" in outputs:
outputs = outputs["outputs"]
self.step(
predicted_box_corners=outputs["box_corners"],
sem_cls_probs=outputs["sem_cls_prob"],
objectness_probs=outputs["objectness_prob"],
point_cloud=targets["point_clouds"],
gt_box_corners=targets["gt_box_corners"],
gt_box_sem_cls_labels=targets["gt_box_sem_cls_label"],
gt_box_present=targets["gt_box_present"],
)
def step(
self,
predicted_box_corners,
sem_cls_probs,
objectness_probs,
point_cloud,
gt_box_corners,
gt_box_sem_cls_labels,
gt_box_present,
):
"""
Perform NMS on predicted boxes and threshold them according to score.
Convert GT boxes
"""
gt_box_corners = gt_box_corners.cpu().detach().numpy()
gt_box_sem_cls_labels = gt_box_sem_cls_labels.cpu().detach().numpy()
gt_box_present = gt_box_present.cpu().detach().numpy()
batch_gt_map_cls = self.make_gt_list(
gt_box_corners, gt_box_sem_cls_labels, gt_box_present
)
batch_pred_map_cls = parse_predictions(
predicted_box_corners,
sem_cls_probs,
objectness_probs,
point_cloud,
self.ap_config_dict,
)
self.accumulate(batch_pred_map_cls, batch_gt_map_cls)
def accumulate(self, batch_pred_map_cls, batch_gt_map_cls):
"""Accumulate one batch of prediction and groundtruth.
Args:
batch_pred_map_cls: a list of lists [[(pred_cls, pred_box_params, score),...],...]
batch_gt_map_cls: a list of lists [[(gt_cls, gt_box_params),...],...]
should have the same length with batch_pred_map_cls (batch_size)
"""
bsize = len(batch_pred_map_cls)
assert bsize == len(batch_gt_map_cls)
for i in range(bsize):
self.gt_map_cls[self.scan_cnt] = batch_gt_map_cls[i]
self.pred_map_cls[self.scan_cnt] = batch_pred_map_cls[i]
self.scan_cnt += 1
def compute_metrics(self):
"""Use accumulated predictions and groundtruths to compute Average Precision."""
overall_ret = OrderedDict()
for ap_iou_thresh in self.ap_iou_thresh:
ret_dict = OrderedDict()
rec, prec, ap = eval_det_multiprocessing(
self.pred_map_cls, self.gt_map_cls, ovthresh=ap_iou_thresh
)
for key in sorted(ap.keys()):
clsname = self.class2type_map[key] if self.class2type_map else str(key)
ret_dict["%s Average Precision" % (clsname)] = ap[key]
ap_vals = np.array(list(ap.values()), dtype=np.float32)
ap_vals[np.isnan(ap_vals)] = 0
ret_dict["mAP"] = ap_vals.mean()
rec_list = []
for key in sorted(ap.keys()):
clsname = self.class2type_map[key] if self.class2type_map else str(key)
try:
ret_dict["%s Recall" % (clsname)] = rec[key][-1]
rec_list.append(rec[key][-1])
except:
ret_dict["%s Recall" % (clsname)] = 0
rec_list.append(0)
ret_dict["AR"] = np.mean(rec_list)
overall_ret[ap_iou_thresh] = ret_dict
return overall_ret
def __str__(self):
overall_ret = self.compute_metrics()
return self.metrics_to_str(overall_ret)
def metrics_to_str(self, overall_ret, per_class=True):
mAP_strs = []
AR_strs = []
per_class_metrics = []
for ap_iou_thresh in self.ap_iou_thresh:
mAP = overall_ret[ap_iou_thresh]["mAP"] * 100
mAP_strs.append(f"{mAP:.2f}")
ar = overall_ret[ap_iou_thresh]["AR"] * 100
AR_strs.append(f"{ar:.2f}")
if per_class:
# per-class metrics
per_class_metrics.append("-" * 5)
per_class_metrics.append(f"IOU Thresh={ap_iou_thresh}")
for x in list(overall_ret[ap_iou_thresh].keys()):
if x == "mAP" or x == "AR":
pass
else:
met_str = f"{x}: {overall_ret[ap_iou_thresh][x]*100:.2f}"
per_class_metrics.append(met_str)
ap_header = [f"mAP{x:.2f}" for x in self.ap_iou_thresh]
ap_str = ", ".join(ap_header)
ap_str += ": " + ", ".join(mAP_strs)
ap_str += "\n"
ar_header = [f"AR{x:.2f}" for x in self.ap_iou_thresh]
ap_str += ", ".join(ar_header)
ap_str += ": " + ", ".join(AR_strs)
if per_class:
per_class_metrics = "\n".join(per_class_metrics)
ap_str += "\n"
ap_str += per_class_metrics
return ap_str
def metrics_to_dict(self, overall_ret):
metrics_dict = {}
for ap_iou_thresh in self.ap_iou_thresh:
metrics_dict[f"mAP_{ap_iou_thresh}"] = (
overall_ret[ap_iou_thresh]["mAP"] * 100
)
metrics_dict[f"AR_{ap_iou_thresh}"] = overall_ret[ap_iou_thresh]["AR"] * 100
return metrics_dict
def reset(self):
self.gt_map_cls = {} # {scan_id: [(classname, bbox)]}
self.pred_map_cls = {} # {scan_id: [(classname, bbox, score)]}
self.scan_cnt = 0
| 3detr-main | utils/ap_calculator.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import pickle
import torch
import torch.distributed as dist
def is_distributed():
if not dist.is_available() or not dist.is_initialized():
return False
return True
def get_rank():
if not is_distributed():
return 0
return dist.get_rank()
def is_primary():
return get_rank() == 0
def get_world_size():
if not is_distributed():
return 1
return dist.get_world_size()
def barrier():
if not is_distributed():
return
torch.distributed.barrier()
def setup_print_for_distributed(is_primary):
"""
This function disables printing when not in primary process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_primary or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def init_distributed(gpu_id, global_rank, world_size, dist_url, dist_backend):
torch.cuda.set_device(gpu_id)
print(
f"| distributed init (rank {global_rank}) (world {world_size}): {dist_url}",
flush=True,
)
torch.distributed.init_process_group(
backend=dist_backend,
init_method=dist_url,
world_size=world_size,
rank=global_rank,
)
torch.distributed.barrier()
setup_print_for_distributed(is_primary())
def all_reduce_sum(tensor):
if not is_distributed():
return tensor
dim_squeeze = False
if tensor.ndim == 0:
tensor = tensor[None, ...]
dim_squeeze = True
torch.distributed.all_reduce(tensor)
if dim_squeeze:
tensor = tensor.squeeze(0)
return tensor
def all_reduce_average(tensor):
val = all_reduce_sum(tensor)
return val / get_world_size()
# Function from DETR - https://github.com/facebookresearch/detr/blob/master/util/misc.py
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that all processes
have the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
torch.distributed.all_reduce(values)
if average:
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
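# Editor's note (a usage sketch; the loss names are hypothetical): typical use
# is averaging detached per-loss scalars across ranks before logging. With a
# single process the input dict is returned unchanged.
# >>> logged = reduce_dict({"loss_giou": giou_loss.detach(),
# ...                       "loss_cls": cls_loss.detach()})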
# Function from https://github.com/facebookresearch/detr/blob/master/util/misc.py
def all_gather_pickle(data, device):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to(device)
# obtain Tensor size of each rank
local_size = torch.tensor([tensor.numel()], device=device)
size_list = [torch.tensor([0], device=device) for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device=device))
if local_size != max_size:
padding = torch.empty(
size=(max_size - local_size,), dtype=torch.uint8, device=device
)
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def all_gather_dict(data):
"""
Run all_gather on data which is a dictionary of Tensors
"""
assert isinstance(data, dict)
gathered_dict = {}
for item_key in data:
if isinstance(data[item_key], torch.Tensor):
if is_distributed():
data[item_key] = data[item_key].contiguous()
tensor_list = [torch.empty_like(data[item_key]) for _ in range(get_world_size())]
dist.all_gather(tensor_list, data[item_key])
gathered_tensor = torch.cat(tensor_list, dim=0)
else:
gathered_tensor = data[item_key]
gathered_dict[item_key] = gathered_tensor
return gathered_dict
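# Editor's note (a usage sketch; `outputs` is a hypothetical dict of per-rank
# prediction tensors): every value must be a tensor with the same shape on all
# ranks so that dist.all_gather can concatenate them. With a single process
# the dict is returned as-is.
# >>> gathered = all_gather_dict({k: v.contiguous() for k, v in outputs.items()})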
| 3detr-main | utils/dist.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import math
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from third_party.pointnet2.pointnet2_modules import PointnetSAModuleVotes
from third_party.pointnet2.pointnet2_utils import furthest_point_sample
from utils.pc_util import scale_points, shift_scale_points
from models.helpers import GenericMLP
from models.position_embedding import PositionEmbeddingCoordsSine
from models.transformer import (MaskedTransformerEncoder, TransformerDecoder,
TransformerDecoderLayer, TransformerEncoder,
TransformerEncoderLayer)
class BoxProcessor(object):
"""
Class to convert 3DETR MLP head outputs into bounding boxes
"""
def __init__(self, dataset_config):
self.dataset_config = dataset_config
def compute_predicted_center(self, center_offset, query_xyz, point_cloud_dims):
center_unnormalized = query_xyz + center_offset
center_normalized = shift_scale_points(
center_unnormalized, src_range=point_cloud_dims
)
return center_normalized, center_unnormalized
def compute_predicted_size(self, size_normalized, point_cloud_dims):
scene_scale = point_cloud_dims[1] - point_cloud_dims[0]
scene_scale = torch.clamp(scene_scale, min=1e-1)
size_unnormalized = scale_points(size_normalized, mult_factor=scene_scale)
return size_unnormalized
def compute_predicted_angle(self, angle_logits, angle_residual):
if angle_logits.shape[-1] == 1:
# special case for datasets with no rotation angle
# we still use the predictions so that model outputs are used
# in the backwards pass (DDP may complain otherwise)
angle = angle_logits * 0 + angle_residual * 0
angle = angle.squeeze(-1).clamp(min=0)
else:
angle_per_cls = 2 * np.pi / self.dataset_config.num_angle_bin
pred_angle_class = angle_logits.argmax(dim=-1).detach()
angle_center = angle_per_cls * pred_angle_class
angle = angle_center + angle_residual.gather(
2, pred_angle_class.unsqueeze(-1)
).squeeze(-1)
mask = angle > np.pi
angle[mask] = angle[mask] - 2 * np.pi
return angle
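        # Hedged worked example (illustrative comment only): with num_angle_bin = 12
        # each bin spans 2*pi/12 ~= 0.524 rad; if the argmax bin is 11 and its
        # residual is +0.10, the decoded angle is 11 * 0.524 + 0.10 ~= 5.86 rad,
        # which exceeds pi and is wrapped above to 5.86 - 2*pi ~= -0.42 rad.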
def compute_objectness_and_cls_prob(self, cls_logits):
assert cls_logits.shape[-1] == self.dataset_config.num_semcls + 1
cls_prob = torch.nn.functional.softmax(cls_logits, dim=-1)
objectness_prob = 1 - cls_prob[..., -1]
return cls_prob[..., :-1], objectness_prob
def box_parametrization_to_corners(
self, box_center_unnorm, box_size_unnorm, box_angle
):
return self.dataset_config.box_parametrization_to_corners(
box_center_unnorm, box_size_unnorm, box_angle
)
class Model3DETR(nn.Module):
"""
Main 3DETR model. Consists of the following learnable sub-models
- pre_encoder: takes raw point cloud, subsamples it and projects into "D" dimensions
Input is a Nx3 matrix of N point coordinates
Output is a N'xD matrix of N' point features
- encoder: series of self-attention blocks to extract point features
Input is a N'xD matrix of N' point features
Output is a N''xD matrix of N'' point features.
N'' = N' for regular encoder; N'' = N'//2 for masked encoder
- query computation: samples a set of B coordinates from the N'' points
and outputs a BxD matrix of query features.
- decoder: series of self-attention and cross-attention blocks to produce BxD box features
Takes N''xD features from the encoder and BxD query features.
- mlp_heads: Predicts bounding box parameters and classes from the BxD box features
"""
def __init__(
self,
pre_encoder,
encoder,
decoder,
dataset_config,
encoder_dim=256,
decoder_dim=256,
position_embedding="fourier",
mlp_dropout=0.3,
num_queries=256,
):
super().__init__()
self.pre_encoder = pre_encoder
self.encoder = encoder
if hasattr(self.encoder, "masking_radius"):
hidden_dims = [encoder_dim]
else:
hidden_dims = [encoder_dim, encoder_dim]
self.encoder_to_decoder_projection = GenericMLP(
input_dim=encoder_dim,
hidden_dims=hidden_dims,
output_dim=decoder_dim,
norm_fn_name="bn1d",
activation="relu",
use_conv=True,
output_use_activation=True,
output_use_norm=True,
output_use_bias=False,
)
self.pos_embedding = PositionEmbeddingCoordsSine(
d_pos=decoder_dim, pos_type=position_embedding, normalize=True
)
self.query_projection = GenericMLP(
input_dim=decoder_dim,
hidden_dims=[decoder_dim],
output_dim=decoder_dim,
use_conv=True,
output_use_activation=True,
hidden_use_bias=True,
)
self.decoder = decoder
self.build_mlp_heads(dataset_config, decoder_dim, mlp_dropout)
self.num_queries = num_queries
self.box_processor = BoxProcessor(dataset_config)
def build_mlp_heads(self, dataset_config, decoder_dim, mlp_dropout):
mlp_func = partial(
GenericMLP,
norm_fn_name="bn1d",
activation="relu",
use_conv=True,
hidden_dims=[decoder_dim, decoder_dim],
dropout=mlp_dropout,
input_dim=decoder_dim,
)
# Semantic class of the box
# add 1 for background/not-an-object class
semcls_head = mlp_func(output_dim=dataset_config.num_semcls + 1)
# geometry of the box
center_head = mlp_func(output_dim=3)
size_head = mlp_func(output_dim=3)
angle_cls_head = mlp_func(output_dim=dataset_config.num_angle_bin)
angle_reg_head = mlp_func(output_dim=dataset_config.num_angle_bin)
mlp_heads = [
("sem_cls_head", semcls_head),
("center_head", center_head),
("size_head", size_head),
("angle_cls_head", angle_cls_head),
("angle_residual_head", angle_reg_head),
]
self.mlp_heads = nn.ModuleDict(mlp_heads)
def get_query_embeddings(self, encoder_xyz, point_cloud_dims):
query_inds = furthest_point_sample(encoder_xyz, self.num_queries)
query_inds = query_inds.long()
query_xyz = [torch.gather(encoder_xyz[..., x], 1, query_inds) for x in range(3)]
query_xyz = torch.stack(query_xyz)
query_xyz = query_xyz.permute(1, 2, 0)
        # Gather op above can be replaced by the three lines below from the pointnet2 codebase
# xyz_flipped = encoder_xyz.transpose(1, 2).contiguous()
# query_xyz = gather_operation(xyz_flipped, query_inds.int())
# query_xyz = query_xyz.transpose(1, 2)
pos_embed = self.pos_embedding(query_xyz, input_range=point_cloud_dims)
query_embed = self.query_projection(pos_embed)
return query_xyz, query_embed
def _break_up_pc(self, pc):
# pc may contain color/normals.
xyz = pc[..., 0:3].contiguous()
features = pc[..., 3:].transpose(1, 2).contiguous() if pc.size(-1) > 3 else None
return xyz, features
def run_encoder(self, point_clouds):
xyz, features = self._break_up_pc(point_clouds)
pre_enc_xyz, pre_enc_features, pre_enc_inds = self.pre_encoder(xyz, features)
# xyz: batch x npoints x 3
# features: batch x channel x npoints
# inds: batch x npoints
# nn.MultiHeadAttention in encoder expects npoints x batch x channel features
pre_enc_features = pre_enc_features.permute(2, 0, 1)
        # xyz points are in batch x npoint x channel order
enc_xyz, enc_features, enc_inds = self.encoder(
pre_enc_features, xyz=pre_enc_xyz
)
if enc_inds is None:
# encoder does not perform any downsampling
enc_inds = pre_enc_inds
else:
# use gather here to ensure that it works for both FPS and random sampling
enc_inds = torch.gather(pre_enc_inds, 1, enc_inds.type(torch.int64))
return enc_xyz, enc_features, enc_inds
def get_box_predictions(self, query_xyz, point_cloud_dims, box_features):
"""
Parameters:
query_xyz: batch x nqueries x 3 tensor of query XYZ coords
point_cloud_dims: List of [min, max] dims of point cloud
min: batch x 3 tensor of min XYZ coords
max: batch x 3 tensor of max XYZ coords
box_features: num_layers x num_queries x batch x channel
"""
# box_features change to (num_layers x batch) x channel x num_queries
box_features = box_features.permute(0, 2, 3, 1)
num_layers, batch, channel, num_queries = (
box_features.shape[0],
box_features.shape[1],
box_features.shape[2],
box_features.shape[3],
)
box_features = box_features.reshape(num_layers * batch, channel, num_queries)
# mlp head outputs are (num_layers x batch) x noutput x nqueries, so transpose last two dims
cls_logits = self.mlp_heads["sem_cls_head"](box_features).transpose(1, 2)
center_offset = (
self.mlp_heads["center_head"](box_features).sigmoid().transpose(1, 2) - 0.5
)
size_normalized = (
self.mlp_heads["size_head"](box_features).sigmoid().transpose(1, 2)
)
angle_logits = self.mlp_heads["angle_cls_head"](box_features).transpose(1, 2)
angle_residual_normalized = self.mlp_heads["angle_residual_head"](
box_features
).transpose(1, 2)
# reshape outputs to num_layers x batch x nqueries x noutput
cls_logits = cls_logits.reshape(num_layers, batch, num_queries, -1)
center_offset = center_offset.reshape(num_layers, batch, num_queries, -1)
size_normalized = size_normalized.reshape(num_layers, batch, num_queries, -1)
angle_logits = angle_logits.reshape(num_layers, batch, num_queries, -1)
angle_residual_normalized = angle_residual_normalized.reshape(
num_layers, batch, num_queries, -1
)
angle_residual = angle_residual_normalized * (
np.pi / angle_residual_normalized.shape[-1]
)
outputs = []
for l in range(num_layers):
# box processor converts outputs so we can get a 3D bounding box
(
center_normalized,
center_unnormalized,
) = self.box_processor.compute_predicted_center(
center_offset[l], query_xyz, point_cloud_dims
)
angle_continuous = self.box_processor.compute_predicted_angle(
angle_logits[l], angle_residual[l]
)
size_unnormalized = self.box_processor.compute_predicted_size(
size_normalized[l], point_cloud_dims
)
box_corners = self.box_processor.box_parametrization_to_corners(
center_unnormalized, size_unnormalized, angle_continuous
)
# below are not used in computing loss (only for matching/mAP eval)
# we compute them with no_grad() so that distributed training does not complain about unused variables
with torch.no_grad():
(
semcls_prob,
objectness_prob,
) = self.box_processor.compute_objectness_and_cls_prob(cls_logits[l])
box_prediction = {
"sem_cls_logits": cls_logits[l],
"center_normalized": center_normalized.contiguous(),
"center_unnormalized": center_unnormalized,
"size_normalized": size_normalized[l],
"size_unnormalized": size_unnormalized,
"angle_logits": angle_logits[l],
"angle_residual": angle_residual[l],
"angle_residual_normalized": angle_residual_normalized[l],
"angle_continuous": angle_continuous,
"objectness_prob": objectness_prob,
"sem_cls_prob": semcls_prob,
"box_corners": box_corners,
}
outputs.append(box_prediction)
# intermediate decoder layer outputs are only used during training
aux_outputs = outputs[:-1]
outputs = outputs[-1]
return {
"outputs": outputs, # output from last layer of decoder
"aux_outputs": aux_outputs, # output from intermediate layers of decoder
}
def forward(self, inputs, encoder_only=False):
point_clouds = inputs["point_clouds"]
enc_xyz, enc_features, enc_inds = self.run_encoder(point_clouds)
enc_features = self.encoder_to_decoder_projection(
enc_features.permute(1, 2, 0)
).permute(2, 0, 1)
# encoder features: npoints x batch x channel
# encoder xyz: npoints x batch x 3
if encoder_only:
# return: batch x npoints x channels
return enc_xyz, enc_features.transpose(0, 1)
point_cloud_dims = [
inputs["point_cloud_dims_min"],
inputs["point_cloud_dims_max"],
]
query_xyz, query_embed = self.get_query_embeddings(enc_xyz, point_cloud_dims)
# query_embed: batch x channel x npoint
enc_pos = self.pos_embedding(enc_xyz, input_range=point_cloud_dims)
# decoder expects: npoints x batch x channel
enc_pos = enc_pos.permute(2, 0, 1)
query_embed = query_embed.permute(2, 0, 1)
tgt = torch.zeros_like(query_embed)
box_features = self.decoder(
tgt, enc_features, query_pos=query_embed, pos=enc_pos
)[0]
box_predictions = self.get_box_predictions(
query_xyz, point_cloud_dims, box_features
)
return box_predictions
def build_preencoder(args):
mlp_dims = [3 * int(args.use_color), 64, 128, args.enc_dim]
preencoder = PointnetSAModuleVotes(
radius=0.2,
nsample=64,
npoint=args.preenc_npoints,
mlp=mlp_dims,
normalize_xyz=True,
)
return preencoder
def build_encoder(args):
if args.enc_type == "vanilla":
encoder_layer = TransformerEncoderLayer(
d_model=args.enc_dim,
nhead=args.enc_nhead,
dim_feedforward=args.enc_ffn_dim,
dropout=args.enc_dropout,
activation=args.enc_activation,
)
encoder = TransformerEncoder(
encoder_layer=encoder_layer, num_layers=args.enc_nlayers
)
elif args.enc_type in ["masked"]:
encoder_layer = TransformerEncoderLayer(
d_model=args.enc_dim,
nhead=args.enc_nhead,
dim_feedforward=args.enc_ffn_dim,
dropout=args.enc_dropout,
activation=args.enc_activation,
)
interim_downsampling = PointnetSAModuleVotes(
radius=0.4,
nsample=32,
npoint=args.preenc_npoints // 2,
mlp=[args.enc_dim, 256, 256, args.enc_dim],
normalize_xyz=True,
)
masking_radius = [math.pow(x, 2) for x in [0.4, 0.8, 1.2]]
encoder = MaskedTransformerEncoder(
encoder_layer=encoder_layer,
num_layers=3,
interim_downsampling=interim_downsampling,
masking_radius=masking_radius,
)
else:
raise ValueError(f"Unknown encoder type {args.enc_type}")
return encoder
def build_decoder(args):
decoder_layer = TransformerDecoderLayer(
d_model=args.dec_dim,
nhead=args.dec_nhead,
dim_feedforward=args.dec_ffn_dim,
dropout=args.dec_dropout,
)
decoder = TransformerDecoder(
decoder_layer, num_layers=args.dec_nlayers, return_intermediate=True
)
return decoder
def build_3detr(args, dataset_config):
pre_encoder = build_preencoder(args)
encoder = build_encoder(args)
decoder = build_decoder(args)
model = Model3DETR(
pre_encoder,
encoder,
decoder,
dataset_config,
encoder_dim=args.enc_dim,
decoder_dim=args.dec_dim,
mlp_dropout=args.mlp_dropout,
num_queries=args.nqueries,
)
output_processor = BoxProcessor(dataset_config)
return model, output_processor
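
# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original 3DETR sources).
# It assumes the pointnet2 CUDA extension is built and a GPU is available; the
# argparse Namespace fields mirror the arguments consumed by build_* above, and
# DummyDatasetConfig is a made-up stand-in for a real dataset config.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from argparse import Namespace

    class DummyDatasetConfig:
        num_semcls = 18
        num_angle_bin = 1

        @staticmethod
        def box_parametrization_to_corners(center, size, angle):
            # placeholder: a real dataset config returns (B, nqueries, 8, 3) box corners
            return center.unsqueeze(2).repeat(1, 1, 8, 1)

    args = Namespace(
        use_color=False, enc_dim=256, dec_dim=256, enc_type="vanilla",
        enc_nhead=4, enc_ffn_dim=128, enc_dropout=0.1, enc_activation="relu",
        enc_nlayers=3, preenc_npoints=2048, dec_nhead=4, dec_ffn_dim=256,
        dec_dropout=0.1, dec_nlayers=8, mlp_dropout=0.3, nqueries=256,
    )
    model, _ = build_3detr(args, DummyDatasetConfig())
    model = model.cuda().eval()

    pc = torch.rand(2, 20000, 3).cuda()
    inputs = {
        "point_clouds": pc,
        "point_cloud_dims_min": pc.min(dim=1).values,
        "point_cloud_dims_max": pc.max(dim=1).values,
    }
    with torch.no_grad():
        preds = model(inputs)
    print(preds["outputs"]["box_corners"].shape)  # placeholder corners: (2, 256, 8, 3)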
| 3detr-main | models/model_3detr.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from .model_3detr import build_3detr
MODEL_FUNCS = {
"3detr": build_3detr,
}
def build_model(args, dataset_config):
model, processor = MODEL_FUNCS[args.model_name](args, dataset_config)
return model, processor | 3detr-main | models/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Modified from DETR Transformer class.
Copy-paste from torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding layers
"""
from typing import Optional
import torch
from torch import Tensor, nn
from models.helpers import (ACTIVATION_DICT, NORM_DICT, WEIGHT_INIT_DICT,
get_clones)
class TransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers,
norm=None, weight_init_name="xavier_uniform"):
super().__init__()
self.layers = get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self._reset_parameters(weight_init_name)
def _reset_parameters(self, weight_init_name):
func = WEIGHT_INIT_DICT[weight_init_name]
for p in self.parameters():
if p.dim() > 1:
func(p)
def forward(self, src,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
xyz: Optional [Tensor] = None,
transpose_swap: Optional[bool] = False,
):
if transpose_swap:
bs, c, h, w = src.shape
src = src.flatten(2).permute(2, 0, 1)
if pos is not None:
pos = pos.flatten(2).permute(2, 0, 1)
output = src
orig_mask = mask
if orig_mask is not None and isinstance(orig_mask, list):
assert len(orig_mask) == len(self.layers)
elif orig_mask is not None:
orig_mask = [mask for _ in range(len(self.layers))]
for idx, layer in enumerate(self.layers):
if orig_mask is not None:
mask = orig_mask[idx]
# mask must be tiled to num_heads of the transformer
bsz, n, n = mask.shape
nhead = layer.nhead
mask = mask.unsqueeze(1)
mask = mask.repeat(1, nhead, 1, 1)
mask = mask.view(bsz * nhead, n, n)
output = layer(output, src_mask=mask,
src_key_padding_mask=src_key_padding_mask, pos=pos)
if self.norm is not None:
output = self.norm(output)
if transpose_swap:
output = output.permute(1, 2, 0).view(bs, c, h, w).contiguous()
xyz_inds = None
return xyz, output, xyz_inds
class TransformerDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm_fn_name="ln",
return_intermediate=False,
weight_init_name="xavier_uniform"):
super().__init__()
self.layers = get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = None
if norm_fn_name is not None:
self.norm = NORM_DICT[norm_fn_name](self.layers[0].linear2.out_features)
self.return_intermediate = return_intermediate
self._reset_parameters(weight_init_name)
def _reset_parameters(self, weight_init_name):
func = WEIGHT_INIT_DICT[weight_init_name]
for p in self.parameters():
if p.dim() > 1:
func(p)
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
transpose_swap: Optional [bool] = False,
return_attn_weights: Optional [bool] = False,
):
if transpose_swap:
bs, c, h, w = memory.shape
memory = memory.flatten(2).permute(2, 0, 1) # memory: bs, c, t -> t, b, c
if pos is not None:
pos = pos.flatten(2).permute(2, 0, 1)
output = tgt
intermediate = []
attns = []
for layer in self.layers:
output, attn = layer(output, memory, tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos, query_pos=query_pos,
return_attn_weights=return_attn_weights)
if self.return_intermediate:
intermediate.append(self.norm(output))
if return_attn_weights:
attns.append(attn)
if self.norm is not None:
output = self.norm(output)
if self.return_intermediate:
intermediate.pop()
intermediate.append(output)
if return_attn_weights:
attns = torch.stack(attns)
if self.return_intermediate:
return torch.stack(intermediate), attns
return output, attns
class MaskedTransformerEncoder(TransformerEncoder):
def __init__(self, encoder_layer, num_layers, masking_radius, interim_downsampling,
norm=None, weight_init_name="xavier_uniform"):
super().__init__(encoder_layer, num_layers, norm=norm, weight_init_name=weight_init_name)
assert len(masking_radius) == num_layers
self.masking_radius = masking_radius
self.interim_downsampling = interim_downsampling
def compute_mask(self, xyz, radius, dist=None):
with torch.no_grad():
if dist is None or dist.shape[1] != xyz.shape[1]:
dist = torch.cdist(xyz, xyz, p=2)
# entries that are True in the mask do not contribute to self-attention
# so points outside the radius are not considered
mask = dist >= radius
return mask, dist
def forward(self, src,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
xyz: Optional [Tensor] = None,
transpose_swap: Optional[bool] = False,
):
if transpose_swap:
bs, c, h, w = src.shape
src = src.flatten(2).permute(2, 0, 1)
if pos is not None:
pos = pos.flatten(2).permute(2, 0, 1)
output = src
xyz_dist = None
xyz_inds = None
for idx, layer in enumerate(self.layers):
mask = None
if self.masking_radius[idx] > 0:
mask, xyz_dist = self.compute_mask(xyz, self.masking_radius[idx], xyz_dist)
# mask must be tiled to num_heads of the transformer
bsz, n, n = mask.shape
nhead = layer.nhead
mask = mask.unsqueeze(1)
mask = mask.repeat(1, nhead, 1, 1)
mask = mask.view(bsz * nhead, n, n)
output = layer(output, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos)
if idx == 0 and self.interim_downsampling:
# output is npoints x batch x channel. make batch x channel x npoints
output = output.permute(1, 2, 0)
xyz, output, xyz_inds = self.interim_downsampling(xyz, output)
# swap back
output = output.permute(2, 0, 1)
if self.norm is not None:
output = self.norm(output)
if transpose_swap:
output = output.permute(1, 2, 0).view(bs, c, h, w).contiguous()
return xyz, output, xyz_inds
def extra_repr(self):
radius_str = ", ".join(["%.2f"%(x) for x in self.masking_radius])
return f"masking_radius={radius_str}"
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, nhead=4, dim_feedforward=128,
dropout=0.1, dropout_attn=None,
activation="relu", normalize_before=True, norm_name="ln",
use_ffn=True,
ffn_use_bias=True):
super().__init__()
if dropout_attn is None:
dropout_attn = dropout
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout_attn)
self.use_ffn = use_ffn
if self.use_ffn:
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward, bias=ffn_use_bias)
self.dropout = nn.Dropout(dropout, inplace=True)
self.linear2 = nn.Linear(dim_feedforward, d_model, bias=ffn_use_bias)
            self.norm2 = NORM_DICT[norm_name](d_model)
self.dropout2 = nn.Dropout(dropout, inplace=True)
self.norm1 = NORM_DICT[norm_name](d_model)
self.dropout1 = nn.Dropout(dropout, inplace=True)
self.activation = ACTIVATION_DICT[activation]()
self.normalize_before = normalize_before
self.nhead = nhead
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(src, pos)
value = src
src2 = self.self_attn(q, k, value=value, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
        src = self.norm1(src)
if self.use_ffn:
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def forward_pre(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
return_attn_weights: Optional [Tensor] = False):
src2 = self.norm1(src)
value = src2
q = k = self.with_pos_embed(src2, pos)
src2, attn_weights = self.self_attn(q, k, value=value, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)
src = src + self.dropout1(src2)
if self.use_ffn:
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src = src + self.dropout2(src2)
if return_attn_weights:
return src, attn_weights
return src
def forward(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
return_attn_weights: Optional [Tensor] = False):
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos, return_attn_weights)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
def extra_repr(self):
st = ""
if hasattr(self.self_attn, "dropout"):
st += f"attn_dr={self.self_attn.dropout}"
return st
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model, nhead=4, dim_feedforward=256,
dropout=0.1, dropout_attn=None,
activation="relu", normalize_before=True,
norm_fn_name="ln"):
super().__init__()
if dropout_attn is None:
dropout_attn = dropout
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.norm1 = NORM_DICT[norm_fn_name](d_model)
self.norm2 = NORM_DICT[norm_fn_name](d_model)
self.norm3 = NORM_DICT[norm_fn_name](d_model)
self.dropout1 = nn.Dropout(dropout, inplace=True)
self.dropout2 = nn.Dropout(dropout, inplace=True)
self.dropout3 = nn.Dropout(dropout, inplace=True)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout, inplace=True)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.activation = ACTIVATION_DICT[activation]()
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
return_attn_weights: Optional [bool] = False):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2, attn = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
if return_attn_weights:
return tgt, attn
return tgt, None
def forward_pre(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
return_attn_weights: Optional [bool] = False):
tgt2 = self.norm1(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt2 = self.norm2(tgt)
tgt2, attn = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)
tgt = tgt + self.dropout2(tgt2)
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
if return_attn_weights:
return tgt, attn
return tgt, None
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
return_attn_weights: Optional [bool] = False):
if self.normalize_before:
return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos, return_attn_weights)
return self.forward_post(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos, return_attn_weights)
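
# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original file): wire a small
# pre-norm encoder/decoder pair on random features to show the expected
# (npoints x batch x channel) layout; all sizes below are arbitrary.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    d_model, npoints, nqueries, bs = 64, 128, 16, 2
    enc_layer = TransformerEncoderLayer(d_model=d_model, nhead=4, dim_feedforward=128)
    encoder = TransformerEncoder(encoder_layer=enc_layer, num_layers=2)
    dec_layer = TransformerDecoderLayer(d_model=d_model, nhead=4, dim_feedforward=128)
    decoder = TransformerDecoder(dec_layer, num_layers=2, return_intermediate=True)

    feats = torch.rand(npoints, bs, d_model)    # npoints x batch x channel
    xyz = torch.rand(bs, npoints, 3)
    _, memory, _ = encoder(feats, xyz=xyz)      # memory: npoints x batch x channel

    tgt = torch.zeros(nqueries, bs, d_model)    # queries start as zeros, as in 3DETR
    query_pos = torch.rand(nqueries, bs, d_model)
    out, _ = decoder(tgt, memory, query_pos=query_pos)
    print(out.shape)                            # (num_layers, nqueries, bs, d_model)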
| 3detr-main | models/transformer.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn
import numpy as np
from utils.pc_util import shift_scale_points
class PositionEmbeddingCoordsSine(nn.Module):
def __init__(
self,
temperature=10000,
normalize=False,
scale=None,
pos_type="fourier",
d_pos=None,
d_in=3,
gauss_scale=1.0,
):
super().__init__()
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
assert pos_type in ["sine", "fourier"]
self.pos_type = pos_type
self.scale = scale
if pos_type == "fourier":
assert d_pos is not None
assert d_pos % 2 == 0
# define a gaussian matrix input_ch -> output_ch
B = torch.empty((d_in, d_pos // 2)).normal_()
B *= gauss_scale
self.register_buffer("gauss_B", B)
self.d_pos = d_pos
def get_sine_embeddings(self, xyz, num_channels, input_range):
# clone coords so that shift/scale operations do not affect original tensor
orig_xyz = xyz
xyz = orig_xyz.clone()
ncoords = xyz.shape[1]
if self.normalize:
xyz = shift_scale_points(xyz, src_range=input_range)
ndim = num_channels // xyz.shape[2]
if ndim % 2 != 0:
ndim -= 1
        # automatically handle remainder by assigning it to the first dim
rems = num_channels - (ndim * xyz.shape[2])
assert (
ndim % 2 == 0
), f"Cannot handle odd sized ndim={ndim} where num_channels={num_channels} and xyz={xyz.shape}"
final_embeds = []
prev_dim = 0
for d in range(xyz.shape[2]):
cdim = ndim
if rems > 0:
# add remainder in increments of two to maintain even size
cdim += 2
rems -= 2
if cdim != prev_dim:
dim_t = torch.arange(cdim, dtype=torch.float32, device=xyz.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / cdim)
            # create batch x cdim x ncoords embedding
raw_pos = xyz[:, :, d]
if self.scale:
raw_pos *= self.scale
pos = raw_pos[:, :, None] / dim_t
pos = torch.stack(
(pos[:, :, 0::2].sin(), pos[:, :, 1::2].cos()), dim=3
).flatten(2)
final_embeds.append(pos)
prev_dim = cdim
final_embeds = torch.cat(final_embeds, dim=2).permute(0, 2, 1)
return final_embeds
def get_fourier_embeddings(self, xyz, num_channels=None, input_range=None):
# Follows - https://people.eecs.berkeley.edu/~bmild/fourfeat/index.html
if num_channels is None:
num_channels = self.gauss_B.shape[1] * 2
bsize, npoints = xyz.shape[0], xyz.shape[1]
assert num_channels > 0 and num_channels % 2 == 0
d_in, max_d_out = self.gauss_B.shape[0], self.gauss_B.shape[1]
d_out = num_channels // 2
assert d_out <= max_d_out
assert d_in == xyz.shape[-1]
# clone coords so that shift/scale operations do not affect original tensor
orig_xyz = xyz
xyz = orig_xyz.clone()
ncoords = xyz.shape[1]
if self.normalize:
xyz = shift_scale_points(xyz, src_range=input_range)
xyz *= 2 * np.pi
xyz_proj = torch.mm(xyz.view(-1, d_in), self.gauss_B[:, :d_out]).view(
bsize, npoints, d_out
)
final_embeds = [xyz_proj.sin(), xyz_proj.cos()]
# return batch x d_pos x npoints embedding
final_embeds = torch.cat(final_embeds, dim=2).permute(0, 2, 1)
return final_embeds
def forward(self, xyz, num_channels=None, input_range=None):
assert isinstance(xyz, torch.Tensor)
assert xyz.ndim == 3
# xyz is batch x npoints x 3
if self.pos_type == "sine":
with torch.no_grad():
return self.get_sine_embeddings(xyz, num_channels, input_range)
elif self.pos_type == "fourier":
with torch.no_grad():
return self.get_fourier_embeddings(xyz, num_channels, input_range)
else:
raise ValueError(f"Unknown {self.pos_type}")
def extra_repr(self):
st = f"type={self.pos_type}, scale={self.scale}, normalize={self.normalize}"
if hasattr(self, "gauss_B"):
st += (
f", gaussB={self.gauss_B.shape}, gaussBsum={self.gauss_B.sum().item()}"
)
return st
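
# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original file): embed a random
# batch of xyz coordinates with the Fourier variant; all sizes are arbitrary.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    bs, npoints, d_pos = 2, 1024, 256
    pos_embed = PositionEmbeddingCoordsSine(pos_type="fourier", d_pos=d_pos, normalize=True)
    xyz = torch.rand(bs, npoints, 3)
    dims = [xyz.min(dim=1).values, xyz.max(dim=1).values]  # [min, max], each (bs, 3)
    emb = pos_embed(xyz, input_range=dims)
    print(emb.shape)  # expected: (bs, d_pos, npoints) == (2, 256, 1024)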
| 3detr-main | models/position_embedding.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch.nn as nn
from functools import partial
import copy
class BatchNormDim1Swap(nn.BatchNorm1d):
"""
Used for nn.Transformer that uses a HW x N x C rep
"""
def forward(self, x):
"""
x: HW x N x C
permute to N x C x HW
Apply BN on C
permute back
"""
hw, n, c = x.shape
x = x.permute(1, 2, 0)
x = super(BatchNormDim1Swap, self).forward(x)
# x: n x c x hw -> hw x n x c
x = x.permute(2, 0, 1)
return x
NORM_DICT = {
"bn": BatchNormDim1Swap,
"bn1d": nn.BatchNorm1d,
"id": nn.Identity,
"ln": nn.LayerNorm,
}
ACTIVATION_DICT = {
"relu": nn.ReLU,
"gelu": nn.GELU,
"leakyrelu": partial(nn.LeakyReLU, negative_slope=0.1),
}
WEIGHT_INIT_DICT = {
"xavier_uniform": nn.init.xavier_uniform_,
}
class GenericMLP(nn.Module):
def __init__(
self,
input_dim,
hidden_dims,
output_dim,
norm_fn_name=None,
activation="relu",
use_conv=False,
dropout=None,
hidden_use_bias=False,
output_use_bias=True,
output_use_activation=False,
output_use_norm=False,
weight_init_name=None,
):
super().__init__()
activation = ACTIVATION_DICT[activation]
norm = None
if norm_fn_name is not None:
norm = NORM_DICT[norm_fn_name]
if norm_fn_name == "ln" and use_conv:
norm = lambda x: nn.GroupNorm(1, x) # easier way to use LayerNorm
if dropout is not None:
if not isinstance(dropout, list):
dropout = [dropout for _ in range(len(hidden_dims))]
layers = []
prev_dim = input_dim
for idx, x in enumerate(hidden_dims):
if use_conv:
layer = nn.Conv1d(prev_dim, x, 1, bias=hidden_use_bias)
else:
layer = nn.Linear(prev_dim, x, bias=hidden_use_bias)
layers.append(layer)
if norm:
layers.append(norm(x))
layers.append(activation())
if dropout is not None:
layers.append(nn.Dropout(p=dropout[idx]))
prev_dim = x
if use_conv:
layer = nn.Conv1d(prev_dim, output_dim, 1, bias=output_use_bias)
else:
layer = nn.Linear(prev_dim, output_dim, bias=output_use_bias)
layers.append(layer)
if output_use_norm:
layers.append(norm(output_dim))
if output_use_activation:
layers.append(activation())
self.layers = nn.Sequential(*layers)
if weight_init_name is not None:
self.do_weight_init(weight_init_name)
def do_weight_init(self, weight_init_name):
func = WEIGHT_INIT_DICT[weight_init_name]
for (_, param) in self.named_parameters():
if param.dim() > 1: # skips batchnorm/layernorm
func(param)
def forward(self, x):
output = self.layers(x)
return output
def get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
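
# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original file): a conv-based
# GenericMLP maps (batch x channel x npoints) features, while BatchNormDim1Swap
# normalizes (seq x batch x channel) transformer-style tensors.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch

    mlp = GenericMLP(input_dim=256, hidden_dims=[256], output_dim=128,
                     norm_fn_name="bn1d", activation="relu", use_conv=True)
    feats = torch.rand(2, 256, 1024)   # batch x channel x npoints
    print(mlp(feats).shape)            # expected: (2, 128, 1024)

    bn = BatchNormDim1Swap(64)
    seq = torch.rand(1024, 2, 64)      # seq x batch x channel
    print(bn(seq).shape)               # expected: (1024, 2, 64)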
| 3detr-main | models/helpers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
''' Modified based on Ref: https://github.com/erikwijmans/Pointnet2_PyTorch '''
import torch
import torch.nn as nn
from typing import List, Tuple
class SharedMLP(nn.Sequential):
def __init__(
self,
args: List[int],
*,
bn: bool = False,
activation=nn.ReLU(inplace=True),
preact: bool = False,
first: bool = False,
name: str = ""
):
super().__init__()
for i in range(len(args) - 1):
self.add_module(
name + 'layer{}'.format(i),
Conv2d(
args[i],
args[i + 1],
bn=(not first or not preact or (i != 0)) and bn,
activation=activation
if (not first or not preact or (i != 0)) else None,
preact=preact
)
)
class _BNBase(nn.Sequential):
def __init__(self, in_size, batch_norm=None, name=""):
super().__init__()
self.add_module(name + "bn", batch_norm(in_size))
nn.init.constant_(self[0].weight, 1.0)
nn.init.constant_(self[0].bias, 0)
class BatchNorm1d(_BNBase):
def __init__(self, in_size: int, *, name: str = ""):
super().__init__(in_size, batch_norm=nn.BatchNorm1d, name=name)
class BatchNorm2d(_BNBase):
def __init__(self, in_size: int, name: str = ""):
super().__init__(in_size, batch_norm=nn.BatchNorm2d, name=name)
class BatchNorm3d(_BNBase):
def __init__(self, in_size: int, name: str = ""):
super().__init__(in_size, batch_norm=nn.BatchNorm3d, name=name)
class _ConvBase(nn.Sequential):
def __init__(
self,
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=None,
batch_norm=None,
bias=True,
preact=False,
name=""
):
super().__init__()
bias = bias and (not bn)
conv_unit = conv(
in_size,
out_size,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=bias
)
init(conv_unit.weight)
if bias:
nn.init.constant_(conv_unit.bias, 0)
if bn:
if not preact:
bn_unit = batch_norm(out_size)
else:
bn_unit = batch_norm(in_size)
if preact:
if bn:
self.add_module(name + 'bn', bn_unit)
if activation is not None:
self.add_module(name + 'activation', activation)
self.add_module(name + 'conv', conv_unit)
if not preact:
if bn:
self.add_module(name + 'bn', bn_unit)
if activation is not None:
self.add_module(name + 'activation', activation)
class Conv1d(_ConvBase):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: int = 1,
stride: int = 1,
padding: int = 0,
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = ""
):
super().__init__(
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=nn.Conv1d,
batch_norm=BatchNorm1d,
bias=bias,
preact=preact,
name=name
)
class Conv2d(_ConvBase):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: Tuple[int, int] = (1, 1),
stride: Tuple[int, int] = (1, 1),
padding: Tuple[int, int] = (0, 0),
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = ""
):
super().__init__(
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=nn.Conv2d,
batch_norm=BatchNorm2d,
bias=bias,
preact=preact,
name=name
)
class Conv3d(_ConvBase):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: Tuple[int, int, int] = (1, 1, 1),
stride: Tuple[int, int, int] = (1, 1, 1),
padding: Tuple[int, int, int] = (0, 0, 0),
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = ""
):
super().__init__(
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=nn.Conv3d,
batch_norm=BatchNorm3d,
bias=bias,
preact=preact,
name=name
)
class FC(nn.Sequential):
def __init__(
self,
in_size: int,
out_size: int,
*,
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=None,
preact: bool = False,
name: str = ""
):
super().__init__()
fc = nn.Linear(in_size, out_size, bias=not bn)
if init is not None:
init(fc.weight)
if not bn:
nn.init.constant_(fc.bias, 0)
if preact:
if bn:
self.add_module(name + 'bn', BatchNorm1d(in_size))
if activation is not None:
self.add_module(name + 'activation', activation)
self.add_module(name + 'fc', fc)
if not preact:
if bn:
self.add_module(name + 'bn', BatchNorm1d(out_size))
if activation is not None:
self.add_module(name + 'activation', activation)
def set_bn_momentum_default(bn_momentum):
def fn(m):
if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
m.momentum = bn_momentum
return fn
class BNMomentumScheduler(object):
def __init__(
self, model, bn_lambda, last_epoch=-1,
setter=set_bn_momentum_default
):
if not isinstance(model, nn.Module):
raise RuntimeError(
"Class '{}' is not a PyTorch nn Module".format(
type(model).__name__
)
)
self.model = model
self.setter = setter
self.lmbd = bn_lambda
self.step(last_epoch + 1)
self.last_epoch = last_epoch
def step(self, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = epoch
self.model.apply(self.setter(self.lmbd(epoch)))
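
# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original file): SharedMLP applies
# a stack of 1x1 Conv2d + BN + ReLU over (B, C, npoint, nsample) grouped features, and
# BNMomentumScheduler decays the BN momentum over epochs (the schedule below is
# arbitrary).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    mlp = SharedMLP([3, 64, 128], bn=True)
    grouped = torch.rand(2, 3, 256, 32)   # B x C x npoint x nsample
    print(mlp(grouped).shape)             # expected: (2, 128, 256, 32)

    # halve the BN momentum every 20 epochs, starting from 0.5
    bn_sched = BNMomentumScheduler(mlp, bn_lambda=lambda epoch: 0.5 * (0.5 ** (epoch // 20)))
    for epoch in range(3):
        bn_sched.step(epoch)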
| 3detr-main | third_party/pointnet2/pytorch_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
import glob
import os.path as osp
this_dir = osp.dirname(osp.abspath(__file__))
_ext_src_root = "_ext_src"
_ext_sources = glob.glob("{}/src/*.cpp".format(_ext_src_root)) + glob.glob(
"{}/src/*.cu".format(_ext_src_root)
)
_ext_headers = glob.glob("{}/include/*".format(_ext_src_root))
setup(
name='pointnet2',
ext_modules=[
CUDAExtension(
name='pointnet2._ext',
sources=_ext_sources,
extra_compile_args={
"cxx": ["-O2", "-I{}".format("{}/include".format(_ext_src_root))],
"nvcc": ["-O2", "-I{}".format("{}/include".format(_ext_src_root))],
},
include_dirs=[osp.join(this_dir, _ext_src_root, "include")],
)
],
cmdclass={
'build_ext': BuildExtension
}
)
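
# Hedged note (not part of the original file): the extension is typically built from
# this directory with `python setup.py install` (or `pip install .`), which requires a
# CUDA toolkit compatible with the installed PyTorch build so nvcc can compile the
# kernels listed in _ext_sources.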
| 3detr-main | third_party/pointnet2/setup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
''' Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch '''
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
from torch.autograd import Function
import torch.nn as nn
import pytorch_utils as pt_utils
import sys
try:
import builtins
except:
import __builtin__ as builtins
try:
import pointnet2._ext as _ext
except ImportError:
if not getattr(builtins, "__POINTNET2_SETUP__", False):
raise ImportError(
"Could not import _ext module.\n"
"Please see the setup instructions in the README: "
"https://github.com/erikwijmans/Pointnet2_PyTorch/blob/master/README.rst"
)
if False:
# Workaround for type hints without depending on the `typing` module
from typing import *
class RandomDropout(nn.Module):
def __init__(self, p=0.5, inplace=False):
super(RandomDropout, self).__init__()
self.p = p
self.inplace = inplace
def forward(self, X):
theta = torch.Tensor(1).uniform_(0, self.p)[0]
        return pt_utils.feature_dropout_no_scaling(X, theta, self.training, self.inplace)
class FurthestPointSampling(Function):
@staticmethod
def forward(ctx, xyz, npoint):
# type: (Any, torch.Tensor, int) -> torch.Tensor
r"""
Uses iterative furthest point sampling to select a set of npoint features that have the largest
minimum distance
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor where N > npoint
npoint : int32
number of features in the sampled set
Returns
-------
torch.Tensor
(B, npoint) tensor containing the set
"""
fps_inds = _ext.furthest_point_sampling(xyz, npoint)
ctx.mark_non_differentiable(fps_inds)
return fps_inds
@staticmethod
def backward(xyz, a=None):
return None, None
furthest_point_sample = FurthestPointSampling.apply
class GatherOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor
idx : torch.Tensor
(B, npoint) tensor of the features to gather
Returns
-------
torch.Tensor
(B, C, npoint) tensor
"""
_, C, N = features.size()
ctx.for_backwards = (idx, C, N)
return _ext.gather_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
idx, C, N = ctx.for_backwards
grad_features = _ext.gather_points_grad(grad_out.contiguous(), idx, N)
return grad_features, None
gather_operation = GatherOperation.apply
class ThreeNN(Function):
@staticmethod
def forward(ctx, unknown, known):
# type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Find the three nearest neighbors of unknown in known
Parameters
----------
unknown : torch.Tensor
            (B, n, 3) tensor of unknown features
known : torch.Tensor
            (B, m, 3) tensor of known features
Returns
-------
dist : torch.Tensor
(B, n, 3) l2 distance to the three nearest neighbors
idx : torch.Tensor
(B, n, 3) index of 3 nearest neighbors
"""
dist2, idx = _ext.three_nn(unknown, known)
return torch.sqrt(dist2), idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None
three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
@staticmethod
def forward(ctx, features, idx, weight):
        # type: (Any, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
        Performs weighted linear interpolation on 3 features
Parameters
----------
features : torch.Tensor
(B, c, m) Features descriptors to be interpolated from
idx : torch.Tensor
(B, n, 3) three nearest neighbors of the target features in features
weight : torch.Tensor
(B, n, 3) weights
Returns
-------
torch.Tensor
(B, c, n) tensor of the interpolated features
"""
B, c, m = features.size()
n = idx.size(1)
ctx.three_interpolate_for_backward = (idx, weight, m)
return _ext.three_interpolate(features, idx, weight)
@staticmethod
def backward(ctx, grad_out):
# type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
            (B, c, n) tensor with gradients of outputs
Returns
-------
grad_features : torch.Tensor
(B, c, m) tensor with gradients of features
None
None
"""
idx, weight, m = ctx.three_interpolate_for_backward
grad_features = _ext.three_interpolate_grad(
grad_out.contiguous(), idx, weight, m
)
return grad_features, None, None
three_interpolate = ThreeInterpolate.apply
class GroupingOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor of features to group
idx : torch.Tensor
            (B, npoint, nsample) tensor containing the indices of features to group with
Returns
-------
torch.Tensor
(B, C, npoint, nsample) tensor
"""
B, nfeatures, nsample = idx.size()
_, C, N = features.size()
ctx.for_backwards = (idx, N)
return _ext.group_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
# type: (Any, torch.tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
(B, C, npoint, nsample) tensor of the gradients of the output from forward
Returns
-------
torch.Tensor
(B, C, N) gradient of the features
None
"""
idx, N = ctx.for_backwards
grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N)
return grad_features, None
grouping_operation = GroupingOperation.apply
class BallQuery(Function):
@staticmethod
def forward(ctx, radius, nsample, xyz, new_xyz):
# type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
radius : float
radius of the balls
nsample : int
maximum number of features in the balls
xyz : torch.Tensor
(B, N, 3) xyz coordinates of the features
new_xyz : torch.Tensor
(B, npoint, 3) centers of the ball query
Returns
-------
torch.Tensor
            (B, npoint, nsample) tensor with the indices of the features that form the query balls
"""
inds = _ext.ball_query(new_xyz, xyz, radius, nsample)
ctx.mark_non_differentiable(inds)
return inds
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
ball_query = BallQuery.apply
class QueryAndGroup(nn.Module):
r"""
Groups with a ball query of radius
Parameters
---------
radius : float32
Radius of ball
nsample : int32
Maximum number of features to gather in the ball
"""
def __init__(self, radius, nsample, use_xyz=True, ret_grouped_xyz=False, normalize_xyz=False, sample_uniformly=False, ret_unique_cnt=False):
# type: (QueryAndGroup, float, int, bool) -> None
super(QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
self.ret_grouped_xyz = ret_grouped_xyz
self.normalize_xyz = normalize_xyz
self.sample_uniformly = sample_uniformly
self.ret_unique_cnt = ret_unique_cnt
if self.ret_unique_cnt:
assert(self.sample_uniformly)
def forward(self, xyz, new_xyz, features=None):
        # type: (QueryAndGroup, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
            centroids (B, npoint, 3)
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, 3 + C, npoint, nsample) tensor
"""
idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
if self.sample_uniformly:
unique_cnt = torch.zeros((idx.shape[0], idx.shape[1]))
for i_batch in range(idx.shape[0]):
for i_region in range(idx.shape[1]):
unique_ind = torch.unique(idx[i_batch, i_region, :])
num_unique = unique_ind.shape[0]
unique_cnt[i_batch, i_region] = num_unique
sample_ind = torch.randint(0, num_unique, (self.nsample - num_unique,), dtype=torch.long)
all_ind = torch.cat((unique_ind, unique_ind[sample_ind]))
idx[i_batch, i_region, :] = all_ind
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping_operation(xyz_trans, idx) # (B, 3, npoint, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if self.normalize_xyz:
grouped_xyz /= self.radius
if features is not None:
grouped_features = grouping_operation(features, idx)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, C + 3, npoint, nsample)
else:
new_features = grouped_features
else:
assert (
self.use_xyz
), "Cannot have not features and not use xyz as a feature!"
new_features = grouped_xyz
ret = [new_features]
if self.ret_grouped_xyz:
ret.append(grouped_xyz)
if self.ret_unique_cnt:
ret.append(unique_cnt)
if len(ret) == 1:
return ret[0]
else:
return tuple(ret)
class GroupAll(nn.Module):
r"""
Groups all features
Parameters
---------
"""
def __init__(self, use_xyz=True, ret_grouped_xyz=False):
# type: (GroupAll, bool) -> None
super(GroupAll, self).__init__()
        self.use_xyz = use_xyz
        self.ret_grouped_xyz = ret_grouped_xyz
def forward(self, xyz, new_xyz, features=None):
# type: (GroupAll, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
Ignored
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, C + 3, 1, N) tensor
"""
grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
if features is not None:
grouped_features = features.unsqueeze(2)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, 3 + C, 1, N)
else:
new_features = grouped_features
else:
new_features = grouped_xyz
if self.ret_grouped_xyz:
return new_features, grouped_xyz
else:
return new_features
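
# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original file). It assumes the
# compiled `pointnet2._ext` CUDA extension is importable and a GPU is available; the
# sizes below are arbitrary.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    B, N, C, npoint, nsample, radius = 2, 4096, 16, 512, 32, 0.2
    xyz = torch.rand(B, N, 3).cuda()
    feats = torch.rand(B, C, N).cuda()

    inds = furthest_point_sample(xyz, npoint)                  # (B, npoint) indices
    new_xyz = gather_operation(xyz.transpose(1, 2).contiguous(), inds)
    new_xyz = new_xyz.transpose(1, 2).contiguous()             # (B, npoint, 3) centers

    grouper = QueryAndGroup(radius, nsample, use_xyz=True)
    grouped = grouper(xyz, new_xyz, feats)                     # (B, C + 3, npoint, nsample)
    print(grouped.shape)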
| 3detr-main | third_party/pointnet2/pointnet2_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
''' Testing customized ops. '''
import torch
from torch.autograd import gradcheck
import numpy as np
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import pointnet2_utils
def test_interpolation_grad():
batch_size = 1
feat_dim = 2
m = 4
feats = torch.randn(batch_size, feat_dim, m, requires_grad=True).float().cuda()
def interpolate_func(inputs):
idx = torch.from_numpy(np.array([[[0,1,2],[1,2,3]]])).int().cuda()
weight = torch.from_numpy(np.array([[[1,1,1],[2,2,2]]])).float().cuda()
interpolated_feats = pointnet2_utils.three_interpolate(inputs, idx, weight)
return interpolated_feats
assert (gradcheck(interpolate_func, feats, atol=1e-1, rtol=1e-1))
if __name__=='__main__':
test_interpolation_grad()
| 3detr-main | third_party/pointnet2/pointnet2_test.py |
# Copyright (c) Facebook, Inc. and its affiliates.
''' Pointnet2 layers.
Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch
Extended with the following:
1. Uniform sampling in each local region (sample_uniformly)
2. Return sampled points indices to support votenet.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import pointnet2_utils
import pytorch_utils as pt_utils
from typing import List
class _PointnetSAModuleBase(nn.Module):
def __init__(self):
super().__init__()
self.npoint = None
self.groupers = None
self.mlps = None
def forward(self, xyz: torch.Tensor,
features: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
            (B, N, C) tensor of the descriptors of the features
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
(B, npoint, \sum_k(mlps[k][-1])) tensor of the new_features descriptors
"""
new_features_list = []
xyz_flipped = xyz.transpose(1, 2).contiguous()
new_xyz = pointnet2_utils.gather_operation(
xyz_flipped,
pointnet2_utils.furthest_point_sample(xyz, self.npoint)
).transpose(1, 2).contiguous() if self.npoint is not None else None
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz, new_xyz, features
) # (B, C, npoint, nsample)
new_features = self.mlps[i](
new_features
) # (B, mlp[-1], npoint, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
new_features_list.append(new_features)
return new_xyz, torch.cat(new_features_list, dim=1)
class PointnetSAModuleMSG(_PointnetSAModuleBase):
r"""Pointnet set abstrction layer with multiscale grouping
Parameters
----------
npoint : int
Number of features
radii : list of float32
list of radii to group with
nsamples : list of int32
Number of samples in each ball query
mlps : list of list of int32
Spec of the pointnet before the global max_pool for each scale
bn : bool
Use batchnorm
"""
def __init__(
self,
*,
npoint: int,
radii: List[float],
nsamples: List[int],
mlps: List[List[int]],
bn: bool = True,
use_xyz: bool = True,
sample_uniformly: bool = False
):
super().__init__()
assert len(radii) == len(nsamples) == len(mlps)
self.npoint = npoint
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz, sample_uniformly=sample_uniformly)
if npoint is not None else pointnet2_utils.GroupAll(use_xyz)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
class PointnetSAModule(PointnetSAModuleMSG):
r"""Pointnet set abstrction layer
Parameters
----------
npoint : int
Number of features
radius : float
Radius of ball
nsample : int
Number of samples in the ball query
mlp : list
Spec of the pointnet before the global max_pool
bn : bool
Use batchnorm
"""
def __init__(
self,
*,
mlp: List[int],
npoint: int = None,
radius: float = None,
nsample: int = None,
bn: bool = True,
use_xyz: bool = True
):
super().__init__(
mlps=[mlp],
npoint=npoint,
radii=[radius],
nsamples=[nsample],
bn=bn,
use_xyz=use_xyz
)
class PointnetSAModuleVotes(nn.Module):
''' Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
with extra support for returning point indices for getting their GT votes '''
def __init__(
self,
*,
mlp: List[int],
npoint: int = None,
radius: float = None,
nsample: int = None,
bn: bool = True,
use_xyz: bool = True,
pooling: str = 'max',
sigma: float = None, # for RBF pooling
            normalize_xyz: bool = False, # normalize local XYZ with radius
sample_uniformly: bool = False,
ret_unique_cnt: bool = False
):
super().__init__()
self.npoint = npoint
self.radius = radius
self.nsample = nsample
self.pooling = pooling
self.mlp_module = None
self.use_xyz = use_xyz
self.sigma = sigma
if self.sigma is None:
self.sigma = self.radius/2
self.normalize_xyz = normalize_xyz
self.ret_unique_cnt = ret_unique_cnt
if npoint is not None:
self.grouper = pointnet2_utils.QueryAndGroup(radius, nsample,
use_xyz=use_xyz, ret_grouped_xyz=True, normalize_xyz=normalize_xyz,
sample_uniformly=sample_uniformly, ret_unique_cnt=ret_unique_cnt)
else:
self.grouper = pointnet2_utils.GroupAll(use_xyz, ret_grouped_xyz=True)
mlp_spec = mlp
if use_xyz and len(mlp_spec)>0:
mlp_spec[0] += 3
self.mlp_module = pt_utils.SharedMLP(mlp_spec, bn=bn)
def forward(self, xyz: torch.Tensor,
features: torch.Tensor = None,
inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features
inds : torch.Tensor
(B, npoint) tensor that stores index to the xyz points (values in 0-N-1)
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
(B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
inds: torch.Tensor
(B, npoint) tensor of the inds
"""
xyz_flipped = xyz.transpose(1, 2).contiguous()
if inds is None:
inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
else:
assert(inds.shape[1] == self.npoint)
new_xyz = pointnet2_utils.gather_operation(
xyz_flipped, inds
).transpose(1, 2).contiguous() if self.npoint is not None else None
if not self.ret_unique_cnt:
grouped_features, grouped_xyz = self.grouper(
xyz, new_xyz, features
) # (B, C, npoint, nsample)
else:
grouped_features, grouped_xyz, unique_cnt = self.grouper(
xyz, new_xyz, features
) # (B, C, npoint, nsample), (B,3,npoint,nsample), (B,npoint)
new_features = self.mlp_module(
grouped_features
) # (B, mlp[-1], npoint, nsample)
if self.pooling == 'max':
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
elif self.pooling == 'avg':
new_features = F.avg_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
elif self.pooling == 'rbf':
# Use radial basis function kernel for weighted sum of features (normalized by nsample and sigma)
# Ref: https://en.wikipedia.org/wiki/Radial_basis_function_kernel
rbf = torch.exp(-1 * grouped_xyz.pow(2).sum(1,keepdim=False) / (self.sigma**2) / 2) # (B, npoint, nsample)
new_features = torch.sum(new_features * rbf.unsqueeze(1), -1, keepdim=True) / float(self.nsample) # (B, mlp[-1], npoint, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
if not self.ret_unique_cnt:
return new_xyz, new_features, inds
else:
return new_xyz, new_features, inds, unique_cnt
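
# Hedged usage note (illustrative comment, not part of the original file): 3DETR's
# pre-encoder instantiates the module above roughly as
#   sa = PointnetSAModuleVotes(radius=0.2, nsample=64, npoint=2048,
#                              mlp=[0, 64, 128, 256], normalize_xyz=True)
#   new_xyz, new_features, inds = sa(xyz, features)   # xyz: (B, N, 3) CUDA tensor
# which yields npoint seed coordinates with 256-d features; with use_xyz=True the
# first mlp dim is increased by 3 internally to account for the appended coordinates.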
class PointnetSAModuleMSGVotes(nn.Module):
''' Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
with extra support for returning point indices for getting their GT votes '''
def __init__(
self,
*,
mlps: List[List[int]],
npoint: int,
radii: List[float],
nsamples: List[int],
bn: bool = True,
use_xyz: bool = True,
sample_uniformly: bool = False
):
super().__init__()
assert(len(mlps) == len(nsamples) == len(radii))
self.npoint = npoint
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz, sample_uniformly=sample_uniformly)
if npoint is not None else pointnet2_utils.GroupAll(use_xyz)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
def forward(self, xyz: torch.Tensor,
features: torch.Tensor = None, inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features
inds : torch.Tensor
(B, npoint) tensor that stores index to the xyz points (values in 0-N-1)
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
(B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
inds: torch.Tensor
(B, npoint) tensor of the inds
"""
new_features_list = []
xyz_flipped = xyz.transpose(1, 2).contiguous()
if inds is None:
inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
new_xyz = pointnet2_utils.gather_operation(
xyz_flipped, inds
).transpose(1, 2).contiguous() if self.npoint is not None else None
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz, new_xyz, features
) # (B, C, npoint, nsample)
new_features = self.mlps[i](
new_features
) # (B, mlp[-1], npoint, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
new_features_list.append(new_features)
return new_xyz, torch.cat(new_features_list, dim=1), inds
class PointnetFPModule(nn.Module):
r"""Propigates the features of one set to another
Parameters
----------
mlp : list
Pointnet module parameters
bn : bool
Use batchnorm
"""
def __init__(self, *, mlp: List[int], bn: bool = True):
super().__init__()
self.mlp = pt_utils.SharedMLP(mlp, bn=bn)
def forward(
self, unknown: torch.Tensor, known: torch.Tensor,
unknow_feats: torch.Tensor, known_feats: torch.Tensor
) -> torch.Tensor:
r"""
Parameters
----------
unknown : torch.Tensor
(B, n, 3) tensor of the xyz positions of the unknown features
known : torch.Tensor
(B, m, 3) tensor of the xyz positions of the known features
unknow_feats : torch.Tensor
            (B, C1, n) tensor of the features to be propagated to
known_feats : torch.Tensor
            (B, C2, m) tensor of features to be propagated
Returns
-------
new_features : torch.Tensor
(B, mlp[-1], n) tensor of the features of the unknown features
"""
if known is not None:
dist, idx = pointnet2_utils.three_nn(unknown, known)
dist_recip = 1.0 / (dist + 1e-8)
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = dist_recip / norm
interpolated_feats = pointnet2_utils.three_interpolate(
known_feats, idx, weight
)
else:
interpolated_feats = known_feats.expand(
*known_feats.size()[0:2], unknown.size(1)
)
if unknow_feats is not None:
new_features = torch.cat([interpolated_feats, unknow_feats],
dim=1) #(B, C2 + C1, n)
else:
new_features = interpolated_feats
new_features = new_features.unsqueeze(-1)
new_features = self.mlp(new_features)
return new_features.squeeze(-1)
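# A minimal usage sketch for PointnetFPModule (shapes and variable names below
# are illustrative assumptions, not taken from this repository):
#   fp = PointnetFPModule(mlp=[256 + 128, 256])   # mlp[0] = C2 + C1 when skip features are given
#   out = fp(dense_xyz,      # (B, n, 3) points to propagate features to
#            sparse_xyz,     # (B, m, 3) points the features come from
#            dense_feats,    # (B, C1, n) skip-connection features, may be None
#            sparse_feats)   # (B, C2, m) features to propagate
#   # out: (B, mlp[-1], n)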
class PointnetLFPModuleMSG(nn.Module):
    ''' Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
    with a learnable feature propagation layer.'''
def __init__(
self,
*,
mlps: List[List[int]],
radii: List[float],
nsamples: List[int],
post_mlp: List[int],
bn: bool = True,
use_xyz: bool = True,
sample_uniformly: bool = False
):
super().__init__()
assert(len(mlps) == len(nsamples) == len(radii))
self.post_mlp = pt_utils.SharedMLP(post_mlp, bn=bn)
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz,
sample_uniformly=sample_uniformly)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
def forward(self, xyz2: torch.Tensor, xyz1: torch.Tensor,
features2: torch.Tensor, features1: torch.Tensor) -> torch.Tensor:
r""" Propagate features from xyz1 to xyz2.
Parameters
----------
xyz2 : torch.Tensor
(B, N2, 3) tensor of the xyz coordinates of the features
xyz1 : torch.Tensor
(B, N1, 3) tensor of the xyz coordinates of the features
        features2 : torch.Tensor
            (B, C2, N2) tensor of the descriptors of the features
        features1 : torch.Tensor
            (B, C1, N1) tensor of the descriptors of the features
Returns
-------
new_features1 : torch.Tensor
(B, \sum_k(mlps[k][-1]), N1) tensor of the new_features descriptors
"""
new_features_list = []
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz1, xyz2, features1
) # (B, C1, N2, nsample)
new_features = self.mlps[i](
new_features
) # (B, mlp[-1], N2, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], N2, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], N2)
if features2 is not None:
new_features = torch.cat([new_features, features2],
dim=1) #(B, mlp[-1] + C2, N2)
new_features = new_features.unsqueeze(-1)
new_features = self.post_mlp(new_features)
new_features_list.append(new_features)
return torch.cat(new_features_list, dim=1).squeeze(-1)
if __name__ == "__main__":
from torch.autograd import Variable
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)
xyz = Variable(torch.randn(2, 9, 3).cuda(), requires_grad=True)
xyz_feats = Variable(torch.randn(2, 9, 6).cuda(), requires_grad=True)
test_module = PointnetSAModuleMSG(
npoint=2, radii=[5.0, 10.0], nsamples=[6, 3], mlps=[[9, 3], [9, 6]]
)
test_module.cuda()
print(test_module(xyz, xyz_feats))
for _ in range(1):
_, new_features = test_module(xyz, xyz_feats)
new_features.backward(
torch.cuda.FloatTensor(*new_features.size()).fill_(1)
)
print(new_features)
print(xyz.grad)
| 3detr-main | third_party/pointnet2/pointnet2_modules.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from contextual.contextual_models import *
from contextual.contextual_linucb import *
from tqdm import trange
from collections import namedtuple
from cycler import cycler
import matplotlib.pyplot as plt
import cvxpy as cp
import numpy as np
from scipy.linalg import sqrtm
from scipy.optimize import minimize
from tqdm import trange, tqdm
from scipy.optimize import linprog
from joblib import Parallel, delayed
def work(m, rad, nb_arms, nb_features, noise, nb_simu, T, all_algs, random_state, M=1, bound_context=1):
# create model
K = nb_arms
    model = AttackOneUserModel(n_actions=K, n_features=nb_features, noise=noise, random_state=seed,
                               bound_context=bound_context, distance=rad)
theta_bound = np.max(np.linalg.norm(model.thetas, axis=1))
target_arm = model.n_actions - 1
target_context = np.random.randint(low=0, high=len(model.context_lists))
x_star = model.context_lists[target_context]
mask = np.ones(model.n_actions, dtype='int')
mask[target_arm] = 0
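    # If the target arm's parameter vector lies inside the convex hull of the
    # other arms' parameters, no context can make it the strictly best arm, so
    # such models are rejected before running the attack.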
print(in_hull(x=model.thetas[target_arm], points=np.array(model.thetas[mask])))
if in_hull(x=model.thetas[target_arm], points=np.array(model.thetas[mask])):
raise ValueError()
AAA = []
for alg_name in tqdm(all_algs.keys(), desc='Sim. model {}'.format(m)):
args = {'nb_arms': model.n_actions,
'dimension': model.n_features,
'bound_features': theta_bound,
'bound_context': model.bound_context,
'reg_factor': 0.1,
'delta': 0.01,
'noise_variance': noise,
}
alg = all_algs[alg_name](**args)
regret = np.zeros((nb_simu, T))
        draws = [[] for _ in range(nb_simu)]  # independent list per simulation (avoids list aliasing)
epsilon_norm = np.zeros((nb_simu, T))
        for k in trange(nb_simu, desc='Number of simulations'):
alg.reset()
for t in trange(T, desc='Iteration'):
context = model.get_context()
old_context = context
if 'Attacked' in alg_name:
if np.linalg.norm(context - x_star) < 10 ** -10:
if 'Relaxed' in alg_name:
epsilon = compute_relaxed_attack(alg, target_arm, context, slack=10 ** -4)
else:
epsilon = compute_attack(alg, target_arm, context, slack=10 ** -3)
else:
epsilon = np.zeros((model.n_features,))
context = context + epsilon
epsilon_norm[k, t] = np.linalg.norm(epsilon)
a_t = alg.get_action(context)
if np.linalg.norm(x_star - old_context) < 10 ** -10:
draws[k].append(1*(a_t == target_arm))
r_t = model.reward(old_context, a_t)
alg.update(context, a_t, r_t)
regret[k, t] = model.best_arm_reward(old_context) - np.dot(model.thetas[a_t], old_context)
draws[k] = np.cumsum(draws[k])[-1]
draws = np.array(draws)
AAA += [(alg_name, {"regret": regret, "attack_cond": epsilon_norm, "target_draws": draws})]
return m, AAA, model, rad
def in_hull(points, x):
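    # Feasibility LP for convex-hull membership: x is in conv(points) iff there
    # are weights w >= 0 (linprog's default bounds) with points.T @ w = x and
    # sum(w) = 1; the zero objective means only feasibility matters, reported
    # through lp.success.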
n_points = len(points)
n_dim = len(x)
c = np.zeros(n_points)
A = np.r_[points.T, np.ones((1, n_points))]
b = np.r_[x, np.ones(1)]
lp = linprog(c, A_eq=A, b_eq=b)
return lp.success
def compute_relaxed_attack(alg, a_star, x_star, slack=10**-10):
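    # Convex relaxation of the context attack: minimise ||delta||^2 / 2 subject
    # to, for every arm a != a_star, the optimistic (UCB) index of arm a at
    # x_star + delta falling below the point estimate of the target arm a_star.
    # The exact condition would also subtract the target arm's own confidence
    # width beta_{a_star} * ||P_{a_star} (x_star + delta)||, which is concave in
    # delta and would make the constraint non-convex; the commented-out term
    # below appears to be a linearisation of that width around x_star.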
d = alg.n_features
delta = cp.Variable(d)
obj = cp.Minimize(cp.quad_form(delta, np.eye(d))/2)
theta_a_star = alg.thetas_hat[a_star]
betas = alg.alpha()
constraints = []
P_a_star = sqrtm(alg.inv_design_matrices[a_star])
for a in range(len(alg.thetas_hat)):
if a != a_star:
theta = alg.thetas_hat[a]
P = sqrtm(alg.inv_design_matrices[a])
            temp_constraint = (theta - theta_a_star) @ (x_star + delta) + betas[a] * cp.norm(P @ (x_star + delta))
            # - betas[a_star] * (cp.norm(P_a_star @ x_star) + (alg.inv_design_matrices[a] @ x_star) @
            #   delta / cp.norm(P_a_star @ x_star))
constraints.append(temp_constraint + slack <= 0)
prob = cp.Problem(obj, constraints)
try:
prob.solve(verbose=False, max_iters=1000, feastol=10**-8)
epsilon = delta.value
# print('epsilon =', epsilon)
# for a in range(len(alg.thetas_hat)):
# if a != a_star:
# theta_a_star = alg.thetas_hat[a_star]
# betas = alg.alpha()
# theta = alg.thetas_hat[a]
# P = sqrtm(alg.inv_design_matrices[a])
# P_a_star = sqrtm(alg.inv_design_matrices[a_star])
# print('The constraint for arm {}:'.format(a), np.dot(theta - theta_a_star, (x_star+epsilon)) + betas[a]*np.linalg.norm(P.dot((x_star+epsilon))) \
# - betas[a_star] * (np.linalg.norm(P_a_star.dot(x_star)) +
# np.dot((alg.inv_design_matrices[a].dot(x_star)), epsilon)/np.linalg.norm(P_a_star.dot(x_star))))
except:
print('Exception')
epsilon = np.zeros((d,))
# print('epsilon =', epsilon)
if epsilon is None:
return np.zeros((d,))
return epsilon
def compute_attack(alg, a_star, x_star, slack=10 **-10):
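    # Exact version of the same attack, solved with a generic SLSQP solver:
    # here each constraint keeps the target arm's confidence width, i.e. it
    # forces the UCB index of every other arm below the UCB index of a_star,
    # which is non-convex; the search starts from delta = -x_star (the
    # perturbation that zeroes the context).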
d = alg.n_features
func = lambda delta: np.linalg.norm(delta)/2
theta_a_star = alg.thetas_hat[a_star]
P_a_star = sqrtm(alg.inv_design_matrices[a_star])
betas = alg.alpha()
constraints_list = []
for a in range(len(alg.thetas_hat)):
if a != a_star:
theta = alg.thetas_hat[a]
P = sqrtm(alg.inv_design_matrices[a])
temp_constraint = lambda delta, P=P, P_a_star=P_a_star, theta=theta, theta_a_star=theta_a_star, beta=betas[a], beta_a_star=betas[a_star]: \
-((theta - theta_a_star).dot((x_star + delta)) + beta * np.linalg.norm(P.dot((x_star + delta)))
- beta_a_star * np.linalg.norm(P_a_star.dot((x_star + delta))) + slack)
temp_cons = {'type': 'ineq', 'fun': temp_constraint}
constraints_list.append(temp_cons)
cons = tuple(constraints_list)
res = minimize(func, -x_star, method='SLSQP', constraints=cons)
# print(res.message)
try:
epsilon = res.x
except:
epsilon = np.zeros((d,))
if epsilon is None:
return np.zeros((d,))
return epsilon
n = 10 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
# linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--', ':', '-.', '-'])
# plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
MABResults = namedtuple('MABResults', 'regret, attack_cond, target_draws')
seed = np.random.randint(0, 10 ** 5)
print('seed = ', seed)
noise = 0.1
nb_radius = 4
radius = np.linspace(1/10, 1/2, nb_radius)
#radius = np.array([1/4, 1/10])
T = int(3*10**3)
nb_simu = 4
nb_arms = 9
n_features = 10
results = []
la = 0.1
parallel = True
algorithms = {
'LinUCB': lambda nb_arms, dimension, bound_features, bound_context, reg_factor=0.1, delta=0.01, noise_variance=noise:
ContextualLinearBandit(reg_factor=la,
delta=delta,
nb_arms=nb_arms,
dimension=dimension,
noise_variance=noise_variance,
bound_features=bound_features,
bound_context=bound_context),
'LinUCB RelaxedAttacked': lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=0.01,
noise_variance=noise:
ContextualLinearBandit(reg_factor=la,
delta=delta,
nb_arms=nb_arms,
dimension=dimension,
noise_variance=noise_variance,
bound_features=bound_features,
bound_context=bound_context),
# 'LinUCB-Attacked': ContextualLinearBandit(nb_arms=model.n_actions, dimension=model.n_features,
# reg_factor=0.1, delta=0.99,
# bound_features=np.max(np.linalg.norm(model.thetas, axis=1)),
# noise_variance=noise, bound_context=model.bound_context),
# 'LinUCB-RelaxedAttacked': ContextualLinearBandit(nb_arms=model.n_actions, dimension=model.n_features,
# reg_factor=0.1, delta=0.01,
# bound_features=np.max(np.linalg.norm(model.thetas, axis=1)),
# noise_variance=noise, bound_context=model.bound_context)
}
if parallel:
import multiprocessing
num_cores = multiprocessing.cpu_count()
results = Parallel(n_jobs=num_cores, verbose=1)(
delayed(work)(m=i, rad=r, nb_arms=nb_arms, nb_features = n_features, noise = noise,
nb_simu=nb_simu, T=T, all_algs=algorithms, random_state=0, M=1, bound_context=1)
for i, r in enumerate(radius))
else:
for i, r in enumerate(radius):
ret = work(m=i, rad=r, nb_arms=nb_arms, nb_features = n_features, noise = noise,
nb_simu=nb_simu, T=T, all_algs=algorithms, random_state=0, M=1, bound_context=1)
results.append(ret)
n = 9 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--', ':', '-.', '-'])
plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
for alg_name, res in results[0][1]:
algorithms[alg_name] = {'draws': np.zeros((nb_radius, nb_simu))}
for m in range(len(radius)):
res = results[m][1]
for i, val in enumerate(res):
alg_name = val[0]
val = val[1]
algorithms[alg_name]['draws'][m] = np.array(val['target_draws'])
plt.figure(1, figsize=(8, 8))
t = np.linspace(0, T-1, T, dtype='int')
for alg_name, res in algorithms.items():
res['draws'] = np.array(res['draws'])
mean_draws = np.mean(res['draws'], axis=(1))
low_quantile = np.quantile(res['draws'], 0.1, axis=(1))
high_quantile = np.quantile(res['draws'], 1 - 0.1, axis=(1))
plt.plot(radius, mean_draws, label=alg_name)
plt.fill_between(radius, low_quantile, high_quantile, alpha=0.15)
plt.title('Number of target draws at T={}'.format(T))
print(mean_draws)
plt.legend()
plt.show()
# if n_features == 2:
# for i, (alg_name, val) in enumerate(results):
# plt.figure(i + 3)
# plt.title('Confidence ellipse for {}'.format(alg_name))
# x = np.linspace(0, 2*np.pi)
# x_1 = np.cos(x)
# y_1 = np.sin(x)
# X = np.vstack((x_1, y_1))
# betas = val.betas
# for a in range(model.n_actions):
# center = val.thetas[-1, -1, a]
# V = sqrtm(val.design_matrix[a])
# y = center.reshape((2, 1)) + betas[a] * np.dot(V, X)
# plt.plot(y[0, :], y[1, :], label = 'confidence ellipse arm {}'.format(a))
# plt.fill_between(y[0,:], y[1,:], (center.reshape((2, 1))*np.ones((2, 50)))[1, :], alpha=0.15)
# plt.scatter(model.thetas[a][0],model.thetas[a][1], c=new_colors[a])
# plt.scatter(center[0], center[1], marker='^', c=new_colors[a])
# plt.scatter(x_star[0], x_star[1], marker='+', c = new_colors[-1])
# plt.legend()
# plt.show()
#
# plt.figure(4)
# plt.title('Error true env and learned env attack context')
# for a in range(model.n_actions):
# error = np.linalg.norm(val.thetas[:, :, a] - model.thetas[a], axis=2)
# plt.plot(t, np.mean(error, axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(error, 0.1, axis=0)
# high_quantile = np.quantile(error, 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
# plt.figure(7)
# plt.title('Error biased env and learned env attack context')
# for a in range(model.n_actions):
# error = np.linalg.norm(val.thetas[:, :, a] - model.thetas[a]/np.maximum(2*np.dot(model.thetas[a], x_star)/np.dot(model.thetas[target_arm], x_star), 1), axis=2)
# plt.plot(t, np.mean(error, axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(error, 0.1, axis=0)
# high_quantile = np.quantile(error, 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
# plt.figure(5)
# plt.title('Difference estimated reward for target context {}'.format(target_context))
# for a in range(model.n_actions):
# plt.plot(t, np.mean(val.prod_scalar[:, :, a, target_context], axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(val.prod_scalar[:, :, a, target_context], 0.1, axis=0)
# high_quantile = np.quantile(val.prod_scalar[:, :, a, target_context], 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
# plt.figure(6)
# plt.title('Difference estimated reward for a random non target context {}'.format(other_context))
# for a in range(model.n_actions):
# plt.plot(t, np.mean(val.prod_scalar[:, :, a, other_context], axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(val.prod_scalar[:, :, a, other_context], 0.1, axis=0)
# high_quantile = np.quantile(val.prod_scalar[:, :, a, other_context], 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
plt.show()
#print('Target arms=', np.mean(np.cumsum(nb_target_arms,axis=1)))
#print('Attack needed arms=', np.mean(np.cumsum(nb_attack_needed,axis=1))) | ContextualBanditsAttacks-main | isoexp/test_distance_attack_one_user.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
sys.path.append('/private/home/broz/workspaces/bandits_attacks')
import isoexp.contextual.contextual_models as arms
from isoexp.contextual.contextual_linucb import *
from tqdm import tqdm
from cycler import cycler
import matplotlib.pyplot as plt
from joblib import Parallel, delayed
import math
import pickle
from tqdm import trange
import json
import datetime
from collections import namedtuple
import re
import os
frequency = 100
class exp(object):
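    # Simple expert policies for Exp4: 'random' plays uniformly, 'optimal'
    # plays the arm with the highest true mean for the current context, and
    # any other type deterministically plays the fixed arm a_star.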
def __init__(self, nb_arms, type='random', a_star=0, m=None):
self.K = nb_arms
self.type = type
self.a_star = a_star
self.m = m
def get_action(self, context):
if self.type == 'random':
return np.ones((self.K,)) / self.K
elif self.type == 'optimal':
means = np.dot(self.m.thetas, context)
a = np.argmax(means)
proba = np.zeros((self.K,))
proba[a] = 1
return proba
else:
proba = np.zeros((self.K,))
proba[self.a_star] = 1
return proba
def work(m, nb_arms, nb_features, noise, nb_simu, T, all_algs, random_state, M=1, bound_context=1, dataset=False, which=None):
# create model
K = nb_arms
if dataset:
if which == 'jester':
arm_file = os.path.abspath("examples/jester/Vt_jester.csv")
user_file = os.path.abspath("examples/jester/U.csv")
model = arms.DatasetModel(arm_csvfile=arm_file, user_csvfile=user_file, noise=noise, random_state=random_state)
else:
arm_file = os.path.abspath('examples/movielens/Vt_movielens.csv')
user_file = os.path.abspath('examples/movielens/U.csv')
model = arms.DatasetModel(arm_csvfile=arm_file, user_csvfile=user_file, noise=noise, random_state=random_state, arms_limit=25)
else:
model = arms.RandomContextualLinearArms(n_actions=K, n_features=nb_features, noise=noise,
random_state=random_state, bound_context=bound_context)
theta_bound = np.max(np.linalg.norm(model.thetas, axis=1))
target_context = np.random.randint(low=0, high=len(model.context_lists))
other_context = np.random.randint(low=0, high=len(model.context_lists))
# while other_context == target_context:
# other_context = np.random.randint(low=0, high=len(model.context_lists))
target_arm = np.random.randint(low=0, high=model.n_actions)
AAA = []
for alg_name in tqdm(all_algs.keys(), desc='Sim. model {}'.format(m)):
args = {'nb_arms': model.n_actions,
'dimension': model.n_features,
'bound_features': theta_bound,
'bound_context': model.bound_context,
'reg_factor': 0.1,
'delta': delta,
'noise_variance': noise,
}
if 'Exp4' in alg_name:
eta = np.sqrt(2 * np.log(M) / (T * model.n_actions))
experts = []
for i in range(M - 2):
experts.append(exp(nb_arms=model.n_actions, type='random'))
experts.append(exp(nb_arms=model.n_actions, type='optimal', m=model))
experts.append(exp(nb_arms=model.n_actions, type='', a_star=int(target_arm)))
args['experts'] = experts
args['eta'] = eta
alg = all_algs[alg_name](**args)
if 'attacked' in alg_name:
if 'gamma' in alg_name:
temp_eps = re.findall(r'[\d\.\d]+', alg_name)
temp_eps = np.array(list(map(float, temp_eps)))
temp_eps = temp_eps[temp_eps<=1]
temp_eps = temp_eps[0]
temp_args = args.copy()
temp_args['eps'] = temp_eps
attacker = RewardAttacker(**temp_args)
regret = np.zeros((nb_simu, T//frequency)) #[[]] * nb_simu #np.zeros((nb_simu, T))
draws = regret.copy()
epsilon_norm = np.zeros((nb_simu, T//frequency)) #[[]] * nb_simu #np.zeros((nb_simu, T))
# thetas_alg = np.zeros((nb_simu, T, model.n_actions, model.n_features))
# prod_scalar = np.zeros((nb_simu, T, model.n_actions, model.n))
rewards_range = np.zeros((nb_simu, T//frequency)) #[[]] * nb_simu # np.zeros((nb_simu, T))
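        # Metrics are accumulated over `frequency` steps and written to a
        # single slot, so all logging arrays have T // frequency columns.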
for k in range(nb_simu):
alg.reset()
if 'attacked' in alg_name and not 'stationary' in alg_name:
attacker.reset()
attack_acumulator = 0
regret_accumulator = 0
rewards_range_max = 0
draws_accumulator = 0
for t in trange(T):
context = model.get_context()
a_t = alg.get_action(context)
r_t = model.reward(context, a_t)
if 'attacked' in alg_name:
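                    # Reward attack: the learned attacker (RewardAttacker)
                    # perturbs the observed reward based on its own running
                    # estimates, while the 'stationary' variant simply cancels
                    # the reward (up to fresh noise) whenever the pulled arm is
                    # not the target arm; rewards are then clipped to [0, 1].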
if not 'stationary' in alg_name:
attacker.update(context, a_t, r_t)
attack_t = attacker.compute_attack(a_t, context, target_arm)
else:
if a_t != target_arm:
attack_t = -r_t + noise*np.random.randn()
else:
attack_t = 0
# print('attack_t =', attack_t)
else:
attack_t = 0
alg.update(context, a_t, min(1, max(0, r_t+attack_t)))
attack_acumulator+= np.abs(attack_t)
regret_accumulator+= model.best_arm_reward(context) - np.dot(model.thetas[a_t], context)
rewards_range_max = max(rewards_range_max, min(1, max(r_t + attack_t, 0)))
draws_accumulator +=1 if a_t == target_arm else 0
if t % frequency == 0: # logging
epsilon_norm[k, t // frequency]= attack_acumulator
regret[k, t // frequency]= regret_accumulator
rewards_range[k, t // frequency]= rewards_range_max
draws[k, t // frequency]= draws_accumulator
attack_acumulator = 0
regret_accumulator = 0
rewards_range_max = 0
draws_accumulator = 0
# print('reward = ', min(1, max(r_t + attack_t, 0)))
# print('Target arm =', target_arm, 'a_t =', a_t)
# alg.update(context, a_t, r_t + attack_t)
# if hasattr(alg, 'thetas_hat'):
# thetas_alg[k, t] = alg.thetas_hat
# for a in range(model.n_actions):
# for i, x in enumerate(model.context_lists):
# if 'attacked' in alg_name:
# p = np.dot(alg.thetas_hat[a], x) - (1 - attacker.eps) * np.dot(model.thetas[target_arm], x)
# else:
# p = np.dot(alg.thetas_hat[a], x) - np.dot(model.thetas[target_arm], x)
# prod_scalar[k, t, a, i] = p
# print('-'*100)
# print('r_t =', r_t)
# print('atttack_t =', attack_t)
# print('r_t + attack_t = ', r_t + attack_t)
# rewards_range[k, t] = min(1, max(r_t + attack_t, 0))
AAA += [(alg_name, {"regret": regret, "attack_cond": epsilon_norm, "target_draws": draws, "thetas": (),
"prod_scalar": (), "range_rewards": rewards_range})]
return m, AAA, model, target_arm
def run_and_output(dataset=None):
results = []
if PARALLEL:
import multiprocessing
num_cores = multiprocessing.cpu_count()
results = Parallel(n_jobs=num_cores, verbose=1)(
delayed(work)(m=m, nb_arms=K, nb_features=n_features, noise=a_noise,
nb_simu=nb_simu, T=T, all_algs=algorithms,
random_state=random_state + m, M=M, which=dataset) for m in range(nb_models))
else:
for m in tqdm(range(nb_models)):
ret = work(m, K, n_features, a_noise, nb_simu, T, algorithms, random_state + m, M=M)
results.append(ret)
id = '{:%Y%m%d_%H%M%S}'.format(datetime.datetime.now())
pickle_name = "{}_{}_{}_contextual_attacks_rewards.pickle".format(dataset, id, "PAR" if PARALLEL else "SEQ")
print(pickle_name)
with open(pickle_name, "wb") as f:
pickle.dump(results, f)
with open("{}_{}_{}_contextual_attacks_rewards_settings.json".format(dataset, id, "PAR" if PARALLEL else "SEQ"), "w+") as f:
json.dump(settings, f)
return results, pickle_name, id,
if __name__ == '__main__':
PARALLEL = False
print("PARALLEL: {}".format(PARALLEL))
MABResults = namedtuple('MABResults', 'regret, attack_cond, target_draws, thetas, prod_scalar')
random_state = np.random.randint(0, 123123)
np.random.seed(random_state)
print("seed: {}".format(random_state))
K = 10
n_features = 30
a_noise = 0.1
delta = 0.01
la = 0.1
T = 1*10**6 # horizon
nb_models = 5
nb_simu = 25
M = 5
# attack_parameter_to_test = np.linspace(0, 1, 10)
attack_parameter_to_test = np.array([1/2])
settings = {
"T": T,
"nb_simu": nb_simu,
"nb_models": nb_models,
"random_state": random_state,
"K": K,
"dimension": n_features,
"epsilon_tested": list(attack_parameter_to_test),
'frequency': frequency
}
algorithms = {
'LinUCB': lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta, noise_variance=a_noise:
ContextualLinearBandit(reg_factor=la,
delta=delta,
nb_arms=nb_arms,
dimension=dimension,
noise_variance=noise_variance,
bound_features=bound_features,
bound_context=bound_context),
'LinTS': lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta,
noise_variance=a_noise: ContextualLinearTS(nb_arms=nb_arms,
dimension=dimension,
reg_factor=reg_factor,
delta=delta,
noise_variance=noise_variance),
'Exp4': lambda nb_arms, dimension, experts, eta, bound_features, bound_context, reg_factor=la, delta=delta,
noise_variance=a_noise: Exp4(nb_arms=nb_arms,
dimension=dimension,
experts=experts,
eta=eta,
gamma=0),
'eps-greedy': lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta,
noise_variance=a_noise: contextEpsGREEDY(number_arms=nb_arms, dimension=dimension,
decrease_epsilon=True, reg_factor=reg_factor),
}
algorithms.update({
'LinUCB attacked stationary': lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta, noise_variance=a_noise:
ContextualLinearBandit(reg_factor=la,
delta=delta,
nb_arms=nb_arms,
dimension=dimension,
noise_variance=noise_variance,
bound_features=bound_features,
bound_context=bound_context),
'LinTS attacked stationary': lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta,
noise_variance=a_noise: ContextualLinearTS(nb_arms=nb_arms,
dimension=dimension,
reg_factor=reg_factor,
delta=delta,
noise_variance=noise_variance),
'Exp4 attacked stationary': lambda nb_arms, dimension, experts, eta, bound_features, bound_context, reg_factor=la, delta=delta,
noise_variance=a_noise: Exp4(nb_arms=nb_arms,
dimension=dimension,
experts=experts,
eta=eta,
gamma=0),
'eps-greedy attacked stationary': lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta,
noise_variance=a_noise: contextEpsGREEDY(number_arms=nb_arms, dimension=dimension,
decrease_epsilon=True, reg_factor=reg_factor),
})
for eps in attack_parameter_to_test:
algorithms.update({
# 'LinUCB attacked gamma {}'.format(eps): lambda nb_arms, dimension, bound_features, bound_context,
# reg_factor=la, delta=delta, noise_variance=a_noise:
# ContextualLinearBandit(reg_factor=la,
# delta=delta,
# nb_arms=nb_arms,
# dimension=dimension,
# noise_variance=noise_variance,
# bound_features=bound_features,
# bound_context=bound_context),
# 'LinTS attacked gamma {}'.format(eps): lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta,
# noise_variance=a_noise: ContextualLinearTS(nb_arms=nb_arms,
# dimension=dimension,
# reg_factor=reg_factor,
# delta=delta,
# noise_variance=noise_variance),
# 'Exp4 attacked gamma {}'.format(eps): lambda nb_arms, dimension, experts, eta, bound_features, bound_context, reg_factor=la, delta=delta,
# noise_variance=a_noise: Exp4(nb_arms=nb_arms,
# dimension=dimension,
# experts=experts,
# eta=eta,
# gamma=0),
#
# 'eps-greedy attacked gamma {}'.format(eps): lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta,
# noise_variance=a_noise: contextEpsGREEDY(number_arms=nb_arms, dimension=dimension,
# decrease_epsilon=True, reg_factor=reg_factor)
'LinUCB attacked gamma {}'.format(eps): lambda nb_arms, dimension, bound_features, bound_context,
reg_factor=la, delta=delta, noise_variance=a_noise:
ContextualLinearBandit(reg_factor=la,
delta=delta,
nb_arms=nb_arms,
dimension=dimension,
noise_variance=noise_variance,
bound_features=bound_features,
bound_context=bound_context),
'LinTS attacked gamma {}'.format(eps): lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta,
noise_variance=a_noise: ContextualLinearTS(nb_arms=nb_arms,
dimension=dimension,
reg_factor=reg_factor,
delta=delta,
noise_variance=noise_variance/5),
'Exp4 attacked gamma {}'.format(eps): lambda nb_arms, dimension, experts, eta, bound_features, bound_context, reg_factor=la, delta=delta,
noise_variance=a_noise: Exp4(nb_arms=nb_arms,
dimension=dimension,
experts=experts,
eta=eta,
gamma=0),
'eps-greedy attacked gamma {}'.format(eps): lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta,
noise_variance=a_noise: contextEpsGREEDY(number_arms=nb_arms, dimension=dimension,
decrease_epsilon=True, reg_factor=reg_factor)
})
print(algorithms)
# results, pickle_name, id = run_and_output(dataset=None)
# results, pickle_name, id = run_and_output(dataset='jester')
results, pickle_name, id = run_and_output(dataset='movielens')
else:
for m in tqdm(range(nb_models)):
ret = work(m, K, n_features, a_noise, nb_simu, T, algorithms, random_state + m, M=M, dataset=true_data, which=which)
results.append(ret)
# id = '{:%Y%m%d_%H%M%S}'.format(datetime.datetime.now())
# pickle_name = "{}_{}_contextual_attacks_rewards.pickle".format(id, "PAR" if PARALLEL else "SEQ")
# print(pickle_name)
# with open(pickle_name, "wb") as f:
# pickle.dump(results, f)
# with open("{}_{}_contextual_attacks_rewards_settings.json".format(id, "PAR" if PARALLEL else "SEQ"), "w+") as f:
# json.dump(settings, f)
n = 9 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--', ':', '-.', '-'])
plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
for alg_name, res in results[0][1]:
        algorithms[alg_name] = {'regret': np.zeros((nb_models, nb_simu, T // frequency)),
                                'cost': np.zeros((nb_models, nb_simu, T // frequency))}
for m in range(nb_models):
res = results[m][1]
for i, val in enumerate(res):
alg_name = val[0]
val = val[1]
algorithms[alg_name]['regret'][m, :, :] = val['regret']
algorithms[alg_name]['cost'][m, :, :] = val['attack_cond']
plt.figure(1, figsize=(8, 8))
t = np.linspace(0, T-1, T, dtype='int')
for alg_name, res in algorithms.items():
res['regret'] = res['regret'].cumsum(axis=2)
mean_regret = np.mean(res['regret'], axis=(0, 1))
low_quantile = np.quantile(res['regret'], 0.1, axis=(0, 1))
high_quantile = np.quantile(res['regret'], 1 - 0.1, axis=(0, 1))
plt.plot(mean_regret, label=alg_name)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.title('Cumulative regret')
plt.legend()
plt.show()
# n = 9 # Number of colors
# new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
# linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--', ':', '-.', '-'])
# plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
# plt.rc('lines', linewidth=2)
# for alg_name, res in results[0][1]:
# algorithms[alg_name] = {'regret': np.zeros((nb_models, nb_simu, T)),
# 'cost': np.zeros((nb_models, nb_simu, T))}
# for m in range(nb_models):
# res = results[m][1]
# for i, val in enumerate(res):
# alg_name = val[0]
# val = val[1]
# algorithms[alg_name]['regret'][m, :, :] = val['regret']
# algorithms[alg_name]['cost'][m, :, :] = val['attack_cond']
# plt.figure(1, figsize=(8, 8))
# t = np.linspace(0, T-1, T, dtype='int')
# for alg_name, res in algorithms.items():
# res['regret'] = res['regret'].cumsum(axis=2)
# mean_regret = np.mean(res['regret'], axis=(0, 1))
# low_quantile = np.quantile(res['regret'], 0.1, axis=(0, 1))
# high_quantile = np.quantile(res['regret'], 1 - 0.1, axis=(0, 1))
# plt.plot(mean_regret, label=alg_name)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.title('Cumulative regret')
#
# plt.legend()
#
# plt.figure(2, figsize=(8,8))
# t = np.linspace(0, T-1, T, dtype='int')
# for alg_name, res in algorithms.items():
# res['cost'] = res['cost'].cumsum(axis=2)
# mean_regret = np.mean(res['cost'], axis=(0, 1))
# low_quantile = np.quantile(res['cost'], 0.1, axis=(0, 1))
# high_quantile = np.quantile(res['cost'], 1 - 0.1, axis=(0, 1))
# plt.plot(mean_regret, label=alg_name)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.title('Total cost')
#
# plt.legend()
#
# plt.show()
# for res in results:
# alg_name, val = res[1][0], res[1][1]
# print(alg_name)
# mean_regret = np.mean(val.regret.cumsum(axis=1), axis=0)
# t = np.linspace(0, T, T, dtype='int')
# low_quantile = np.quantile(val.regret.cumsum(axis=1), 0.1, axis=0)
# high_quantile = np.quantile(val.regret.cumsum(axis=1), 0.9, axis=0)
#
# plt.figure(0)
# plt.title('Cumulative Regret')
# plt.plot(mean_regret, label=alg_name)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
# mean_condition = np.mean(np.cumsum(val.target_draws == target_arm, axis=1), axis=0)
# low_quantile = np.quantile(val.attack_cond, 0.1, axis=0)
# high_quantile = np.quantile(val.attack_cond, 0.9, axis=0)
# plt.figure(1)
# plt.title('Draws target arm')
# plt.plot(mean_condition, label=alg_name)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
# plt.figure(2)
# plt.title('Cumulative attack norm attacked reward')
# if 'Attacked' in alg_name:
# plt.plot(np.mean(np.cumsum(val.attack_cond, axis=1), axis=0), label=alg_name)
# low_quantile = np.quantile(np.cumsum(val.attack_cond, axis=1), 0.1, axis=0)
# high_quantile = np.quantile(np.cumsum(val.attack_cond, axis=1), 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
#
#
# plt.figure(4)
# plt.title('Error true env and learned env')
# for a in range(model.n_actions):
# error = np.linalg.norm(val.thetas[:, :, a] - model.thetas[a], axis=2)
# plt.plot(t, np.mean(error, axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(error, 0.1, axis=0)
# high_quantile = np.quantile(error, 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
# if 'weak' in alg_name:
# plt.figure(6)
# plt.title('Difference estimated reward random context {}'.format(other_context))
# for a in range(model.n_actions):
# plt.plot(t, np.mean(val.prod_scalar[:, :, a, other_context], axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(val.prod_scalar[:, :, a, other_context], 0.1, axis=0)
# high_quantile = np.quantile(val.prod_scalar[:, :, a, other_context], 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
#
# if not 'weak' in alg_name:
# plt.figure(7)
# plt.title('Error biased env and learned env')
# for a in range(model.n_actions):
# error = np.linalg.norm(val.thetas[:, :, a] - model.thetas[target_arm]*(1 - attack_parameter), axis=2)
# plt.plot(t, np.mean(error, axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(error, 0.1, axis=0)
# high_quantile = np.quantile(error, 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
# plt.figure(8)
# plt.title('Number of pulls target arm attack context')
# plt.plot(t, np.mean(np.cumsum(val.target_draws == target_arm, axis=1), axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(np.cumsum(val.target_draws == target_arm, axis=1), 0.1, axis=0)
# high_quantile = np.quantile(np.cumsum(val.target_draws == target_arm, axis=1), 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
| ContextualBanditsAttacks-main | isoexp/devfair_reward_attack.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from isoexp.contextual.contextual_models import RandomContextualLinearArms
from isoexp.contextual.contextual_linucb import *
from tqdm import trange
from collections import namedtuple
from cycler import cycler
import matplotlib.pyplot as plt
import cvxpy as cp
def compute_attack(model, action, context, a_star, x_star, slack=10**-3):
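    # Context-scaling attack: when the learner is about to pull a non-target
    # arm, the context is multiplied by
    #   delta = max(2 * <theta_a, x_star> / <theta_a_star, x_star>, 1),
    # i.e. epsilon = (delta - 1) * context is the additive perturbation that
    # realises this scaling; the target arm, and rounds where delta = 1,
    # are left untouched.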
if action != a_star:# and np.linalg.norm(context - x_star) < 10**-5:
delta = np.maximum(2*np.dot(model.thetas[action], x_star)/np.dot(model.thetas[a_star], x_star), 1)
epsilon = (delta - 1)*context
return epsilon
else:
return np.zeros((model.n_features,))
n = 6 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--'])
plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
MABResults = namedtuple('MABResults', 'regret, attack_cond, target_draws, thetas, prod_scalar, context_norm')
seed = np.random.randint(0, 10 ** 5)
print('seed = ', seed)
noise = 0.1
model = RandomContextualLinearArms(n_actions=3, n_features=10, noise=noise, random_state=seed, bound_context=1)
model.thetas = model.thetas
theta_bound = np.max(np.linalg.norm(model.thetas, 2, axis=(1)))
target_context = np.random.randint(low=0, high=len(model.context_lists))
other_context = np.random.randint(low=0, high=len(model.context_lists))
while other_context == target_context:
other_context = np.random.randint(low=0, high=len(model.context_lists))
x_star = model.context_lists[target_context]
means_x_star = np.dot(model.thetas, x_star)
#target_arm = np.random.randint(low=0, high=model.n_actions)
target_arm = np.argmin(means_x_star)
T = 1*10**4
nb_simu = 5
print('a star=', target_arm)
print('x_star', x_star)
print('means for context x_star:', np.dot(model.thetas, x_star))
algorithms = {
'LinUCB': ContextualLinearBandit(nb_arms=model.n_actions, dimension=model.n_features,
reg_factor=0.1, delta=0.99,
bound_features=np.max(np.linalg.norm(model.thetas, axis=1)),
noise_variance=noise, bound_context=model.bound_context),
'LinUCB-Attacked': ContextualLinearBandit(nb_arms=model.n_actions, dimension=model.n_features,
reg_factor=0.1, delta=0.99,
bound_features=np.max(np.linalg.norm(model.thetas, axis=1)),
noise_variance=noise, bound_context=model.bound_context)
}
results = []
for alg_name, alg in algorithms.items():
regret = np.zeros((nb_simu, T))
nb_target_arms = np.zeros((nb_simu, T))
nb_attack_needed = np.zeros((nb_simu, T))
attack_condition = np.zeros((nb_simu, T))
draws = np.zeros((nb_simu, T))
context_norm = draws.copy()
epsilon_norm = np.zeros((nb_simu, T))
thetas_alg = np.zeros((nb_simu, T, model.n_actions, model.n_features))
prod_scalar = np.zeros((nb_simu, T, model.n_actions, model.n))
for k in trange(nb_simu, desc='Simulating {}'.format(alg_name)):
alg.reset()
for t in range(T):
context = model.get_context()
a_t = alg.get_action(context)
old_context = context
if alg_name == 'LinUCB-Attacked':
epsilon = compute_attack(model, a_t, context, target_arm, x_star)
context = context + epsilon
alg.iteration -= 1
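                # Roll back the algorithm's internal round counter so that the
                # second get_action call (on the perturbed context) does not
                # advance it twice for a single environment step.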
a_t = alg.get_action(context)
epsilon_norm[k, t] = np.linalg.norm(epsilon)
thetas_alg[k, t] = alg.thetas_hat
for a in range(model.n_actions):
for i, x in enumerate(model.context_lists):
p = np.dot(model.thetas[a], x) - np.dot(alg.thetas_hat[a], x)
prod_scalar[k, t, a, i] = p
r_t = model.reward(old_context, a_t)
alg.update(context, a_t, r_t)
regret[k, t] = model.best_arm_reward(old_context) - np.dot(model.thetas[a_t], old_context)
context_norm[k, t] = np.linalg.norm(context)
draws[k, t] = a_t
results += [(alg_name, MABResults(regret=regret, attack_cond=attack_condition, target_draws=draws,
thetas=thetas_alg, prod_scalar=prod_scalar, context_norm=context_norm))]
print('Target arm =', target_arm)
print('draws = ', np.mean(np.cumsum(draws == target_arm, axis=1),axis=0))
for i, (alg_name, val) in enumerate(results):
mean_regret = np.mean(val.regret.cumsum(axis=1), axis=0)
t = np.linspace(0, T, T, dtype='int')
low_quantile = np.quantile(val.regret.cumsum(axis=1), 0.1, axis=0)
high_quantile = np.quantile(val.regret.cumsum(axis=1), 0.9, axis=0)
plt.figure(0)
plt.title('Regret Attacked context')
plt.plot(mean_regret, label=alg_name)
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.legend()
# mean_condition = np.mean(np.cumsum(val.target_draws == target_arm, axis=1), axis=0)
# low_quantile = np.quantile(val.attack_cond, 0.1, axis=0)
# high_quantile = np.quantile(val.attack_cond, 0.9, axis=0)
# plt.figure(1)
# plt.title('Draws target arm')
# plt.plot(mean_condition, label=alg_name)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
if 'Attacked' in alg_name:
plt.figure(2)
plt.title('Cumulative attack norm attacked context')
plt.plot(np.mean(np.cumsum(epsilon_norm, axis=1), axis=0), label=alg_name)
low_quantile = np.quantile(np.cumsum(epsilon_norm, axis=1), 0.1, axis=0)
high_quantile = np.quantile(np.cumsum(epsilon_norm, axis=1), 0.9, axis=0)
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.legend()
plt.figure(4)
plt.title('Error true env and learned env attack context')
for a in range(model.n_actions):
error = np.linalg.norm(val.thetas[:, :, a] - model.thetas[a], axis=2)
plt.plot(t, np.mean(error, axis=0), label=alg_name + ' arm {}'.format(a))
low_quantile = np.quantile(error, 0.1, axis=0)
high_quantile = np.quantile(error, 0.9, axis=0)
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.legend()
plt.figure(7)
plt.title('Error biased env and learned env attack context')
for a in range(model.n_actions):
error = np.linalg.norm(val.thetas[:, :, a] - model.thetas[a]/np.maximum(2*np.dot(model.thetas[a], x_star)/np.dot(model.thetas[target_arm], x_star), 1), axis=2)
plt.plot(t, np.mean(error, axis=0), label=alg_name + ' arm {}'.format(a))
low_quantile = np.quantile(error, 0.1, axis=0)
high_quantile = np.quantile(error, 0.9, axis=0)
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.legend()
plt.figure(5)
plt.title('Difference estimated reward for target context {}'.format(target_context))
for a in range(model.n_actions):
plt.plot(t, np.mean(val.prod_scalar[:, :, a, target_context], axis=0), label=alg_name + ' arm {}'.format(a))
low_quantile = np.quantile(val.prod_scalar[:, :, a, target_context], 0.1, axis=0)
high_quantile = np.quantile(val.prod_scalar[:, :, a, target_context], 0.9, axis=0)
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.legend()
plt.figure(6)
plt.title('Difference estimated reward for a random non target context {}'.format(other_context))
for a in range(model.n_actions):
plt.plot(t, np.mean(val.prod_scalar[:, :, a, other_context], axis=0), label=alg_name + ' arm {}'.format(a))
low_quantile = np.quantile(val.prod_scalar[:, :, a, other_context], 0.1, axis=0)
high_quantile = np.quantile(val.prod_scalar[:, :, a, other_context], 0.9, axis=0)
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.legend()
plt.figure(8)
plt.title('Number of pulls target arm attack context')
plt.plot(t, np.mean(np.cumsum(val.target_draws == target_arm, axis=1), axis=0), label=alg_name + ' arm {}'.format(a))
low_quantile = np.quantile(np.cumsum(val.target_draws == target_arm, axis=1), 0.1, axis=0)
high_quantile = np.quantile(np.cumsum(val.target_draws == target_arm, axis=1), 0.9, axis=0)
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.legend()
plt.show()
#print('Target arms=', np.mean(np.cumsum(nb_target_arms,axis=1)))
#print('Attack needed arms=', np.mean(np.cumsum(nb_attack_needed,axis=1))) | ContextualBanditsAttacks-main | isoexp/main_attacked_context.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
__version__ = '0.0.dev0'
| ContextualBanditsAttacks-main | isoexp/__init__.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# NOTE: module paths for the isoexp imports below are assumed (they follow the
# sibling scripts in this package).
from collections import namedtuple
import numpy as np
import matplotlib.pyplot as plt
import cvxpy as cp
from cycler import cycler
from scipy.linalg import sqrtm
from scipy.optimize import linprog, minimize
from tqdm import trange
from isoexp.contextual.contextual_models import AttackOneUserModel
from isoexp.contextual.contextual_linucb import *
def in_hull(points, x):
n_points = len(points)
n_dim = len(x)
c = np.zeros(n_points)
A = np.r_[points.T, np.ones((1, n_points))]
b = np.r_[x, np.ones(1)]
lp = linprog(c, A_eq=A, b_eq=b)
return lp.success
def compute_relaxed_attack(alg, a_star, x_star, slack=10**-10):
d = model.n_features
delta = cp.Variable(d)
obj = cp.Minimize(cp.quad_form(delta, np.eye(d))/2)
theta_a_star = alg.thetas_hat[a_star]
betas = alg.alpha()
constraints = []
P_a_star = sqrtm(alg.inv_design_matrices[a_star])
for a in range(len(alg.thetas_hat)):
if a != a_star:
theta = alg.thetas_hat[a]
P = sqrtm(alg.inv_design_matrices[a])
            temp_constraint = (theta - theta_a_star) @ (x_star + delta) + betas[a] * cp.norm(P @ (x_star + delta))
            # - betas[a_star] * (cp.norm(P_a_star @ x_star) + (alg.inv_design_matrices[a] @ x_star) @
            #   delta / cp.norm(P_a_star @ x_star))
constraints.append(temp_constraint + slack <= 0)
prob = cp.Problem(obj, constraints)
try:
prob.solve(verbose=False, max_iters=1000, feastol=10**-8)
epsilon = delta.value
# print('epsilon =', epsilon)
# for a in range(len(alg.thetas_hat)):
# if a != a_star:
# theta_a_star = alg.thetas_hat[a_star]
# betas = alg.alpha()
# theta = alg.thetas_hat[a]
# P = sqrtm(alg.inv_design_matrices[a])
# P_a_star = sqrtm(alg.inv_design_matrices[a_star])
# print('The constraint for arm {}:'.format(a), np.dot(theta - theta_a_star, (x_star+epsilon)) + betas[a]*np.linalg.norm(P.dot((x_star+epsilon))) \
# - betas[a_star] * (np.linalg.norm(P_a_star.dot(x_star)) +
# np.dot((alg.inv_design_matrices[a].dot(x_star)), epsilon)/np.linalg.norm(P_a_star.dot(x_star))))
except:
print('Exception')
epsilon = np.zeros((d,))
# print('epsilon =', epsilon)
if epsilon is None:
return np.zeros((d,))
return epsilon
def compute_attack(alg, a_star, x_star, slack=10 **-10):
d = model.n_features
func = lambda delta: np.linalg.norm(delta)/2
theta_a_star = alg.thetas_hat[a_star]
P_a_star = sqrtm(alg.inv_design_matrices[a_star])
betas = alg.alpha()
constraints_list = []
for a in range(len(alg.thetas_hat)):
if a != a_star:
theta = alg.thetas_hat[a]
P = sqrtm(alg.inv_design_matrices[a])
temp_constraint = lambda delta, P=P, P_a_star=P_a_star, theta=theta, theta_a_star=theta_a_star, beta=betas[a], beta_a_star=betas[a_star]: \
-((theta - theta_a_star).dot((x_star + delta)) + beta * np.linalg.norm(P.dot((x_star + delta)))
- beta_a_star * np.linalg.norm(P_a_star.dot((x_star + delta))) + slack)
temp_cons = {'type': 'ineq', 'fun': temp_constraint}
constraints_list.append(temp_cons)
cons = tuple(constraints_list)
res = minimize(func, -x_star, method='SLSQP', constraints=cons)
# print(res.message)
try:
epsilon = res.x
except:
epsilon = np.zeros((d,))
if epsilon is None:
return np.zeros((d,))
return epsilon
n = 10 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
# linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--', ':', '-.', '-'])
# plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
MABResults = namedtuple('MABResults', 'regret, attack_cond, target_draws, thetas, design_matrix, context_norm, betas')
seed = np.random.randint(0, 10 ** 5)
print('seed = ', seed)
noise = 0.1
#model = RandomContextualLinearArms(n_actions=4, n_features=2, noise=noise, random_state=seed, bound_context=1)
model = AttackOneUserModel(n_actions=25, n_features=20, noise=noise, random_state=seed, bound_context=1, distance=1/2)
model.add_target_arm()
model.thetas = model.thetas
theta_bound = np.max(np.linalg.norm(model.thetas, 2, axis=(1)))
target_context = np.random.randint(low=0, high=len(model.context_lists))
# other_context = np.random.randint(low=0, high=len(model.context_lists))
# while other_context == target_context:
# other_context = np.random.randint(low=0, high=len(model.context_lists))
x_star = model.context_lists[target_context]
means_x_star = np.dot(model.thetas, x_star)
#target_arm = np.random.randint(low=0, high=model.n_actions)
# target_arm = np.argmin(means_x_star)
target_arm = model.n_actions-1
T = int(1*10**5)
nb_simu = 1
mask = np.ones(model.n_actions, dtype='int')
mask[target_arm] = 0
print('a star=', target_arm)
print('x_star', x_star)
print('means for context x_star:', np.dot(model.thetas, x_star))
print(in_hull(x=model.thetas[target_arm], points=np.array(model.thetas[mask])))
if in_hull(x=model.thetas[target_arm], points=np.array(model.thetas[mask])):
raise ValueError()
algorithms = {
'LinUCB': ContextualLinearBandit(nb_arms=model.n_actions, dimension=model.n_features,
reg_factor=0.1, delta=0.01,
bound_features=np.max(np.linalg.norm(model.thetas, axis=1)),
noise_variance=noise, bound_context=model.bound_context),
# 'LinUCB-Attacked': ContextualLinearBandit(nb_arms=model.n_actions, dimension=model.n_features,
# reg_factor=0.1, delta=0.99,
# bound_features=np.max(np.linalg.norm(model.thetas, axis=1)),
# noise_variance=noise, bound_context=model.bound_context),
'LinUCB-RelaxedAttacked': ContextualLinearBandit(nb_arms=model.n_actions, dimension=model.n_features,
reg_factor=0.1, delta=0.01,
bound_features=np.max(np.linalg.norm(model.thetas, axis=1)),
noise_variance=noise, bound_context=model.bound_context)
}
results = []
for alg_name, alg in algorithms.items():
regret = np.zeros((nb_simu, T))
nb_target_arms = np.zeros((nb_simu, T))
nb_attack_needed = np.zeros((nb_simu, T))
attack_condition = np.zeros((nb_simu, T))
    draws = [[] for _ in range(nb_simu)]  # independent list per simulation (avoids list aliasing)
context_norm = np.zeros((nb_simu, T))
epsilon_norm = np.zeros((nb_simu, T))
thetas_alg = np.zeros((nb_simu, T, model.n_actions, model.n_features))
prod_scalar = np.zeros((nb_simu, T, model.n_actions, model.n_user))
for k in trange(nb_simu, desc='Simulating {}'.format(alg_name)):
alg.reset()
for t in trange(T):
context = model.get_context()
old_context = context
if 'Attacked' in alg_name:
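                # Only the targeted user's context x_star is perturbed; every
                # other context is passed through unchanged.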
if np.linalg.norm(context - x_star) < 10**-10:
if 'Relaxed' in alg_name:
epsilon = compute_relaxed_attack(alg, target_arm, context, slack=10**-4)
else:
epsilon = compute_attack(alg, target_arm, context, slack=10 ** -3)
else:
epsilon = np.zeros((model.n_features,))
context = context + epsilon
epsilon_norm[k, t] = np.linalg.norm(epsilon)
a_t = alg.get_action(context)
if np.linalg.norm(x_star - old_context) < 10**-10:
draws[k].append(a_t)
thetas_alg[k, t] = alg.thetas_hat
# for a in range(model.n_actions):
# for i, x in enumerate(model.context_lists):
# p = np.dot(model.thetas[a], x) - np.dot(alg.thetas_hat[a], x)
# prod_scalar[k, t, a, i] = p
r_t = model.reward(old_context, a_t)
alg.update(context, a_t, r_t)
regret[k, t] = model.best_arm_reward(old_context) - np.dot(model.thetas[a_t], old_context)
context_norm[k, t] = np.linalg.norm(context)
draws[k] = np.array(draws[k])
draws = np.array(draws)
print(draws.shape)
print('Norm attacks=', epsilon_norm)
results += [(alg_name, MABResults(regret=regret, attack_cond=attack_condition, target_draws=draws,
thetas=thetas_alg, design_matrix=alg.inv_design_matrices, context_norm=context_norm, betas=alg.alpha()))]
print('Target arm =', target_arm)
print('draws = ', np.mean(np.cumsum(draws == target_arm, axis=1),axis=0))
for i, (alg_name, val) in enumerate(results):
mean_regret = np.mean(val.regret.cumsum(axis=1), axis=0)
t = np.linspace(0, T, T, dtype='int')
low_quantile = np.quantile(val.regret.cumsum(axis=1), 0.1, axis=0)
high_quantile = np.quantile(val.regret.cumsum(axis=1), 0.9, axis=0)
plt.figure(0)
plt.title('Regret Attacked context')
plt.plot(mean_regret, label=alg_name)
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.legend()
mean_condition = np.mean(np.cumsum(val.target_draws == target_arm, axis=1), axis=0)
low_quantile = np.quantile(val.attack_cond, 0.1, axis=0)
high_quantile = np.quantile(val.attack_cond, 0.9, axis=0)
plt.figure(1)
plt.title('Draws target arm')
plt.plot(mean_condition, label=alg_name)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.legend()
### TODO: Change this plot to have the number of time the context has been presented in x axis
if 'Attacked' in alg_name:
plt.figure(2)
plt.title('Cumulative attack norm attacked context')
plt.plot(np.mean(np.cumsum(epsilon_norm, axis=1), axis=0), label=alg_name)
low_quantile = np.quantile(np.cumsum(epsilon_norm, axis=1), 0.1, axis=0)
high_quantile = np.quantile(np.cumsum(epsilon_norm, axis=1), 0.9, axis=0)
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.legend()
if model.n_features == 2:
for i, (alg_name, val) in enumerate(results):
plt.figure(i + 3)
plt.title('Confidence ellipse for {}'.format(alg_name))
x = np.linspace(0, 2*np.pi)
x_1 = np.cos(x)
y_1 = np.sin(x)
X = np.vstack((x_1, y_1))
betas = val.betas
for a in range(model.n_actions):
center = val.thetas[-1, -1, a]
V = sqrtm(val.design_matrix[a])
y = center.reshape((2, 1)) + betas[a] * np.dot(V, X)
plt.plot(y[0, :], y[1, :], label = 'confidence ellipse arm {}'.format(a))
plt.fill_between(y[0,:], y[1,:], (center.reshape((2, 1))*np.ones((2, 50)))[1, :], alpha=0.15)
plt.scatter(model.thetas[a][0],model.thetas[a][1], c=new_colors[a])
plt.scatter(center[0], center[1], marker='^', c=new_colors[a])
plt.scatter(x_star[0], x_star[1], marker='+', c = new_colors[-1])
plt.legend()
plt.show()
#
# plt.figure(4)
# plt.title('Error true env and learned env attack context')
# for a in range(model.n_actions):
# error = np.linalg.norm(val.thetas[:, :, a] - model.thetas[a], axis=2)
# plt.plot(t, np.mean(error, axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(error, 0.1, axis=0)
# high_quantile = np.quantile(error, 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
# plt.figure(7)
# plt.title('Error biased env and learned env attack context')
# for a in range(model.n_actions):
# error = np.linalg.norm(val.thetas[:, :, a] - model.thetas[a]/np.maximum(2*np.dot(model.thetas[a], x_star)/np.dot(model.thetas[target_arm], x_star), 1), axis=2)
# plt.plot(t, np.mean(error, axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(error, 0.1, axis=0)
# high_quantile = np.quantile(error, 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
# plt.figure(5)
# plt.title('Difference estimated reward for target context {}'.format(target_context))
# for a in range(model.n_actions):
# plt.plot(t, np.mean(val.prod_scalar[:, :, a, target_context], axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(val.prod_scalar[:, :, a, target_context], 0.1, axis=0)
# high_quantile = np.quantile(val.prod_scalar[:, :, a, target_context], 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
# plt.figure(6)
# plt.title('Difference estimated reward for a random non target context {}'.format(other_context))
# for a in range(model.n_actions):
# plt.plot(t, np.mean(val.prod_scalar[:, :, a, other_context], axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(val.prod_scalar[:, :, a, other_context], 0.1, axis=0)
# high_quantile = np.quantile(val.prod_scalar[:, :, a, other_context], 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
plt.show()
#print('Target arms=', np.mean(np.cumsum(nb_target_arms,axis=1)))
#print('Attack needed arms=', np.mean(np.cumsum(nb_attack_needed,axis=1))) | ContextualBanditsAttacks-main | isoexp/main_attack_one_user.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
sys.path.append('/isoexp')
import numpy as np
import isoexp.mab.arms as arms
import pickle
from isoexp.mab.smab_algs import UCB1, EXP3_IX, attacked_UCB1, attacked_EXP3_IX, EXP3_P, attacked_EXP3_P, FTRL, attacked_FTRL
from matplotlib import rc
import json
import datetime
rc('text', usetex=True)
import matplotlib.pyplot as plt
from tqdm import tqdm
from collections import namedtuple
from cycler import cycler
n = 6 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--'])
plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
MABResults = namedtuple('MABResults', 'regret, cum_rewards, attacks, times_of_attacks')
random_state = np.random.randint(0, 123123)
K = 5
MAB = []
means = np.random.uniform(low=0.25, high=0.75, size=K)
#means = np.array([0.47823152, 0.70243227, 0.64504063, 0.65679234, 0.49546542,
# 0.46417188, 0.64736977, 0.71255566, 0.66844984, 0.26030838])
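# Beta(8*m, 8*(1-m)) rewards have mean m, so `means` holds the arms' true expected rewards.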
for k in range(K):
MAB.append(arms.ArmBeta(a=8*means[k], b=8*(1-means[k])))
#MAB.append(arms.ArmBernoulli(p=means[k]))
nb_arms = len(MAB)
print('means: {}'.format(means))
mu_max = np.max(means)
a_star = np.argmin(means)
T = 1*10**4 # horizon
nb_simu = 2
eta = np.sqrt(1/(K*T))
# eta = 0.01
gamma = eta/2
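# eta ~ sqrt(1/(K*T)) is the usual horizon-dependent rate for FTRL/INF, while
# EXP3 below is instantiated with the standard sqrt(log(K)/(K*T)) rate;
# gamma = eta/2 is the implicit-exploration parameter for the (currently
# commented-out) EXP3-IX entries.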
settings = {
"T": T,
"nb_simu": nb_simu,
"random_state": random_state,
"K": K,
}
algorithms = {
'INF': lambda T, MAB: FTRL(T, MAB, eta=eta, alg='inf'),
'Attacked INF': lambda T, MAB: attacked_FTRL(T, MAB, target_arm=a_star, eta=eta, alg='inf'),
'FTRL log barrier' : lambda T, MAB: FTRL(T, MAB, eta=eta, alg='log_barrier'),
'Attacked FTRL log barrier': lambda T, MAB: attacked_FTRL(T, MAB, target_arm=a_star, eta=eta,
alg='log_barrier'),
'UCB': lambda T, MAB: UCB1(T, MAB, alpha=1),
'Attacked UCB': lambda T, MAB: attacked_UCB1(T, MAB, target_arm = a_star, alpha=1., delta=0.99),
# 'EXP3-IX': lambda T, MAB: EXP3_IX(T, MAB, eta=eta, gamma=gamma),
# 'Attacked EXP3-IX': lambda T, MAB: attacked_EXP3_IX(T, MAB, target_arm=a_star),
'EXP3': lambda T, MAB: EXP3_P(T, MAB, eta=np.sqrt(np.log(K)/(T*K))),
'Attacked EXP3': lambda T, MAB: attacked_EXP3_P(T, MAB, target_arm=a_star, eta=np.sqrt(np.log(K)/(T*K))),
# 'EXP3.P Gamma 0.1': lambda T, MAB: EXP3_P(T, MAB, gamma=0.1, eta=np.sqrt(np.log(K)/(K*T))),
# 'Attacked EXP3.P Gamma 0.1': lambda T, MAB: attacked_EXP3_P(T, MAB, target_arm=a_star, gamma=0.1, eta=np.sqrt(np.log(K)/(K*T)))
}
results = []
full_algo = algorithms
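# Run nb_simu independent simulations for every algorithm. Attacked algorithms return
# (rewards, draws, attacks, attack_times) while the plain ones return (rewards, draws),
# which is why the 2-tuple unpacking below falls back to the 4-tuple branch on ValueError.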
for alg_name in full_algo.keys():
alg = full_algo[alg_name]
regret = np.zeros((nb_simu, T))
rwds = 0*regret
times = 0*regret
attacks = 0*regret
for k in tqdm(range(nb_simu), desc='Simulating {}'.format(alg_name)):
try:
rewards, draws = alg(T, MAB)
except ValueError:
rewards, draws, att, times_of_att = alg(T, MAB)
attacks[k] = np.cumsum(att)
times[k] = times_of_att
rwds[k] = np.cumsum(means[draws.astype('int')])
regret[k] = max(means) * np.arange(1, T + 1) - rwds[k]
results += [(alg_name, MABResults(regret=regret, cum_rewards=rwds, attacks=attacks, times_of_attacks=times))]
# id = '{:%Y%m%d_%H%M%S}_{}'.format(datetime.datetime.now(), 'GLM')
# with open("{}_{}_MAB_illustration.pickle".format(id, "SEQ"), "wb") as f:
# pickle.dump(results, f)
# with open("{}_{}_MAB_illustration_settings.json".format(id, "SEQ"), "w+") as f:
# json.dump(settings, f)
t = np.arange(0, T)
for alg_name, val in results:
mean_regret = np.mean(val.regret, axis=0)
low_quantile_regret = np.quantile(val.regret, 0.25, axis=0)
high_quantile_regret = np.quantile(val.regret, 0.75, axis=0)
rwds = np.mean(val.cum_rewards, axis=0)
low_quantile_rwds = np.quantile(val.cum_rewards, 0.25, axis=0)
high_quantile_rwds = np.quantile(val.cum_rewards, 0.75, axis=0)
# plt.figure(1)
# plt.title('Rewards')
# plt.plot(rwds, label=alg_name)
# plt.legend()
# plt.fill_between(t, low_quantile_rwds, high_quantile_rwds, alpha=0.15)
plt.figure(2)
plt.title('Regret Adv alg')
plt.plot(mean_regret, label=alg_name)
plt.legend()
plt.fill_between(t, low_quantile_regret, high_quantile_regret, alpha=0.15)
if 'Attacked' in alg_name:
plt.figure(3)
cum_sum_attacks = np.mean(np.abs(val.attacks), axis=0)
low_quantile_attacks = np.quantile(np.abs(val.attacks), 0.25, axis=0)
high_quantile_attacks = np.quantile(np.abs(val.attacks), 0.75, axis=0)
plt.title('Cumulative sum of attacks Adv alg')
plt.plot(cum_sum_attacks, label=alg_name)
plt.legend()
plt.fill_between(t, low_quantile_attacks, high_quantile_attacks, alpha=0.15)
# plt.figure(2)
# rep = np.random.randint(low=0, high=nb_simu)
# times_to_consider = val.times_of_attacks[rep]
# plt.scatter(t[times_to_consider == 1], val.regret[rep, times_to_consider == 1])
plt.figure(4)
plt.title('Number of attacks Adv alg')
        # Fraction of rounds attacked so far; divide by t + 1 because t starts at 0.
        number_of_attacks = np.mean(np.cumsum(val.times_of_attacks, axis=1), axis=0) / (t + 1)
        high_quantile = np.quantile(np.cumsum(val.times_of_attacks, axis=1), 0.75, axis=0) / (t + 1)
        low_quantile = np.quantile(np.cumsum(val.times_of_attacks, axis=1), 0.25, axis=0) / (t + 1)
plt.plot(number_of_attacks, label=alg_name)
plt.legend()
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.show()
#import tikzplotlib
#tikzplotlib.save("lcb_worst.tex")
| ContextualBanditsAttacks-main | isoexp/main_mab.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pickle
import tikzplotlib
import os
import shutil
import sys
from cycler import cycler
import tarfile
import json
import re
sys.path.append('/private/home/broz/workspaces/bandits_attacks')
def tardir(path, tar_name):
with tarfile.open(tar_name, "w:gz") as tar_handle:
for root, dirs, files in os.walk(path):
for file in files:
tar_handle.add(os.path.join(root, file))
def get_eps(name):
temp_eps = re.findall(r'[\d\.\d]+', name)
temp_eps = np.array(list(map(float, temp_eps)))
temp_eps = temp_eps[temp_eps <= 1]
temp_eps = temp_eps[0]
return temp_eps
def get_name(name):
first, rest = name.split(' ', 1)
return first
n = 9 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--', ':', '-.', '-'])
plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
if len(sys.argv) == 2:
filename = sys.argv[1]
else:
filename = '20200121_153844_PAR_contextual_attacks_rewards.pickle'
with open(filename, 'rb') as f:
results = pickle.load(f)
print("Opening file %s..." % filename)
setting_name = filename[:-7] + '_settings.json'
print('Opening settings %s...' % setting_name)
with open(setting_name, 'r') as f:
settings = json.load(f)
folder = filename.split('.')[0]
if os.path.exists(folder):
shutil.rmtree(folder)
os.makedirs(folder)
print("Done.\n")
with open(os.path.join(folder, setting_name), 'w') as f:
json.dump(settings, f)
EVERY = 500
LW = 2
LATEX = True
nb_models = settings["nb_models"]
nb_simu = settings["nb_simu"]
real_T = settings["T"]
frequency = settings['frequency']
T = real_T // frequency
attack_parameter = settings["epsilon_tested"]
eps_plot_regret = attack_parameter[np.random.randint(low=0, high=len(attack_parameter))]
print("Generating regret and cost figures ...")
# select "bad" model
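# Parsed results are regrouped into three containers:
#   algorithms          -> unattacked runs, keyed by algorithm name
#   attacked_algorithms -> attacked runs, keyed by (algorithm name, attack parameter eps)
#   stationary_alg      -> attacked runs without a gamma/eps parameter in their name
#                          (plotted below as "attacked stationary")
# Each entry stores regret, attack cost, target-arm draws and the reward range as
# (model, simulation, time) arrays.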
algorithms = {}
attacked_algorithms = {}
stationary_alg = {}
for alg_name, res in results[0][1]:
if not 'attacked' in alg_name:
algorithms[alg_name] = {'regret': np.zeros((nb_models, nb_simu, T)),
'cost': np.zeros((nb_models, nb_simu, T)),
'target_draws': np.zeros((nb_models, nb_simu, T)),
'rewards_range': np.zeros((nb_models, nb_simu, T))}
elif 'gamma' in alg_name:
eps = get_eps(alg_name)
shortened_alg_name = get_name(alg_name)
attacked_algorithms[(shortened_alg_name, eps)] = {'regret': np.zeros((nb_models, nb_simu, T)),
'cost': np.zeros((nb_models, nb_simu, T)),
'target_draws': np.zeros((nb_models, nb_simu, T)),
'rewards_range': np.zeros((nb_models, nb_simu, T))}
else:
pass
shortened_alg_name = get_name(alg_name)
stationary_alg[shortened_alg_name] = {'regret': np.zeros((nb_models, nb_simu, T)),
'cost': np.zeros((nb_models, nb_simu, T)),
'target_draws': np.zeros((nb_models, nb_simu, T)),
'rewards_range': np.zeros((nb_models, nb_simu, T))}
for m in range(nb_models):
res = results[m][1]
for i, val in enumerate(res):
alg_name = val[0]
val = val[1]
print(val['regret'])
if not 'attacked' in alg_name:
algorithms[alg_name]['regret'][m, :, :] = val['regret']
algorithms[alg_name]['cost'][m, :, :] = val['attack_cond']
algorithms[alg_name]['target_draws'][m, :, :] = val['target_draws']
algorithms[alg_name]['rewards_range'][m, :, :] = val['range_rewards']
elif 'gamma' in alg_name:
eps = get_eps(alg_name)
shortened_alg_name = get_name(alg_name)
attacked_algorithms[(shortened_alg_name, eps)]['regret'][m, :, :] = val['regret']
attacked_algorithms[(shortened_alg_name, eps)]['cost'][m, :, :] = val['attack_cond']
attacked_algorithms[(shortened_alg_name, eps)]['target_draws'][m, :, :] = val['target_draws']
attacked_algorithms[(shortened_alg_name, eps)]['rewards_range'][m, :, :] = val['range_rewards']
else:
shortened_alg_name = get_name(alg_name)
stationary_alg[shortened_alg_name]['regret'][m, :, :] = val['regret']
stationary_alg[shortened_alg_name]['cost'][m, :, :] = val['attack_cond']
stationary_alg[shortened_alg_name]['target_draws'][m, :, :] = val['target_draws']
stationary_alg[shortened_alg_name]['rewards_range'][m, :, :] = val['range_rewards']
plt.figure(1)
t = np.linspace(0, T - 1, T, dtype='int') * frequency
rep = nb_models * nb_simu
for alg_name, res in algorithms.items():
    # Plot the regret for the normal (unattacked) algorithm
res['regret'] = res['regret'].cumsum(axis=2)
mean_regret = np.mean(res['regret'], axis=(0, 1))
std = np.std(res['regret'], axis=(0, 1))/np.sqrt(rep)
plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=alg_name)
    plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
                     alpha=0.15)
    # Plot the regret for the attacked algorithms
    regret = attacked_algorithms[(alg_name, eps_plot_regret)]['regret'].cumsum(axis=2)
    mean_regret = np.mean(regret, axis=(0, 1))
    std = np.std(regret, axis=(0, 1))/np.sqrt(rep)
    plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=alg_name + ' attacked eps {:.2f}'.format(eps_plot_regret))
    plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
                     alpha=0.15)
    regret = stationary_alg[alg_name]['regret'].cumsum(axis=2)
    mean_regret = np.mean(regret, axis=(0, 1))
    std = np.std(regret, axis=(0, 1))/np.sqrt(rep)
    plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=alg_name + ' attacked stationary')
plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
#Plot the regret for the attacked algorithms
# regret = attacked_algorithms[(alg_name, eps_plot_regret)]['regret'].cumsum(axis=2)
# mean_regret = np.mean(regret, axis=(0, 1))
# std = np.std(regret, axis=(0, 1))/np.sqrt(rep)
# plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=alg_name + ' attacked eps {:.2f}'.format(eps_plot_regret))
# plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
# alpha=0.15)
#
# regret = stationary_alg[alg_name]['regret'].cumsum(axis=2)
# mean_regret = np.mean(regret, axis=(0, 1))
# std = np.std(regret, axis=(0, 1))/np.sqrt(rep)
    # plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=alg_name + ' attacked stationary')
# plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
# alpha=0.15)
plt.title('Cumulative regret')
plt.legend()
plt.savefig(os.path.join(folder, "avg_regret.png"))
if LATEX:
tikzplotlib.save(os.path.join(folder, "avg_regret.tex"))
plt.figure(2)
for alg_name, res in algorithms.items():
    # # Plot the attack cost for the normal algorithm
# res['cost'] = res['cost'].cumsum(axis=2)
# mean_cost = np.mean(res['cost'], axis=(0, 1))
# std = np.std(res['cost'], axis=(0, 1))/np.sqrt(rep)
# plt.plot(t[::EVERY], mean_cost[::EVERY], linewidth=LW, label=alg_name)
# plt.fill_between(t[::EVERY], mean_cost[::EVERY] - 2 * std[::EVERY], mean_cost[::EVERY] + 2 * std[::EVERY],
# alpha=0.15)
    # Plot the attack cost for the attacked algorithms
cost = attacked_algorithms[(alg_name, eps_plot_regret)]['cost'].cumsum(axis=2)
mean_cost = np.mean(cost, axis=(0, 1))
std = np.std(cost, axis=(0, 1))/np.sqrt(rep)
plt.plot(t[::EVERY], mean_cost[::EVERY], linewidth=LW, label=alg_name + ' attacked eps {:.2f}'.format(eps_plot_regret))
plt.fill_between(t[::EVERY], mean_cost[::EVERY] - 2 * std[::EVERY], mean_cost[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
cost = stationary_alg[alg_name]['cost'].cumsum(axis=2)
mean_cost = np.mean(cost, axis=(0, 1))
std = np.std(cost, axis=(0, 1))/np.sqrt(rep)
plt.plot(t[::EVERY], mean_cost[::EVERY], linewidth=LW, label=alg_name + ' attacked stationary')
plt.fill_between(t[::EVERY], mean_cost[::EVERY] - 2 * std[::EVERY], mean_cost[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
plt.title('Total attack cost')
plt.legend()
plt.savefig(os.path.join(folder, "avg_cost.png"))
if LATEX:
tikzplotlib.save(os.path.join(folder, "avg_cost.tex"))
plt.figure(3)
for alg_name, res in algorithms.items():
    # # Plot the target-arm draws for the normal algorithm
# res['target_draws'] = res['target_draws'].cumsum(axis=2)
# mean_draws = np.mean(res['target_draws'], axis=(0, 1))
# std = np.std(res['target_draws'], axis=(0, 1))/np.sqrt(rep)
# plt.plot(t[::EVERY], mean_draws[::EVERY], linewidth=LW, label=alg_name)
# plt.fill_between(t[::EVERY], mean_draws[::EVERY] - 2 * std[::EVERY], mean_draws[::EVERY] + 2 * std[::EVERY],
# alpha=0.15)
draws = attacked_algorithms[(alg_name, eps_plot_regret)]['target_draws'].cumsum(axis=2)
mean_draws = np.mean(draws, axis=(0, 1))
std = np.std(draws, axis=(0, 1))/np.sqrt(rep)
plt.plot(t[::EVERY], mean_draws[::EVERY], linewidth=LW, label=alg_name + ' attacked eps {:.2f}'.format(eps_plot_regret))
plt.fill_between(t[::EVERY], mean_draws[::EVERY] - 2 * std[::EVERY], mean_draws[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
draws = stationary_alg[alg_name]['target_draws'].cumsum(axis=2)
mean_draws = np.mean(draws, axis=(0, 1))
std = np.std(draws, axis=(0, 1))/np.sqrt(rep)
    plt.plot(t[::EVERY], mean_draws[::EVERY], linewidth=LW, label=alg_name + ' attacked stationary')
plt.fill_between(t[::EVERY], mean_draws[::EVERY] - 2 * std[::EVERY], mean_draws[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
plt.title('Total target arm draws')
plt.legend()
plt.savefig(os.path.join(folder, "avg_draws.png"))
if LATEX:
tikzplotlib.save(os.path.join(folder, "avg_draws.tex"))
print("Generating impact of epsilon figure")
regrets_mean = {}
costs_mean = {}
draws_mean = {}
regrets_std = {}
costs_std = {}
draws_std = {}
for alg_name in algorithms.keys():
list_r_mean = []
list_c_mean = []
list_d_mean = []
list_r_std = []
list_c_std = []
list_d_std = []
for eps in attack_parameter:
r = attacked_algorithms[(alg_name, eps)]['regret'].cumsum(axis=2)[:, :, -1]
std_r = np.std(r)/np.sqrt(rep)
mean_r = np.mean(r)
c = attacked_algorithms[(alg_name, eps)]['cost'].cumsum(axis=2)[:, :, -1]
std_c = np.std(c)/np.sqrt(rep)
mean_c = np.mean(c)
d = attacked_algorithms[(alg_name, eps)]['target_draws'].cumsum(axis=2)[:, :, -1]
std_d = np.std(d)/np.sqrt(rep)
mean_d = np.mean(d)
list_r_mean.append(mean_r)
list_c_mean.append(mean_c)
list_d_mean.append(mean_d)
list_r_std.append(std_r)
list_c_std.append(std_c)
list_d_std.append(std_d)
regrets_mean[alg_name] = np.array(list_r_mean)
costs_mean[alg_name] = np.array(list_c_mean)
draws_mean[alg_name] = np.array(list_d_mean)
regrets_std[alg_name] = np.array(list_r_std)
costs_std[alg_name] = np.array(list_c_std)
draws_std[alg_name] = np.array(list_d_std)
plt.figure(4)
plt.title('Cost as a function of attack parameter at T={}'.format(T))
for alg_name in algorithms.keys():
c = costs_mean[alg_name]
std = costs_std[alg_name]
plt.plot(attack_parameter, c, linewidth=LW, label=alg_name)
plt.fill_between(attack_parameter, c - 2 * std, c + 2 * std, alpha=0.15)
plt.legend()
plt.savefig(os.path.join(folder, "cost_epsilon.png"))
plt.figure(5)
plt.title('Regret as a function of attack parameter at T={}'.format(T))
for alg_name in algorithms.keys():
r = regrets_mean[alg_name]
std = regrets_std[alg_name]
plt.plot(attack_parameter, r, linewidth=LW, label=alg_name)
plt.fill_between(attack_parameter, r - 2 * std, r + 2 * std, alpha=0.15)
plt.legend()
plt.savefig(os.path.join(folder, "regret_epsilon.png"))
plt.figure(6)
plt.title('Target draws as a function of attack parameter at T={}'.format(T))
for alg_name in algorithms.keys():
d = draws_mean[alg_name]
std = draws_std[alg_name]
plt.plot(attack_parameter, d, linewidth=LW, label=alg_name)
plt.fill_between(attack_parameter, d - 2 * std, d + 2 * std, alpha=0.15)
plt.legend()
plt.savefig(os.path.join(folder, "draws_epsilon.png"))
for eps in attack_parameter:
rewards = np.array([])
for alg_name in algorithms.keys():
rewards = np.concatenate((rewards, attacked_algorithms[(alg_name, eps)]['rewards_range']), axis=None)
print('-'*100)
print('The maximum reward for epsilon = {:.2f} is:'.format(eps), np.max(rewards))
print('The minimum reward for epsilon = {:.2f} is:'.format(eps), np.min(rewards))
print('The mean reward for epsilon = {:.2f} is:'.format(eps), np.mean(rewards))
print('The median reward for epsilon = {:.2f} is:'.format(eps), np.median(rewards))
print('The 25% quantile reward for epsilon = {:.2f} is:'.format(eps), np.quantile(rewards, 0.25))
print('The 75% quantile reward for epsilon = {:.2f} is:'.format(eps), np.quantile(rewards, 0.75))
    print('The percentage of rewards above 1 for epsilon = {:.2f} is:'.format(eps), np.sum(rewards > 1) / len(rewards))
    print('The percentage of rewards below 0 for epsilon = {:.2f} is:'.format(eps), np.sum(rewards < 0) / len(rewards))
| ContextualBanditsAttacks-main | isoexp/parse_reward_attack.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from isoexp.contextual.contextual_models import RandomContextualLinearArms
from isoexp.contextual.contextual_linucb import *
from tqdm import trange
from collections import namedtuple
from cycler import cycler
import matplotlib.pyplot as plt
import cvxpy as cp
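# Simple experts fed to Exp4 below: 'random' plays the uniform distribution over arms,
# 'optimal' puts all mass on the best arm for the current context (it reads the
# module-level `model` defined further down), and any other type deterministically
# plays the fixed target arm a_star.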
class exp(object):
def __init__(self, nb_arms, type='random', a_star = 0):
self.K = nb_arms
self.type= type
self.a_star = a_star
def get_action(self, context):
if self.type == 'random':
return np.ones((self.K,))/self.K
elif self.type == 'optimal':
means = np.dot(model.thetas, context)
a = np.argmax(means)
proba = np.zeros((self.K,))
proba[a] = 1
return proba
else:
proba = np.zeros((self.K,))
proba[self.a_star] = 1
return proba
n = 6 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--'])
plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
MABResults = namedtuple('MABResults', 'regret, attack_cond, target_draws, thetas, prod_scalar')
seed = np.random.randint(0, 10 ** 5)
print('seed = ', seed)
noise = 0.1
attack_parameter = 1/2
model = RandomContextualLinearArms(n_actions=5, n_features=10, noise=noise, random_state=seed, bound_context=1)
theta_bound = np.max(np.linalg.norm(model.thetas, 2, axis=(1)))
# target_context = np.random.randint(low=0, high=len(model.context_lists))
# other_context = np.random.randint(low=0, high=len(model.context_lists))
# while other_context == target_context:
# other_context = np.random.randint(low=0, high=len(model.context_lists))
# target_arm = np.random.randint(low=0, high=model.n_actions)
target_arm = np.argmax(np.dot(model.thetas, model.context_lists[-1]))
T = 5000
nb_simu = 30
M = 10
print('a_star=', target_arm)
eta = np.sqrt(2*np.log(M)/(T*model.n_actions))
experts = []
for i in range(M-2):
experts.append(exp(nb_arms=model.n_actions, type='random'))
experts.append(exp(nb_arms=model.n_actions, type='optimal'))
experts.append(exp(nb_arms=model.n_actions, type='', a_star=int(target_arm)))
algorithms = {
'Exp4': Exp4(nb_arms=model.n_actions, dimension=model.n_features, experts=experts, eta=eta, gamma=10**-3)
}
results = []
for alg_name, alg in algorithms.items():
regret = np.zeros((nb_simu, T))
draws = np.zeros((nb_simu, T))
epsilon_norm = np.zeros((nb_simu, T))
thetas_alg = np.zeros((nb_simu, T, model.n_actions, model.n_features))
prod_scalar = np.zeros((nb_simu, T, model.n_actions, model.n))
for k in trange(nb_simu, desc='Simulating {}'.format(alg_name)):
alg.reset()
for t in range(T):
context = model.get_context()
a_t = alg.get_action(context)
r_t = model.reward(context, a_t)
attack_t = 0
epsilon_norm[k, t] = np.abs(attack_t)
alg.update(context, a_t, r_t + attack_t)
# try:
# thetas_alg[k, t] = alg.thetas_hat
# except:
# pass
# for a in range(model.n_actions):
# for i, x in enumerate(model.context_lists):
# p = np.dot(alg.thetas_hat[a], x) - (1 - attack_parameter)*np.dot(model.thetas[target_arm], x)
# prod_scalar[k, t, a, i] = p
regret[k, t] = model.best_arm_reward(context) - np.dot(model.thetas[a_t], context)
draws[k, t] = a_t
results += [(alg_name, MABResults(regret=regret, attack_cond=epsilon_norm, target_draws=draws,
thetas=thetas_alg, prod_scalar=prod_scalar))]
print('Target arm =', target_arm)
print('draws = ', np.mean(np.cumsum(draws == target_arm, axis=1),axis=0))
for i,(alg_name, val) in enumerate(results):
mean_regret = np.mean(val.regret.cumsum(axis=1), axis=0)
t = np.linspace(0, T, T, dtype='int')
low_quantile = np.quantile(val.regret.cumsum(axis=1), 0.1, axis=0)
high_quantile = np.quantile(val.regret.cumsum(axis=1), 0.9, axis=0)
plt.figure(0)
plt.title('Cumulative Regret')
plt.plot(mean_regret, label=alg_name)
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.legend()
# mean_condition = np.mean(np.cumsum(val.target_draws == target_arm, axis=1), axis=0)
# low_quantile = np.quantile(val.attack_cond, 0.1, axis=0)
# high_quantile = np.quantile(val.attack_cond, 0.9, axis=0)
# plt.figure(1)
# plt.title('Draws target arm')
# plt.plot(mean_condition, label=alg_name)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
# plt.figure(2)
# plt.title('Cumulative attack norm attacked reward')
# if 'Attacked' in alg_name:
# plt.plot(np.mean(np.cumsum(val.attack_cond, axis=1), axis=0), label=alg_name)
# low_quantile = np.quantile(np.cumsum(val.attack_cond, axis=1), 0.1, axis=0)
# high_quantile = np.quantile(np.cumsum(val.attack_cond, axis=1), 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
#
#
plt.figure(4)
plt.title('Error true env and learned env')
for a in range(model.n_actions):
error = np.linalg.norm(val.thetas[:, :, a] - model.thetas[a], axis=2)
plt.plot(t, np.mean(error, axis=0), label=alg_name + ' arm {}'.format(a))
low_quantile = np.quantile(error, 0.1, axis=0)
high_quantile = np.quantile(error, 0.9, axis=0)
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.legend()
# if 'weak' in alg_name:
# plt.figure(6)
# plt.title('Difference estimated reward random context {}'.format(other_context))
# for a in range(model.n_actions):
# plt.plot(t, np.mean(val.prod_scalar[:, :, a, other_context], axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(val.prod_scalar[:, :, a, other_context], 0.1, axis=0)
# high_quantile = np.quantile(val.prod_scalar[:, :, a, other_context], 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
#
# if not 'weak' in alg_name:
# plt.figure(7)
# plt.title('Error biased env and learned env')
# for a in range(model.n_actions):
# error = np.linalg.norm(val.thetas[:, :, a] - model.thetas[target_arm]*(1 - attack_parameter), axis=2)
# plt.plot(t, np.mean(error, axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(error, 0.1, axis=0)
# high_quantile = np.quantile(error, 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
# plt.figure(8)
# plt.title('Number of pulls target arm attack context')
# plt.plot(t, np.mean(np.cumsum(val.target_draws == target_arm, axis=1), axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(np.cumsum(val.target_draws == target_arm, axis=1), 0.1, axis=0)
# high_quantile = np.quantile(np.cumsum(val.target_draws == target_arm, axis=1), 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
plt.show()
#print('Target arms=', np.mean(np.cumsum(nb_target_arms,axis=1)))
#print('Attack needed arms=', np.mean(np.cumsum(nb_attack_needed,axis=1))) | ContextualBanditsAttacks-main | isoexp/main_attack_reward.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
__version__ = '0.0.dev0' | ContextualBanditsAttacks-main | isoexp/contextual/__init__.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import cvxpy as cp
from scipy.optimize import minimize
class RandomArm(object):
def __init__(self, initial_arms):
self.arms = initial_arms
def get_action(self):
return np.random.choice(self.arms)
def update(self, a_t, r_t):
pass
def reset(self):
pass
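# ContextualLinearBandit is a disjoint-model LinUCB: one ridge regression per arm.
# For every arm it keeps the inverse design matrix A_a^{-1} and the vector b_a,
# updates them with the Sherman-Morrison formula, and plays
#     argmax_a  <theta_hat_a, x> + alpha_a(t) * sqrt(x^T A_a^{-1} x),
# where alpha_a(t) is either the self-tuned width from alpha() or a user-supplied
# exploration coefficient. Minimal usage sketch (hypothetical variable names):
#     bandit = ContextualLinearBandit(nb_arms=K, dimension=d, noise_variance=sigma,
#                                     bound_context=B, bound_features=D)
#     a_t = bandit.get_action(x)
#     bandit.update(x, a_t, r_t)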
class ContextualLinearBandit(object):
def __init__(self, nb_arms, dimension, reg_factor=1., delta=0.99,
bound_features=None, noise_variance=None, bound_context=None, alpha=None):
self.K = nb_arms
self.dim = dimension
self.reg_factor = reg_factor
self.delta = delta
self.exploration_coeff = alpha
self.iteration = None
self.bound_context = bound_context
self.bound_features = bound_features
self.noise_variance = noise_variance
self.reset()
def reset(self):
d = self.dim
self.thetas_hat = np.zeros((self.K, d))
self.inv_design_matrices = np.zeros((self.K, d, d))
self.bs = self.thetas_hat.copy()
for arm in range(self.K):
self.inv_design_matrices[arm] = np.eye(d, d) / self.reg_factor
# self.range = 1
# self.est_bound_theta = 0
# self.est_bound_features = 0
self.n_samples = np.zeros((self.K,))
self.iteration = 0
@property
def n_actions(self):
return self.K
@property
def n_features(self):
return self.dim
# def auto_alpha(self):
# d = self.n_features
# sigma, B, D = self.noise_variance, self.bound_theta, self.bound_features
# return sigma * np.sqrt(d * np.log((1 + max(1, self.iteration - 1) * D * D / self.reg_factor) / self.delta)) \
# + np.sqrt(self.reg_factor) * B
def alpha(self):
d = self.dim
# print(d)
sigma, B, D = self.noise_variance, self.bound_context, self.bound_features
if self.exploration_coeff is None:
return sigma * np.sqrt(d * np.log((1 + np.maximum(1, self.n_samples) * B * B / self.reg_factor) / self.delta)) \
+ np.sqrt(self.reg_factor) * D
else:
            # Broadcast a user-supplied exploration coefficient to one width per arm,
            # since get_action indexes the returned value by arm.
            return self.exploration_coeff * np.ones((self.K,))
def get_action(self, context):
self.iteration += 1
# Let's not be biased with tiebreaks, but add in some random noise
noise = np.random.random(self.K) * 0.000001
estimate = np.zeros((self.K,))
sfactor = self.alpha()
for arm in range(self.K):
Ainv = self.inv_design_matrices[arm]
# print(Ainv)
b = self.bs[arm]
theta_hat = np.dot(Ainv, b)
self.thetas_hat[arm] = theta_hat
ta = np.dot(context, np.dot(Ainv, context))
sfactor = self.alpha()
# print('sfactor =', sfactor)
# print('context = ', context)
# print('theta_hat=', theta_hat)
# print('ta = ', ta)
estimate[arm] = np.dot(context, theta_hat) + sfactor[arm] * np.sqrt(ta)
ucb = estimate + noise
choice = np.argmax(ucb) # choose the highest
return choice
def update(self, context, a_t, r_t):
self.inv_design_matrices[a_t] = self.inv_design_matrices[a_t] - \
np.dot(self.inv_design_matrices[a_t], np.dot(np.outer(context, context),
self.inv_design_matrices[a_t])) \
/ (1. + np.dot(context.T, np.dot(self.inv_design_matrices[a_t], context)))
self.bs[a_t] += r_t * context
self.n_samples[a_t] += 1
self.thetas_hat[a_t] = np.dot(self.inv_design_matrices[a_t], self.bs[a_t])
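# ContextualLinearTS is a per-arm linear Thompson sampling variant: for each arm it
# samples a score from N(<theta_hat_a, x>, nu^2 x^T A_a^{-1} x) with
# nu = noise_std * sqrt(d * log(t / delta) / 2) and plays the argmax;
# deterministic=True skips the sampling and uses the posterior mean.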
class ContextualLinearTS(object):
def __init__(self, nb_arms, dimension, reg_factor=1., delta=0.99, noise_variance=None):
self.K = nb_arms
self.dim = dimension
self.delta = delta
self.reg_factor = reg_factor
self.noise_variance = noise_variance
self.reset()
def reset(self):
d = self.dim
self.thetas_hat = np.zeros((self.K, d))
self.inv_design_matrices = np.zeros((self.K, d, d))
self.bs = self.thetas_hat.copy()
for arm in range(self.K):
self.inv_design_matrices[arm] = np.eye(d, d) / self.reg_factor
self.n_samples = np.zeros((self.K,))
self.iteration = 0
self.thetas = self.thetas_hat
@property
def n_actions(self):
return self.K
@property
def n_features(self):
return self.dim
def get_action(self, context, deterministic=False):
self.iteration += 1
estimate = np.zeros((self.K,))
nu = self.noise_variance*np.sqrt(self.dim*np.log(self.iteration/self.delta)/2)
for arm in range(self.K):
Ainv = self.inv_design_matrices[arm]
b = self.bs[arm]
theta_hat = np.dot(Ainv, b)
self.thetas_hat[arm] = theta_hat
mean = np.dot(self.thetas_hat[arm], context)
variance = nu**2 * np.dot(context, np.dot(Ainv, context))
estimate[arm] = mean + np.sqrt(variance) * (0 if deterministic else np.random.randn())
ucb = estimate
choice = np.argmax(ucb) # choose the highest
return choice
def update(self, context, a_t, r_t):
self.inv_design_matrices[a_t] = self.inv_design_matrices[a_t] - \
np.dot(self.inv_design_matrices[a_t], np.dot(np.outer(context, context),
self.inv_design_matrices[a_t])) \
/ (1. + np.dot(context.T, np.dot(self.inv_design_matrices[a_t], context)))
self.bs[a_t] += r_t * context
self.n_samples[a_t] += 1
class contextEpsGREEDY():
"""
Args:
T (int): horizon
arms (list): list of available arms
Returns:
rewards (array-like): observed rewards
draws (array-like): indexes of selected arms
"""
def __init__(self, number_arms, dimension, epsilon=0.1, reg_factor=0.1, decrease_epsilon=False):
self.decrease_epsilon = decrease_epsilon
self.epsilon = epsilon
self.K = number_arms
self.dim = dimension
self.rewards = []
self.draws = []
self.reg_factor = reg_factor
self.n_samples = np.ones((self.K,)) # number of observations of each arm
self.sum_rewards = np.zeros((self.K,)) # sum of rewards for each arm
self.thetas_hat = np.zeros((self.K, self.dim))
self.inv_design_matrices = [np.identity(self.dim)/self.reg_factor for _ in range(number_arms)]
self.bs = np.zeros((self.K, self.dim))
self.nb_iter = 0
self.reset()
def reset(self):
d = self.dim
self.thetas_hat = np.zeros((self.K, d))
self.inv_design_matrices = np.zeros((self.K, d, d))
self.bs = self.thetas_hat.copy()
for arm in range(self.K):
self.inv_design_matrices[arm] = np.eye(d, d) / self.reg_factor
self.n_samples = np.zeros((self.K,))
self.nb_iter = 0
def estimated_best_arm(self, context):
return np.argmax(self.thetas_hat.dot(context))
def get_action(self, context):
if self.nb_iter < self.K:
return self.nb_iter
else:
# select the chosen_arm
expected_rewards = self.thetas_hat.dot(context)
rnd = np.random.rand()
if rnd <= self.epsilon / (np.sqrt(self.nb_iter + 1) if self.decrease_epsilon else 1):
chosen_arm = np.random.choice(self.K)
else:
noise = 10**-7*np.random.randn(self.K)
chosen_arm = np.argmax(noise + expected_rewards)
return chosen_arm
def update(self, context, chosen_arm, reward):
# update quantities
self.nb_iter += 1
self.rewards.append(reward)
self.draws.append(chosen_arm)
self.sum_rewards[chosen_arm] += reward
self.n_samples[chosen_arm] += 1
self.inv_design_matrices[chosen_arm] = self.inv_design_matrices[chosen_arm] - np.dot(self.inv_design_matrices[chosen_arm], np.dot(np.outer(context, context),
self.inv_design_matrices[chosen_arm])) \
/ (1. + np.dot(context, np.dot(self.inv_design_matrices[chosen_arm], context)))
self.bs[chosen_arm] += reward * context
self.thetas_hat[chosen_arm] = self.inv_design_matrices[chosen_arm].dot(self.bs[chosen_arm])
return self.rewards, self.draws
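# RewardAttacker implements the reward-poisoning adversary: it maintains its own
# per-arm ridge estimates (thetas_hat) with confidence widths (betas), and
# compute_attack(action, context, a_star) returns the perturbation to add to the
# observed reward so that any non-target arm appears no better than (1 - eps) times
# a clipped lower confidence bound on the target arm's reward; the target arm itself
# is never perturbed. Illustrative sketch of the intended round (hypothetical names,
# not copied from the experiment scripts):
#     a_t = bandit.get_action(x)
#     attack_t = attacker.compute_attack(a_t, x, a_star)
#     bandit.update(x, a_t, r_t + attack_t)
#     attacker.update(x, a_t, r_t)  # the attacker also refines its own estimates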
class RewardAttacker(object):
def __init__(self, nb_arms, dimension, reg_factor=1., delta=0.99,
bound_features=None, noise_variance=None, bound_context=None, eps=1/2, **kwargs):
self.K = nb_arms
self.dim = dimension
self.reg_factor = reg_factor
self.delta = delta
self.iteration = None
self.bound_context = bound_context
self.bound_features = bound_features
self.noise_variance = noise_variance
self.eps = eps
self.reset()
def reset(self):
d = self.dim
self.thetas_hat = np.zeros((self.K, d))
self.betas = np.zeros((self.K,))
self.inv_design_matrices = np.zeros((self.K, d, d))
self.bs = self.thetas_hat.copy()
for arm in range(self.K):
self.inv_design_matrices[arm] = np.eye(d, d) / self.reg_factor
self.n_samples = np.zeros((self.K,))
self.iteration = 0
@property
def n_actions(self):
return self.K
@property
def n_features(self):
return self.dim
def alpha(self):
d = self.dim
sigma, B, D = self.noise_variance, self.bound_context, self.bound_features
return sigma * np.sqrt(d * np.log((1 + np.maximum(1, self.n_samples) * B * B / self.reg_factor) / self.delta)) \
+ np.sqrt(self.reg_factor) * D
def update(self, context, a_t, r_t):
self.inv_design_matrices[a_t] = self.inv_design_matrices[a_t] - \
np.dot(self.inv_design_matrices[a_t], np.dot(np.outer(context, context),
self.inv_design_matrices[a_t])) \
/ (1. + np.dot(context.T, np.dot(self.inv_design_matrices[a_t], context)))
self.bs[a_t] += r_t * context
self.n_samples[a_t] += 1
self.thetas_hat[a_t] = np.dot(self.inv_design_matrices[a_t], self.bs[a_t])
self.betas = self.alpha()
def compute_attack(self, action, context, a_star):
if action != a_star:
temp_1 = self.betas[action] * np.sqrt(np.dot(context, np.dot(self.inv_design_matrices[action], context)))
temp_2 = self.betas[a_star] * np.sqrt(np.dot(context, np.dot(self.inv_design_matrices[a_star], context)))
att = - min(1, max(np.dot(self.thetas_hat[action], context) + temp_1, 0)) + (1 - self.eps) \
* (min(1, max(0, np.dot(self.thetas_hat[a_star], context) - temp_2)))
return att
else:
return 0
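# Exp4 (exponential weights with expert advice): every expert maps a context to a
# probability vector over the K arms, the learner samples an action from the
# Q-weighted mixture of that advice, and the expert weights Q are updated with
# exponentially weighted, importance-weighted reward estimates; the gamma term adds
# an implicit-exploration style bias to the estimator.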
class Exp4(object):
def __init__(self, nb_arms, dimension, experts, eta=0.5, gamma=1):
self.K = nb_arms
self.dim = dimension
self.eta = eta
self.gamma = gamma
self.experts = experts
self.nb_experts = len(experts)
self.reset()
@property
def n_actions(self):
return self.K
@property
def n_features(self):
return self.dim
def reset(self):
self.Q = np.ones((self.nb_experts,))/self.nb_experts
self.iteration = 0
def get_expert_advice(self, context):
proba_matrix = np.zeros((self.nb_experts, self.K))
for m in range(self.nb_experts):
proba_matrix[m] = self.experts[m].get_action(context)
return proba_matrix
def get_action(self, context):
self.iteration += 1
self.E = self.get_expert_advice(context)
self.P = np.dot(self.E.T, self.Q)
#self.P = self.P/np.sum(self.P)
temp = np.linspace(0, self.K-1, self.K, dtype='int')
action = np.random.choice(temp, p=self.P)
return action
def update(self, context, a_t, r_t):
X = np.ones((self.K,))
X[a_t] = X[a_t] - (1 - r_t)/(self.P[a_t] + self.gamma)
X_experts = np.dot(self.E, X)
self.Q = self.Q*np.exp(self.eta*X_experts)
self.Q = self.Q/np.sum(self.Q)
| ContextualBanditsAttacks-main | isoexp/contextual/contextual_linucb.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import pickle
import matplotlib.pyplot as plt
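# ContextualLinearMABModel: ground-truth environment with one parameter vector per
# arm. The expected reward of arm a in context x is <theta_a, x>; reward() adds
# Gaussian noise with standard deviation `noise`.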
class ContextualLinearMABModel(object):
def __init__(self, random_state=0, noise=0.1, thetas=None):
self.local_random = np.random.RandomState(random_state)
self.noise = noise
self.thetas = thetas
def reward(self, context, action):
assert 0 <= action < self.n_actions, "{} not in 0 .. {}".format(action, self.n_actions)
reward = np.dot(context, self.thetas[action]) + self.noise * self.local_random.randn(1)
return reward
def best_arm_reward(self, context):
D = np.dot(self.thetas, context)
return np.max(D)
@property
def n_features(self):
return self.thetas.shape[1]
@property
def n_actions(self):
return self.thetas.shape[0]
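# RandomContextualLinearArms: random non-negative arm parameters plus a small pool of
# n_user random contexts. A constant bias coordinate is appended to every context and
# the parameter matrix is shifted and rescaled so that, on the sampled contexts, all
# expected rewards fall in (0, 1].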
class RandomContextualLinearArms(ContextualLinearMABModel):
def __init__(self, random_state=0, noise=0., n_actions=4, n_features=4, bound_context=1):
self.bound_context = bound_context
thetas = np.abs(np.random.randn(n_actions, n_features-1))
super(RandomContextualLinearArms, self).__init__(random_state=random_state, noise=noise,
thetas=thetas)
self.context_lists = []
self.n_user = 5
self.n = self.n_user
thetas = np.ones((n_actions, n_features))
thetas[:, :-1] = self.thetas.copy()
max_rwd = -float('inf')
min_rwd = float('inf')
for k in range(self.n_user):
test = np.abs(np.random.randn(self.n_features))
test = np.random.uniform(low=0, high=bound_context)*test/np.linalg.norm(test)
dot_prod = np.dot(self.thetas, test)
maxi = np.max(dot_prod)
mini = np.min(dot_prod)
if maxi >= max_rwd:
max_rwd = maxi
if mini <= min_rwd:
min_rwd = mini
self.context_lists.append(np.concatenate((test, np.array([1]))))
self.thetas = thetas
thetas[:, -1] = -min_rwd + 1
thetas = thetas / (max_rwd - min_rwd + 1)
self.thetas = thetas
print('Different Means:')
for k in range(self.n_user):
print('Means for context {}'.format(k), np.dot(thetas, self.context_lists[k]))
self.theta = self.thetas
def get_context(self):
return self.context_lists[np.random.randint(low=0, high=self.n_user)]
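# DatasetModel: arm and user feature matrices loaded from CSV files (e.g. the Jester
# matrices referenced, commented out, in the __main__ block of this module), with the
# same appended bias coordinate and rescaling so expected rewards lie in (0, 1].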
class DatasetModel(ContextualLinearMABModel):
def __init__(self, arm_csvfile, user_csvfile, random_state=0, noise=0., arms_limit=None, context_limit=None):
temp_thetas = np.loadtxt(arm_csvfile, delimiter=',').T
temp_user_contexts = np.loadtxt(user_csvfile, delimiter=',')
K, d = temp_thetas.shape
N, _ = temp_user_contexts.shape
thetas = np.ones((K, d+1))
user_contexts = np.ones((N, d+1))
thetas[:, :-1] = temp_thetas.copy()
if arms_limit is not None:
thetas = thetas[:arms_limit]
user_contexts[:, :-1] = temp_user_contexts.copy()
if context_limit is not None:
user_contexts = user_contexts[:context_limit]
self.bound_context = np.linalg.norm(user_contexts, axis=1).max()
D = np.dot(temp_user_contexts, temp_thetas.T)
min_rwd = np.min(D)
max_rwd = np.max(D)
thetas[:, -1] = -min_rwd + 1
thetas = thetas / (max_rwd - min_rwd + 1)
self.context_lists = user_contexts.copy()
self.n_user, _ = user_contexts.shape
super(DatasetModel, self).__init__(random_state=random_state, noise=noise,
thetas=thetas)
self.theta = self.thetas
def get_context(self):
return self.context_lists[np.random.randint(low=0, high=self.n_user)]
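# AttackOneUserModel: a single-user environment whose arm parameters are normalized to
# norm 1/2. add_target_arm() appends one extra arm whose parameter vector has norm
# `distance` and, as a sanity check, solves a small cvxpy QP reporting that arm's
# distance to the convex hull of the original arm parameters.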
class AttackOneUserModel(ContextualLinearMABModel):
def __init__(self, random_state=0, noise=0., n_actions=4, n_features=4, bound_context=1, distance=1):
self.bound_context = bound_context
thetas = np.abs(np.random.randn(n_actions, n_features))
norm_thetas = np.linalg.norm(thetas, axis=1)
thetas = (1/2) * thetas/norm_thetas.reshape((n_actions, 1))
super(AttackOneUserModel, self).__init__(random_state=random_state, noise=noise,
thetas=thetas)
self.context_lists = []
self.n_user = 1
self.n = self.n_user
self.distance = distance
for k in range(self.n_user):
test = np.abs(np.random.randn(self.n_features))
test = np.random.uniform(low=0, high=bound_context)*test/np.linalg.norm(test)
self.context_lists.append(test)
print('Different Means:')
# for k in range(self.n_user):
# print('Means for context {}'.format(k), np.dot(thetas, self.context_lists[k]))
self.theta = self.thetas
def get_context(self):
return self.context_lists[np.random.randint(low=0, high=self.n_user)]
def add_target_arm(self):
theta_target_arm = np.abs(np.random.randn(self.n_features))
theta_target_arm = self.distance * theta_target_arm/np.linalg.norm(theta_target_arm)
import cvxpy as cp
n_points = len(self.thetas)
lambdas = cp.Variable(n_points)
A = np.ones((1,n_points))
pos = -np.eye(n_points)
constraints = [A@lambdas == 1, pos@lambdas <= 0]
obj = cp.Minimize(cp.quad_form(theta_target_arm - self.thetas.T @ lambdas, np.eye(self.n_features)))
prob = cp.Problem(obj, constraints)
prob.solve()
print('Distance to convex hull', np.sqrt(prob.value))
self.thetas = np.concatenate((self.thetas, theta_target_arm.reshape((1, self.n_features))), axis=0)
if __name__ == '__main__':
import os
# arm_file = os.path.join(os.getcwd(),'../../examples/jester/Vt_jester.csv')
# user_file = os.path.join(os.getcwd(),'../../examples/jester/U.csv')
# test_model = DatasetModel(arm_csvfile=arm_file, user_csvfile=user_file, context_limit=100)
r = np.linspace(0, 1/2)
for rr in r:
test_model = AttackOneUserModel(n_features=2, n_actions=10, distance=rr)
# print(test_model.context_lists)
# print(np.linalg.norm(test_model.thetas,axis=1))
test_model.add_target_arm()
# print(test_model.thetas)
# for x in test_model.context_lists:
# print(np.dot(test_model.thetas, x))
if test_model.n_features == 2:
for a in range(test_model.n_actions-1):
plt.scatter(test_model.thetas[a, 0], test_model.thetas[a, 1], marker='+')
plt.scatter(test_model.thetas[test_model.n_actions - 1, 0], test_model.thetas[test_model.n_actions - 1, 1],
marker='^')
# for x in test_model.context_lists:
# plt.scatter(x[0], x[1], marker='o')
plt.show()
# RandomContextualLinearArms() | ContextualBanditsAttacks-main | isoexp/contextual/contextual_models.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import cvxpy as cp
from scipy.optimize import minimize
class RandomArm(object):
def __init__(self, initial_arms):
self.arms = initial_arms
def get_action(self):
return np.random.choice(self.arms)
def update(self, a_t, r_t):
pass
def reset(self):
pass
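# LinearBandit: (non-contextual) LinUCB over a fixed arm-feature matrix. It stores the
# regularized design matrix A and vector b explicitly and recomputes A^{-1} with
# np.linalg.inv at every round; see EfficientLinearBandit below for the incremental
# Sherman-Morrison version.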
class LinearBandit(object):
def __init__(self, arm_features, reg_factor=1., delta=0.5,
bound_theta=None, noise_variance=None):
self.arm_features = arm_features
self.reg_factor = reg_factor
self.delta = delta
self.iteration = None
self.bound_theta = bound_theta
self.bound_features = np.max(np.sqrt(np.sum(np.abs(arm_features) ** 2, axis=1)))
self.noise_variance = noise_variance
self.reset()
def reset(self):
d = self.n_features
self.A = self.reg_factor * np.eye(d, d)
self.b = np.zeros((d,))
self.range = 1
self.est_bound_theta = 0
self.est_bound_features = 0
self.n_samples = 0
self.iteration = 0
@property
def n_actions(self):
return self.arm_features.shape[0]
@property
def n_features(self):
return self.arm_features.shape[1]
def auto_alpha(self):
d = self.n_features
return self.range * np.sqrt(d * np.log((1 + max(1, self.n_samples) / self.reg_factor) / self.delta)) \
+ np.sqrt(self.reg_factor) * np.linalg.norm(self.theta_hat, 2)
def alpha(self, n_samples):
d = self.n_features
if self.bound_theta is None or self.noise_variance is None:
# use estimated quantities
sigma, B, D = self.range, self.est_bound_theta, self.bound_features
else:
sigma, B, D = self.noise_variance, self.bound_theta, self.bound_features
return sigma * np.sqrt(d * np.log((1 + max(1, n_samples) * D * D / self.reg_factor) / self.delta)) \
+ np.sqrt(self.reg_factor) * B
def get_action(self):
self.iteration += 1
# Let's not be biased with tiebreaks, but add in some random noise
noise = np.random.random(self.n_actions) * 0.000001
A_inv = np.linalg.inv(self.A)
self.theta_hat = A_inv.dot(self.b)
ta = np.diag(self.arm_features.dot(A_inv).dot(self.arm_features.T))
sfactor = self.alpha(self.n_samples)
        ucb = self.arm_features.dot(self.theta_hat) + sfactor * np.sqrt(ta)  # bonus on sqrt(x^T A^{-1} x), as in EfficientLinearBandit
ucb = ucb + noise
choice = np.argmax(ucb) # choose the highest
# print(ucb, choice)
return choice
def update(self, a_t, r_t):
# update the input vector
phi = self.arm_features[a_t]
self.A += np.outer(phi, phi)
self.b += r_t * phi
self.range = max(self.range, abs(r_t))
self.est_bound_theta = np.linalg.norm(self.theta_hat)
self.n_samples += 1
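# EfficientLinearBandit: same LinUCB rule as LinearBandit above, but A^{-1} is
# maintained incrementally with the Sherman-Morrison rank-one update instead of being
# inverted from scratch each round.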
class EfficientLinearBandit(object):
def __init__(self, arm_features, reg_factor=1., delta=0.5,
bound_theta=None, noise_variance=None):
self.arm_features = arm_features
self.reg_factor = reg_factor
self.delta = delta
self.iteration = None
self.bound_theta = bound_theta
self.bound_features = np.max(np.sqrt(np.sum(np.abs(arm_features) ** 2, axis=1)))
self.noise_variance = noise_variance
self.reset()
def reset(self):
d = self.n_features
self.Ainv = np.eye(d, d) / self.reg_factor
self.b = np.zeros((d,))
self.range = 1
self.est_bound_theta = 0
self.est_bound_features = 0
self.n_samples = 0
self.iteration = 0
@property
def n_actions(self):
return self.arm_features.shape[0]
@property
def n_features(self):
return self.arm_features.shape[1]
def auto_alpha(self):
d = self.n_features
sigma, B, D = self.noise_variance, self.bound_theta, self.bound_features
return sigma * np.sqrt(d * np.log((1 + max(1, self.iteration - 1) * D * D / self.reg_factor) / self.delta)) \
+ np.sqrt(self.reg_factor) * B
def alpha(self, n_samples):
d = self.n_features
if self.bound_theta is None or self.noise_variance is None:
# use estimated quantities
sigma, B, D = self.range, self.est_bound_theta, self.bound_features
else:
sigma, B, D = self.noise_variance, self.bound_theta, self.bound_features
return sigma * np.sqrt(d * np.log((1 + max(1, n_samples) * D * D / self.reg_factor) / self.delta)) \
+ np.sqrt(self.reg_factor) * B
def get_action(self, n_sam=None):
self.iteration += 1
if n_sam is None:
n_sam = self.n_samples
# Let's not be biased with tiebreaks, but add in some random noise
noise = np.random.random(self.n_actions) * 0.000001
# A_inv = np.linalg.inv(self.A)
# assert np.allclose(A_inv, self.Ainv)
self.theta_hat = np.dot(self.Ainv, self.b)
ta = np.diag(np.dot(self.arm_features, np.dot(self.Ainv, self.arm_features.T)))
sfactor = self.alpha(n_sam)
ucb = self.arm_features.dot(self.theta_hat) + sfactor * np.sqrt(ta)
ucb = ucb + noise
choice = np.argmax(ucb) # choose the highest
# print(ucb, choice)
return choice
def update(self, a_t, r_t):
# update the input vector
phi = self.arm_features[a_t]
# self.A += np.outer(phi, phi)
self.Ainv = self.Ainv - np.dot(self.Ainv, np.dot(np.outer(phi, phi), self.Ainv)) / (
1. + np.dot(phi.T, np.dot(self.Ainv, phi)))
self.b += r_t * phi
self.range = max(self.range, abs(r_t))
# self.est_bound_theta = np.linalg.norm(self.theta_hat)
self.n_samples += 1
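# UCB_GLM: UCB for generalized linear bandits with an identity ('gaussian') or
# logistic ('bernoulli') link. The parameter estimate is an L2-regularized maximum
# likelihood solution recomputed with cvxpy every round, and the exploration width is
# scaled by 1/kappa (a lower bound on the link derivative); tighter_ucb switches to
# the sqrt(2 * L * log(n) / kappa) width.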
class UCB_GLM() :
def __init__(self, arm_features, reg_factor = 1, delta = 0.1,
bound_theta = 1,
link_function = lambda x : x,
noise_variance = None,
model = None,
conservative_level=0.1,
tighter_ucb = False,
kappa = None) :
self.conservative_level = conservative_level
self.tighter_ucb = tighter_ucb
self.arm_features = arm_features
self.reg_factor = reg_factor
self.delta = delta
self.bound_theta = bound_theta
self.model = model
self.n_actions, self.d = arm_features.shape
self.noise_variance = noise_variance
if self.model == 'gaussian' :
self.link_function = lambda x : x
self.kappa = 1
self.L = 1
elif self.model == 'bernoulli' :
self.link_function = lambda x : 1/(1+np.exp(-x))
if kappa is None :
self.kappa = 1/1000
else :
self.kappa = kappa
self.L = 1/4
self.reset()
def reset(self) :
self.rewards_history = []
self.features_history = []
self.A = self.reg_factor * np.eye(self.d, self.d)/self.kappa
self.Ainv = np.eye(self.d, self.d)*self.kappa / self.reg_factor
self.n_samples = 0
self.iteration = 0
self.theta_hat = np.zeros(self.d)
def solve_MLE(self, rewards_history, features_history) :
if self.iteration > 1:
if not self.model is None :
n_samples = len(self.rewards_history)
n_features = self.d
X = np.zeros((n_samples, n_features))
X = 1*np.array(self.features_history)
y = (np.array(self.rewards_history).reshape((n_samples,)))
beta = cp.Variable(n_features)
lambd = cp.Parameter(nonneg = True)
lambd.value = self.reg_factor/2
if self.model == 'bernoulli' :
log_likelihood = cp.sum(cp.multiply(y, X @ beta) -
cp.log_sum_exp(cp.vstack([np.zeros(n_samples), X @ beta]), axis=0)) - lambd * cp.norm(beta, 2)
problem = cp.Problem(cp.Maximize(log_likelihood))
problem.solve(verbose = False, warm_start = False, max_iters = 200)
return beta.value
else :
log_likelihood = cp.sum( cp.multiply(y, X @ beta) -
cp.power(X@beta, 2)/2) - lambd * cp.norm(beta, 2)
problem = cp.Problem(cp.Maximize(log_likelihood))
problem.solve(verbose = False, warm_start = False, max_iters = 200)
return beta.value
else :
return np.zeros((self.d,))
def auto_alpha(self, tight_bound):
if tight_bound :
return np.sqrt(2*self.L*np.log(self.n_samples + 1)/self.kappa)
else :
sigma, B = self.noise_variance, self.bound_theta
return np.sqrt(self.reg_factor/self.kappa)*B + sigma*np.sqrt( self.d*np.log(1 + self.iteration*self.kappa/(self.reg_factor*self.d)) + 2*np.log(1/self.delta))/self.kappa
def get_action(self) :
self.iteration += 1
noise = np.random.random(self.n_actions) * 0.0000001
self.theta_hat = self.solve_MLE(self.rewards_history, self.features_history)
beta = self.auto_alpha(self.tighter_ucb)
ta = np.diag(np.dot(self.arm_features, np.dot(self.Ainv, self.arm_features.T)))
        ucb = self.arm_features.dot(self.theta_hat) + beta * np.sqrt(ta)  # width applied to sqrt(x^T A^{-1} x)
ucb = ucb + noise
UCB_action= np.argmax(ucb)
return UCB_action
def update(self, a_t, r_t):
phi = self.arm_features[a_t]
self.Ainv = self.Ainv - np.dot(self.Ainv, np.dot(np.outer(phi, phi), self.Ainv)) / (1. + np.dot(phi.T, np.dot(self.Ainv, phi)))
self.A += np.outer(phi,phi)
self.rewards_history.append(r_t)
self.features_history.append(phi)
self.n_samples += 1
def check_condition(self, theta):
temp = np.array(self.rewards_history).reshape((len(self.rewards_history),)) - self.link_function(np.array(self.features_history).dot(self.theta_hat))
temp = temp*np.array(self.features_history).reshape((self.d,len(self.rewards_history)))
temp = temp.T
temp = np.sum(temp, axis = 0) - self.reg_factor*theta
return temp
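# LinPHE (linear perturbed-history exploration): instead of a confidence bonus, each
# round it adds Binomial(1 + alpha * N_j, 1/2) pseudo-rewards to every arm's reward
# sum, solves the regularized least-squares problem on the perturbed history, and
# plays the greedy arm with respect to that randomized estimate.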
class LinPHE():
def __init__(self, arm_features, reg_factor=1, alpha=2):
self.arm_features = arm_features
self.reg_factor = reg_factor
self.alpha = alpha
self.reset()
def reset(self):
self.K, self.d = self.arm_features.shape
self.design_matrix = self.reg_factor * np.eye(self.d)
self.inv_design_matrix = np.eye(self.d) / (self.reg_factor)
self.iteration = 0
self.N = np.zeros((self.K,))
self.S = np.zeros((self.K,))
def get_action(self):
if self.iteration < -1: #self.d:
choice = np.random.randint(self.K)
else:
temp = np.zeros((self.d,))
for j in range(self.K):
Z = np.random.binomial(1 + int(self.alpha * self.N[j]), 0.5)
temp = temp + self.arm_features[j] * (self.S[j] + Z)
self.theta_hat = np.dot(self.inv_design_matrix, temp) / (self.alpha + 1)
ucb = self.arm_features.dot(self.theta_hat)
noise = np.random.randn(self.K) * 10 ** -7
ucb = ucb + noise
choice = np.argmax(ucb)
self.iteration += 1
return choice
def update(self, a_t, r_t):
self.S[a_t] += r_t * 1
self.N[a_t] += 1
x = self.arm_features[a_t]
self.design_matrix = self.design_matrix + np.outer(x, x)
self.inv_design_matrix = (self.inv_design_matrix - np.dot(self.inv_design_matrix,
np.dot(np.outer(x, x), self.inv_design_matrix)) / (
1. + np.dot(x.T, np.dot(self.inv_design_matrix, x))))
if __name__ == '__main__':
import sys
sys.path[0] = '/Users/evrard/Desktop/monotone_mabs/'
# from isoexp.linear.linearbandit import EfficientLinearBandit, LinearBandit
# from isoexp.conservative.linearmabs import EfficientConservativeLinearBandit, NewCLUB, SafetySetCLUCB, BatchedCLUCB, LinearOracle, LinUCBOracle
from isoexp.linear.linearmab_models import RandomLinearArms, RandomLogArms
from tqdm import trange
from collections import namedtuple
from joblib import Parallel, delayed
seed = np.random.randint(0, 10 ** 5)
MABResults = namedtuple('MABResults', 'regret,norm_error')
noise = 0.1
model = RandomLogArms(n_actions=20, n_features=2, noise=noise,
bound_features=1,
random_state=seed)
model.features = model.features
theta_bound = np.linalg.norm(model.theta, 2)
link = lambda x: 1 / (1 + np.exp(-x))
link_means = np.array([link(np.dot(model.theta, el)) for el in model.features])
means = np.array([(np.dot(model.theta, el)) for el in model.features])
T = 1500
PARALLEL = True
nb_simu = 10
algorithms = {
# 'EfficientLinearBandit': EfficientLinearBandit(arm_features=model.features,
# reg_factor=1.,
# delta=0.1,
# noise_variance=noise,
# bound_theta=theta_bound),
'UCB-GLM-tight-bound': UCB_GLM(arm_features=model.features,
bound_theta=theta_bound,
model='bernoulli',
noise_variance=noise,
reg_factor=1,
delta=0.1,
tighter_ucb=True),
'UCB-GLM': UCB_GLM(arm_features=model.features,
bound_theta=theta_bound,
model='bernoulli',
noise_variance=noise,
reg_factor=1,
delta=0.1,
tighter_ucb=False)}
if PARALLEL:
import multiprocessing
num_cores = multiprocessing.cpu_count()
def work(alg_name, alg):
regret = np.zeros((nb_simu, T))
norms = np.zeros((nb_simu, T))
for k in trange(nb_simu, desc='Simulating {}'.format(alg_name)):
alg.reset()
for t in trange(T, desc='Current episode :', leave=True):
a_t = alg.get_action()
# print(a_t)
r_t = model.reward(a_t)
alg.update(a_t, r_t)
regret[k, t] = link(model.best_arm_reward()) - link(np.dot(model.theta, model.features[a_t]))
if hasattr(alg, 'theta_hat'):
norms[k, t] = np.linalg.norm(alg.theta_hat - model.theta, 2)
return alg_name, MABResults(regret=regret, norm_error=norms)
results = Parallel(n_jobs=num_cores, verbose=1)(
delayed(work)(alg_name, algorithms[alg_name]) for alg_name in algorithms.keys())
else:
results = []
for alg_name, alg in algorithms.items():
regret = np.zeros((nb_simu, T))
norms = np.zeros((nb_simu, T))
cond = np.zeros((nb_simu, T))
draws = np.zeros((nb_simu, T))
for k in trange(nb_simu, desc='Simulating {}'.format(alg_name)):
alg.reset()
for t in trange(T, desc='Current episode ', leave=True):
a_t = alg.get_action()
r_t = model.reward(a_t)
alg.update(a_t, r_t)
regret[k, t] = link(model.best_arm_reward()) - link(np.dot(model.theta, model.features[a_t]))
if hasattr(alg, 'theta_hat'):
norms[k, t] = np.linalg.norm(alg.theta_hat - model.theta, 2)
draws[k, t] = a_t
results += [(alg_name, MABResults(regret=regret, norm_error=norms))]
import pylab as plt
for (alg_name, val) in results:
        mean_regret = np.mean(val.regret, axis=0)  # per-round regret averaged over runs; cumulated below when plotting
mean_norms = np.mean(val.norm_error, axis=0)
t = np.linspace(1, T + 1, T, dtype='int')
low_quantile = np.quantile(val.regret/t, 0.1, axis=0)
high_quantile = np.quantile(val.regret/t, 0.9, axis=0)
plt.figure(0)
plt.semilogx(mean_regret.cumsum()/t, label=alg_name)
plt.legend()
# plt.fill_between(t, low_quantile, high_quantile, alpha = 0.15)
plt.figure(1)
plt.plot(mean_norms, label=alg_name)
plt.legend()
plt.show()
| ContextualBanditsAttacks-main | isoexp/linear/linearbandit.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
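# LinearMABModel: ground-truth (non-contextual) linear environment. The expected
# reward of arm a is <features[a], theta>; reward() adds Gaussian noise with standard
# deviation `noise`.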
class LinearMABModel(object):
def __init__(self, random_state=0, noise=0.1, features=None, theta=None):
self.local_random = np.random.RandomState(random_state)
self.noise = noise
self.features = features
self.theta = theta
def reward(self, action):
assert 0 <= action < self.n_actions, "{} not in 0 .. {}".format(action, self.n_actions)
reward = np.dot(self.features[action], self.theta) + self.noise * self.local_random.randn(1)
# mean = np.dot(self.features[action], self.theta)
# reward = np.random.binomial(1, mean)
return reward
def best_arm_reward(self):
D = np.dot(self.features, self.theta)
return np.max(D)
@property
def n_features(self):
return self.features.shape[1]
@property
def n_actions(self):
return self.features.shape[0]
class LinPHEModel(object):
def __init__(self, d=10, n_actions=100, random_state=0):
self.local_random = np.random.RandomState(random_state)
self.n_features = d
self.n_actions = n_actions
temp_theta = self.local_random.randn(d - 1)
temp_theta = np.random.uniform(0, 1 / 2) * temp_theta / np.linalg.norm(temp_theta)
self.theta = np.ones(d) / 2
self.theta[:-1] = temp_theta
self.features = np.ones((n_actions, d))
# temp_features = self.local_random.randn(n_actions, d-1)
# temp_features = np.random.uniform(0, 1)*temp_features/np.linalg.norm(temp_features, axis = 1).reshape((self.n_actions, 1))
# print(temp_features)
# self.features[:, :-1] = temp_features
radius = 1
Y = self.local_random.randn(n_actions, d - 1)
U = np.random.uniform(0, 1, size=n_actions)
r = radius * np.power(U, 1. / (d - 1))
F = r / np.linalg.norm(Y, axis=1)
X = Y * F[:, np.newaxis]
self.features[:, :-1] = X
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
# if d-1 == 3:
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.scatter(X[:,0], X[:,1], X[:,2], label='new')
# ax.scatter(temp_features[:,0], temp_features[:,1], temp_features[:,2], label='old')
# plt.legend()
# plt.show()
# if d-1 == 2:
# plt.figure()
# plt.scatter(X[:,0], X[:,1], label='new')
# plt.scatter(temp_features[:,0], temp_features[:,1], label='old')
# plt.legend()
# plt.show()
#
# print(X)
def reward(self, action):
assert 0 <= action < self.n_actions, "{} not in 0 .. {}".format(action, self.n_actions)
reward = self.local_random.binomial(1, np.dot(self.theta, self.features[action]))
return reward
def best_arm_reward(self):
D = np.dot(self.features, self.theta)
return np.max(D)
def means(self):
D = np.dot(self.features, self.theta)
return D
class RandomLogArms(object) :
def __init__(self, random_state = 0, noise = .1,
n_actions = 4, n_features = 100,
bound_features = 1, bound_theta = 1) :
features = np.random.randn(n_actions, n_features)
self.features = bound_features*features/max(np.linalg.norm(features, axis = 1))
theta = np.random.randn(n_features)
self.theta = np.random.uniform(low = 0, high = bound_theta)*theta/np.linalg.norm(theta)
self.link = lambda x : 1/(1 + np.exp(-x))
self.noise = noise
self.local_random = np.random.RandomState(random_state)
self.n_actions, self.n_features = n_actions, n_features
temp = np.dot(self.features,self.theta) + bound_features
self.kappa = min(self.link(temp)*(1 - self.link(temp)))
def reward(self, action) :
reward = self.link(np.dot(self.features[action], self.theta)) + self.noise * self.local_random.randn(1)
return reward
def best_arm_reward(self):
D = np.dot(self.features, self.theta)
return self.link(np.max(D))
class RandomNormalLinearArms(LinearMABModel):
def __init__(self, random_state=0, noise=0., n_actions=4, n_features=4, reward_lim=(-np.inf, np.inf)):
features = np.random.randn(n_actions, n_features)
real_theta = np.random.randn(n_features) * 0.5
means = np.dot(features, real_theta)
idxs = (means < reward_lim[0]) | (means > reward_lim[1])
idxs = np.arange(n_actions)[idxs]
for i in idxs:
mean = -np.inf
feat = None
while mean > reward_lim[1] or mean < reward_lim[0]:
feat = np.random.randn(1, n_features)
mean = np.dot(feat, real_theta)
features[i, :] = feat
super(RandomNormalLinearArms, self).__init__(random_state=random_state, noise=noise,
features=features, theta=real_theta)
class RandomLinearArms(LinearMABModel):
def __init__(self, random_state=0, noise=0., n_actions=4, n_features=4, bound_features=1, bound_theta = 1, positive=True, max_one=True):
features = np.random.randn(n_actions, n_features)
real_theta = np.random.randn(n_features)
real_theta = np.random.uniform(low = 1/2, high = bound_theta)*real_theta/np.linalg.norm(real_theta)
if positive:
idxs = np.dot(features, real_theta) <= 0
idxs = np.arange(n_actions)[idxs]
for i in idxs:
mean = -1
feat = None
while mean <= 0:
feat = np.random.randn(1, n_features)
mean = np.dot(feat, real_theta)
features[i, :] = feat
features = np.random.uniform(low = 1/2, high = bound_features, size = (n_actions,1)) * features / max(np.linalg.norm(features, axis=1))
if max_one:
D = np.dot(features, real_theta)
min_rwd = min(D)
max_rwd = max(D)
min_features = features[np.argmin(D)]
features = (features - min_features) / (max_rwd - min_rwd)
super(RandomLinearArms, self).__init__(random_state=random_state, noise=noise,
features=features, theta=real_theta)
class DiffLinearArms(LinearMABModel):
def __init__(self, random_state=0, noise=0., n_actions=4, n_features=2, real_theta=np.array([9 / 10, 1 / 10]),
optimal_arm=np.array([1, 0]), baseline_arm=np.array([0, 1]), concentration_coeff=0.3):
baseline_arm = baseline_arm.reshape((baseline_arm.shape[0], 1))
features = baseline_arm + concentration_coeff * np.random.randn(n_features, n_actions)
idxs = np.dot(real_theta, features) <= 0
idxs = np.arange(n_actions)[idxs]
for i in idxs:
mean = -1
feat = None
while mean <= 0:
feat = baseline_arm + concentration_coeff * np.random.randn(n_features, 1)
mean = float(np.dot(real_theta, feat))
features[:, i] = feat.squeeze()
optimal_arm = optimal_arm.reshape((optimal_arm.shape[0], 1))
features = np.concatenate((features, optimal_arm), axis=1)
features = np.concatenate((features, baseline_arm), axis=1)
super(DiffLinearArms, self).__init__(random_state=random_state, noise=noise, features=features,
theta=real_theta)
class OtherArms(LinearMABModel):
def __init__(self, random_state=0, noise=0., n_actions=4, n_features=2):
angular_fraction = np.linspace(0, np.pi / 2, n_actions)
features = np.zeros((n_actions, n_features))
features[:, 0] = np.cos(angular_fraction)
features[:, 1] = np.sin(angular_fraction)
real_theta = np.array([1 - np.pi / (4 * n_actions), np.pi / (4 * n_actions)])
super(OtherArms, self).__init__(random_state=random_state, noise=noise, features=features, theta=real_theta)
class CircleBaseline(LinearMABModel):
def __init__(self, random_state=0, noise=0., n_actions=4, n_features=2, inner_radius=1 / 10, outer_radius=2):
temp = np.random.uniform(0, 2 * np.pi)
theta = outer_radius * np.array([np.cos(temp), np.sin(temp)])
angle_baseline = np.random.uniform(0, 2 * np.pi)
radius_baseline = np.random.uniform(1 / 10, inner_radius)
baseline = radius_baseline * np.array([np.cos(angle_baseline), np.sin(angle_baseline)]).reshape(1, n_features)
features = np.zeros((n_actions - 1, n_features))
radius_features = np.random.uniform(low=2 * inner_radius, high=outer_radius, size=(n_actions - 1, 1))
# radius_features = np.random.uniform(low = 0, high = inner_radius, size = (n_actions-1,1))
angle_features = np.random.uniform(0, 2 * np.pi, size=n_actions - 1)
features[:, 0] = np.cos(angle_features)
features[:, 1] = np.sin(angle_features)
features = radius_features * features
features = np.concatenate((features, baseline), axis=0)
# features = np.concatenate((baseline, features), axis = 0)
super(CircleBaseline, self).__init__(random_state=random_state, noise=noise, features=features, theta=theta)
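# Minimal usage sketch (not part of the original module; sizes are illustrative):
# LinPHEModel places arms in the unit ball with a hidden theta whose last coordinate
# is 1/2, so every arm's mean lies in [0, 1] and can parametrize a Bernoulli reward.
if __name__ == "__main__":
    model = LinPHEModel(d=5, n_actions=20, random_state=0)
    print("best achievable mean reward:", model.best_arm_reward())
    print("arm means:", model.means())
    print("one Bernoulli reward from arm 0:", model.reward(0))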
| ContextualBanditsAttacks-main | isoexp/linear/linearmab_models.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
from .linearmab_models import LinearMABModel
class ColdStartFromDatasetModel(LinearMABModel):
def __init__(self, arm_csvfile, user_csvfile, random_state=0, noise=0.):
features = np.loadtxt(arm_csvfile, delimiter=',').T
thetas = np.loadtxt(user_csvfile, delimiter=',')
super(ColdStartFromDatasetModel, self).__init__(random_state=random_state, noise=noise,
features=features, theta=None)
self.theta_idx = np.random.randint(low=0, high=thetas.shape[0])
print("Selecting user: {}".format(self.theta_idx))
self.theta = thetas[self.theta_idx]
# self.theta = np.random.randn(thetas.shape[1])
D = np.dot(self.features, self.theta)
min_rwd = min(D)
max_rwd = max(D)
min_features = features[np.argmin(D)]
self.features = (self.features - min_features) / (max_rwd - min_rwd)
| ContextualBanditsAttacks-main | isoexp/linear/coldstart.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import numpy as np
import sys
import numpy.random as npr
from tqdm import tqdm
class contextEpsGREEDY():
"""
Args:
T (int): horizon
arms (list): list of available arms
Returns:
rewards (array-like): observed rewards
draws (array-like): indexes of selected arms
"""
def __init__(self, number_arms, dimension, epsilon=0.1, decrease_epsilon=False):
self.decrease_epsilon = decrease_epsilon
self.epsilon = epsilon
self.K = number_arms
self.dimension = dimension
self.rewards = []
self.draws = []
self.number_pulls = np.ones((self.K,)) # number of observations of each arm
self.sum_rewards = np.zeros((self.K,)) # sum of rewards for each arm
self.thetas = np.zeros((self.K, self.dimension))
self.As = [np.identity(self.dimension) for _ in range(number_arms)]
self.rewards_matrix = np.zeros((self.K, self.dimension))
self.nb_iter=0
self.inv_design_matrices = np.zeros((self.K, dimension, dimension))
for arm in range(self.K):
self.inv_design_matrices[arm] = np.eye(dimension, dimension)
def estimated_best_arm(self, context):
return np.argmax(self.thetas.dot(context))
def get_action(self, context, deterministic=False):
if self.nb_iter < self.K:
return self.nb_iter
else:
# select the chosen_arm
expected_rewards = self.thetas.dot(context)
rnd = np.random.rand()
if not deterministic and rnd <= self.epsilon / (math.sqrt(self.nb_iter + 1) if self.decrease_epsilon else 1):
chosen_arm = np.random.choice(self.K)
else:
idxs = np.flatnonzero(np.isclose(expected_rewards, expected_rewards.max()))
chosen_arm = np.asscalar(np.random.choice(idxs))
return chosen_arm
def update(self, context, chosen_arm, reward):
# update quantities
self.nb_iter += 1
self.rewards.append(reward)
self.draws.append(chosen_arm)
self.sum_rewards[chosen_arm] += reward
self.number_pulls[chosen_arm] += 1
self.As[chosen_arm] += np.outer(context, context)
self.rewards_matrix[chosen_arm] += reward * context
self.thetas[chosen_arm] = np.linalg.inv(self.As[chosen_arm]).dot(self.rewards_matrix[chosen_arm])
return self.rewards, self.draws
class ContextualLinearBandit(object):
def __init__(self, nb_arms, dimension, reg_factor=1., delta=0.5,
bound_features=None, noise_variance=None, bound_context=None, alpha=None):
self.K = nb_arms
self.dim = dimension
self.reg_factor = reg_factor
self.delta = delta
self.exploration_coeff = alpha
self.iteration = None
self.bound_context = bound_context
self.bound_features = bound_features
self.noise_variance = noise_variance
self.reset()
def reset(self):
d = self.dim
self.thetas = np.zeros((self.K, d))
self.inv_design_matrices = np.zeros((self.K, d, d))
self.bs = self.thetas.copy()
for arm in range(self.K):
self.inv_design_matrices[arm] = np.eye(d, d) / self.reg_factor
# self.range = 1
# self.est_bound_theta = 0
# self.est_bound_features = 0
self.n_samples = np.zeros((self.K,))
self.iteration = 0
@property
def n_actions(self):
return self.K
@property
def n_features(self):
        return self.dim
# def auto_alpha(self):
# d = self.n_features
# sigma, B, D = self.noise_variance, self.bound_theta, self.bound_features
# return sigma * np.sqrt(d * np.log((1 + max(1, self.iteration - 1) * D * D / self.reg_factor) / self.delta)) \
# + np.sqrt(self.reg_factor) * B
def alpha(self):
d = self.dim
# print(d)
sigma, B, D = self.noise_variance, self.bound_context, self.bound_features
if self.exploration_coeff is None:
return sigma * np.sqrt(
d * np.log((1 + np.maximum(1, self.n_samples) * B * B / self.reg_factor) / self.delta)) \
+ np.sqrt(self.reg_factor) * D
else:
return self.exploration_coeff
def get_action(self, context, deterministic=False):
self.iteration += 1
# Let's not be biased with tiebreaks, but add in some random noise
noise = np.random.random(self.K) * 0.000001
estimate = np.zeros((self.K,))
sfactor = self.alpha()
for arm in range(self.K):
Ainv = self.inv_design_matrices[arm]
# print(Ainv)
b = self.bs[arm]
theta_hat = np.dot(Ainv, b)
self.thetas[arm] = theta_hat
ta = np.dot(context, np.dot(Ainv, context))
sfactor = self.alpha()
# print('sfactor =', sfactor)
# print('context = ', context)
# print('theta_hat=', theta_hat)
# print('ta = ', ta)
estimate[arm] = np.dot(context, theta_hat) + sfactor[arm] * np.sqrt(ta)
ucb = estimate + (0 if deterministic else noise)
choice = np.argmax(ucb) # choose the highest
# print(ucb[choice])
return choice
def update(self, context, a_t, r_t):
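        # Rank-one Sherman-Morrison update of the stored inverse design matrix of the
        # pulled arm, followed by the usual ridge-regression statistics (b vector, counts).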
self.inv_design_matrices[a_t] = self.inv_design_matrices[a_t] - \
np.dot(self.inv_design_matrices[a_t], np.dot(np.outer(context, context),
self.inv_design_matrices[a_t])) \
/ (1. + np.dot(context.T, np.dot(self.inv_design_matrices[a_t], context)))
self.bs[a_t] += r_t * context
self.n_samples[a_t] += 1
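# Minimal self-contained sketch (not part of the original module): drive the
# ContextualLinearBandit above on synthetic linear data.  The sizes, bounds and noise
# level below are illustrative assumptions.
if __name__ == "__main__":
    K, d, T = 4, 5, 2000
    rng = np.random.RandomState(0)
    true_thetas = rng.randn(K, d)
    alg = ContextualLinearBandit(nb_arms=K, dimension=d, reg_factor=1.0, delta=0.1,
                                 bound_features=np.max(np.linalg.norm(true_thetas, axis=1)),
                                 noise_variance=0.1, bound_context=1.0)
    cum_regret = 0.0
    for t in range(T):
        context = rng.randn(d)
        context /= np.linalg.norm(context)  # keep contexts on the unit ball
        a = alg.get_action(context)
        r = true_thetas[a].dot(context) + 0.1 * rng.randn()
        alg.update(context, a, r)
        cum_regret += np.max(true_thetas.dot(context)) - true_thetas[a].dot(context)
    print("cumulative regret after {} rounds: {:.2f}".format(T, cum_regret))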
| ContextualBanditsAttacks-main | isoexp/mab/contextual_mab_algs.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import math
from scipy.stats import truncnorm
class ContextualLinearMABModel(object):
def __init__(self, random_state=0, noise=0.1, theta=None):
if isinstance(random_state, int):
self.local_random = np.random.RandomState(random_state)
else:
assert isinstance(random_state, np.random.RandomState), "random state is neither an int nor a random number generator"
self.local_random = random_state
self.noise = noise
self.theta = theta
def reward(self, context, action):
assert 0 <= action < self.n_actions, "{} not in 0 .. {}".format(action, self.n_actions)
reward = self.expected_reward(action, context) + self.noise * self.local_random.randn(1)
# mean = np.dot(self.features[action], self.theta)
# reward = np.random.binomial(1, mean)
return reward
def expected_reward(self, action, context):
return np.dot(context, self.theta[action])
def best_expected_reward(self, context):
D = np.dot(self.theta, context)
return np.max(D)
def best_arm(self, context):
D = np.dot(self.theta, context)
return np.argmax(D)
@property
def n_features(self):
return self.theta.shape[1]
@property
def n_actions(self):
return self.theta.shape[0]
def compute_regret(self, context, a_t):
D = np.dot(self.theta, context)
return np.max(D) - D[a_t]
class LinContextualArm(object):
def __init__(self, theta: np.array, random_state:int):
"""
Args:
mean: expectation of the arm
variance: variance of the arm
random_state (int): seed to make experiments reproducible
"""
self.theta = theta
self.local_random = np.random.RandomState(random_state)
def sample(self, random_state):
pass
class LinBernoulliArm(LinContextualArm):
def __init__(self, theta, random_state=0):
"""
Bernoulli arm
Args:
p (float): mean parameter
random_state (int): seed to make experiments reproducible
"""
super(LinBernoulliArm, self).__init__(theta=theta, random_state=random_state)
def sample(self, context: np.array):
proba = sigmoid(np.dot(self.theta, context))
return self.local_random.rand(1) < proba
def sigmoid(x):
return 1 / (1 + np.exp(-x))
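# Minimal usage sketch (not part of the original module; values are illustrative):
# draw rewards from the linear contextual model and from a logistic (Bernoulli) arm.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    theta = rng.randn(3, 4)  # 3 arms, 4 features
    model = ContextualLinearMABModel(random_state=rng, noise=0.1, theta=theta)
    context = rng.randn(4)
    best = model.best_arm(context)
    print("expected rewards per arm:", theta.dot(context))
    print("noisy reward of the best arm:", model.reward(context, best))
    arm = LinBernoulliArm(theta=theta[0])
    print("Bernoulli sample for arm 0:", arm.sample(context))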
| ContextualBanditsAttacks-main | isoexp/mab/contextual_arms.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import sys
import numpy.random as npr
import cvxpy as cp
from tqdm import trange
from tqdm import tqdm
def UCB1(T, MAB, alpha=1.):
"""
Args:
T (int): horizon
MAB (list): list of available MAB models
alpha (float): shrink confidence interval
Returns:
rewards (array-like): observed rewards
draws (array-like): indexes of selected arms
"""
K = len(MAB)
rewards = np.zeros((T,))
draws = np.zeros((T,))
N = np.ones((K,)) # number of observations of each arm
S = np.zeros((K,)) # sum of rewards for each arm
for k in range(K):
a = k
r = MAB[a].sample()
# update quantities
rewards[k] = r
draws[k] = a
S[a] += r
N[a] += 1
for t in range(K, T):
# select the arm
ucb = S / N + alpha * np.sqrt(np.log(t + 1) / N)
idxs = np.flatnonzero(np.isclose(ucb, ucb.max()))
a = np.asscalar(np.random.choice(idxs))
r = MAB[a].sample()
# update quantities
rewards[t] = r
draws[t] = a
S[a] += r
N[a] += 1
return rewards, draws
def TS(T, MAB):
"""
Args:
T (int): horizon
MAB (list): list of available MAB models
Returns:
rewards (array-like): observed rewards
draws (array-like): indexes of selected arms
"""
K = len(MAB)
rewards = np.zeros((T,))
draws = np.zeros((T,))
alphas = np.ones((K,))
betas = np.ones((K,))
for t in range(T):
# sample the arm
thetas = np.random.beta(alphas, betas)
# select and apply action
a = np.argmax(thetas)
r = MAB[a].sample()
# update distribution
alphas[a] += r
betas[a] += 1 - r
rewards[t] = r
draws[t] = a
return rewards, draws
def epsGREEDY(T, MAB, epsilon=0.1):
"""
Args:
T (int): horizon
MAB (list): list of available MAB models
Returns:
rewards (array-like): observed rewards
draws (array-like): indexes of selected arms
"""
K = len(MAB)
rewards = np.zeros((T,))
draws = np.zeros((T,))
N = np.ones((K,)) # number of observations of each arm
S = np.zeros((K,)) # sum of rewards for each arm
for k in range(K):
a = k
r = MAB[a].sample()
# update quantities
rewards[k] = r
draws[k] = a
S[a] += r
N[a] += 1
for t in range(K, T):
# select the arm
ucb = S / N
rnd = np.random.rand()
if rnd <= epsilon:
a = np.random.choice(K)
else:
idxs = np.flatnonzero(np.isclose(ucb, ucb.max()))
a = np.asscalar(np.random.choice(idxs))
r = MAB[a].sample()
# update quantities
rewards[t] = r
draws[t] = a
S[a] += r
N[a] += 1
return rewards, draws
def SoftMAB(T, MAB, temp=1.0):
"""
Args:
T (int): horizon
MAB (list): list of available MAB models
Returns:
rewards (array-like): observed rewards
draws (array-like): indexes of selected arms
"""
K = len(MAB)
rewards = np.zeros((T,))
draws = np.zeros((T,))
N = np.ones((K,)) # number of observations of each arm
S = np.zeros((K,)) # sum of rewards for each arm
for k in range(K):
a = k
r = MAB[a].sample()
# update quantities
rewards[k] = r
draws[k] = a
S[a] += r
N[a] += 1
for t in range(K, T):
# select the arm
ucb = S / N
proba = np.exp(ucb / temp)
proba = proba / np.sum(proba)
a = np.random.choice(K, p=proba)
r = MAB[a].sample()
# update quantities
rewards[t] = r
draws[t] = a
S[a] += r
N[a] += 1
return rewards, draws
def ExploreThenExploit(T, MAB, T1):
K = len(MAB)
rewards = np.zeros((T,))
draws = np.zeros((T,))
N = np.ones((K,)) # number of observations of each arm
S = np.zeros((K,)) # sum of rewards for each arm
    T1 = np.ceil(T1).astype(int)
for t in range(T1):
a = np.random.choice(K)
r = MAB[a].sample()
# update quantities
rewards[t] = r
draws[t] = a
S[a] += r
N[a] += 1
for t in range(T1, T):
# select the arm
ucb = S / N
idxs = np.flatnonzero(np.isclose(ucb, ucb.max()))
a = np.asscalar(np.random.choice(idxs))
r = MAB[a].sample()
# update quantities
rewards[t] = r
draws[t] = a
S[a] += r
N[a] += 1
return rewards, draws
def UCBV(T, MAB, alpha=1.):
"""
Args:
T (int): horizon
MAB (list): list of available MAB models
alpha (float): shrink confidence interval
Returns:
rewards (array-like): observed rewards
draws (array-like): indexes of selected arms
"""
K = len(MAB)
rewards = np.zeros((T,))
draws = np.zeros((T,))
N = np.ones((K,)) # number of observations of each arm
S = np.zeros((K,)) # sum of rewards for each arm
M = np.zeros((K,)) # second moment (for Welford's algorithm)
vars = np.ones((K,)) * np.inf
for t in range(T):
# select the arm
ln = np.log(t + 1)
ucb = S / N + alpha * (np.sqrt(vars * ln / N) + ln / N)
ucb[N < 2] = sys.maxsize
idxs = np.flatnonzero(np.isclose(ucb, ucb.max()))
a = np.asscalar(np.random.choice(idxs))
r = MAB[a].sample()
# update quantities
rewards[t] = r
draws[t] = a
old_mean = S[a] / N[a] if N[a] > 0 else 0
S[a] += r
N[a] += 1
M[a] = M[a] + (r - old_mean) * (r - S[a]/N[a])
vars[a] = M[a] / N[a] # update variance estimate
return rewards, draws
def BootstrapedUCB(T, MAB, delta = 0.1, b_rep = 200):
K = len(MAB)
rewards = np.zeros((T,))
draws = np.zeros((T,))
N = np.zeros((K,))
S = np.zeros((K,))
rewards_arm = {}
for k in range(K):
a = k
r = 1*MAB[a].sample().squeeze()
rewards[k] = r
draws[k] = a
rewards_arm[k] = [r]
S[a] += r
N[a] += 1
for t in range(K, T):
alpha = 1/(t+1)
bootstrap_quantile = quantile((1-delta)*alpha, S, N, rewards_arm, B = b_rep)
phi = np.sqrt(2*np.log(1/alpha)/N)
## Theoretical ucb
#ucb = S / N + (bootstrap_quantile + np.sqrt(np.log(2/(delta*alpha))/N)*phi)
## Ucb used in practice
ucb = S / N + (bootstrap_quantile + np.sqrt(1/N)*phi)
idxs = np.flatnonzero(np.isclose(ucb, ucb.max()))
a = np.asscalar(np.random.choice(idxs))
r = 1*MAB[a].sample().squeeze()
rewards[t] = r
draws[t] = a
rewards_arm[a].append(r)
S[a] += r
N[a] += 1
return rewards, draws
def quantile(alpha, S, N, rwds, B = 100, distrib = 'rademacher') :
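    # Multiplier-bootstrap quantile per arm: observed rewards are centered by their
    # empirical mean, reweighted B times with Rademacher (or Gaussian) weights, and the
    # (1 - alpha)-quantile of the reweighted averages is returned (+inf for unpulled arms).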
means = np.nan_to_num(S/N)
K = len(N)
np_quantile = np.zeros(K)
for k in range(K) :
n = N[k]
if n > 0 :
bootstrap_avg = np.zeros(B)
if distrib == 'rademacher' :
weights = 2*npr.binomial(1, 1/2, size = (int(B),int(n))) - 1
elif distrib =='gaussian' :
weights = npr.randn(int(B),int(n))
history = np.array(rwds[k]) - means[k]
bootstrap_avg = (np.dot(weights, history)/n)
np_quantile[k] = np.percentile(bootstrap_avg, 100*(1 - alpha), interpolation = 'nearest')
else :
np_quantile[k] = +np.inf
return np_quantile
def PHE(T, MAB, alpha = 2) :
K = len(MAB)
rewards = np.zeros((T,))
draws = 0*rewards
N = np.zeros((K,))
S = np.zeros((K,))
biased_test = np.zeros((K,))
for k in range(K):
a = k
r = 1*MAB[a].sample().squeeze()
rewards[k] = r
draws[k] = a
S[a] +=r
N[a] +=1
for t in range(K, T) :
for i in range(K) :
Z = np.random.binomial(1,1/2, size = int(alpha*N[i]))
biased_test[i] = (np.sum(Z) + S[i])/((alpha+1)*N[i])
idxs = np.flatnonzero(np.isclose(biased_test, biased_test.max()))
a = np.asscalar(np.random.choice(idxs))
r = 1*MAB[a].sample().squeeze()
N[a] +=1
S[a] +=r
rewards[t] = r
draws[t] = a
return rewards, draws
def Random_exploration(T, MAB, alpha = 2) :
K = len(MAB)
rewards = np.zeros((T,))
draws = 0*rewards
N = np.zeros((K,))
S = np.zeros((K,))
biased_test = np.zeros((K,))
for k in range(K):
a = k
r = 1*MAB[a].sample().squeeze()
rewards[k] = r
draws[k] = a
S[a] +=r
N[a] +=1
for t in range(K, T) :
for i in range(K) :
Z = np.random.binomial(1,1/2, size = int(alpha*N[i]))
biased_test[i] = np.nan_to_num(np.mean(Z))+ S[i]/N[i]
idxs = np.flatnonzero(np.isclose(biased_test, biased_test.max()))
a = np.asscalar(np.random.choice(idxs))
r = 1*MAB[a].sample().squeeze()
N[a] +=1
S[a] +=r
rewards[t] = r
draws[t] = a
return rewards, draws
def EXP3_IX(T, MAB, eta = 0.1, gamma = 0):
K = len(MAB)
losses = np.zeros((K,))
rewards = np.zeros((T,))
draws = 0*rewards
sum_exp = K
exp_losses = np.ones((K,))
arms = np.linspace(0, K-1, K, dtype='int')
for t in range(T):
# print('cum_losses =', exp_losses)
# print('sum losses=', sum_exp)
P = exp_losses/sum_exp
# print('P =', P)
action = np.random.choice(arms, p=P)
X = 1*MAB[action].sample().squeeze()
losses[action] = losses[action] + (1 - X)/(gamma + P[action])
exp_losses[action] = exp_losses[action]*np.exp(-eta* (1 - X)/(gamma + P[action]))
sum_exp = np.sum(exp_losses)
rewards[t] = X
draws[t] = action
return rewards, draws
def attacked_EXP3_IX(T, MAB, target_arm, eta = None, gamma = None, delta=0.99):
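    # EXP3-IX under a reward-poisoning attacker: after the initial round-robin, whenever
    # an arm other than `target_arm` is drawn, the observed reward is shifted down by the
    # empirical mean gap to the target arm plus two confidence widths (clipped at 0),
    # biasing the importance-weighted loss estimates in favour of the target arm.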
K = len(MAB)
losses = np.zeros((K,))
rewards = np.zeros((T,))
draws = 0*rewards
sum_exp = K
exp_losses = np.ones((K,))
arms = np.linspace(0, K-1, K, dtype='int')
N = np.ones((K,)) # number of observations of each arm
S = np.zeros((K,))
beta = np.zeros((K,))
attacks = np.zeros((T,))
time_of_attacks = np.zeros((T,))
if eta is None or gamma is None:
eta = np.sqrt(2*np.log(K + 1)/(K*T))
gamma = np.sqrt(2*np.log(K + 1)/(K*T))/2
for t in range(T):
P = exp_losses/sum_exp
if t < K:
action = t
attack_t = 0
else:
time_of_attacks[t] = 1
action = np.random.choice(arms, p=P)
if action != target_arm:
beta = np.sqrt(np.log(np.pi ** 2 * K * N ** 2 / (3 * delta)) / (2*N))
attack_t = - np.maximum((S / N)[action] - (S / N)[target_arm] + beta[action] + beta[target_arm], 0)
else:
attack_t = 0
attacks[t] = attack_t
true_X = 1*MAB[action].sample().squeeze()
X = true_X + attack_t
losses[action] = losses[action] + (1 - X)/(gamma + P[action])
exp_losses[action] = exp_losses[action]*np.exp(-eta*(1 - X)/(gamma + P[action]))
sum_exp = np.sum(exp_losses)
rewards[t] = true_X
draws[t] = action
N[action] += 1
S[action] += true_X
return rewards, draws, attacks, time_of_attacks
def attacked_UCB1(T, MAB, target_arm, alpha=1., delta=0.99, constant_attack=False):
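    # UCB1 under a reward-poisoning attacker: when the learner is about to pull an arm
    # other than `target_arm`, the reward it observes is lowered either by a constant
    # (constant_attack=True) or by the pre-attack empirical mean gap plus confidence
    # widths beta, so the target arm keeps the largest index with high probability.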
K = len(MAB)
rewards = np.zeros((T,))
draws = np.zeros((T,))
N = np.ones((K,)) # number of observations of each arm
S = np.zeros((K,)) # sum of rewards for each arm
N_pre = np.ones((K,)) # number of observations of each arm
S_pre = np.zeros((K,))
attacks = np.zeros((T,))
time_of_attacks = np.zeros((T,))
for k in range(K):
a = k
r = MAB[a].sample()
rewards[k] = r
draws[k] = a
S[a] += r
N[a] += 1
S_pre[a] += r
N_pre[a] += 1
attacks[k] = 0
for t in range(K, T):
# select the arm
ucb = S / N + alpha * np.sqrt(np.log(t + 1) / N)
beta = np.sqrt(np.log(np.pi**2*K*N**2/(3*delta))/(2*N))
idxs = np.flatnonzero(np.isclose(ucb, ucb.max()))
a = np.asscalar(np.random.choice(idxs))
if a != target_arm:
time_of_attacks[t] = 1
if constant_attack:
attack_t = - 2 * np.maximum(0, MAB[a].mean - MAB[target_arm].mean)
else:
beta = np.sqrt(np.log(np.pi ** 2 * K * N ** 2 / (3 * delta)) / (2 * N))
attack_t = - np.maximum((S_pre / N)[a] - (S_pre / N)[target_arm] + beta[a] + beta[target_arm], 0)
else:
attack_t = 0
attacks[t] = attack_t
r = MAB[a].sample()
false_r = r + attack_t
# update quantities
rewards[t] = r
draws[t] = a
S[a] += false_r
N[a] += 1
S_pre[a] += r
N_pre[a] += 1
return rewards, draws, attacks, time_of_attacks
def EXP3_P(T, MAB, eta=0.1, gamma=0):
K = len(MAB)
S = np.zeros((K,))
rewards = np.zeros((T,))
draws = 0*rewards
sum_exp = K
exp_S = np.ones((K,))
arms = np.linspace(0, K-1, K, dtype='int')
for t in range(T):
P = (1 - gamma) * exp_S / sum_exp + gamma / K * np.ones((K,))
if t < K:
action = t
attack_t = 0
else:
# print('Probability distribution:', P)
action = np.random.choice(arms, p=P)
X = 1*MAB[action].sample().squeeze()
S = S + 1
S[action] = S[action] - (1 - X)/P[action]
exp_S = exp_S*np.exp(eta)
exp_S[action] = exp_S[action]*np.exp(-eta *(1 - X)/P[action])
sum_exp = np.sum(exp_S)
rewards[t] = X
draws[t] = action
return rewards, draws
def attacked_EXP3_P(T, MAB, target_arm, eta = None, gamma = None, delta=0.99, constant_attack=False):
K = len(MAB)
estimated_S = np.zeros((K,))
rewards = np.zeros((T,))
draws = 0 * rewards
sum_exp = K
exp_estimated_S = np.ones((K,))
arms = np.linspace(0, K - 1, K, dtype='int')
N = np.ones((K,)) # number of observations of each arm
S = np.zeros((K,))
beta = np.zeros((K,))
attacks = np.zeros((T,))
time_of_attacks = np.zeros((T,))
if eta is None and gamma is None:
eta = np.sqrt(np.log(K + 1) / (K * T))
gamma = 0
elif eta is None:
eta = np.sqrt(np.log(K + 1) / (K * T))
elif gamma is None:
gamma = 0
for t in range(T):
P = (1 - gamma) * exp_estimated_S / sum_exp + gamma/K*np.ones((K,))
if t < K:
action = t
attack_t = 0
else:
action = np.random.choice(arms, p=P)
if action != target_arm:
time_of_attacks[t] = 1
if constant_attack:
attack_t = - 2*np.maximum(0, MAB[action].mean - MAB[target_arm].mean)
else:
beta = np.sqrt(np.log(np.pi ** 2 * K * N ** 2 / (3 * delta)) / (2 * N))
attack_t = - np.maximum((S / N)[action] - (S / N)[target_arm] + beta[action] + beta[target_arm], 0)
else:
attack_t = 0
attacks[t] = attack_t
true_X = 1 * MAB[action].sample().squeeze()
X = true_X + attack_t
estimated_S = estimated_S + 1
estimated_S[action] = estimated_S[action] - (1 - X) /P[action]
exp_estimated_S = exp_estimated_S*np.exp(eta)
exp_estimated_S[action] = exp_estimated_S[action] * np.exp(eta * (- (1 - X) /P[action]))
sum_exp = np.sum(exp_estimated_S)
rewards[t] = true_X
draws[t] = action
N[action] += 1
S[action] += true_X
return rewards, draws, attacks, time_of_attacks
def FTRL(T, MAB, eta=10, alg='exp_3'):
K = len(MAB)
S = np.zeros((K,))
losses = np.zeros((K,))
rewards = np.zeros((T,))
draws = 0*rewards
arms = np.linspace(0, K-1, K, dtype='int')
for t in trange(T):
x = cp.Variable(K, pos=True)
temp_1 = cp.Constant(value=np.ones((K,)))
temp_2 = cp.Constant(value=losses)
constraints = [cp.sum(cp.multiply(temp_1, x)) == 1]
if alg == 'log_barrier':
obj = cp.Minimize(cp.sum(cp.multiply(temp_2, x)) - 1/eta*cp.sum(cp.log(x)))
elif alg == 'inf':
obj = cp.Minimize(cp.sum(cp.multiply(temp_2, x)) - 2/eta*cp.sum(cp.sqrt(x)))
else:
obj = cp.Minimize(cp.sum(cp.multiply(temp_2, x)) + 1/eta*(cp.sum(cp.kl_div(x, temp_1)) - K))
pb = cp.Problem(obj, constraints)
try:
pb.solve()
P = x.value
except:
P = np.ones((K,))/K
# print('Probability distribution:', P)
if not np.sum(P) == 1:
P = P/np.sum(P)
action = np.random.choice(arms, p=P)
X = 1*MAB[action].sample().squeeze()
S[action] = S[action] + X/P[action]
losses[action] = losses[action] + (-X)/P[action]
rewards[t] = X
draws[t] = action
return rewards, draws
def attacked_FTRL(T, MAB, target_arm, eta=10, alg='exp_3', delta=0.99, constant_attack=False):
K = len(MAB)
true_S = np.zeros((K,))
true_losses = np.zeros((K,))
N = np.zeros((K,))
estimated_losses = np.zeros((K,))
rewards = np.zeros((T,))
draws = 0*rewards
arms = np.linspace(0, K-1, K, dtype='int')
attacks = np.zeros((T,))
time_of_attacks = np.zeros((T,))
for t in trange(T):
x = cp.Variable(K, pos=True)
temp_1 = cp.Constant(value=np.ones((K,)))
temp_2 = cp.Constant(value=estimated_losses)
constraints = [cp.sum(cp.multiply(temp_1, x)) == 1]
if alg == 'log_barrier':
obj = cp.Minimize(cp.sum(cp.multiply(temp_2, x)) - 1/eta*cp.sum(cp.log(x)))
elif alg == 'inf':
obj = cp.Minimize(cp.sum(cp.multiply(temp_2, x)) - 2/eta*cp.sum(cp.sqrt(x)))
else:
obj = cp.Minimize(cp.sum(cp.multiply(temp_2, x)) + 1/eta*(cp.sum(cp.kl_div(x, temp_1)) - K))
pb = cp.Problem(obj, constraints)
try:
pb.solve()
P = x.value
except:
P = np.ones((K,))/K
# print("\nThe optimal value is", pb.value)
# print("A solution x is")
# print(x.value)
# print("A dual solution corresponding to the inequality constraints is")
# print(pb.constraints[0].dual_value)
# print('Probability distribution:', P)
if not np.sum(P) == 1:
P = P/np.sum(P)
if t < K:
action = t
attack_t = 0
else:
action = np.random.choice(arms, p=P)
if action != target_arm:
time_of_attacks[t] = 1
beta = np.sqrt(np.log(np.pi ** 2 * K * N ** 2 / (3 * delta)) / (2 * N))
if constant_attack:
attack_t = - 2*np.maximum(0, MAB[action].mean - MAB[target_arm].mean)
else:
attack_t = - np.maximum((true_S / N)[action] - (true_S / N)[target_arm] + beta[action]
+ beta[target_arm], 0)
else:
attack_t = 0
attacks[t] = attack_t
true_X = 1*MAB[action].sample().squeeze()
X = true_X + attack_t
true_S[action] = true_S[action] + true_X
true_losses[action] = true_losses[action] + (1-true_X)/P[action]
estimated_losses[action] = estimated_losses[action] + (1 - X)/P[action]
N[action] = N[action] + 1
rewards[t] = true_X
draws[t] = action
return rewards, draws, attacks, time_of_attacks
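# Minimal self-contained sketch (not part of the original module): run UCB1 on two
# hand-rolled Bernoulli arms.  The arm class and the means below are illustrative
# stand-ins for isoexp.mab.arms.ArmBernoulli.
if __name__ == "__main__":
    class _BernoulliArm(object):
        def __init__(self, p, seed=0):
            self.p = p
            self.mean = p
            self._rng = npr.RandomState(seed)
        def sample(self):
            return float(self._rng.rand() < self.p)
    bandit = [_BernoulliArm(0.3, seed=1), _BernoulliArm(0.7, seed=2)]
    rewards, draws = UCB1(5000, bandit, alpha=1.)
    print("empirical mean reward:", rewards.mean())
    print("fraction of pulls on the best arm:", np.mean(draws == 1))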
| ContextualBanditsAttacks-main | isoexp/mab/smab_algs.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import math
from scipy.stats import truncnorm
class AbstractArm(object):
def __init__(self, mean, variance, random_state):
"""
Args:
mean: expectation of the arm
variance: variance of the arm
random_state (int): seed to make experiments reproducible
"""
self.mean = mean
self.variance = variance
self.local_random = np.random.RandomState(random_state)
def sample(self):
pass
class ArmTruncNorm():
def __init__(self, original_mean=0, a=-1, b=1, original_std=0.1):
a, b = (a - original_mean) / original_std, (b - original_mean) / original_std
self.a = a
self.b = b
self.true_sigma = original_std
self.true_mean = original_mean
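        # note: truncnorm.stats returns (mean, variance) by default, so `self.sigma` below
        # actually stores a variance rather than a standard deviation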
self.mean, self.sigma = truncnorm.stats(a=self.a, b=self.b, loc=self.true_mean, scale=self.true_sigma)
def sample(self):
return truncnorm.rvs(a=self.a, b=self.b, loc=self.true_mean, scale=self.true_sigma)
class ArmBernoulli(AbstractArm):
def __init__(self, p, random_state=0):
"""
Bernoulli arm
Args:
p (float): mean parameter
random_state (int): seed to make experiments reproducible
"""
self.p = p
super(ArmBernoulli, self).__init__(mean=p,
variance=p * (1. - p),
random_state=random_state)
def sample(self):
return self.local_random.rand(1) < self.p
class ArmBeta(AbstractArm):
def __init__(self, a, b, random_state=0):
"""
arm having a Beta distribution
Args:
a (float): first parameter
b (float): second parameter
random_state (int): seed to make experiments reproducible
"""
self.a = a
self.b = b
super(ArmBeta, self).__init__(mean=a / (a + b),
variance=(a * b) / ((a + b) ** 2 * (a + b + 1)),
random_state=random_state)
def sample(self):
return self.local_random.beta(self.a, self.b, 1)
class ArmExp(AbstractArm):
# https://en.wikipedia.org/wiki/Truncated_distribution
# https://en.wikipedia.org/wiki/Exponential_distribution
# http://lagrange.math.siu.edu/Olive/ch4.pdf
def __init__(self, L, B=1., random_state=0):
"""
pdf =
Args:
L (float): parameter of the exponential distribution
B (float): upper bound of the distribution (lower is 0)
random_state (int): seed to make experiments reproducible
"""
assert B > 0.
self.L = L
self.B = B
v_m = (1. - np.exp(-B * L) * (1. + B * L)) / L
super(ArmExp, self).__init__(mean=v_m / (1. - np.exp(-L * B)),
variance=None, # compute it yourself!
random_state=random_state)
def cdf(self, x):
cdf = lambda y: 1. - np.exp(-self.L * y)
truncated_cdf = (cdf(x) - cdf(0)) / (cdf(self.B) - cdf(0))
return truncated_cdf
def inv_cdf(self, q):
assert 0 <= q <= 1.
v = - np.log(1. - (1. - np.exp(- self.L * self.B)) * q) / self.L
return v
def sample(self):
# Inverse transform sampling
# https://en.wikipedia.org/wiki/Inverse_transform_sampling
q = self.local_random.random_sample(1)
x = self.inv_cdf(q=q)
return x
class ArmFinite(AbstractArm):
def __init__(self, X, P, random_state=0):
"""
Arm with finite support
Args:
X: support of the distribution
P: associated probabilities
random_state (int): seed to make experiments reproducible
"""
self.X = X
self.P = P
mean = np.sum(X * P)
super(ArmFinite, self).__init__(mean=mean,
variance=np.sum(X ** 2 * P) - mean ** 2,
random_state=random_state)
def sample(self):
i = self.local_random.choice(len(self.P), size=1, p=self.P)
reward = self.X[i]
return reward
class ArmNormal(AbstractArm):
def __init__(self, mu, sigma, random_state=0):
self.sigma = sigma
super(ArmNormal, self).__init__(mean=mu,
variance=sigma ** 2,
random_state=random_state)
def sample(self):
x = self.local_random.randn() * self.sigma + self.mean
return x
if __name__ == '__main__':
    arm = ArmTruncNorm(original_mean=-1, a=0, b=0.01)
print(arm.sample())
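    # Illustrative extra check (not part of the original file): a Bernoulli arm as well.
    bern = ArmBernoulli(p=0.3, random_state=0)
    print("Bernoulli sample:", bern.sample(), "mean:", bern.mean)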
| ContextualBanditsAttacks-main | isoexp/mab/arms.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
__version__ = '0.0.dev0'
| ContextualBanditsAttacks-main | isoexp/mab/__init__.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
sys.path.append('/isoexp')
import numpy as np
import isoexp.mab.arms as arms
import pickle
from isoexp.mab.smab_algs import UCB1, EXP3_IX, attacked_UCB1, attacked_EXP3_IX, EXP3_P, attacked_EXP3_P, FTRL, attacked_FTRL
from matplotlib import rc
import json
import datetime
rc('text', usetex=True)
import matplotlib.pyplot as plt
from tqdm import tqdm
from collections import namedtuple
MABResults = namedtuple('MABResults', 'regret, cum_rewards, attacks, times_of_attacks')
random_state = np.random.randint(0, 123123)
K = 5
MAB = []
means = np.random.uniform(low=0.25, high=0.75, size=K)
#means = np.array([0.47823152, 0.70243227, 0.64504063, 0.65679234, 0.49546542,
# 0.46417188, 0.64736977, 0.71255566, 0.66844984, 0.26030838])
for k in range(K) :
#MAB.append(arms.ArmBeta(a=8*means[k], b=8*(1-means[k])))
MAB.append(arms.ArmBernoulli(p=means[k]))
nb_arms = len(MAB)
print('means: {}'.format(means))
mu_max = np.max(means)
a_star = np.argmin(means)
T = 1*10**4  # horizon
nb_simu = 10
eta = np.sqrt(2*np.log(K + 1)/(K*T))
# eta = 0.01
gamma = eta/2
settings = {
"T": T,
"nb_simu": nb_simu,
"random_state": random_state,
"K": K,
}
algorithms = {
#'EXP3': lambda T, MAB: FTRL(T, MAB, eta=eta, alg='epx_3'),
'INF': lambda T, MAB: FTRL(T, MAB, eta=eta, alg='inf'),
'Attacked INF': lambda T, MAB: attacked_FTRL(T, MAB, target_arm=a_star, eta=eta, alg='inf'),
# 'FTRL log barrier' : lambda T, MAB: FTRL(T, MAB, eta=eta, alg='log_barrier'),
# 'Attacked FTRL log barrier': lambda T, MAB: attacked_FTRL(T, MAB, target_arm=a_star, eta=eta, alg='log_barrier'),
'UCB': lambda T, MAB: UCB1(T, MAB, alpha=1),
'Attacked UCB': lambda T, MAB: attacked_UCB1(T, MAB, target_arm = a_star, alpha=1., delta=0.99),
# 'EXP3-IX': lambda T, MAB: EXP3_IX(T, MAB, eta=eta, gamma=gamma),
# 'Attacked EXP3-IX': lambda T, MAB: attacked_EXP3_IX(T, MAB, target_arm=a_star),
'EXP3': lambda T, MAB: EXP3_P(T, MAB, eta=np.sqrt(np.log(K)/(T*K))),
'Attacked EXP3': lambda T, MAB: attacked_EXP3_P(T, MAB, target_arm=a_star),
# 'EXP3.P Gamma 0.1': lambda T, MAB: EXP3_P(T, MAB, gamma=0.1, eta=np.sqrt(np.log(K)/(K*T))),
# 'Attacked EXP3.P Gamma 0.1': lambda T, MAB: attacked_EXP3_P(T, MAB, target_arm=a_star, gamma=0.1, eta=np.sqrt(np.log(K)/(K*T)))
}
results = []
full_algo = algorithms
for alg_name in full_algo.keys():
alg = full_algo[alg_name]
regret = np.zeros((nb_simu, T))
rwds = 0*regret
times = 0*regret
attacks = 0*regret
for k in tqdm(range(nb_simu), desc='Simulating {}'.format(alg_name)):
try:
rewards, draws = alg(T, MAB)
except ValueError:
rewards, draws, att, times_of_att = alg(T, MAB)
attacks[k] = np.cumsum(att)
times[k] = times_of_att
rwds[k] = np.cumsum(means[draws.astype('int')])
regret[k] = max(means) * np.arange(1, T + 1) - rwds[k]
results += [(alg_name, MABResults(regret=regret, cum_rewards=rwds, attacks=attacks, times_of_attacks=times))]
id = '{:%Y%m%d_%H%M%S}_{}'.format(datetime.datetime.now(), 'GLM')
with open("{}_{}_MAB_illustration.pickle".format(id, "SEQ"), "wb") as f:
pickle.dump(results, f)
with open("{}_{}_MAB_illustration_settings.json".format(id, "SEQ"), "w+") as f:
json.dump(settings, f)
t = np.arange(0, T)
for alg_name, val in results:
mean_regret = np.mean(val.regret, axis=0)
low_quantile_regret = np.quantile(val.regret, 0.25, axis=0)
high_quantile_regret = np.quantile(val.regret, 0.75, axis=0)
rwds = np.mean(val.cum_rewards, axis=0)
low_quantile_rwds = np.quantile(val.cum_rewards, 0.25, axis=0)
high_quantile_rwds = np.quantile(val.cum_rewards, 0.75, axis=0)
plt.figure(1)
plt.title('Rewards')
plt.plot(rwds, label=alg_name)
plt.legend()
plt.fill_between(t, low_quantile_rwds, high_quantile_rwds, alpha=0.15)
plt.figure(2)
plt.title('Regret')
plt.plot(mean_regret, label=alg_name)
plt.legend()
plt.fill_between(t, low_quantile_regret, high_quantile_regret, alpha=0.15)
if 'Attacked' in alg_name:
plt.figure(3)
cum_sum_attacks = np.mean(np.abs(val.attacks), axis=0)
low_quantile_attacks = np.quantile(np.abs(val.attacks), 0.25, axis=0)
high_quantile_attacks = np.quantile(np.abs(val.attacks), 0.75, axis=0)
plt.title('Cumulative sum of attacks')
plt.plot(cum_sum_attacks, label=alg_name)
plt.legend()
plt.fill_between(t, low_quantile_attacks, high_quantile_attacks, alpha=0.15)
# plt.figure(2)
# rep = np.random.randint(low=0, high=nb_simu)
# times_to_consider = val.times_of_attacks[rep]
# plt.scatter(t[times_to_consider == 1], val.regret[rep, times_to_consider == 1])
plt.figure(4)
plt.title('Number of attacks')
number_of_attacks = np.mean(np.cumsum(val.times_of_attacks, axis=1), axis=0)
high_quantile = np.quantile(np.cumsum(val.times_of_attacks, axis=1), 0.75, axis=0)
low_quantile = np.quantile(np.cumsum(val.times_of_attacks, axis=1), 0.25, axis=0)
plt.plot(number_of_attacks, label=alg_name)
plt.legend()
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.show()
#import tikzplotlib
#tikzplotlib.save("lcb_worst.tex")
| ContextualBanditsAttacks-main | isoexp/mab/main_mab.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 3 23:30:58 2019
@author: evrardgarcelon
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pickle
import tikzplotlib
import os
import shutil
import sys
from cycler import cycler
import tarfile
def tardir(path, tar_name):
with tarfile.open(tar_name, "w:gz") as tar_handle:
for root, dirs, files in os.walk(path):
for file in files:
tar_handle.add(os.path.join(root, file))
n = 9 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--', ':', '-.', '-'])
plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
if len(sys.argv) == 2:
filename = sys.argv[1]
else:
filename = '20190822_065452_Bernoulli_PAR_martingale_results.pickle'
filename = '20190901_124136_linear_PAR_linear_results.pickle'
filename = '20190902_135139_linear_PAR_linear_results.pickle'
filename = '20190903_233609_linear_PAR_jester_results.pickle'
filename = '20190903_235606_linear_PAR_jester_results.pickle'
filename = '20190904_010618_linear_PAR_jester_results.pickle'
with open(filename, 'rb') as f:
results = pickle.load(f)
print("Opening file %s..." % filename)
folder = filename.split('.')[0]
if os.path.exists(folder):
shutil.rmtree(folder)
os.makedirs(folder)
print("Done.\n")
EVERY = 20
LW = 2
print("Generating all figures ...")
bad_model = None
min_val = np.inf
total_experiments = {}
avg_area = {}
avg_margin = {}
for m, model in enumerate(results):
cucb_M, cucb_H = 0, 0
plt.figure()
ymax = -np.inf
T = None
for alg_name, val in model[1]:
if alg_name not in total_experiments.keys():
total_experiments[alg_name] = []
avg_area[alg_name] = []
avg_margin[alg_name] = []
rep, T = val['cum_rewards'].shape
t = np.arange(1, T + 1)
regret = np.cumsum(val['regret'], axis=1)
total_experiments[alg_name] += regret.tolist()
margin = val['cum_rewards'].cumsum(axis=1)
area = np.sum(margin * (margin < 0), axis=1).mean()
print('min_margin(', alg_name, ')=', margin.min())
print('area(', alg_name, ')=', area)
print()
avg_area[alg_name] += [area]
avg_margin[alg_name] += margin.tolist()
mean_regret = np.mean(regret, axis=0)
std = np.std(regret, axis=0) / np.sqrt(rep)
plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=alg_name)
plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
ymax = max(ymax, mean_regret[-1] + 2 * std[-1])
plt.xlim([0, T])
plt.ylim([0, ymax])
plt.legend()
plt.title("model: {}".format(m))
plt.savefig(os.path.join(folder, "model{}.png".format(m)))
tikzplotlib.save(os.path.join(folder, "model{}.tex".format(m)))
plt.close()
print("Done.\n")
avg_regret_name = os.path.join(folder, "avg_regret.png")
print("Saving average regret to %s..." % avg_regret_name)
ymax = -np.inf
TOSAVE = {}
for alg_name in total_experiments.keys():
regret = np.array(total_experiments[alg_name])
rep, T = regret.shape
t = np.arange(1, T + 1)
mean_regret = np.mean(regret, axis=0)
std = np.std(regret, axis=0) / np.sqrt(rep)
plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=alg_name)
plt.fill_between(t[::EVERY],
mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
ymax = max(ymax, mean_regret[-1] + 2 * std[-1])
M = np.concatenate((t.reshape(-1, 1), mean_regret.reshape(-1, 1), std.reshape(-1, 1)), axis=1)
TOSAVE[alg_name] = M
np.savez_compressed(os.path.join(folder, "avg_regret"), **TOSAVE)
plt.xlim([0, T])
plt.ylim([0, ymax])
plt.xlabel("Time")
plt.ylabel("Cumulative Regret")
plt.legend()
plt.savefig(avg_regret_name)
plt.close()
print("Done.\n")
for alg_name in avg_area.keys():
print("AverageAREA({}): {}".format(alg_name, np.mean(avg_area[alg_name])))
avg_margin_name = os.path.join(folder, "avg_margin.png")
print("Saving average margin to %s..." % avg_margin_name)
plt.figure(figsize=(10,10))
ymax = -np.inf
maxt = -np.inf
TOSAVE = {}
for alg_name in avg_margin.keys():
margin = np.array(avg_margin[alg_name])
rep, T = margin.shape
t = np.arange(1, T + 1)
mean_margin = np.mean(margin, axis=0)
std = np.std(margin, axis=0) / np.sqrt(rep)
idxs = mean_margin < 10
if np.sum(idxs) > 0:
plt.plot(t[idxs], mean_margin[idxs], linewidth=LW, label=alg_name)
plt.fill_between(t[idxs],
mean_margin[idxs] - 2 * std[idxs], mean_margin[idxs] + 2 * std[idxs],
alpha=0.15)
ymax = max(ymax, mean_margin[-1] + 2 * std[-1])
maxt = max(maxt, np.max(t[idxs]))
M = np.concatenate((t.reshape(-1,1), mean_margin.reshape(-1,1), std.reshape(-1,1)), axis=1)
TOSAVE[alg_name] = M
plt.xlim([0, maxt])
# plt.ylim([0, ymax])
plt.xlabel("Time")
plt.ylabel("Average Margin")
plt.legend()
plt.savefig(avg_margin_name)
plt.close()
np.savez_compressed(os.path.join(folder, "avg_margin"), **TOSAVE)
print("Done.\n")
# worst_name = os.path.join(folder, "worst_linear_exp.tex")
# print("Saving worst model to %s..." % worst_name)
# tikzplotlib.save(worst_name)
# print("Done.\n")
archive_name = "{}.tar.gz".format(folder)
print("Compressing files to %s..." % archive_name)
tardir(folder, archive_name)
print("Done.\n")
plt.show()
# # select "bad" model
# plt.figure(1,figsize=(10, 10))
# plt.figure(1)
# plt.clf()
# plt.title('Regret')
# ymax = -np.inf
# T = None
# for alg_name, res in results:
# val = res[0][1]
# print(alg_name)
# rep, T = val['cum_rewards'].shape
# t = np.arange(1, T + 1)
# regret = np.cumsum(val['regret'], axis=1)
# mean_regret = np.mean(regret, axis=0)
# std = np.std(regret, axis=0) / np.sqrt(rep)
# plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=alg_name)
# plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
# alpha=0.15)
# ymax = max(ymax, mean_regret[-1] + 2 * std[-1])
#
# plt.xlim([0, T])
# plt.ylim([0, ymax])
# plt.legend()
# plt.savefig(os.path.join(folder, "real_data.png"))
# print("Done.\n")
#
# worst_name = os.path.join(folder, "real_data.tex")
# print("Saving worst model to %s..." % worst_name)
# tikzplotlib.save(worst_name)
#
# plt.figure(2,figsize=(10, 10))
# plt.figure(2)
# plt.clf()
# plt.title('Margin')
# ymax = -np.inf
# max_time = 1000
# T = None
# for alg_name, res in results:
# val = res[0][1]
# rep, T = val['cum_rewards'].shape
# t = np.arange(1, T + 1)
# margin = val['cum_rewards'].cumsum(axis = 1)
# print(alg_name, '=', margin.min())
# mean_margin = np.mean(margin, axis=0)
# std = np.std(margin, axis=0) / np.sqrt(rep)
# plt.plot(t[:max_time:EVERY], mean_margin[:max_time:EVERY], linewidth=LW, label=alg_name)
# plt.fill_between(t[:max_time:EVERY], mean_margin[:max_time:EVERY] - 2 * std[:max_time:EVERY], mean_margin[:max_time:EVERY] + 2 * std[:max_time:EVERY],
# alpha=0.15)
# #ymax = max(ymax, mean_regret[-1] + 2 * std[-1])
#
# plt.xlim([0, max_time])
# #plt.ylim([0, ymax])
# plt.legend()
# plt.savefig(os.path.join(folder, "real_data_margin.png"))
# print("Done.\n")
#
# worst_name = os.path.join(folder, "real_data.tex")
# print("Saving worst model to %s..." % worst_name)
# tikzplotlib.save(worst_name)
# print("Done.\n")
#
# archive_name = "{}.tar.gz".format(folder)
# print("Compressing files to %s..." % archive_name)
# tardir(folder, archive_name)
# print("Done.\n")
#
# plt.show()
| ContextualBanditsAttacks-main | examples/parse_real_data_results.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
from isoexp.monenvs import Env1
from sklearn.isotonic import IsotonicRegression
import matplotlib.pyplot as plt
import math
np.random.seed(521524)
from sklearn.utils import check_random_state
from isoexp._samplers import isotonic_data_bands
def paper_f(X, sigma):
v = 20 * X / 0.4 - (10 + 0.3 * 20 / 0.4)
v[X <= 0.3] = -10
v[X > 0.7] = 10
return v + sigma * np.random.randn(len(X))
env = Env1()
N = 450
sigma = 0.5
X = np.random.rand(N)
X = np.sort(X)
Y = env.f[0](X) + sigma * np.random.randn(N)
Y = paper_f(X, sigma=sigma)
# Y = (10*X)**3/ 3.4 + sigma * np.random.randn(N)
# X = np.random.rand(N)
# X = np.sort(X)
# rs = check_random_state(312312)
# Y = rs.randint(-10, 10, size=(N,)) + 10. * np.log1p(np.arange(N))
L = 20/0.4
# L = 3 * 100
plt.plot(X, Y, 'o', label="Y")
idx_vector = np.arange(N)
ir = IsotonicRegression()
ir = ir.fit(X, Y)
Y_iso = ir.transform(X)
plt.plot(X, Y_iso, '-d', label="iso(Y)")
plt.legend()
T = np.linspace(0.001, 0.999, 50)
f = ir.predict(T)
f[T < X[0]] = Y_iso[0]
f[T > X[-1]] = Y_iso[-1]
delta = 0.1
# for idx in range(len(T)):
# X_new = T[idx]
# if X_new < X[0]:
# lb = -L * np.abs(X_new - X[0]) - np.sqrt(2*np.log((N**2 + N)/delta))
# lbm = 1
# m = 1
# ub = np.inf
# while m <= N:
# val = np.mean(Y_iso[0:m]) - f[idx] + np.sqrt(2*np.log((N**2 + N)/delta) / m)
# if val < ub:
# ub = val
# ubm = m
# m += 1
# elif X_new > X[-1]:
# ub = L * np.abs(X_new - X[-1]) + np.sqrt(np.log(2*(N**2 + N)/delta))
# ubm = 1
# m = 1
# while m <= N:
# val = np.mean(Y_iso[N-m:N]) - f[idx] - np.sqrt(np.log(2*(N**2 + N)/delta) / m)
# if val > lb:
# lb = val
# lbm = m
# m += 1
# else:
#
# k = np.max(idx_vector[(X_new > X)]) + 1
# assert k == (np.sum(X_new > X)), "{},{}".format(k, (np.sum(X_new > X)))
# m = 1
# mtop = max(k, N - k)
# ub = np.inf
# lb = -np.inf
# while m <= mtop:
# if m <= k:
# val = np.mean(Y_iso[k - m:k + 1]) - f[idx] - np.sqrt(np.log(2*(N**2 + N)/delta) / m)
# if val > lb:
# lb = val
# lbm = m
#
# if m <= N - k:
# val = np.mean(Y_iso[k:k + m]) - f[idx] + np.sqrt(np.log(2*(N**2 + N)/delta) / m)
#
# if val < ub:
# ub = val
# ubm = m
# m += 1
#
# print(X_new, lbm, lb, ub, ubm)
# plt.plot(X_new, f[idx] + ub, 'r+')
# plt.plot(X_new, f[idx] + lb, 'g*')
# idx = N - 7
# print("N: {}".format(N))
# print(T[idx], f[idx])
# lb, ub = isotonic_data_bands(X, Y, T[idx], f[idx], L, sigma, delta)
# print()
# print(lb, ub)
# exit(312)
LUCB = np.zeros((len(T), 4))
plt.figure()
plt.plot(T, f, ':+', label="iso(t)")
plt.plot(X, Y_iso, 'o', label="iso(Y)")
for idx in range(len(T)):
X_new = T[idx]
y_new = f[idx]
lb, ub = isotonic_data_bands(X, Y_iso, X_new, y_new, L, sigma, delta)
LUCB[idx, 0:2] = [lb, ub]
# plt.plot(X_new, y_new + ub, 'r+', label="ub-iso(Y)")
# plt.plot(X_new, y_new + lb, 'g*', label="lb-iso(Y)")
# plt.figure()
# plt.plot(T, f, '+')
for idx in range(len(T)):
X_new = T[idx]
y_new = f[idx]
lb, ub = isotonic_data_bands(X, Y, X_new, y_new, L, sigma, delta)
LUCB[idx, 2:4] = [lb, ub]
# plt.plot(X_new, y_new + ub, 'r*', label="ub-Y")
# plt.plot(X_new, y_new + lb, 'g-', label="lb-Y")
print(LUCB)
plt.plot(T, f + LUCB[:,0], 'g*', label="lb-iso(Y)")
plt.plot(T, f + LUCB[:,1], 'r+', label="ub-iso(Y)")
plt.plot(T, f + LUCB[:,2], 'b4', label="lb-Y")
plt.plot(T, f + LUCB[:,3], 'ko', label="ub-Y", fillstyle='none')
plt.legend()
plt.show()
| ContextualBanditsAttacks-main | examples/show_confidence.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 1 14:23:15 2019
@author: evrardgarcelon
"""
import numpy as np
import pylab as plt
x = np.linspace(0,100)
y = np.linspace(0,50)
c_1 = 1
a = np.array([1,-1])
f = lambda c,d : c_1*np.sqrt(c+d) + a[0]*c + a[1]*d
X,Y = np.meshgrid(x,y)
res = f(X,Y)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, res)
plt.show() | ContextualBanditsAttacks-main | examples/untitled0.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import datetime
import json
import logging
import os
import pickle
from collections import namedtuple
import numpy as np
from joblib import Parallel, delayed
from tqdm import tqdm
import sys
from math import sqrt
from numpy.linalg import norm
sys.path.append('/private/home/broz/workspaces/bandits_attacks')
from isoexp.contextual.contextual_linucb import ContextualLinearTS
from isoexp.contextual.contextual_models import DatasetModel, RandomContextualLinearArms
from isoexp.mab import contextual_arms
from isoexp.mab.contextual_mab_algs import contextEpsGREEDY, ContextualLinearBandit
import quadprog
from scipy.linalg import sqrtm
from scipy.optimize import minimize, linprog
import cvxpy as cp
from scipy import stats
from math import log
"""
TEST Linear Bandit
"""
logging_period = 1000
def compute_regret(theta, context, a_t):
D = np.dot(theta, context)
return np.max(D) - D[a_t]
def work(nb_arms, noise, n_features, T, random_state, attack_frequency, alg_name, weak_attack=False,
adversarial_xi=0.00001, method=None, sparse_attacks=False, simulator=None, target_arm=None, x_star=None, delta=0.99, reg_factor=0.1):
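    # Runs one simulated trajectory of the chosen algorithm (eps_greedy, LinUCB or LinTS)
    # on `simulator`, optionally perturbing the contexts fed to the learner according to
    # `attack_frequency` and the attack method, and logs regret / attack statistics every
    # `logging_period` steps.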
# create model
print(
f"adversarial {attack_frequency}, xi {adversarial_xi}, weak_attack {weak_attack} method {method}")
local_random = np.random.RandomState(random_state)
if simulator is None:
raise ValueError('No simulator')
# # real_theta = np.random.randn(nb_arms, n_features)
# # real_theta = np.random.uniform(low=1 / 2, high=3) * real_theta / np.linalg.norm(real_theta)
#
# # simulator = contextual_arms.ContextualLinearMABModel(theta=real_theta, noise=noise, random_state=local_random)
# simulator = RandomContextualLinearArms(n_actions=nb_arms, n_features=n_features, noise=noise, bound_context=1)
# target_context = np.random.randint(low=0, high=len(simulator.context_lists))
# x_star = simulator.context_lists[target_context]
# means_x_star = np.dot(simulator.thetas, x_star)
#
# target_arm = np.argmin(means_x_star)
# # print('the simulator is {}'.format(simulator))
simulator.local_random = local_random
all_rewards = np.dot(simulator.context_lists, simulator.thetas.T)
regret = []
rewards = []
norms = []
attacks = []
relative_attack_norm = []
contexts_norms = []
successful_attack = []
failed_attack = []
iteration = []
cumulative_regret = []
ratio_successful_attacks = []
sum_attacks_norms = []
nb_attacks_list = []
inv_design_worst_ratio = []
alg_names = []
biases = []
target_arm_chosen_list = []
target_arm_chosen_count = 0
x_star_appeared =0
a_star_in_x_star=0
a_star_in_x_star_list=[]
x_star_appeared_list =[]
TS_attacker=None
if alg_name == 'eps_greedy':
alg = contextEpsGREEDY(number_arms=simulator.n_actions, dimension=simulator.n_features, decrease_epsilon=True)
elif alg_name == 'LinUCB':
alg = ContextualLinearBandit(nb_arms=simulator.n_actions, dimension=simulator.n_features,
reg_factor=reg_factor, delta=delta,
bound_features=np.max(np.linalg.norm(simulator.thetas, axis=1)),
noise_variance=noise, bound_context=simulator.bound_context)
elif alg_name == 'LinTS':
alg = ContextualLinearTS(nb_arms=simulator.n_actions, dimension=simulator.n_features,
reg_factor=reg_factor,
delta=delta,
noise_variance=noise/5)
TS_attacker = TS_relaxed_attacks_calculator(simulator, alg, T)
else:
raise ValueError(f'Unknown alg_name {alg_name}')
cumulative_regret_t = 0
n_successful_attacks = 0
n_failed_attacks = 0
attack_sums = 0
nb_attacks = 0
for t in tqdm(range(T)):
context = simulator.get_context()
context_norm = norm(context)
if attack_frequency == 'target_arm':
is_attacked = is_equal(context, x_star)
else:
attack_proba = 1 / sqrt(t + 1) if attack_frequency == 'decrease_sqrt' else attack_frequency
is_attacked = local_random.rand() < attack_proba
if is_attacked:
predicted_best_arm = alg.get_action(context, deterministic=True)
if sparse_attacks:
# true_best_arm = np.argmax(simulator.theta.dot(context))
if predicted_best_arm == target_arm:
# print("no need to attack")
n_successful_attacks += 1
attack = 0
attack_norm = 0
else:
n_failed_attacks += 1
attack = compute_long_term_attack(simulator, predicted_best_arm, context, target_arm, all_rewards, factor=sparse_attacks)
attack_norm = norm(attack)
else:
estimated_rewards = alg.thetas.dot(context)
if weak_attack:
attack, attack_norm, attack_succeeded = compute_weak_attack(adversarial_xi, alg, predicted_best_arm,
context,
estimated_rewards, nb_arms)
else:
attack, attack_norm, attack_succeeded = compute_strong_attack(adversarial_xi, alg,
context,
estimated_rewards, method,
simulator.n_features,
simulator=simulator, target_arm=target_arm, x_star=x_star, attacker=TS_attacker)
if attack_norm == float('inf'):
attack = 0
attack_norm = 0
else:
attack_norm = 0
attack = 0
if attack_norm > 0:
nb_attacks += 1
if attack_norm < float('inf'):
attack_sums += attack_norm
attacked_context = context + attack
a_t = alg.get_action(attacked_context)
if is_attacked and not sparse_attacks:
if attack_succeeded:
assert t <= nb_arms or 0 < attack_norm < float(
'inf'), 'The attack is seen as successful but is zero or of infinite norm, the attack was {}'.format(
attack)
n_successful_attacks += 1
else:
n_failed_attacks += 1
r_t = simulator.reward(context, a_t)
regret_t = compute_regret(simulator.theta, context, a_t)
alg.update(attacked_context, a_t, r_t)
if is_equal(context, x_star):
x_star_appeared += 1
if a_t == target_arm:
a_star_in_x_star+=1
cumulative_regret_t += regret_t
if a_t == target_arm:
target_arm_chosen_count +=1
if t % logging_period == 0:
bias = (r_t - alg.thetas[a_t].dot(attacked_context)) / r_t
norm_error = np.linalg.norm(alg.thetas - simulator.theta, 2)
# logging
worst_ratio = None
for inv_a in alg.inv_design_matrices:
for i, col in enumerate(inv_a):
ratio = abs(max(col) / col[i])
if worst_ratio is None or worst_ratio < ratio:
worst_ratio = ratio
inv_design_worst_ratio.append(worst_ratio)
regret.append(
regret_t) # simulator.best_expected_reward(context) - simulator.expected_reward(action=a_t, context=context)
norms.append(norm_error)
rewards.append(r_t)
attacks.append(attack_norm)
iteration.append(t)
relative_attack_norm.append(norm(attacked_context) / context_norm)
contexts_norms.append(context_norm)
cumulative_regret.append(cumulative_regret_t)
ratio_successful_attacks.append(n_successful_attacks / (
n_failed_attacks + n_successful_attacks) if n_failed_attacks + n_successful_attacks else 0)
successful_attack.append(n_successful_attacks)
failed_attack.append(n_failed_attacks)
sum_attacks_norms.append(attack_sums)
nb_attacks_list.append(nb_attacks)
alg_names.append(alg_name)
biases.append(bias)
x_star_appeared_list.append(x_star_appeared)
target_arm_chosen_list.append(target_arm_chosen_count)
a_star_in_x_star_list.append(a_star_in_x_star)
logging.info(f"Iteration {t}, regret {regret_t}, reward{r_t}, norm error {norm_error}")
return {'iteration': iteration, "regret": regret, 'cumulative_regret': cumulative_regret, "rewards": rewards,
"norm_errors": norms, "attacks": attacks, 'target_arm_chosen': target_arm_chosen_list,
"relative_attack_norm": relative_attack_norm, 'contexts_norms': contexts_norms,
'successful_attack': ratio_successful_attacks, 'xi': adversarial_xi, 'biases': biases,
'attack_frequency': attack_frequency, 'sum_attacks_norms': sum_attacks_norms, 'weak_attack': weak_attack,
'x_star_appearances':x_star_appeared_list, 'a_star_in_x_star': a_star_in_x_star_list,
'method': method, 'sparse_attacks': sparse_attacks, "nb_attacks": nb_attacks_list,
'n_successful_attack': successful_attack, 'n_failed_attack': failed_attack,
'design_mat_worse_ratio': inv_design_worst_ratio, 'alg_names': alg_names}, simulator
def is_equal(context, x_star):
if x_star is None:
return False
return norm(context - x_star) < 1e-8
def compute_short_attack_linUCB(dimension, alg, a_star, x_star, slack=10 ** -5, relaxed=False):
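    """Compute a small-norm perturbation of `x_star` that makes LinUCB prefer arm `a_star`.

    Minimizes ||delta||_2 / 2 with SLSQP (starting from -x_star), with one
    constraint per arm a != a_star requiring the UCB index of a at
    (x_star + delta) to lie below the index of a_star by at least `slack`.
    When `relaxed` is True the confidence bonus of a_star is dropped from the
    constraints.  Falls back to a zero perturbation if the solver fails and
    returns (attack, attack_norm, success), where success simply means the
    returned perturbation is non-zero.
    """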
func = lambda delta: np.linalg.norm(delta)/2
theta_a_star = alg.thetas[a_star]
P_a_star = sqrtm(alg.inv_design_matrices[a_star])
betas = alg.alpha()
constraints_list = []
for a in range(len(alg.thetas)):
if a != a_star:
theta = alg.thetas[a]
P = sqrtm(alg.inv_design_matrices[a])
if not(relaxed):
temp_constraint = lambda delta, P=P, P_a_star=P_a_star, theta=theta, theta_a_star=theta_a_star, beta=betas[a], beta_a_star=betas[a_star]: \
-((theta - theta_a_star).dot((x_star + delta)) + beta * np.linalg.norm(P.dot((x_star + delta)))
- beta_a_star * np.linalg.norm(P_a_star.dot((x_star + delta))) + slack)
else:
temp_constraint = lambda delta, P=P, P_a_star=P_a_star, theta=theta, theta_a_star=theta_a_star, beta=betas[a], beta_a_star=betas[a_star]: \
-((theta - theta_a_star).dot((x_star + delta)) + beta * np.linalg.norm(P.dot((x_star + delta))) + slack)
temp_cons = {'type': 'ineq', 'fun': temp_constraint}
constraints_list.append(temp_cons)
cons = tuple(constraints_list)
res = minimize(func, -x_star, method='SLSQP', constraints=cons)
# print(res.message)
try:
epsilon = res.x
except KeyboardInterrupt:
raise
except:
epsilon = np.zeros((dimension,))
# print('Epsilon =', epsilon)
# for a in range(len(constraints_list)):
# theta = alg.thetas[a]
# P = sqrtm(alg.inv_design_matrices[a])
# print('Constraints for arm {}'.format(a), constraints_list[a]['fun'](x_star + epsilon))
if epsilon is None:
return np.zeros((dimension,)), 0, False
return epsilon, norm(epsilon), norm(epsilon) > 0
def compute_relaxed_attack(dimension, alg, a_star, x_star, slack=10 ** -5):
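    """Convex relaxation of the targeted LinUCB attack, solved with cvxpy/SCS.

    Minimizes ||delta||^2 / 2 subject to, for every arm a != a_star,
    (theta_a - theta_{a_star}) @ (x_star + delta)
        + beta_a * ||P_a (x_star + delta)||_2 + slack <= 0,
    i.e. the target arm dominates even when the other arms keep their
    confidence bonus while the target's bonus is ignored (hence "relaxed").
    Falls back to a zero perturbation if the solver raises an exception and
    returns (attack, attack_norm, attack_norm > 0).
    """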
delta = cp.Variable(dimension)
obj = cp.Minimize(cp.quad_form(delta, np.eye(dimension))/2)
theta_a_star = alg.thetas[a_star]
betas = alg.alpha()
constraints = []
for a in range(len(alg.thetas)):
if a != a_star:
theta = alg.thetas[a]
P = sqrtm(alg.inv_design_matrices[a])
temp_constraint = (theta - theta_a_star) @ (x_star + delta) + betas[a] * cp.norm2(P @ (x_star + delta))
constraints.append(temp_constraint + slack <= 0)
prob = cp.Problem(obj, constraints)
try:
out = prob.solve(solver='SCS', max_iters=10000,)
epsilon = delta.value
except KeyboardInterrupt:
raise
except:
print('Exception')
epsilon = np.zeros((dimension,))
if epsilon is None:
return np.zeros((dimension,)), 0, False
# if norm(epsilon > 0):
# margin = (theta - theta_a_star) @ (x_star + epsilon) + betas[a] * np.linalg.norm(np.dot(np.array(sqrtm(alg.inv_design_matrices[a])) ,(x_star + epsilon))) #np.sqrt(np.dot(x_star + epsilon, alg.inv_design_matrices[a] @ (x_star + epsilon)))
# # print(f'the margin is {margin}')
# if margin > 0 :
# print('the margin was negative, {}, norm eps {}, norm x {}'.format(out, norm(epsilon), norm(x_star)))
return epsilon, norm(epsilon), norm(epsilon) > 0
class TS_relaxed_attacks_calculator:
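    """Relaxed targeted attacks against LinTS.

    Precomputes a confidence constant from the algorithm's noise level, the
    horizon T, the context dimension and a fixed delta_zero = 0.95; this
    constant plays the role of the LinUCB exploration bonus in the relaxed
    program solved by `compute_relaxed_attack` below.
    """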
def __init__(self, simulator, alg, T):
delta_zero = 0.95
sigma = alg.sigma
nu = sigma * 3 * sqrt(simulator.n_features * log(T/delta_zero))
self.cste = nu * stats.norm.ppf(1 - delta_zero / (simulator.thetas.shape[0] -1))
def compute_relaxed_attack(self, dimension, alg, a_star, x_star, slack=10 ** -5):
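        """Same convex program as the module-level `compute_relaxed_attack`, except
        that the exploration term uses sqrtm of the sum of the two arms' inverse
        design matrices scaled by the precomputed constant, instead of the
        per-arm LinUCB bonuses.
        """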
delta = cp.Variable(dimension)
obj = cp.Minimize(cp.quad_form(delta, np.eye(dimension))/2)
theta_a_star = alg.thetas[a_star]
constraints = []
for a in range(len(alg.thetas)):
if a != a_star:
theta = alg.thetas[a]
P = sqrtm(alg.inv_design_matrices[a] + alg.inv_design_matrices[a_star])
temp_constraint = (theta - theta_a_star) @ (x_star + delta) + self.cste * cp.norm(P @ (x_star + delta))
constraints.append(temp_constraint + slack <= 0)
prob = cp.Problem(obj, constraints)
try:
prob.solve()#(feastol=1e-11, feastol_inacc=1e-11)
epsilon = delta.value
except KeyboardInterrupt:
raise
except:
print('Exception')
epsilon = np.zeros((dimension,))
# print('epsilon =', epsilon)
if epsilon is None:
return np.zeros((dimension,)), 0, False
return epsilon, norm(epsilon), norm(epsilon) > 0
def compute_strong_attack(adversarial_xi, alg, context, estimated_rewards, method, n_features, simulator, target_arm, x_star, attacker=None):
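    """Dispatch to the attack routine matching `method`.

    'linUCB_Relaxed' and 'linUCB_full' target LinUCB (cvxpy relaxation vs. full
    SLSQP program), 'TS_Relaxed' targets LinTS through the provided `attacker`,
    and 'quadprog' solves the exact QP on the point estimates (no confidence
    terms).  Every branch returns (attack, attack_norm, attack_succeeded); an
    infeasible quadprog problem is reported as a failed attack of infinite norm.
    """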
# worst_arm = np.argmin(estimated_rewards)
if method == 'linUCB_Relaxed':
alg.get_action(context)
attack, attack_norm, attack_succeeded = compute_relaxed_attack(simulator.n_features, alg, target_arm, context, slack=10 ** -9)
# attack, attack_norm, attack_succeeded = compute_short_attack_linUCB(simulator.n_features, alg, target_arm, x_star, slack=10 ** -10 , relaxed=False)
# if attack_succeeded:
# print(f'attack succeeded {attack_norm}')
# new_chosen = alg.get_action(context + attack, deterministic=True)
# if new_chosen != target_arm:
# new_context = context + attack
# print(f'algorithm chose arm {new_chosen} instead of {target_arm}')
# print(
# f'the scores were {alg.thetas[target_arm].dot(new_context) + alg.alpha()[target_arm] * np.sqrt(np.dot(new_context, np.dot(alg.inv_design_matrices[target_arm], new_context)))} vs {alg.thetas[new_chosen].dot(new_context) + alg.alpha()[new_chosen] * np.sqrt(np.dot(new_context, np.dot(alg.inv_design_matrices[new_chosen], new_context)))}, {norm(context+attack)}')
# print(
# f'with just attack the scores were {alg.thetas[target_arm].dot(attack)} vs {alg.thetas[new_chosen].dot(attack)}')
# # raise ValueError('Wrong chosen arm')
return attack, attack_norm, attack_succeeded
elif method == 'linUCB_full':
return compute_short_attack_linUCB(simulator.n_features, alg, target_arm, context, slack=10 ** -3, relaxed=False)
elif method == 'TS_Relaxed':
assert(attacker is not None), "Should pass an attacker to attack LinTS"
return attacker.compute_relaxed_attack(simulator.n_features, alg, target_arm, context, slack=10 ** -10)
elif method == 'quadprog':
try:
attack = quadprog_solve_qp(n_features, thetas=alg.thetas, arm_to_select=target_arm, context=context,
adversarial_xi=10**-5)
attack_norm = norm(attack)
attack_succeeded = True
except ValueError:
attack = 0
attack_norm = float('inf')
attack_succeeded = False
return attack, attack_norm, attack_succeeded
# elif method == 'heuristic':
# best_arm = np.argmax(estimated_rewards)
# attack, attack_norm = compute_attack_for_arm(worst_arm, best_arm, alg.thetas, estimated_rewards,
# adversarial_xi)
# attack_succeeded = alg.estimated_best_arm(context + attack) == worst_arm
else:
        assert False, f'Unknown method for targeted attacks: {method}'
return attack, attack_norm, attack_succeeded
def compute_long_term_attack(simulator, action, context, a_star, all_rewards, slack=10 ** -3, factor=2):
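    """Rescaling attack used by the sparse ("long term") attack scheme.

    When the algorithm would not pick the target arm `a_star`, the context is
    inflated by delta = max(factor / min(all_rewards[:, a_star]), 1) and the
    returned perturbation is (delta - 1) * context, so that the attacked
    context equals delta * context.  Otherwise a zero vector is returned.
    """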
if action != a_star: # and np.linalg.norm(context - x_star) < 10**-5:
# worst_ratio1 = (all_rewards[:, action] / all_rewards[:, a_star]).max()
worst_ratio = 1 / all_rewards[:, a_star].min()
# print(worst_ratio1, worst_ratio)
# assert(worst_ratio1 <= worst_ratio), 'there is a reward that is greater than 1'
delta = factor * worst_ratio
# print(f'delta: {delta}')
delta = max(delta, 1)
# delta = np.maximum(2*np.dot(model.thetas[action], x_star)/np.dot(model.thetas[a_star], x_star), 1)
epsilon = (delta - 1) * context
return epsilon
else:
return np.zeros((simulator.n_features,))
def compute_weak_attack(adversarial_xi, alg, best_arm, context, estimated_rewards, nb_arms):
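    """Untargeted ("weak") attack: make some arm other than the current best look optimal.

    Builds one candidate perturbation per non-best arm with
    `compute_attack_for_arm`, keeps the candidate of smallest norm, and declares
    success if the algorithm's estimated best arm changes on the attacked context.
    """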
possible_attacks = [compute_attack_for_arm(arm, best_arm, alg.thetas, estimated_rewards, adversarial_xi) for arm
in range(nb_arms) if arm != best_arm]
attack, attack_norm = possible_attacks[np.argmin([att[1] for att in possible_attacks])]
attack_succeeded = alg.estimated_best_arm(context + attack) != alg.estimated_best_arm(context)
return attack, attack_norm, attack_succeeded
def norm(vector):
return np.linalg.norm(vector, 2)
def compute_attack_for_arm(chosen_arm, best_arm, thetas, estimated_rewards, adversarial_xi):
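    """Closed-form perturbation making `chosen_arm` look better than `best_arm`.

    The attack is aligned with (theta_chosen - theta_best) and scaled so that
    the estimated reward gap is reversed with an extra margin `adversarial_xi`;
    when the two parameter vectors coincide, (0, inf) is returned to mark the
    attack as impossible.
    """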
attack_direction = thetas[chosen_arm] - thetas[best_arm]
    direction_norm_sq = attack_direction.dot(attack_direction)
    if direction_norm_sq == 0:
        return 0, float('inf')
    attack_norm = (estimated_rewards[best_arm] - estimated_rewards[chosen_arm] + adversarial_xi) / direction_norm_sq
attack = attack_norm * attack_direction
return attack, attack_norm
def generate_context(n_features, low=-3, high=3):
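    """Draw a random context: a standard Gaussian direction normalized to unit
    norm, then scaled by a single uniform draw in [low, high]."""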
context = np.random.randn(n_features)
context = np.random.uniform(low=low, high=high) * context / np.linalg.norm(context)
return context
def quadprog_solve_qp(n_features, thetas, arm_to_select, context, adversarial_xi):
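    """Exact targeted attack for a greedy learner, solved with quadprog.

    Solves min 1/2 ||delta||^2 subject to
    (theta_a - theta_{arm_to_select}) @ (context + delta) <= -adversarial_xi
    for all a != arm_to_select, so that the target arm strictly dominates every
    other arm on the attacked context.  quadprog raises ValueError when the
    problem is infeasible.
    """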
qp_G = np.identity(n_features) # make sure P is symmetric
qp_a = np.zeros_like(context)
# no equality constraint
constraints_lines = np.delete(thetas - thetas[arm_to_select], arm_to_select, axis=0)
qp_C = - constraints_lines.T
qp_b = constraints_lines.dot(context) + adversarial_xi
meq = 0
return quadprog.solve_qp(qp_G, qp_a, qp_C, qp_b, meq)[0]
# if __name__ == '__main__':
#
# PARALLEL = True
# print("PARALLEL: {}".format(PARALLEL))
#
# MABResults = namedtuple('MABResults', 'regret, cum_rewards, norm_errors')
#
# random_state = np.random.randint(0, 123123)
# np.random.seed(random_state)
# local_random = np.random.RandomState(random_state)
#
# print("seed: {}".format(random_state))
#
# K = 10
# n_features = 30
# a_noise = 0.1
#
# T = 5 * 10 ** 6 # horizon
# nb_simu = 15 # 48 * 5 #240
# adversarial_xi = 0.0001
#
# attack_frequencies = [1.0, 0.0] # [1.0, 'decrease_sqrt', 0]
# algo_names = ['LinUCB', 'eps-greedy', 'LinTS']
# weak_attacks_list = [False] # [False, True] #
# methods_to_test = [None] # ['quadprog', 'heuristic']
# sparse_factors = [2.0]
# results = []
#
# sparse_attacks = None
# movielens = True
# jester = False
# dataset_model = movielens or jester
# assert(not(movielens and jester)), "cannot use both movielens and jester"
# if dataset_model:
# if movielens:
# simulator = DatasetModel(os.path.abspath('examples/movielens/Vt_movielens.csv'), user_csvfile=os.path.abspath("examples/movielens/U.csv"), arms_limit=100)
# elif jester:
# simulator = DatasetModel(os.path.abspath("examples/jester/Vt_jester.csv"), user_csvfile=os.path.abspath('examples/jester/U.csv'))
# else:
# print('Issue, should use a dataset that isn\'t jester or movielens')
# exit(0)
# # target_context = np.random.randint(low=0, high=len(simulator.context_lists))
# # x_star = simulator.context_lists[target_context]
# means_x_star = np.dot(simulator.context_lists, simulator.thetas.T).mean(axis=0)
# target_arm = np.argmin(means_x_star)
# else:
# simulator = None
# target_arm = None
#
# settings = {
# "T": T,
# "nb_simu": nb_simu,
# "random_state": random_state,
# "K": simulator.n_actions if simulator else K,
# "dimension": simulator.n_features if simulator else n_features,
# 'attack_frequencies': attack_frequencies,
# 'weak_attacks': weak_attacks_list,
# 'methods_to_test': methods_to_test,
# 'adversarial_xi': adversarial_xi,
# 'sparse_factors': sparse_factors,
# 'target_arm': target_arm,
# }
# weak_attack=False
# method=None
# if PARALLEL:
# import multiprocessing
# work_to_be_done = []
# # for attack_frequency in attack_frequencies:
# # sparse_factors_to_test = sparse_factors if attack_frequency != 0 else [False]
# # for sparse_attacks in sparse_factors_to_test:
# # method = None
# # weak_attack = False
# # for weak_attack in weak_attacks_list if attack_frequency else [True]:
# # methods_to_test_list = methods_to_test if not weak_attack and attack_frequency != 0 else [
# # 'quadprog']
# # for method in methods_to_test_list:
# # for xi in adversarial_xi:
#
# for alg_name in algo_names:
# for sim_index in range(nb_simu):
# work_to_be_done.append(
# (attack_frequency, sparse_attacks, weak_attack, method, adversarial_xi, sim_index, alg_name))
#
# for sim_index in range(nb_simu):
# # work_to_be_done.append((0.2, 10, False, 'quadprog', xi, sim_index))
# work_to_be_done.append((0.2, 10, False, 'quadprog', adversarial_xi[0], sim_index))
# settings['work_list'] = work_to_be_done
# num_cores = multiprocessing.cpu_count()
# results.append(Parallel(n_jobs=num_cores, verbose=1)(
# delayed(work)(nb_arms=K, noise=a_noise, n_features=n_features, T=T, random_state=random_state + sim_index,
# attack_frequency=attack_frequency,alg_name=alg_name,
# weak_attack=weak_attack, adversarial_xi=xi, method=method,
# sparse_attacks=sparse_attacks, simulator=simulator, target_arm=target_arm) for
# attack_frequency, sparse_attacks, weak_attack, method, xi, sim_index, alg_name in work_to_be_done))
# else:
# # for decrease_epsilon in [True, False]:
# for attack_frequency in [0]: # [1.0,0.1, 0]:
# weak_attack = False
# for k in tqdm(range(nb_simu)):
# ret = work(nb_arms=K, noise=a_noise, n_features=n_features, T=T, random_state=random_state + k,
# attack_frequency=attack_frequency,
# weak_attack=weak_attack)
# results.append(ret)
#
#
# id = '{}_{:%Y%m%d_%H%M%S}_{}'.format('jester' if jester else 'movilens' if movielens else 'simulation', datetime.datetime.now(), '_Contextual_linear')
# pickle_name = "{}_{}_linear_results.pickle".format(id, "PAR" if PARALLEL else "SEQ")
# print(pickle_name)
# with open(pickle_name, "wb") as f:
# pickle.dump(results, f)
# # with open("{}_{}_linear_settings.json".format(id, "PAR" if PARALLEL else "SEQ"), "w+") as f:
# # json.dump(settings, f)
| ContextualBanditsAttacks-main | examples/linear_contextual_bandit.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
from isoexp.linear.linearbandit import EfficientLinearBandit, LinearBandit, LinPHE
from isoexp.conservative.linearmabs import EfficientConservativeLinearBandit, SafetySetCLUCB
from isoexp.linear.linearmab_models import RandomLinearArms, DiffLinearArms, OtherArms, CircleBaseline, LinPHEModel
from matplotlib import rc
from joblib import Parallel, delayed
from isoexp.linear.coldstart import ColdStartFromDatasetModel
import os
rc('text', usetex=True)
import matplotlib.pyplot as plt
from tqdm import tqdm
from collections import namedtuple
MABResults = namedtuple('MABResults', 'regret,norm_error, cum_rewards')
random_state = np.random.randint(0, 123123)
NOISE = 0.1
#model = RandomLinearArms(n_actions=300, n_features=100, noise=NOISE, bound_features = 5, bound_theta = 3)
model = ColdStartFromDatasetModel(csvfile=os.path.abspath('jester/Vt_jester.csv'), noise=NOISE)
theta_bound = np.linalg.norm(model.theta, 2)
means = np.dot(model.features, model.theta)
print(means)
idxs = np.argsort(means)
#baseline = np.random.randint(0, model.n_actions - 1)
baseline = idxs[-5]
mean_baseline = means[baseline]
optimal_arm = np.argmax(means)
PARALLEL = False
n_a = model.n_actions
d = model.n_features
T = 20000
batch_horizon = int(T*0.2)
nb_simu = 10
alpha = 0.1
algorithms = {
'EfficientLinearBandit': EfficientLinearBandit(arm_features=model.features,
reg_factor=1.,
delta=0.01,
noise_variance=NOISE,
bound_theta=theta_bound)
}
conservative_algorithms = {
# 'CLUCB-new': EfficientConservativeLinearBandit(model.features, baseline, mean_baseline,
# bound_theta=theta_bound, noise_variance=NOISE,
# reg_factor=1., delta=0.01, conservative_level=alpha,
# version = 'new', oracle = False, means = means,
# batched = False, check_every = batch_horizon, positive = True),
# 'CLUCB-old': EfficientConservativeLinearBandit(model.features, baseline, mean_baseline,
# bound_theta=theta_bound, noise_variance=NOISE,
# reg_factor=1., delta=0.01, conservative_level=alpha,
# version = 'old', oracle = False, means = means,
# batched = False, check_every = batch_horizon, positive = True),
# 'SafetySet-Old' : SafetySetCLUCB(model.features, baseline, mean_baseline,
# bound_theta=theta_bound, noise_variance=NOISE,
# reg_factor=1., delta=0.01, conservative_level=alpha,
# version = 'old', batched = False, check_every = batch_horizon, positive = True, verbose = False),
# 'SafetySet-new' : SafetySetCLUCB(model.features, baseline, mean_baseline,
# bound_theta=theta_bound, noise_variance=NOISE,
# reg_factor=1., delta=0.01, conservative_level=alpha,
# version = 'new', oracle = False, batched = False, check_every = batch_horizon, means = means,
# verbose = False, positive = True)
}
algorithms = {**algorithms, **conservative_algorithms}
if PARALLEL:
import multiprocessing
num_cores = multiprocessing.cpu_count()
def work(alg_name, alg):
regret = np.zeros((nb_simu, T))
norms = np.zeros((nb_simu, T))
cond = regret.copy()
for k in tqdm(range(nb_simu)):
alg.reset()
for t in range(T):
a_t = alg.get_action()
# print(a_t)
r_t = model.reward(a_t)
alg.update(a_t, r_t)
cond[k, t] = means[a_t] - (1-alpha)*mean_baseline
regret[k, t] = model.best_arm_reward() - r_t
if hasattr(alg, 'theta_hat'):
norms[k, t] = np.linalg.norm(alg.theta_hat - model.theta, 2)
# results[alg_name] = \
return alg_name, MABResults(regret=regret, norm_error=norms, cum_rewards = cond)
results = Parallel(n_jobs=num_cores, verbose=1)(
delayed(work)(alg_name, algorithms[alg_name]) for alg_name in algorithms.keys())
else:
from tqdm import trange
results = []
for alg_name in algorithms.keys():
regret = np.zeros((nb_simu, T))
norms = np.zeros((nb_simu, T))
cond = np.zeros((nb_simu, T))
nb = 0
draws = 0*regret
for k in tqdm(range(nb_simu), desc='Simulating {}'.format(alg_name)):
alg = algorithms[alg_name]
alg.reset()
for t in trange(T):
a_t = alg.get_action()
r_t = model.reward(a_t)
cond[k, t] = means[a_t] - (1-alpha)*mean_baseline
alg.update(a_t, r_t)
draws[k,t] = a_t
if a_t == baseline:
nb += 1
regret[k, t] = model.best_arm_reward() - r_t
if hasattr(alg, 'theta_hat'):
norms[k, t] = np.linalg.norm(alg.theta_hat - model.theta, 2)
results += [(alg_name, MABResults(regret=regret, norm_error=norms, cum_rewards=cond.cumsum(axis = 1)))]
#%%
plt.figure(1, figsize=(10, 10))
plt.figure(2, figsize=(10, 10))
for alg_name, val in results :
temp = val.regret
temp = temp.cumsum(axis = 1)
mean_regret = np.mean(temp, axis=0)
mean_norms = np.mean(val.norm_error, axis=0)
low_quantile = np.quantile(temp, 0.000, axis=0)
high_quantile = np.quantile(temp, 1, axis=0)
condition_satisfied = np.mean(val.cum_rewards, axis=0)
low_quantile_condition = np.quantile(val.cum_rewards, 0.25, axis=0)
high_quantile_condition = np.quantile(val.cum_rewards, 0.75, axis=0)
t = np.linspace(0, T-1, T, dtype='int')
# plt.subplot(131)
# # plt.plot(mean_norms, label=alg_name)
# plt.plot(mean_regret.cumsum() / (np.arange(len(mean_regret)) + 1), label=alg_name)
# plt.fill_between(t, low_quantile.cumsum() / (np.arange(len(mean_regret)) + 1),
# high_quantile.cumsum() / (np.arange(len(mean_regret)) + 1), alpha=0.15)
plt.figure(1)
print('mean_regret')
print(alg_name, ' = ', mean_regret[-1])
plt.fill_between(t, low_quantile, high_quantile, alpha = 0.15)
plt.plot(mean_regret, label=alg_name)
plt.title('d = {}'.format(model.n_features))
plt.figure(2)
print(alg_name, '= ', min(condition_satisfied.cumsum()))
print('-'*100)
# plt.plot(condition_satisfied, label=alg_name)
plt.title('d = {}'.format(model.n_features))
# plt.fill_between(t, low_quantile_condition, high_quantile_condition, alpha = 0.15)
if alg_name != 'EfficientLinearBandit':
plt.plot(condition_satisfied.cumsum()[:200], label=alg_name)
plt.fill_between(t[:200], low_quantile_condition.cumsum()[:200], high_quantile_condition.cumsum()[:200], alpha = 0.15)
#ax = plt.subplot(131)
## plt.ylabel(r'$\|\hat{\theta} - \theta\|_{2}$')
#plt.ylabel(r'$R_t / t$')
#plt.xlabel("Rounds")
## # Turn off tick labels
## ax.set_yticklabels([])
## ax.set_xticklabels([])
#plt.legend()
#
#ax = plt.subplot(132)
#plt.ylabel("Cumulative Regret")
#plt.xlabel("Rounds")
## # Turn off tick labels
## ax.set_yticklabels([])
## ax.set_xticklabels([])
#plt.legend()
#
##ax = plt.subplot(223)
##plt.title('Model')
##plt.scatter(model.features[:, 0], model.features[:, 1])
##optimal_arm = np.argmax(means)
##plt.scatter([model.features[optimal_arm, 0]], [model.features[optimal_arm, 1]], color='red', label='Optimal arm')
##plt.scatter([model.features[baseline, 0]], [model.features[baseline, 1]], color='cyan', label='Baseline arm')
##plt.scatter([model.theta[0]], [model.theta[1]], color='yellow', label='Theta')
### # Turn off tick labels
### ax.set_yticklabels([])
### ax.set_xticklabels([])
##plt.legend()
#
#ax = plt.subplot(133)
#plt.ylabel("Margin")
#plt.xlabel("Rounds")
# # Turn off tick labels
# ax.set_yticklabels([])
# ax.set_xticklabels([])
plt.figure(1)
plt.legend()
#plt.savefig("model_random_{}_{}_seed_{}.png".format(alpha, model.n_actions, random_state))
plt.show()
| ContextualBanditsAttacks-main | examples/main_linearmab.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import datetime
import json
import os
import pickle
import sys
from collections import namedtuple
import numpy as np
from joblib import Parallel, delayed
from tqdm import tqdm
sys.path.append('/private/home/broz/workspaces/bandits_attacks')
from isoexp.contextual.contextual_models import DatasetModel, RandomContextualLinearArms
from examples.linear_contextual_bandit import work
from scipy.optimize import minimize, linprog
"""
TEST Linear Bandit
"""
def in_hull(points, x):
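    """Check whether `x` lies in the convex hull of the rows of `points`,
    via an LP feasibility problem (nonnegative weights summing to one)."""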
n_points = len(points)
n_dim = len(x)
c = np.zeros(n_points)
A = np.r_[points.T, np.ones((1, n_points))]
b = np.r_[x, np.ones(1)]
lp = linprog(c, A_eq=A, b_eq=b)
return lp.success
if __name__ == '__main__':
PARALLEL = True
print("PARALLEL: {}".format(PARALLEL))
MABResults = namedtuple('MABResults', 'regret, cum_rewards, norm_errors')
random_state = np.random.randint(0, 123123)
np.random.seed(random_state)
local_random = np.random.RandomState(random_state)
print("seed: {}".format(random_state))
K = 10
n_features = 30
nb_arms=10
a_noise = 0.1
la = 1. / 2.
delta = 0.99
reg_factor = 0.1
just_a_test= True
if just_a_test:
T = 5 * 10 ** 4 # horizon
nb_simu = 5 # 48 * 5 #240
else:
T = int(1 * 10 ** 6) # horizon
nb_simu = 40 # 48 * 5 #240
adversarial_xi = 0.0001
noise=0.1
attack_frequencies = ['target_arm', 0.0] # [1.0, 'decrease_sqrt', 0]
algo_names = ['LinUCB', 'eps_greedy', 'LinTS']
weak_attacks_list = [False] # [False, True] #
methods_to_test = [None] # ['quadprog', 'heuristic']
sparse_factors = [None]
results = []
decrease_epsilon = True
movielens = True
jester = False
dataset_model = movielens or jester
assert(not(movielens and jester)), "cannot use both movielens and jester"
if dataset_model:
if movielens:
simulator = DatasetModel(os.path.abspath('examples/movielens/Vt_movielens.csv'), user_csvfile=os.path.abspath("examples/movielens/U.csv"), arms_limit=25, noise=noise, context_limit=100)
elif jester:
simulator = DatasetModel(os.path.abspath("examples/jester/Vt_jester.csv"), user_csvfile=os.path.abspath('examples/jester/U.csv'), noise=noise, context_limit=100)
else:
print('Issue, should use a dataset that isn\'t jester or movielens')
exit(0)
else:
simulator = RandomContextualLinearArms(n_actions=nb_arms, n_features=n_features, noise=noise, bound_context=1)
# target_context = np.random.randint(low=0, high=len(simulator.context_lists))
# x_star = simulator.context_lists[target_context]
# means_x_star = np.dot(simulator.thetas, x_star)
target_context = np.random.randint(low=0, high=len(simulator.context_lists))
x_star = simulator.context_lists[target_context]
means_x_star = np.dot(simulator.thetas, x_star)
target_arm = np.argmin(means_x_star)
method= 'linUCB_Relaxed'
settings = {
"T": T,
'models': algo_names,
"nb_simu": nb_simu,
"random_state": random_state,
"K": simulator.n_actions if simulator else K,
"dimension": simulator.n_features if simulator else n_features,
'attack_frequencies': attack_frequencies,
'weak_attacks': weak_attacks_list,
'methods_to_test': methods_to_test,
'adversarial_xi': adversarial_xi,
'sparse_factors': sparse_factors,
'target_arm': target_arm,
}
weak_attack=False
    dataset_type = 'jester' if jester else 'movielens' if movielens else 'simulation'
print(f'running on {dataset_type}')
mask = np.ones(simulator.n_actions, dtype='int')
mask[target_arm] = 0
print(in_hull(x=simulator.thetas[target_arm], points=np.array(simulator.thetas[mask])))
if in_hull(x=simulator.thetas[target_arm], points=np.array(simulator.thetas[mask])):
raise ValueError()
if PARALLEL:
import multiprocessing
work_to_be_done = []
for alg_name in algo_names:
for attack_frequency in attack_frequencies:
for sparse_attacks in sparse_factors:
for sim_index in range(nb_simu):
                        method_for_alg = ('quadprog' if alg_name == 'eps_greedy'
                                          else 'linUCB_Relaxed' if alg_name == 'LinUCB'
                                          else 'TS_Relaxed' if alg_name == 'LinTS'
                                          else None)
                        work_to_be_done.append((attack_frequency, False, weak_attack, method_for_alg,
                                                adversarial_xi, sim_index, alg_name, x_star))
# for sim_index in range(nb_simu):
# # work_to_be_done.append((0.2, 10, False, 'quadprog', xi, sim_index))
# work_to_be_done.append((0.2, 10, False, 'quadprog', adversarial_xi[0], sim_index))
settings['work_list'] = work_to_be_done
num_cores = multiprocessing.cpu_count()
results.append(Parallel(n_jobs=num_cores, verbose=1)(
delayed(work)(nb_arms=K, noise=a_noise, n_features=n_features, T=T, random_state=random_state + sim_index,
attack_frequency=attack_frequency,alg_name=alg_name,
weak_attack=weak_attack, adversarial_xi=xi, method=method,
sparse_attacks=sparse_attacks, simulator=simulator, target_arm=target_arm, x_star=x_star) for
attack_frequency, sparse_attacks, weak_attack, method, xi, sim_index, alg_name, x_star in work_to_be_done))
else:
# for decrease_epsilon in [True, False]:
for attack_frequency in [0]: # [1.0,0.1, 0]:
weak_attack = False
for k in tqdm(range(nb_simu)):
ret = work(nb_arms=K, noise=a_noise, n_features=n_features, T=T, random_state=random_state + k,
attack_frequency=attack_frequency,
weak_attack=weak_attack)
results.append(ret)
id = '{}_{:%Y%m%d_%H%M%S}_{}_alg{}{}'.format(dataset_type, datetime.datetime.now(), 'linear_one_context', algo_names, '_test' if just_a_test else '')
pickle_name = "{}_{}_linear_results.pickle".format(id, "PAR" if PARALLEL else "SEQ")
print(pickle_name)
with open(pickle_name, "wb") as f:
pickle.dump(results, f)
with open("{}_{}_linear_settings.json".format(id, "PAR" if PARALLEL else "SEQ"), "w+") as f:
json.dump(settings, f)
| ContextualBanditsAttacks-main | examples/experiment_one_context.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import isoexp.mab.arms as arms
import pickle
from isoexp.mab.smab_algs import UCB1, UCBV, BootstrapedUCB, PHE, Random_exploration
from isoexp.conservative.mab import CUCBV, SafetySetUCBV, powerful_oracle, CBUCB, CUCB, CPHE
from matplotlib import rc
import json
import datetime
rc('text', usetex=True)
import matplotlib.pyplot as plt
from tqdm import tqdm
from collections import namedtuple
MABResults = namedtuple('MABResults', 'regret, cum_rewards')
random_state = np.random.randint(0, 123123)
random_state = 117060
K = 10
MAB = []
means = np.random.uniform(low = 0.25, high = 0.75, size = K)
means = np.array([0.47823152, 0.70243227, 0.64504063, 0.65679234, 0.49546542,
0.46417188, 0.64736977, 0.71255566, 0.66844984, 0.26030838])
for k in range(K) :
MAB.append(arms.ArmBernoulli(p = means[k]))
nb_arms = len(MAB)
print('means: {}'.format(means))
mu_max = np.max(means)
T = 10000# horizon
nb_simu = int(np.sqrt(T))
#Define baseline
pos = 3
baseline = np.argsort(means)[pos]
mean_baseline = MAB[baseline].mean
#Batch Version
conservative_level = 0.1
check_every = 2*np.sqrt(T)
settings = {
"T": T,
"nb_simu": nb_simu,
"random_state": random_state,
"K": K,
"baseline": pos,
"conservative_levels": conservative_level,
}
algorithms = {
'UCB': lambda T, MAB: UCB1(T, MAB, alpha=1),
# 'UCBV': lambda T, MAB: UCBV(T, MAB),
# 'BootstrapUCB' : lambda T, MAB: BootstrapedUCB(T, MAB, delta = 0.1, b_rep = 200),
'PHE' : lambda T, MAB : PHE(T, MAB, alpha =2),
# 'RE' : lambda T, MAB : Random_exploration(T, MAB, alpha = 3, verbose = False)
}
conservative_algorithms = {
'CUCB' : lambda T, MAB : CUCB(T, MAB, baseline, mean_baseline, conservative_level= conservative_level, oracle = False, version = 'old', batched = False, check_every = check_every, alpha = 1),
'Oracle UCB' : lambda T, MAB : CUCB(T, MAB, baseline, mean_baseline, conservative_level= conservative_level, oracle = True, version = 'old', batched = False, check_every = check_every, alpha = 1),
# 'CUCB-new' : lambda T, MAB : CUCB(T, MAB, baseline, mean_baseline, conservative_level= conservative_level, oracle = False, version = 'new', batched = False, check_every = check_every, alpha = 1),
# 'CPHE-new' : lambda T, MAB : CPHE(T, MAB, baseline, mean_baseline, conservative_level = conservative_level, param_a1 = 2, version = 'new', batched = False, check_every = check_every),
'CPHE' : lambda T, MAB : CPHE(T, MAB, baseline, mean_baseline, conservative_level = conservative_level, param_a1 = 2, version = 'old', batched = False, check_every = None),
# 'CPHE-oracle' : lambda T, MAB : CPHE(T, MAB, baseline, mean_baseline, conservative_level = conservative_level, param_a1 = 2, oracle = True),
#'SafetySetUCBV-old' : lambda T, MAB : SafetySetUCBV(T, MAB, baseline, mean_baseline, alpha=1., conservative_level= conservative_level, version ='old'),
#'SafetySetUCBV-new' : lambda T, MAB : SafetySetUCBV(T, MAB, baseline, mean_baseline, alpha=1., conservative_level= conservative_level, version = 'new')
}
results = []
full_algo = {**algorithms, **conservative_algorithms}
for alg_name in full_algo.keys():
alg = full_algo[alg_name]
regret = np.zeros((nb_simu, T))
rwds = 0*regret
for k in tqdm(range(nb_simu), desc='Simulating {}'.format(alg_name)):
if alg_name in ['SafetySetUCBV-old', 'SafetySetUCBV-new'] :
rewards, draws, safe = alg(T, MAB)
else :
rewards, draws = alg(T, MAB)
regret[k] = max(means) * np.arange(1, T + 1) - np.cumsum(rewards)
rwds[k] = np.cumsum(means[draws.astype('int')])
results += [(alg_name, MABResults(regret=regret, cum_rewards= rwds))]
id = '{:%Y%m%d_%H%M%S}_{}'.format(datetime.datetime.now(), 'GLM')
with open("{}_{}_MAB_illustration.pickle".format(id, "SEQ"), "wb") as f:
pickle.dump(results, f)
with open("{}_{}_MAB_illustration_settings.json".format(id, "SEQ"), "w+") as f:
json.dump(settings, f)
#%%
#plt.figure(1,figsize=(10, 10))
plt.figure(2,figsize=(10, 10))
t = np.arange(1, T+1)
for alg_name, val in results:
mean_regret = np.mean(val.regret, axis=0)
low_quantile = np.quantile(val.regret, 0.25, axis=0)
high_quantile = np.quantile(val.regret, 0.75, axis=0)
rwds = np.mean(val.cum_rewards, axis = 0)
low_quantile_rwds = np.quantile(val.cum_rewards, 0.25, axis=0)
high_quantile_rwds = np.quantile(val.cum_rewards, 0.75, axis=0)
#
# plt.figure(1)
# plt.title('Margin')
# temp = rwds - (1- conservative_level)*t*mean_baseline
# plt.plot(temp[:200], label = alg_name)
# plt.legend()
# plt.fill_between(t, low_quantile_rwds - (1- conservative_level)*t*mean_baseline, high_quantile_rwds - (1- conservative_level)*t*mean_baseline, alpha = 0.15)
print(alg_name, '=', min(rwds - (1- conservative_level)*t*mean_baseline))
plt.figure(2)
plt.title('Regret')
plt.plot(mean_regret, label=alg_name)
plt.legend()
plt.fill_between(t, low_quantile, high_quantile, alpha = 0.15)
plt.show()
#import tikzplotlib
#tikzplotlib.save("lcb_worst.tex")
| ContextualBanditsAttacks-main | examples/main_mab.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import datetime
import json
import os
import pickle
import sys
from collections import namedtuple
import numpy as np
from joblib import Parallel, delayed
from tqdm import tqdm
sys.path.append('/private/home/broz/workspaces/bandits_attacks')
from isoexp.contextual.contextual_models import DatasetModel, RandomContextualLinearArms
from examples.linear_contextual_bandit import work
"""
TEST Linear Bandit
"""
if __name__ == '__main__':
PARALLEL = True
print("PARALLEL: {}".format(PARALLEL))
MABResults = namedtuple('MABResults', 'regret, cum_rewards, norm_errors')
random_state = np.random.randint(0, 123123)
np.random.seed(random_state)
local_random = np.random.RandomState(random_state)
print("seed: {}".format(random_state))
K = 10
n_features = 30
nb_arms=10
a_noise = 0.1
la = 1. / 2.
delta = 0.99
reg_factor = 0.1
just_a_test = False
if just_a_test:
T = 1 * 10 ** 4 # horizon
nb_simu = 5 # 48 * 5 #240
else:
T = 1 * 10 ** 6 # horizon
nb_simu = 20 # 48 * 5 #240
adversarial_xi = 0.0001
noise=0.1
attack_frequencies = [1.0, 0.2, 0.0] # [1.0, 'decrease_sqrt', 0]
algo_names = ['LinUCB', 'eps_greedy', 'LinTS']
weak_attacks_list = [False] # [False, True] #
methods_to_test = [None] # ['quadprog', 'heuristic']
sparse_factors = [2.0]
results = []
decrease_epsilon = True
seed = 1
movielens = False
jester = False
dataset_model = movielens or jester
assert(not(movielens and jester)), "cannot use both movielens and jester"
if dataset_model:
if movielens:
simulator = DatasetModel(os.path.abspath('examples/movielens/Vt_movielens.csv'), user_csvfile=os.path.abspath("examples/movielens/U.csv"), arms_limit=25, noise=noise)
elif jester:
simulator = DatasetModel(os.path.abspath("examples/jester/Vt_jester.csv"), user_csvfile=os.path.abspath('examples/jester/U.csv'), noise=noise)
else:
print('Issue, should use a dataset that isn\'t jester or movielens')
exit(0)
else:
simulator = RandomContextualLinearArms(n_actions=nb_arms, n_features=n_features, noise=noise, random_state=seed, bound_context=1)
# target_context = np.random.randint(low=0, high=len(simulator.context_lists))
# x_star = simulator.context_lists[target_context]
# means_x_star = np.dot(simulator.thetas, x_star)
# target_context = np.random.randint(low=0, high=len(simulator.context_lists))
# x_star = simulator.context_lists[target_context]
means_x_star = np.dot(simulator.context_lists, simulator.thetas.T).mean(axis=0)
target_arm = np.argmin(means_x_star)
settings = {
"T": T,
'models': algo_names,
"nb_simu": nb_simu,
"random_state": random_state,
"K": simulator.n_actions if simulator else K,
"dimension": simulator.n_features if simulator else n_features,
'attack_frequencies': attack_frequencies,
'weak_attacks': weak_attacks_list,
'methods_to_test': methods_to_test,
'adversarial_xi': adversarial_xi,
'sparse_factors': sparse_factors,
'target_arm': target_arm,
}
weak_attack=False
method=None
    dataset_type = 'jester' if jester else 'movielens' if movielens else 'simulation'
print(f'running on {dataset_type}')
if PARALLEL:
import multiprocessing
work_to_be_done = []
for alg_name in algo_names:
for attack_frequency in attack_frequencies:
for sparse_attacks in sparse_factors:
for sim_index in range(nb_simu):
                        scaled_sparse = sparse_attacks / attack_frequency if attack_frequency > 0 else 0
                        work_to_be_done.append((attack_frequency, scaled_sparse, weak_attack, method,
                                                adversarial_xi, sim_index, alg_name))
# for sim_index in range(nb_simu):
# # work_to_be_done.append((0.2, 10, False, 'quadprog', xi, sim_index))
# work_to_be_done.append((0.2, 10, False, 'quadprog', adversarial_xi[0], sim_index))
settings['work_list'] = work_to_be_done
num_cores = multiprocessing.cpu_count()
results.append(Parallel(n_jobs=num_cores, verbose=1)(
delayed(work)(nb_arms=K, noise=a_noise, n_features=n_features, T=T, random_state=random_state + sim_index,
attack_frequency=attack_frequency,alg_name=alg_name,
weak_attack=weak_attack, adversarial_xi=xi, method=method,
sparse_attacks=sparse_attacks, simulator=simulator, target_arm=target_arm) for
attack_frequency, sparse_attacks, weak_attack, method, xi, sim_index, alg_name in work_to_be_done))
else:
# for decrease_epsilon in [True, False]:
for attack_frequency in [0]: # [1.0,0.1, 0]:
weak_attack = False
for k in tqdm(range(nb_simu)):
ret = work(nb_arms=K, noise=a_noise, n_features=n_features, T=T, random_state=random_state + k,
attack_frequency=attack_frequency,
weak_attack=weak_attack)
results.append(ret)
    id = '{}_{:%Y%m%d_%H%M%S}_{}_alg{}'.format(dataset_type, datetime.datetime.now(), '_Contextual_linear_all_contexts', algo_names)
pickle_name = "{}_{}_linear_results.pickle".format(id, "PAR" if PARALLEL else "SEQ")
print(pickle_name)
with open(pickle_name, "wb") as f:
pickle.dump(results, f)
with open("{}_{}_linear_settings.json".format(id, "PAR" if PARALLEL else "SEQ"), "w+") as f:
json.dump(settings, f)
| ContextualBanditsAttacks-main | examples/experiment_all_contexts.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pickle
import tikzplotlib
import os
import shutil
import sys
from cycler import cycler
import tarfile
import json
ALPHA = 0.05
def tardir(path, tar_name):
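    """Recursively add every file under `path` to the gzipped tar archive `tar_name`."""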
with tarfile.open(tar_name, "w:gz") as tar_handle:
for root, dirs, files in os.walk(path):
for file in files:
tar_handle.add(os.path.join(root, file))
n = 9 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--', ':', '-.', '-'])
plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
if len(sys.argv) == 2:
filename = sys.argv[1]
else:
    # filename = '20190822_065452_Bernoulli_PAR_martingale_results.pickle'
    # filename = '20190901_124136_linear_PAR_linear_results.pickle'
    filename = '20190902_135139_linear_PAR_linear_results.pickle'
with open(filename, 'rb') as f:
results = pickle.load(f)
print("Opening file %s..." % filename)
setting_name = filename[:-14] + 'settings.json'
print('Opening settings %s...' % setting_name)
with open(setting_name, 'r') as f:
settings = json.load(f)
folder = filename.split('.')[0]
if os.path.exists(folder):
shutil.rmtree(folder)
os.makedirs(folder)
print("Done.\n")
with open(os.path.join(folder, setting_name), 'w') as f:
json.dump(settings, f)
EVERY = 200
LW = 2
LATEX = True
SAVE_MARGIN_FOR_EACH_MODEL=True
print("Generating all figures ...")
# select "bad" model
fpoint = open(os.path.join(folder, "scores.txt"), "w")
bad_model = None
min_val = np.inf
total_experiments = {}
for m, model in enumerate(results):
cucb_M, cucb_H = 0, 0
plt.figure()
ymax = -np.inf
T = None
for alg_name, val in model[1]:
rep, T = val['cum_rewards'].shape
if alg_name not in total_experiments.keys():
total_experiments[alg_name] = []
t = np.arange(1, T + 1)
regret = np.cumsum(val['regret'], axis=1)
mean_regret = np.mean(regret, axis=0)
std = np.std(regret, axis=0) / np.sqrt(rep)
plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=alg_name)
plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
ymax = max(ymax, mean_regret[-1] + 2 * std[-1])
total_experiments[alg_name] += regret.tolist()
if alg_name in ["CLUCB-new-{}-1".format(ALPHA)]:
rep, T = val['cum_rewards'].shape
regret = np.cumsum(val['regret'], axis=1)
mean_regret = np.mean(regret, axis=0)
std = np.std(regret, axis=0) / np.sqrt(rep)
cucb_M = mean_regret[-1] + 2 * std[-1]
if alg_name in ["CLUCB-old-{}-1".format(ALPHA)]:
rep, T = val['cum_rewards'].shape
regret = np.cumsum(val['regret'], axis=1)
mean_regret = np.mean(regret, axis=0)
std = np.std(regret, axis=0) / np.sqrt(rep)
cucb_H = mean_regret[-1] - 2 * std[-1]
val = abs(cucb_M - cucb_H) / cucb_H
print(m, val)
fpoint.write("{} {}\n".format(m, val))
if val < min_val:
bad_model = m
min_val = val
plt.xlim([0, T])
plt.ylim([0, ymax])
plt.legend()
plt.title("model: {}".format(m))
plt.savefig(os.path.join(folder, "model{}.png".format(m)))
if LATEX:
tikzplotlib.save(os.path.join(folder, "model{}.tex".format(m)))
plt.close()
fpoint.close()
print("Generating all figures for margin ...")
avg_area = {}
avg_margin = {}
for m, model in enumerate(results):
plt.figure()
ymax = -np.inf
ymin = np.inf
maxt = 0
T = None
print()
TOSAVE = {}
for alg_name, val in model[1]:
if alg_name not in avg_area.keys():
avg_area[alg_name] = []
avg_margin[alg_name] = []
rep, T = val['cum_rewards'].shape
margin = val['cum_rewards'].cumsum(axis=1)
t = np.arange(1, T + 1)
area = np.sum(margin * (margin < 0), axis=1).mean()
print('min_margin(', alg_name, ')=', margin.min())
print('area(', alg_name, ')=', area)
mean_margin = np.mean(margin, axis=0)
std = np.std(margin, axis=0) / np.sqrt(rep)
if SAVE_MARGIN_FOR_EACH_MODEL:
M = np.concatenate((t.reshape(-1, 1), mean_margin.reshape(-1, 1), std.reshape(-1, 1)), axis=1)
TOSAVE[alg_name] = M
avg_area[alg_name] += [area]
avg_margin[alg_name] += margin.tolist()
idxs = mean_margin < 10
if np.sum(idxs) > 0:
plt.plot(t[idxs], mean_margin[idxs], linewidth=LW, label=alg_name)
plt.fill_between(t[idxs],
mean_margin[idxs] - 2 * std[idxs], mean_margin[idxs] + 2 * std[idxs],
alpha=0.15)
ymin = min(ymin, np.min(mean_margin[idxs] - 2 * std[idxs]))
ymax = max(ymax, np.max(mean_margin[idxs] + 2 * std[idxs]))
maxt = max(maxt, np.max(t[idxs]))
if SAVE_MARGIN_FOR_EACH_MODEL:
np.savez_compressed(os.path.join(folder, "model{}_margin".format(m)), **TOSAVE)
plt.xlim([1, maxt])
plt.ylim([ymin, ymax])
plt.legend()
plt.title("model: {}".format(m))
plt.savefig(os.path.join(folder, "model{}_margin.png".format(m)))
if LATEX:
tikzplotlib.save(os.path.join(folder, "model{}_margin.tex".format(m)))
plt.close()
ymax = -np.inf
TOSAVE = {}
for alg_name in total_experiments.keys():
regret = np.array(total_experiments[alg_name])
rep, T = regret.shape
t = np.arange(1, T + 1)
mean_regret = np.mean(regret, axis=0)
std = np.std(regret, axis=0) / np.sqrt(rep)
plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=alg_name)
plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
ymax = max(ymax, mean_regret[-1] + 2 * std[-1])
M = np.concatenate((t.reshape(-1, 1), mean_regret.reshape(-1, 1), std.reshape(-1, 1)), axis=1)
TOSAVE[alg_name] = M
np.savez_compressed(os.path.join(folder, "avg_regret"), **TOSAVE)
plt.xlim([0, T])
plt.ylim([0, ymax])
plt.xlabel("Time")
plt.ylabel("Cumulative Regret")
plt.legend()
average_name = os.path.join(folder, "avg_regret.png")
print("Saving average performance to %s ..." % average_name)
plt.savefig(average_name)
average_name = os.path.join(folder, "avg_regret.tex")
tikzplotlib.save(average_name)
print("Done.\n")
avg_margin_name = os.path.join(folder, "avg_margin.png")
print("Saving average margin to %s..." % avg_margin_name)
plt.figure(figsize=(10, 10))
ymax = -np.inf
ymin = np.inf
maxt = -np.inf
TOSAVE = {}
for alg_name in avg_margin.keys():
margin = np.array(avg_margin[alg_name])
rep, T = margin.shape
t = np.arange(1, T + 1)
mean_margin = np.mean(margin, axis=0)
std = np.std(margin, axis=0) / np.sqrt(rep)
idxs = mean_margin < 2
if np.sum(idxs) > 0:
plt.plot(t[idxs], mean_margin[idxs], linewidth=LW, label=alg_name)
plt.fill_between(t[idxs],
mean_margin[idxs] - 2 * std[idxs], mean_margin[idxs] + 2 * std[idxs],
alpha=0.15)
ymin = min(ymin, np.min(mean_margin[idxs] - 2 * std[idxs]))
ymax = max(ymax, np.max(mean_margin[idxs] + 2 * std[idxs]))
maxt = max(maxt, np.max(t[idxs]))
M = np.concatenate((t.reshape(-1, 1), mean_margin.reshape(-1, 1), std.reshape(-1, 1)), axis=1)
TOSAVE[alg_name] = M
plt.xlim([1, maxt])
# plt.ylim([0, ymax])
plt.xlabel("Time")
plt.ylabel("Average Margin")
plt.legend()
plt.savefig(avg_margin_name)
average_name = os.path.join(folder, "avg_margin.tex")
tikzplotlib.save(average_name)
plt.close()
np.savez_compressed(os.path.join(folder, "avg_margin"), **TOSAVE)
print("Done.\n")
print(bad_model, min_val)
plt.figure(figsize=(10, 10))
plt.title("Model: {}".format(bad_model))
ymax = -np.inf
T = None
for model in [results[bad_model]]: # results:
print(model[2])
# for el in model[2]:
# print(el.mean)
for alg_name, val in model[1]:
print(alg_name)
rep, T = val['cum_rewards'].shape
t = np.arange(1, T + 1)
regret = np.cumsum(val['regret'], axis=1)
mean_regret = np.mean(regret, axis=0)
std = np.std(regret, axis=0) / np.sqrt(rep)
low_quantile = np.quantile(regret, 0.25, axis=0)
high_quantile = np.quantile(regret, 0.75, axis=0)
# rwds = np.mean(val['cum_rewards'], axis=0)
# low_quantile_rwds = np.quantile(val['cum_rewards'], 0.25, axis=0)
# high_quantile_rwds = np.quantile(val['cum_rewards'], 0.75, axis=0)
plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=alg_name)
plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
ymax = max(ymax, mean_regret[-1] + 2 * std[-1])
plt.xlim([0, T])
plt.ylim([0, ymax])
plt.legend()
plt.savefig(os.path.join(folder, "worst_linear_exp.png"))
print("Done.\n")
worst_name = os.path.join(folder, "worst_linear_exp.tex")
print("Saving worst model to %s..." % worst_name)
tikzplotlib.save(worst_name)
print("Done.\n")
archive_name = "{}.tar.gz".format(folder)
print("Compressing files to %s..." % archive_name)
tardir(folder, archive_name)
print("Done.\n")
plt.show()
| ContextualBanditsAttacks-main | examples/parse_linear_results.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
sys.path[0] = '/Users/evrard/Desktop/monotone_mabs/'
import numpy as np
import isoexp.linear.linearmab_models as arms
import isoexp.linear.linearbandit as mabs
import isoexp.conservative.linearmabs as cmabs
from tqdm import tqdm, trange
from joblib import Parallel, delayed
import math
import dill
import json
import datetime
from collections import namedtuple
"""
TEST GLM Bandit
Compare the martingale bound with a sum of Hoeffding bounds
"""
def work(m, nb_arms, nb_features, noise, b_pos, nb_simu, T, all_algs, random_state):
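    """Run one randomly drawn logistic (GLM) bandit model against every algorithm in `all_algs`.

    The model is seeded with `random_state + m`, the arm of rank `b_pos` (by
    mean reward) is used as the conservative baseline, and for each of the
    `nb_simu` repetitions of horizon `T` the per-round regret, the cumulative
    margin/reward used for the conservative constraint and the parameter
    estimation error are recorded.  Returns (m, results, model, mean_baseline).
    """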
# create model
K = nb_arms
model = arms.RandomLogArms(n_actions = K,
n_features = n_features,
random_state = random_state + m,
bound_features = 1,
bound_theta = 1,
noise = noise)
means = model.link(np.dot(model.features,model.theta))
kappa = model.kappa
theta_bound = np.linalg.norm(model.theta, 2)
# Define baseline
baseline = np.argsort(means)[b_pos]
mean_baseline = means[baseline]
AAA = []
for alg_name in tqdm(all_algs.keys(), desc='Sim. model {}'.format(m)):
alg = all_algs[alg_name](model.features, noise, theta_bound,
mean_baseline, baseline, kappa = kappa)
regret = np.zeros((nb_simu, T))
rwds = regret.copy()
norms = regret.copy()
for k in trange(nb_simu, desc = 'Repetitions'):
alg.reset()
for t in trange(T, desc = 'Inside episode') :
a_t = alg.get_action()
r_t = model.reward(a_t)
if hasattr(alg, 'conservative_level'):
rwds[k,t] = means[a_t] - (1 - alg.conservative_level)*mean_baseline
else :
rwds[k,t] = means[a_t]
alg.update(a_t, r_t)
regret[k, t] = model.best_arm_reward() - means[a_t]
if hasattr(alg, 'theta_hat'):
norms[k, t] = np.linalg.norm(alg.theta_hat - model.theta, 2)
AAA += [(alg_name, {"regret": regret, "cum_rewards": rwds.cumsum(axis = 1), "norm_errors" : norms})]
return m, AAA, model, mean_baseline
# ----------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
PARALLEL = True
print("PARALLEL: {}".format(PARALLEL))
MABResults = namedtuple('MABResults', 'regret, cum_rewards, norm_errors')
random_state = np.random.randint(0, 123123)
np.random.seed(random_state)
print("seed: {}".format(random_state))
K = 20
n_features = 10
a_noise = 0.1
delta = 0.01
la = 1/4
T = 3000 # horizon
nb_models = 4
nb_simu = int(np.sqrt(T))
CLEVELS = [0.1]
BATCHES = [1]
pos = 15
settings = {
"T": T,
"nb_simu": nb_simu,
"random_state": random_state,
"K": K,
"dimension" : n_features,
"baseline": pos,
"conservative_levels": CLEVELS,
"batches": BATCHES
}
algorithms = {
'GLM-UCB': lambda feat, noise, b_theta, mean_b = 0, b = 0, alpha = 0, kappa = 1 : mabs.UCB_GLM(
reg_factor=la,
delta=delta,
arm_features = feat,
noise_variance = noise,
bound_theta = b_theta,
kappa = kappa,
model = 'bernoulli',
tighter_ucb = True)
}
conservative_algorithms = {}
for conservative_level in CLEVELS:
conservative_algorithms.update(
{
"GLM-CUCB-{}".format(conservative_level):
lambda feat, noise, b_theta, mean_b, b, alpha = conservative_level, kappa = 1:
cmabs.CUCB_GLM(arm_features = feat,
noise_variance = noise,
bound_theta = b_theta,
mean_baseline = mean_b,
baseline = b,
reg_factor = la,
delta = delta,
conservative_level = alpha,
kappa = kappa,
tighter_ucb = True,
model = 'bernoulli'), }
)
results = []
full_algo = {**algorithms, **conservative_algorithms}
if PARALLEL:
import multiprocessing
num_cores = multiprocessing.cpu_count()
results = Parallel(n_jobs=num_cores, verbose=1)(
delayed(work)(m=m, nb_arms=K, nb_features = n_features, noise = a_noise, b_pos=pos,
nb_simu=nb_simu, T=T, all_algs=full_algo,
random_state=random_state) for m in range(nb_models))
else:
for m in tqdm(range(nb_models)):
ret = work(m, K, n_features, a_noise, pos, nb_simu, T, full_algo, random_state)
results.append(ret)
# MAB = []
# means = None
# if ARMS == "Bernoulli":
# means = np.random.uniform(low=0.25, high=0.75, size=K)
# for n in range(K):
# MAB.append(arms.ArmBernoulli(p=means[n], random_state=random_state + n))
# elif ARMS == "TruncatedNormal":
# means = np.random.uniform(low=0., high=1., size=K)
# sigmas = np.random.uniform(low=0.1, high=1., size=K)
# for n in range(K):
# MAB.append(arms.ArmTruncNorm(original_mean=means[n], a=0, b=1, original_std=sigmas[n]))
# means[n] = MAB[n].mean
# sigmas[n] = MAB[n].sigma
# else:
# raise ValueError("Unknown arm type")
#
# AAA = []
# for alg_name in full_algo.keys():
# alg = full_algo[alg_name]
#
# regret = np.zeros((nb_simu, T))
# rwds = 0 * regret
#
# for k in range(nb_simu):
# # Define baseline
# baseline = np.argsort(means)[pos]
# mean_baseline = MAB[baseline].mean
#
# rewards, draws = alg(T, MAB, baseline, mean_baseline)
# regret[k] = max(means) * np.arange(1, T + 1) - np.cumsum(rewards)
# rwds[k] = np.cumsum(means[draws.astype('int')])
#
# AAA += [(alg_name, {"regret": regret, "cum_rewards": rwds})]
#
# results += [(m, AAA, MAB)]
results += [CLEVELS]
id = '{:%Y%m%d_%H%M%S}_{}'.format(datetime.datetime.now(), 'GLM')
with open("{}_{}_GLM_results.pickle".format(id, "PAR" if PARALLEL else "SEQ"), "wb") as f:
dill.dump(results, f)
with open("{}_{}_GLM_settings.json".format(id, "PAR" if PARALLEL else "SEQ"), "w+") as f:
json.dump(settings, f)
| ContextualBanditsAttacks-main | examples/runner_GLM.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import os
from cycler import cycler
import matplotlib.pyplot as plt
import tikzplotlib
n = 9 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--', ':', '-.', '-'])
plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
# what = 'regret'
what = 'margin'
EVERY = 200
LW = 2
folders = ["20190905_043752_Bernoulli_PAR_martingale_results"]
plt.figure(figsize=(20, 10))
T = 0
if what == 'margin':
ymax = np.inf
else:
ymax = -np.inf
for fname in folders:
M = np.load(os.path.join(fname, "avg_{}.npz".format(what)), mmap_mode='r')
for alg in M.files:
if not alg in ["CUCB-oracle-0.05", "CUCB-new-0.05-1",
"CUCB-LBS-new-0.05-1",
"CSUCB-old-0.05-1","CUCB-LBS-old-0.05-1"]:
data = M[alg]
t = data[:, 0]
yval = data[:, 1]
std = data[:, 2]
plt.plot(t[::EVERY], yval[::EVERY], linewidth=LW, label=alg)
plt.fill_between(t[::EVERY],
yval[::EVERY] - 2 * std[::EVERY], yval[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
if what == 'margin':
ymax = min(ymax, np.min(yval - 2 * std))
else:
ymax = max(ymax, yval[-1] + 2 * std[-1])
T = max(T, np.max(t))
plt.plot([0,T], [0, 0], '-', c='gray', linewidth=0.8)
plt.xlim([0, T])
# if ymax > 0:
# plt.ylim([0, ymax])
# else:
# plt.ylim([ymax, 5])
plt.xlabel("Time")
if what == "regret":
plt.ylabel("Cumulative Regret")
else:
plt.ylabel("Average Budget")
plt.legend()
plt.savefig("jester_average_{}.png".format(what))
tikzplotlib.save("jester_average_{}.tex".format(what))
plt.show()
| ContextualBanditsAttacks-main | examples/merge_real_data_results.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 22 15:37:36 2019
@author: evrard
"""
filename = '/Users/evrardgarcelon/Desktop/monotone_mabs/20190825_113802_GLM_PAR_GLM_results.pickle'
import dill
import numpy as np
import pylab as plt
with open(filename, 'rb') as f:
results = dill.load(f)
n_models = 1
n_algos = len(results[0])
nb_simu,T = results[0][1][0][1]['regret'].shape
clevels = results[-1]
baseline_means = np.zeros(n_models)
q = 0.25
t = np.linspace(0, T-1,T, dtype = 'int')
nb_first_iteration = 50
algorithms = {}
true_alg_name = {'GLM-UCB': 'UCB-GLM', 'GLM-CUCB-0.1': 'CUCB-GLM-0.1'}
for alg_name, res in results[0][1] :
algorithms[true_alg_name[alg_name]] = {'regret' : np.zeros((n_models,nb_simu, T)),
'cum_rewards' : np.zeros((n_models,nb_simu, T)),
'norm_errors' : np.zeros((n_models,nb_simu, T))}
for m in range(n_models) :
res = results[m][1]
baseline_means[m] = results[m][-1]
for i,val in enumerate(res) :
alg_name = val[0]
val = val[1]
algorithms[true_alg_name[alg_name]]['regret'][m,:,:] = val['regret']
algorithms[true_alg_name[alg_name]]['cum_rewards'][m,:,:] = val['cum_rewards']
algorithms[true_alg_name[alg_name]]['norm_errors'][m,:,:] = val['norm_errors']
plt.figure(1, figsize = (10,10))
plt.figure(2, figsize = (10,10))
regret = {}
margin = {}
for alg_name, res in algorithms.items() :
temp = res['regret'].cumsum(axis = 2)
mean_regret = np.mean(temp, axis = (0,1))
low_quantile = np.quantile(temp,q, axis = (0,1))
high_quantile = np.quantile(temp, 1-q, axis = (0,1))
regret[alg_name] = (mean_regret, low_quantile, high_quantile)
plt.figure(1)
plt.plot(mean_regret, label = alg_name)
plt.fill_between(t, low_quantile, high_quantile, alpha = 0.15)
if alg_name != 'UCB-GLM' :
res['cum_rewards'] = res['cum_rewards'].cumsum(axis = 2)
mean_margin = np.mean(res['cum_rewards'], axis = (0,1))
low_quantile = np.quantile(res['cum_rewards'], q, axis = (0,1))
high_quantile = np.quantile(res['cum_rewards'], 1-q, axis = (0,1))
margin[alg_name] = (mean_margin, low_quantile, high_quantile)
else :
for alpha in clevels :
a_name = alg_name + '-{}'.format(alpha)
temp = 1*algorithms[alg_name]['cum_rewards']
for m in range(n_models) :
temp[m] = temp[m] - (1-alpha)*baseline_means[m]
temp = temp.cumsum(axis = 2)
mean_margin = np.mean(temp, axis = (0,1))
low_quantile = np.quantile(temp, q, axis = (0,1))
high_quantile = np.quantile(temp, 1-q, axis = (0,1))
            margin[a_name] = (mean_margin[:nb_first_iteration], low_quantile[:nb_first_iteration], high_quantile[:nb_first_iteration])
plt.figure(2)
plt.plot(mean_margin[:nb_first_iteration], label = alg_name)
plt.fill_between(t[:nb_first_iteration], low_quantile[:nb_first_iteration], high_quantile[:nb_first_iteration], alpha = 0.15)
plt.figure(2)
plt.plot(t[:nb_first_iteration], np.zeros(nb_first_iteration), color = 'red', linestyle = '--', label = '0')
plt.figure(1)
plt.legend()
plt.figure(2)
plt.legend()
plt.show()
| ContextualBanditsAttacks-main | examples/reader_GLM.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from isoexp.isotonicsim import LIsotron
import matplotlib.pyplot as plt
import numpy as np
from isoexp.LPAV_cvx import cvx_lip_isotonic_regression
N = 500
m = 5
X = np.random.rand(N*m).reshape(N, m)
w = np.random.rand(m)
orda = np.argsort(np.dot(X, w))
X = X[orda, :]
y = 2*np.dot(X, w)
y = np.dot(X, w)**3 # + np.random.randn(N)
x = np.dot(X, w)
#reg = LIsotron()
#yn = reg.fit_transform(X, y, lipschitz_value=1, iterations=50)
zero_weights = np.zeros_like(y)  # all-zero weights passed to the isotonic regression solver
iterations=400
wt = np.random.rand(X.shape[1])
wt = np.zeros(X.shape[1])
for t in range(iterations):
zt = np.dot(X, wt)
order = np.argsort(zt)
zt = zt[order]
print(zt)
    y_iso = cvx_lip_isotonic_regression(x=zt, y=y[order], weights=zero_weights, lipschitz_value=10)
print(y_iso)
print(y)
# plt.plot(xt, y[order], 'o')
# plt.plot(xt, y_iso, '+')
# plt.show()
wt = wt + np.mean((y[order] - y_iso)[:, np.newaxis] * X[order, :], axis=0)
print("true weights: {}".format(w))
print("weights: {}".format(wt))
plt.figure()
plt.plot(np.dot(X, w), y, '+', label="true")
#plt.plot(np.dot(X, w), np.dot(X, wt), 'o', label="learnt")
plt.plot(np.dot(X, w), y_iso, 'o', label="learnt2")
plt.legend()
plt.show()
| ContextualBanditsAttacks-main | examples/run_lisotron.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from isoexp.knnmab import KnnMab
from isoexp.isomab import IsoMab
import isoexp.monenvs as monenvs
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
import time
np.random.seed(12345)
env = monenvs.Env1()
env.show()
plt.show()
# define algorithms
knn_a = KnnMab(env=env, Lc=100)
iso_a = IsoMab(env=env, Lc=100)
algs = [(knn_a, "knn"),(iso_a, "iso")]
# define params
rep = 2
T = 2500
## force compilation of the function
from isoexp.knnmab import knn_select_arm
start = time.time()
knn_select_arm(np.zeros((4,5)), np.zeros((4,)), -1, 1, 3, 1)
end = time.time()
print("Elapsed (with compilation) = %s" % (end - start))
# from isoexp.isomab import construct_datadep_ci
# start = time.time()
# construct_datadep_ci(np.zeros((6,)), np.zeros((6,)), np.zeros((1,)), np.zeros((1,)), 1, -1)
# end = time.time()
# print("Elapsed (with compilation) = %s" % (end - start))
# prepare figure
rf = plt.figure(99)
for alg in algs:
regrets = np.zeros((rep, T))
for i in tqdm(range(rep)):
alg[0].reset()
regret = alg[0].run(iterations=T)
cr = np.cumsum(regret)
regrets[i,:] = cr
plt.figure(99)
mu = regrets.mean(axis=0)
sigma = regrets.std(axis=0) / np.sqrt(rep)
p = plt.plot(mu, label=alg[1])
plt.fill_between(np.arange(len(mu)), mu + 2*sigma, mu - 2*sigma, facecolor=p[-1].get_color(), alpha=0.5)
plt.figure()
X = np.linspace(0, 1, 100)
arms = np.zeros_like(X)
for i in range(len(X)):
arms[i] = alg[0].select_arm(np.array([X[i]]))
plt.plot(X, arms, '+')
plt.title("Arm selection")
plt.xlabel("Covariate X")
plt.ylabel("Arm")
plt.title(alg[1])
plt.figure(99)
plt.xlabel("Time")
plt.ylabel("Cumulative Regret")
plt.legend()
plt.show()
| ContextualBanditsAttacks-main | examples/main.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pickle
import tikzplotlib
import os
import sys
import shutil
from cycler import cycler
import tarfile
import json
ALPHA = 0.05
NAMES = {
'UCB': "UCB",
'CUCB-oracle-{}'.format(ALPHA): 'CUCB-Or',
'CUCB-old-{}-1'.format(ALPHA): 'CUCB',
'CUCB-new-{}-1'.format(ALPHA): 'CUCB-M',
'CSUCB-new-{}-1'.format(ALPHA): 'CUCB2',
'CSUCB-old-{}-1'.format(ALPHA): 'CUCB-C',
'CUCB-LBS-new-{}-1'.format(ALPHA): 'CUCB-ML',
'CUCB-LBS-old-{}-1'.format(ALPHA): 'CUCB-L',
}
def tardir(path, tar_name):
with tarfile.open(tar_name, "w:gz") as tar_handle:
for root, dirs, files in os.walk(path):
for file in files:
tar_handle.add(os.path.join(root, file))
n = 12 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--', ':', '-.', '-', '--', ':', '-.'])
plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
if len(sys.argv) == 2:
filename = sys.argv[1]
else:
filename = '20190822_065452_Bernoulli_PAR_martingale_results.pickle'
print("Opening file %s..." % filename)
with open(filename, 'rb') as f:
results = pickle.load(f)
print("Done.\n")
setting_name = filename[:-14] + 'settings.json'
print('Opening settings %s...' % setting_name)
with open(setting_name, 'r') as f:
settings = json.load(f)
baseline_pos = settings["baseline"]
conservative_levels = settings['conservative_levels'][-1]
folder = filename.split('.')[0]
if os.path.exists(folder):
shutil.rmtree(folder)
os.makedirs(folder)
with open(os.path.join(folder, setting_name), 'w') as f:
json.dump(settings, f)
EVERY = 100
LW = 2
LATEX = True
print("Generating all figures for regret ...")
# select "bad" model
fpoint = open(os.path.join(folder, "scores.txt"), "w")
bad_model = None
min_val = np.inf
total_experiments = {}
for m, model in enumerate(results):
cucb_M, cucb_H = 0, 0
plt.figure()
ymax = -np.inf
T = None
for alg_name, val in model[1]:
if alg_name not in total_experiments.keys():
total_experiments[alg_name] = []
rep, T = val['cum_rewards'].shape
t = np.arange(1, T + 1)
mean_regret = np.mean(val['regret'], axis=0)
total_experiments[alg_name] += val['regret'].tolist()
std = np.std(val['regret'], axis=0) / np.sqrt(rep)
plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=NAMES[alg_name])
plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
ymax = max(ymax, mean_regret[-1] + 2 * std[-1])
if alg_name in ["CUCB-new-{}-1".format(ALPHA)]:
rep, T = val['cum_rewards'].shape
mean_regret = np.mean(val['regret'], axis=0)
std = np.std(val['regret'], axis=0) / np.sqrt(rep)
cucb_M = mean_regret[-1] + 2 * std[-1]
if alg_name in ["CUCB-old-{}-1".format(ALPHA)]:
rep, T = val['cum_rewards'].shape
mean_regret = np.mean(val['regret'], axis=0)
std = np.std(val['regret'], axis=0) / np.sqrt(rep)
cucb_H = mean_regret[-1] - 2 * std[-1]
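    # Relative gap between CUCB-M's upper and CUCB's lower confidence bound at the horizon;
    # the model minimizing this gap is kept as the "bad" model re-plotted at the end.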
val = abs(cucb_M - cucb_H) / cucb_H
print(m, val)
fpoint.write("{} {}\n".format(m, val))
if val < min_val:
bad_model = m
min_val = val
plt.xlim([0, T])
plt.ylim([0, ymax])
plt.xlabel("Time")
plt.ylabel("Cumulative Regret")
plt.legend()
# plt.title("model: {}".format(m))
plt.savefig(os.path.join(folder, "model{}.png".format(m)))
if LATEX:
tikzplotlib.save(os.path.join(folder, "model{}.tex".format(m)))
plt.close()
fpoint.close()
print("Done.\n")
print("Generating all figures for margin ...")
avg_area = {}
avg_margin = {}
for m, model in enumerate(results):
plt.figure()
ymin = np.inf
ymax = -np.inf
maxt = 0
T = None
means = [el.mean for el in model[-1]]
action_b = np.argsort(means)[baseline_pos]
mean_baseline = means[action_b]
TOSAVE = {}
for alg_name, val in model[1]:
if alg_name not in avg_area.keys():
avg_area[alg_name] = []
avg_margin[alg_name] = []
rep, T = val['cum_rewards'].shape
t = np.arange(1, T + 1)
cum_rewards = val['cum_rewards']
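        # Conservative margin: cumulative reward minus (1 - alpha) * t * mean reward of the
        # baseline arm; a conservative algorithm should keep this quantity non-negative.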
margin = cum_rewards - (1 - conservative_levels) * t * mean_baseline
mean_margin = np.mean(margin, axis=0)
std = np.std(margin, axis=0) / np.sqrt(rep)
M = np.concatenate((t.reshape(-1, 1), mean_margin.reshape(-1, 1), std.reshape(-1, 1)), axis=1)
TOSAVE[alg_name] = M
idxs = mean_margin < 2
if np.sum(idxs) > 0:
plt.plot(t[idxs], mean_margin[idxs], linewidth=LW, label=alg_name)
plt.fill_between(t[idxs],
mean_margin[idxs] - 2 * std[idxs], mean_margin[idxs] + 2 * std[idxs],
alpha=0.15)
ymin = min(ymin, np.min(mean_margin[idxs] - 2 * std[idxs]))
ymax = max(ymax, np.max(mean_margin[idxs] + 2 * std[idxs]))
maxt = max(maxt, np.max(t[idxs]))
area = np.sum(margin * (margin < 0), axis=1).mean()
print('min_margin(', alg_name, ')=', margin.min())
print('area(', alg_name, ')=', area)
print()
avg_area[alg_name] += [area]
avg_margin[alg_name] += margin.tolist()
np.savez_compressed(os.path.join(folder, "model{}_margin".format(m)), **TOSAVE)
plt.xlim([1, maxt])
plt.ylim([ymin, ymax])
plt.xlabel("Time")
plt.ylabel("Average Margin")
plt.legend()
# plt.title("model: {}".format(m))
plt.savefig(os.path.join(folder, "model{}_margin.png".format(m)))
if LATEX:
tikzplotlib.save(os.path.join(folder, "model{}_margin.tex".format(m)))
plt.close()
print("Done.\n")
ymax = -np.inf
for alg_name in total_experiments.keys():
regret = np.array(total_experiments[alg_name])
rep, T = regret.shape
t = np.arange(1, T + 1)
mean_regret = np.mean(regret, axis=0)
std = np.std(regret, axis=0) / np.sqrt(rep)
plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=NAMES[alg_name])
plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
ymax = max(ymax, mean_regret[-1] + 2 * std[-1])
plt.xlim([0, T])
plt.ylim([0, ymax])
plt.xlabel("Time")
plt.ylabel("Cumulative Regret")
plt.legend()
average_name = os.path.join(folder, "avg_regret.png")
print("Saving average performance to %s ..." % average_name)
plt.savefig(average_name)
average_name = os.path.join(folder, "avg_regret.tex")
tikzplotlib.save(average_name)
print("Done.\n")
for alg_name in avg_area.keys():
print("AverageAREA({}): {}".format(alg_name, np.mean(avg_area[alg_name])))
avg_margin_name = os.path.join(folder, "avg_margin.png")
print("Saving average margin to %s..." % avg_margin_name)
plt.figure(figsize=(10, 10))
ymax = -np.inf
ymin = np.inf
maxt = -np.inf
TOSAVE = {}
for alg_name in avg_margin.keys():
margin = np.array(avg_margin[alg_name])
rep, T = margin.shape
t = np.arange(1, T + 1)
mean_margin = np.mean(margin, axis=0)
std = np.std(margin, axis=0) / np.sqrt(rep)
idxs = mean_margin < 2
if np.sum(idxs) > 0:
plt.plot(t[idxs], mean_margin[idxs], linewidth=LW, label=alg_name)
plt.fill_between(t[idxs],
mean_margin[idxs] - 2 * std[idxs], mean_margin[idxs] + 2 * std[idxs],
alpha=0.15)
ymin = min(ymin, np.min(mean_margin[idxs] - 2 * std[idxs]))
ymax = max(ymax, np.max(mean_margin[idxs] + 2 * std[idxs]))
maxt = max(maxt, np.max(t[idxs]))
M = np.concatenate((t.reshape(-1,1), mean_margin.reshape(-1,1), std.reshape(-1,1)), axis=1)
TOSAVE[alg_name] = M
plt.xlim([1, maxt])
# plt.ylim([0, ymax])
plt.xlabel("Time")
plt.ylabel("Average Margin")
plt.legend()
plt.savefig(avg_margin_name)
average_name = os.path.join(folder, "avg_margin.tex")
tikzplotlib.save(average_name)
plt.close()
np.savez_compressed(os.path.join(folder, "avg_margin"), **TOSAVE)
print("Done.\n")
print(bad_model, min_val)
plt.figure(figsize=(10, 10))
plt.title("Model: {}".format(bad_model))
ymax = -np.inf
T = None
for model in [results[bad_model]]: # results:
print(model[2])
# for el in model[2]:
# print(el.mean)
for alg_name, val in model[1]:
print(alg_name)
rep, T = val['cum_rewards'].shape
t = np.arange(1, T + 1)
mean_regret = np.mean(val['regret'], axis=0)
std = np.std(val['regret'], axis=0) / np.sqrt(rep)
low_quantile = np.quantile(val['regret'], 0.25, axis=0)
high_quantile = np.quantile(val['regret'], 0.75, axis=0)
rwds = np.mean(val['cum_rewards'], axis=0)
low_quantile_rwds = np.quantile(val['cum_rewards'], 0.25, axis=0)
high_quantile_rwds = np.quantile(val['cum_rewards'], 0.75, axis=0)
plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=alg_name)
plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
ymax = max(ymax, mean_regret[-1] + 2 * std[-1])
plt.xlim([0, T])
plt.ylim([0, ymax])
plt.legend()
plt.savefig(os.path.join(folder, "worst_mab_exp.png"))
print("Done.\n")
worst_name = os.path.join(folder, "worst_mab_exp.tex")
print("Saving worst model to %s ..." % worst_name)
tikzplotlib.save(worst_name)
print("Done.\n")
archive_name = "{}.tar.gz".format(folder)
print("Compressing files to %s ..." % archive_name)
tardir(folder, archive_name)
print("Done.\n")
plt.show()
| ContextualBanditsAttacks-main | examples/parse_mab_results.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Created on Thu Aug 22 15:37:36 2019
@author: evrard
"""
import pickle
import numpy as np
import matplotlib.pyplot as plt
import tikzplotlib
import os
import sys
import shutil
from cycler import cycler
import tarfile
def tardir(path, tar_name):
with tarfile.open(tar_name, "w:gz") as tar_handle:
for root, dirs, files in os.walk(path):
for file in files:
tar_handle.add(os.path.join(root, file))
def plot_model(model, name):
ymax = -np.inf
batches = []
plt.figure()
plt.title('model: {}'.format(name))
area = 0.
for p, AAA in model[1]:
batches = []
UCB_vals = None
UCB_stds = None
CUCB1_vals = None
CUCB1_stds = None
CUCBB_vals = []
CUCBB_stds = []
for alg_name, mean_regret, std in AAA:
print(alg_name)
if alg_name == "UCB":
UCB_vals = mean_regret[-1]
UCB_stds = std[-1]
elif alg_name == "CUCB-new-0.1-1":
CUCB1_vals = mean_regret[-1]
CUCB1_stds = std[-1]
CUCBB_vals.append(mean_regret[-1])
CUCBB_stds.append(std[-1])
batches.append(int(alg_name.split('-')[-1]))
else:
CUCBB_vals.append(mean_regret[-1])
CUCBB_stds.append(std[-1])
batches.append(int(alg_name.split('-')[-1]))
# area += CUCBB_vals - UCB_vals
CUCBB_vals = np.array(CUCBB_vals)
CUCBB_stds = np.array(CUCBB_stds)
if CUCB1_vals is not None:
ax1 = plt.plot([batches[0], batches[-1]], [CUCB1_vals, CUCB1_vals], label='CUCB_p{}'.format(p),
marker='o')
ax1_col = ax1[0].get_color()
plt.fill_between([batches[0], batches[-1]], CUCB1_vals - 2 * CUCB1_stds, CUCB1_vals + 2 * CUCB1_stds,
alpha=0.15, color=ax1_col)
ymax = max(ymax, CUCB1_vals + 2 * CUCB1_stds)
if UCB_vals is not None:
ax1 = plt.plot([batches[0], batches[len(batches) - 1]], [UCB_vals, UCB_vals],
label='UCB_p{}'.format(p), marker='+')
ax1_col = ax1[0].get_color()
plt.fill_between(batches, UCB_vals - 2 * UCB_stds, UCB_vals + 2 * UCB_stds, alpha=0.15, color=ax1_col)
ymax = max(ymax, UCB_vals + 2 * UCB_stds)
if len(CUCBB_vals) > 0:
ax1 = plt.plot(batches, CUCBB_vals, label='CUCB_BATCH_p{}'.format(p), marker='d')
ax1_col = ax1[0].get_color()
plt.fill_between(batches, CUCBB_vals - 2 * CUCBB_stds, CUCBB_vals + 2 * CUCBB_stds,
alpha=0.15, color=ax1_col)
ymax = max(ymax, np.max(CUCBB_vals + 2 * CUCBB_stds))
N = np.asscalar(np.max(batches))
plt.xlim([0, N])
plt.ylim([0, ymax])
plt.xlabel("Checkpoint")
plt.ylabel("Regret")
plt.legend()
return area
def plot_all2(results):
processed_results = {}
T = None
for m, model in enumerate(results):
batches = []
for p, AAA in model[1]:
if p not in processed_results.keys():
processed_results[p] = []
batches = []
UCB_vals = None
UCB_stds = None
CUCB1_vals = None
CUCB1_stds = None
CUCBB_vals = []
CUCBB_stds = []
for alg_name, mean_regret, std in AAA:
print(alg_name)
if alg_name == "UCB":
T = len(mean_regret)
UCB_vals = mean_regret[-1]
UCB_stds = std[-1]
elif alg_name == "CUCB-new-0.1-1":
CUCB1_vals = mean_regret[-1]
CUCB1_stds = std[-1]
CUCBB_vals.append(mean_regret[-1])
CUCBB_stds.append(std[-1])
batches.append(int(alg_name.split('-')[-1]))
else:
CUCBB_vals.append(mean_regret[-1])
CUCBB_stds.append(std[-1])
batches.append(int(alg_name.split('-')[-1]))
# area += CUCBB_vals - UCB_vals
CUCBB_vals = np.array(CUCBB_vals)
CUCBB_stds = np.array(CUCBB_stds)
processed_results[p].append((CUCBB_vals - UCB_vals).tolist())
for p in processed_results.keys():
vals = np.array(processed_results[p])
mean = np.mean(vals, axis=0)
std = np.std(vals, axis=0) / np.sqrt(vals.shape[0])
ax1 = plt.plot(batches, mean, label="p={}".format(p))
ax1_col = ax1[0].get_color()
plt.fill_between(batches, mean - 2 * std, mean + 2 * std, alpha=0.15, color=ax1_col)
plt.legend()
plt.xlabel("Checkpoint ($T$)")
plt.ylabel("R_(CUCB2)({}) - R_(UCB)({})".format(T, T))
n = 9 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--', ':', '-.', '-'])
plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
if len(sys.argv) == 1:
filename = 'COMP_20190825_033627_batch_results.pickle'
else:
filename = sys.argv[1]
SUMMARIZE = False
print("Opening file %s..." % filename)
with open(filename, 'rb') as f:
results = pickle.load(f)
print("Done.\n")
folder = filename.split('.')[0]
if os.path.exists(folder):
shutil.rmtree(folder)
os.makedirs(folder)
EVERY = 10
LW = 2
if SUMMARIZE:
new_results = []
for m, model in enumerate(results):
BBB = []
for pos, algorithms in model[1]:
AAA = []
for alg_name, val in algorithms:
# print(m, pos, alg_name)
rep, T = val['cum_rewards'].shape
mean_regret = np.mean(val['regret'], axis=0)
std = np.std(val['regret'], axis=0) / np.sqrt(rep)
AAA.append((alg_name, mean_regret, std))
BBB.append((pos, AAA))
new_results.append((m, BBB, model[2]))
with open("COMP_{}".format(filename), "wb") as f:
pickle.dump(new_results, f)
else:
bad_model = None
max_area = -np.inf
print("Generating all figures ...")
plot_all2(results)
# for m, model in enumerate(results):
#
# area = plot_model(model, name=m)
# plt.savefig(os.path.join(folder, "model{}.png".format(m)))
# tikzplotlib.save(os.path.join(folder, "model{}.tex".format(m)))
# plt.close()
#
# if area > max_area:
# bad_model = m
# max_area = area
# print(max_area)
#
# plot_model(results[bad_model], name=bad_model)
# plt.savefig(os.path.join(folder, "worst_mab_exp.png"))
# plt.close()
# print("Done.\n")
worst_name = os.path.join(folder, "mab_batch.tex")
print("Saving worst model to %s ..." % worst_name)
tikzplotlib.save(worst_name)
print("Done.\n")
archive_name = "{}.tar.gz".format(folder)
print("Compressing files to %s ..." % archive_name)
tardir(folder, archive_name)
print("Done.\n")
plt.show()
# n_models = len(results)
# n_batch = len(results[0][1]) - 1
# nb_simu, T = results[0][1][0][1]['regret'].shape
# batches = []
# q = 0.25
# regret_batch_at_T = np.zeros((n_models, n_batch, nb_simu))
# regret_UCB_T = np.zeros((n_models, 1, nb_simu))
# for m in range(n_models):
# res = results[m][1]
# for i, val in enumerate(res):
# alg_name = val[0]
# val = val[1]
# if alg_name == 'UCB':
# regret_UCB_T[m] = val['regret'][:, -1]
# else:
# alg_name[13::]
# batches.append(int(alg_name[13::]))
# regret_batch_at_T[m, i - 1, :] = val['regret'][:, -1]
#
# batches = np.array(batches)
# batches = batches / T
# regret_diff = regret_batch_at_T - regret_UCB_T
# mean_regret_diff = np.mean(regret_diff, axis=(0, 2))
# high_quantile = np.quantile(regret_diff, 1 - q, axis=(0, 2))
# low_quantile = np.quantile(regret_diff, q, axis=(0, 2))
# plt.plot(batches, mean_regret_diff, color='green')
# plt.fill_between(batches, low_quantile, high_quantile, alpha=0.15, color='green')
# plt.show()
| ContextualBanditsAttacks-main | examples/parse_batch_results.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 22 15:37:36 2019
@author: evrard
"""
filename = '20190829_104727_linear_PAR_linear_results.pickle'
import pickle
import numpy as np
import pylab as plt
with open(filename, 'rb') as f:
results = pickle.load(f)
n_models = len(results)
n_algos = len(results[0])
nb_simu,T = results[0][1][0][1]['regret'].shape
q = 0.25
t = np.linspace(1, T,T)
algorithms = {}
for alg_name, res in results[0][1] :
algorithms[alg_name] = {'regret' : np.zeros((n_models,nb_simu, T)),
'cum_rewards' : np.zeros((n_models,nb_simu, T)),
'norm_errors' : np.zeros((n_models,nb_simu, T))}
for m in range(n_models) :
res = results[m][1]
for i,val in enumerate(res) :
alg_name = val[0]
val = val[1]
algorithms[alg_name]['regret'][m,:,:] = val['regret']
algorithms[alg_name]['cum_rewards'][m,:,:] = val['cum_rewards']
algorithms[alg_name]['norm_errors'][m,:,:] = val['norm_errors']
plt.figure(figsize = (10,10))
for alg_name, res in algorithms.items() :
res['regret'] = res['regret'].cumsum(axis = 2)
mean_regret = np.mean(res['regret'], axis = (0,1))
low_quantile = np.quantile(res['regret'], q, axis = (0,1))
high_quantile = np.quantile(res['regret'], 1-q, axis = (0,1))
plt.plot(mean_regret, label = alg_name)
plt.fill_between(t, low_quantile, high_quantile, alpha = 0.15)
plt.legend()
| ContextualBanditsAttacks-main | examples/reader_linear.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import numpy as np
import os
np.random.seed(1234)
# we want 500 for training, 100 for test for each class
n = 500
def get_total(data):
data_x, data_y = [], []
for k, v in data.items():
for i in range(len(v)):
data_x.append(v[i])
data_y.append(k)
d = {}
d['images'] = data_x
d['labels'] = data_y
return d
# loading the pickled data
with open(os.path.join('../data/miniimagenet/data.pkl'), 'rb') as f:
data_dict = pickle.load(f)
data = data_dict['images']
labels = data_dict['labels']
# split data into classes, 600 images per class
class_dict = {}
for i in range(len(set(labels))):
class_dict[i] = []
for i in range(len(data)):
class_dict[labels[i]].append(data[i])
# Split data for each class to 500 and 100
x_train, x_test = {}, {}
for i in range(len(set(labels))):
np.random.shuffle(class_dict[i])
x_test[i] = class_dict[i][n:]
x_train[i] = class_dict[i][:n]
# mix the data
d_train = get_total(x_train)
d_test = get_total(x_test)
with open(os.path.join('../data/miniimagenet/train.pkl'), 'wb') as f:
pickle.dump(d_train, f)
with open(os.path.join('../data/miniimagenet/test.pkl'), 'wb') as f:
pickle.dump(d_test, f) | Adversarial-Continual-Learning-main | ACL-resnet/data/split_miniimagenet.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys, time, os
import numpy as np
import torch
import copy
import utils
from copy import deepcopy
from tqdm import tqdm
sys.path.append('../')
from networks.discriminator import Discriminator
class ACL(object):
def __init__(self, model, args, network):
self.args=args
self.nepochs=args.nepochs
self.sbatch=args.batch_size
# optimizer & adaptive lr
self.e_lr=args.e_lr
self.d_lr=args.d_lr
if not args.experiment == 'multidatasets':
self.e_lr=[args.e_lr] * args.ntasks
self.d_lr=[args.d_lr] * args.ntasks
else:
self.e_lr = [self.args.lrs[i][1] for i in range(len(args.lrs))]
self.d_lr = [self.args.lrs[i][1]/10. for i in range(len(args.lrs))]
print ("d_lrs : ", self.d_lr)
self.lr_min=args.lr_min
self.lr_factor=args.lr_factor
self.lr_patience=args.lr_patience
self.samples=args.samples
self.device=args.device
self.checkpoint=args.checkpoint
self.adv_loss_reg=args.adv
self.diff_loss_reg=args.orth
self.s_steps=args.s_step
self.d_steps=args.d_step
self.diff=args.diff
self.network=network
self.inputsize=args.inputsize
self.taskcla=args.taskcla
self.num_tasks=args.ntasks
# Initialize generator and discriminator
self.model=model
self.discriminator=self.get_discriminator(0)
self.discriminator.get_size()
self.latent_dim=args.latent_dim
self.task_loss=torch.nn.CrossEntropyLoss().to(self.device)
self.adversarial_loss_d=torch.nn.CrossEntropyLoss().to(self.device)
self.adversarial_loss_s=torch.nn.CrossEntropyLoss().to(self.device)
self.diff_loss=DiffLoss().to(self.device)
self.optimizer_S=self.get_S_optimizer(0)
self.optimizer_D=self.get_D_optimizer(0)
self.use_memory = True if self.args.use_memory == 'yes' else False
self.task_encoded={}
self.mu=0.0
self.sigma=1.0
print()
def get_discriminator(self, task_id):
discriminator=Discriminator(self.args, task_id).to(self.args.device)
return discriminator
def get_S_optimizer(self, task_id, e_lr=None):
if e_lr is None: e_lr=self.e_lr[task_id]
optimizer_S=torch.optim.SGD(self.model.parameters(), momentum=self.args.mom,
weight_decay=self.args.e_wd, lr=e_lr)
return optimizer_S
def get_D_optimizer(self, task_id, d_lr=None):
if d_lr is None: d_lr=self.d_lr[task_id]
optimizer_D=torch.optim.SGD(self.discriminator.parameters(), weight_decay=self.args.d_wd, lr=d_lr)
return optimizer_D
def train(self, task_id, dataset):
if task_id > 0:
self.model = self.prepare_model(task_id)
self.discriminator=self.get_discriminator(task_id)
best_loss=np.inf
best_model=utils.get_model(self.model)
best_loss_d=np.inf
best_model_d=utils.get_model(self.discriminator)
dis_lr_update=True
d_lr=self.d_lr[task_id]
patience_d=self.lr_patience
self.optimizer_D=self.get_D_optimizer(task_id, d_lr)
e_lr=self.e_lr[task_id]
patience=self.lr_patience
self.optimizer_S=self.get_S_optimizer(task_id, e_lr)
for e in range(self.nepochs):
# Train
clock0=time.time()
self.train_epoch(dataset['train'], task_id)
clock1=time.time()
train_res=self.eval_(dataset['train'], task_id)
utils.report_tr(train_res, e, self.sbatch, clock0, clock1)
            # lower the learning rates if the model is still predicting at near random chance after the first 5 epochs
if (self.args.experiment == 'cifar100' or self.args.experiment == 'miniimagenet') and e == 4:
random_chance=20.
threshold=random_chance + 2
if train_res['acc_t'] < threshold:
# Restore best validation model
d_lr=self.d_lr[task_id] / 10.
self.optimizer_D=self.get_D_optimizer(task_id, d_lr)
print("Performance on task {} is {} so Dis's lr is decreased to {}".format(task_id, train_res[
'acc_t'], d_lr), end=" ")
e_lr=self.e_lr[task_id] / 10.
self.optimizer_S=self.get_S_optimizer(task_id, e_lr)
self.discriminator=self.get_discriminator(task_id)
if task_id > 0:
self.model=self.load_checkpoint(task_id - 1)
else:
self.model=self.network.Net(self.args).to(self.args.device)
# Valid
valid_res=self.eval_(dataset['valid'], task_id)
utils.report_val(valid_res)
# Adapt lr for S and D
if valid_res['loss_tot'] < best_loss:
best_loss=valid_res['loss_tot']
best_model=utils.get_model(self.model)
patience=self.lr_patience
print(' *', end='')
else:
patience-=1
if patience <= 0:
e_lr/=self.lr_factor
print(' lr={:.1e}'.format(e_lr), end='')
if e_lr < self.lr_min:
print()
break
patience=self.lr_patience
self.optimizer_S=self.get_S_optimizer(task_id, e_lr)
if train_res['loss_a'] < best_loss_d:
best_loss_d=train_res['loss_a']
best_model_d=utils.get_model(self.discriminator)
patience_d=self.lr_patience
else:
patience_d-=1
if patience_d <= 0 and dis_lr_update:
d_lr/=self.lr_factor
print(' Dis lr={:.1e}'.format(d_lr))
if d_lr < self.lr_min:
dis_lr_update=False
print("Dis lr reached minimum value")
print()
patience_d=self.lr_patience
self.optimizer_D=self.get_D_optimizer(task_id, d_lr)
print()
# Restore best validation model (early-stopping)
self.model.load_state_dict(copy.deepcopy(best_model))
self.discriminator.load_state_dict(copy.deepcopy(best_model_d))
self.save_all_models(task_id)
def train_epoch(self, train_loader, task_id):
self.model.train()
self.discriminator.train()
for data, target, tt, td in train_loader:
x=data.to(device=self.device)
y=target.to(device=self.device, dtype=torch.long)
tt=tt.to(device=self.device)
if self.use_memory:
# Detaching samples in the batch which do not belong to the current task before feeding them to P
t_current = task_id * torch.ones_like(tt)
body_mask = torch.eq(t_current, tt).cpu().numpy()
# x_task_module=data.to(device=self.device)
x_task_module = data.clone()
for index in range(x.size(0)):
if body_mask[index] == 0:
x_task_module[index] = x_task_module[index].detach()
x_task_module = x_task_module.to(device=self.device)
# Discriminator's real and fake task labels
t_real_D=td.to(self.device)
t_fake_D=torch.zeros_like(t_real_D).to(self.device)
# ================================================================== #
# Train Shared Module #
# ================================================================== #
# training S for s_steps
for s_step in range(self.s_steps):
self.optimizer_S.zero_grad()
self.model.zero_grad()
if self.use_memory:
output=self.model(x, x_task_module, tt)
else:
output = self.model(x, x)
# task_loss=self.task_loss(output, y)
task_loss=self.task_loss(output['out'], y)
shared_out, private_out = output['shared'], output['private']
dis_out_gen_training=self.discriminator.forward(shared_out)
adv_loss=self.adversarial_loss_s(dis_out_gen_training, t_real_D)
if self.diff == 'yes':
diff_loss=self.diff_loss(shared_out, private_out)
else:
diff_loss=torch.tensor(0).to(device=self.device, dtype=torch.float32)
self.diff_loss_reg=0
total_loss=task_loss + self.adv_loss_reg * adv_loss + self.diff_loss_reg * diff_loss
total_loss.backward(retain_graph=True)
self.optimizer_S.step()
# ================================================================== #
# Train Discriminator #
# ================================================================== #
# training discriminator for d_steps
for d_step in range(self.d_steps):
self.optimizer_D.zero_grad()
self.discriminator.zero_grad()
# training discriminator on real data
if self.use_memory:
output=self.model(x, x_task_module, tt)
else:
output = self.model(x, x)
# training discriminator on real data
shared_out, private_out = output['shared'], output['private']
dis_real_out=self.discriminator.forward(shared_out.detach())
dis_real_loss=self.adversarial_loss_d(dis_real_out, t_real_D)
if self.args.experiment == 'miniimagenet':
dis_real_loss*=self.args.adv
dis_real_loss.backward(retain_graph=True)
# training discriminator on fake data
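                # "Fake" inputs are Gaussian noise vectors with discriminator label 0, the class
                # reserved for non-task samples (real task samples carry labels 1..ntasks).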
z_fake=torch.as_tensor(np.random.normal(self.mu, self.sigma, (x.size(0), self.latent_dim)),
dtype=torch.float32, device=self.device)
dis_fake_out=self.discriminator.forward(z_fake)
dis_fake_loss=self.adversarial_loss_d(dis_fake_out, t_fake_D)
if self.args.experiment == 'miniimagenet':
dis_fake_loss*=self.args.adv
dis_fake_loss.backward(retain_graph=True)
self.optimizer_D.step()
return
def eval_(self, data_loader, task_id):
loss_a, loss_t, loss_d, loss_total=0, 0, 0, 0
correct_d, correct_t = 0, 0
num=0
batch=0
self.model.eval()
self.discriminator.eval()
res={}
with torch.no_grad():
for batch, (data, target, tt, td) in enumerate(data_loader):
x=data.to(device=self.device)
y=target.to(device=self.device, dtype=torch.long)
tt=tt.to(device=self.device)
t_real_D=td.to(self.device)
# Forward
output = self.model(x, x)
shared_out, private_out = output['shared'], output['private']
_, pred=output['out'].max(1)
correct_t+=pred.eq(y.view_as(pred)).sum().item()
# Discriminator's performance:
output_d=self.discriminator(shared_out)
_, pred_d=output_d.max(1)
correct_d+=pred_d.eq(t_real_D.view_as(pred_d)).sum().item()
# Loss values
task_loss=self.task_loss(output['out'], y)
adv_loss=self.adversarial_loss_d(output_d, t_real_D)
if self.diff == 'yes':
diff_loss=self.diff_loss(shared_out, private_out)
else:
diff_loss=torch.tensor(0).to(device=self.device, dtype=torch.float32)
self.diff_loss_reg=0
total_loss = task_loss + self.adv_loss_reg * adv_loss + self.diff_loss_reg * diff_loss
loss_t+=task_loss
loss_a+=adv_loss
loss_d+=diff_loss
loss_total+=total_loss
num+=x.size(0)
res['loss_t'], res['acc_t']=loss_t.item() / (batch + 1), 100 * correct_t / num
res['loss_a'], res['acc_d']=loss_a.item() / (batch + 1), 100 * correct_d / num
res['loss_d']=loss_d.item() / (batch + 1)
res['loss_tot']=loss_total.item() / (batch + 1)
res['size']=self.loader_size(data_loader)
return res
#
def test(self, data_loader, task_id, model):
loss_a, loss_t, loss_d, loss_total=0, 0, 0, 0
correct_d, correct_t=0, 0
num=0
batch=0
model.eval()
self.discriminator.eval()
res={}
with torch.no_grad():
for batch, (data, target, tt, td) in enumerate(data_loader):
x=data.to(device=self.device)
y=target.to(device=self.device, dtype=torch.long)
t_real_D=td.to(self.device)
# Forward
output = model(x, x)
# shared_out, private_out = self.model.get_encoded_ftrs(x, x)
shared_out, private_out = output['shared'], output['private']
_, pred=output['out'].max(1)
correct_t+=pred.eq(y.view_as(pred)).sum().item()
# Discriminator's performance:
output_d=self.discriminator(shared_out)
_, pred_d=output_d.max(1)
correct_d+=pred_d.eq(t_real_D.view_as(pred_d)).sum().item()
if self.diff == 'yes':
diff_loss=self.diff_loss(shared_out, private_out)
else:
diff_loss=torch.tensor(0).to(device=self.device, dtype=torch.float32)
self.diff_loss_reg=0
# Loss values
adv_loss=self.adversarial_loss_d(output_d, t_real_D)
task_loss=self.task_loss(output['out'], y)
total_loss=task_loss + self.adv_loss_reg * adv_loss + self.diff_loss_reg * diff_loss
loss_t+=task_loss
loss_a+=adv_loss
loss_d+=diff_loss
loss_total+=total_loss
num+=x.size(0)
res['loss_t'], res['acc_t']=loss_t.item() / (batch + 1), 100 * correct_t / num
res['loss_a'], res['acc_d']=loss_a.item() / (batch + 1), 100 * correct_d / num
res['loss_d']=loss_d.item() / (batch + 1)
res['loss_tot']=loss_total.item() / (batch + 1)
res['size']=self.loader_size(data_loader)
return res
def save_all_models(self, task_id):
print("Saving all models for task {} ...".format(task_id+1))
dis=utils.get_model(self.discriminator)
torch.save({'model_state_dict': dis,
}, os.path.join(self.checkpoint, 'discriminator_{}.pth.tar'.format(task_id)))
model=utils.get_model(self.model)
torch.save({'model_state_dict': model,
}, os.path.join(self.checkpoint, 'model_{}.pth.tar'.format(task_id)))
def load_model(self, task_id):
# Load a previous model
net=self.network.Net(self.args)
checkpoint=torch.load(os.path.join(self.checkpoint, 'model_{}.pth.tar'.format(task_id)))
net.load_state_dict(checkpoint['model_state_dict'])
# # Change the previous shared module with the current one
current_shared_module=deepcopy(self.model.shared.state_dict())
net.shared.load_state_dict(current_shared_module)
net=net.to(self.args.device)
return net
def load_checkpoint(self, task_id):
print("Loading checkpoint for task {} ...".format(task_id))
# Load a previous model
net=self.network.Net(self.args)
checkpoint=torch.load(os.path.join(self.checkpoint, 'model_{}.pth.tar'.format(task_id)))
net.load_state_dict(checkpoint['model_state_dict'])
net=net.to(self.args.device)
return net
def prepare_model(self, task_id):
# Load a previous model and grab its shared module
old_net = self.load_checkpoint(task_id-1)
old_shared_module = old_net.shared.state_dict()
# Instantiate a new model and replace its shared module
model = self.network.Net(self.args)
model.shared.load_state_dict(old_shared_module)
model = model.to(self.device)
return model
def loader_size(self, data_loader):
return data_loader.dataset.__len__()
def get_tsne_embeddings_first_ten_tasks(self, dataset, model):
from tensorboardX import SummaryWriter
model.eval()
tag_ = '_diff_{}'.format(self.args.diff)
all_images, all_shared, all_private = [], [], []
# Test final model on first 10 tasks:
writer = SummaryWriter()
for t in range(10):
for itr, (data, _, tt, td) in enumerate(dataset[t]['tsne']):
x = data.to(device=self.device)
tt = tt.to(device=self.device)
output = model.forward(x, x, tt, t)
shared_out, private_out = model.get_encoded_ftrs(x, x, t)
all_shared.append(shared_out)
all_private.append(private_out)
all_images.append(x)
print (torch.stack(all_shared).size())
tag = ['Shared10_{}_{}'.format(tag_,i) for i in range(1,11)]
writer.add_embedding(mat=torch.stack(all_shared,dim=1).data, label_img=torch.stack(all_images,dim=1).data, metadata=list(range(1,11)),
tag=tag)#, metadata_header=list(range(1,11)))
tag = ['Private10_{}_{}'.format(tag_, i) for i in range(1, 11)]
writer.add_embedding(mat=torch.stack(all_private,dim=1).data, label_img=torch.stack(all_images,dim=1).data, metadata=list(range(1,11)),
tag=tag)#,metadata_header=list(range(1,11)))
writer.close()
def get_tsne_embeddings_last_three_tasks(self, dataset, model):
from tensorboardX import SummaryWriter
# Test final model on last 3 tasks:
model.eval()
tag = '_diff_{}'.format(self.args.diff)
for t in [17,18,19]:
all_images, all_labels, all_shared, all_private = [], [], [], []
writer = SummaryWriter()
for itr, (data, target, tt, td) in enumerate(dataset[t]['tsne']):
x = data.to(device=self.device)
y = target.to(device=self.device, dtype=torch.long)
tt = tt.to(device=self.device)
output = model.forward(x, x, tt, t)
shared_out, private_out = model.get_encoded_ftrs(x, x, t)
# print (shared_out.size())
all_shared.append(shared_out)
all_private.append(private_out)
all_images.append(x)
all_labels.append(y)
writer.add_embedding(mat=torch.stack(all_shared,dim=1).data, label_img=torch.stack(all_images,dim=1).data,
metadata=list(range(1,6)), tag='Shared_{}_{}'.format(t, tag))
# ,metadata_header=list(range(1,6)))
writer.add_embedding(mat=torch.stack(all_private,dim=1).data, label_img=torch.stack(all_images,dim=1).data,
metadata=list(range(1,6)), tag='Private_{}_{}'.format(t, tag))
# ,metadata_header=list(range(1,6)))
writer.close()
def inference(self, data_loader, task_id, model):
loss_a, loss_t, loss_d, loss_total = 0, 0, 0, 0
correct_d, correct_t = 0, 0
num = 0
batch = 0
model.eval()
self.discriminator.eval()
res = {}
with torch.no_grad():
for batch, (data, target, tt, td) in enumerate(data_loader):
x = data.to(device=self.device)
y = target.to(device=self.device, dtype=torch.long)
tt = tt.to(device=self.device)
# Forward
output = model.forward(x, x, tt, task_id)
shared_out, task_out = model.get_encoded_ftrs(x, x, task_id)
_, pred = output.max(1)
correct_t += pred.eq(y.view_as(pred)).sum().item()
if self.diff == 'yes':
diff_loss = self.diff_loss(shared_out, task_out)
else:
diff_loss = torch.tensor(0).to(device=self.device, dtype=torch.float32)
self.diff_loss_reg = 0
# Loss values
task_loss = self.task_loss(output, y)
total_loss = task_loss + self.diff_loss_reg * diff_loss
loss_t += task_loss
# loss_a += adv_loss
loss_d += diff_loss
loss_total += total_loss
num += x.size(0)
res['loss_t'], res['acc_t'] = loss_t.item() / (batch + 1), 100 * correct_t / num
res['loss_d'] = loss_d.item() / (batch + 1)
res['loss_tot'] = loss_total.item() / (batch + 1)
res['size'] = self.loader_size(data_loader)
return res
#
class DiffLoss(torch.nn.Module):
# From: Domain Separation Networks (https://arxiv.org/abs/1608.06019)
# Konstantinos Bousmalis, George Trigeorgis, Nathan Silberman, Dilip Krishnan, Dumitru Erhan
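    # Penalizes the squared dot products between L2-normalized shared and private feature
    # vectors, i.e. pushes the two representations towards orthogonality.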
def __init__(self):
super(DiffLoss, self).__init__()
def forward(self, D1, D2):
D1=D1.view(D1.size(0), -1)
D1_norm=torch.norm(D1, p=2, dim=1, keepdim=True).detach()
D1_norm=D1.div(D1_norm.expand_as(D1) + 1e-6)
D2=D2.view(D2.size(0), -1)
D2_norm=torch.norm(D2, p=2, dim=1, keepdim=True).detach()
D2_norm=D2.div(D2_norm.expand_as(D2) + 1e-6)
# return torch.mean((D1_norm.mm(D2_norm.t()).pow(2)))
return torch.mean((D1_norm.mm(D2_norm.t()).pow(2)))
| Adversarial-Continual-Learning-main | ACL-resnet/src/acl.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
from copy import deepcopy
import pickle
import time
import uuid
from subprocess import call
########################################################################################################################
def human_format(num):
magnitude=0
while abs(num)>=1000:
magnitude+=1
num/=1000.0
return '%.1f%s'%(num,['','K','M','G','T','P'][magnitude])
def report_tr(res, e, sbatch, clock0, clock1):
# Training performance
print(
'| Epoch {:3d}, time={:5.1f}ms/{:5.1f}ms | Train losses={:.3f} | T: loss={:.3f}, acc={:5.2f}% | D: loss={:.3f}, acc={:5.1f}%, '
'Diff loss:{:.3f} |'.format(
e + 1,
1000 * sbatch * (clock1 - clock0) / res['size'],
1000 * sbatch * (time.time() - clock1) / res['size'], res['loss_tot'],
res['loss_t'], res['acc_t'], res['loss_a'], res['acc_d'], res['loss_d']), end='')
def report_val(res):
# Validation performance
print(' Valid losses={:.3f} | T: loss={:.6f}, acc={:5.2f}%, | D: loss={:.3f}, acc={:5.2f}%, Diff loss={:.3f} |'.format(
res['loss_tot'], res['loss_t'], res['acc_t'], res['loss_a'], res['acc_d'], res['loss_d']), end='')
########################################################################################################################
def get_model(model):
return deepcopy(model.state_dict())
########################################################################################################################
def compute_conv_output_size(Lin,kernel_size,stride=1,padding=0,dilation=1):
return int(np.floor((Lin+2*padding-dilation*(kernel_size-1)-1)/float(stride)+1))
########################################################################################################################
def save_print_log(taskcla, acc, lss, output_path):
print('*'*100)
print('Accuracies =')
for i in range(acc.shape[0]):
print('\t',end=',')
for j in range(acc.shape[1]):
print('{:5.4f}% '.format(acc[i,j]),end=',')
print()
print ('ACC: {:5.4f}%'.format((np.mean(acc[acc.shape[0]-1,:]))))
print()
print ('BWD Transfer = ')
print ()
print ("Diagonal R_ii")
for i in range(acc.shape[0]):
print('\t',end='')
print('{:5.2f}% '.format(np.diag(acc)[i]), end=',')
print()
print ("Last row")
for i in range(acc.shape[0]):
print('\t', end=',')
print('{:5.2f}% '.format(acc[-1][i]), end=',')
print()
# BWT calculated based on GEM paper (https://arxiv.org/abs/1706.08840)
gem_bwt = sum(acc[-1]-np.diag(acc))/ (len(acc[-1])-1)
# BWT calculated based on our UCB paper (https://openreview.net/pdf?id=HklUCCVKDB)
ucb_bwt = (acc[-1] - np.diag(acc)).mean()
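    # Worked example with hypothetical numbers: if diag(acc) = [90, 80, 70] and the last row
    # acc[-1] = [85, 75, 70], then gem_bwt = ((85-90) + (75-80) + (70-70)) / 2 = -5.0,
    # i.e. on average 5% accuracy was forgotten on earlier tasks.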
print ('BWT: {:5.2f}%'.format(gem_bwt))
# print ('BWT (UCB paper): {:5.2f}%'.format(ucb_bwt))
print('*'*100)
print('Done!')
logs = {}
# save results
logs['name'] = output_path
logs['taskcla'] = taskcla
logs['acc'] = acc
logs['loss'] = lss
logs['gem_bwt'] = gem_bwt
logs['ucb_bwt'] = ucb_bwt
logs['rii'] = np.diag(acc)
logs['rij'] = acc[-1]
# pickle
with open(os.path.join(output_path, 'logs.p'), 'wb') as output:
pickle.dump(logs, output)
print ("Log file saved in ", os.path.join(output_path, 'logs.p'))
def print_log_acc_bwt(taskcla, acc, lss, output_path, run_id):
print('*'*100)
print('Accuracies =')
for i in range(acc.shape[0]):
print('\t',end=',')
for j in range(acc.shape[1]):
print('{:5.4f}% '.format(acc[i,j]),end=',')
print()
avg_acc = np.mean(acc[acc.shape[0]-1,:])
print ('ACC: {:5.4f}%'.format(avg_acc))
print()
print()
# BWT calculated based on GEM paper (https://arxiv.org/abs/1706.08840)
gem_bwt = sum(acc[-1]-np.diag(acc))/ (len(acc[-1])-1)
# BWT calculated based on UCB paper (https://arxiv.org/abs/1906.02425)
ucb_bwt = (acc[-1] - np.diag(acc)).mean()
print ('BWT: {:5.2f}%'.format(gem_bwt))
# print ('BWT (UCB paper): {:5.2f}%'.format(ucb_bwt))
print('*'*100)
print('Done!')
logs = {}
# save results
logs['name'] = output_path
logs['taskcla'] = taskcla
logs['acc'] = acc
logs['loss'] = lss
logs['gem_bwt'] = gem_bwt
logs['ucb_bwt'] = ucb_bwt
logs['rii'] = np.diag(acc)
logs['rij'] = acc[-1]
# pickle
path = os.path.join(output_path, 'logs_run_id_{}.p'.format(run_id))
with open(path, 'wb') as output:
pickle.dump(logs, output)
print ("Log file saved in ", path)
return avg_acc, gem_bwt
def print_running_acc_bwt(acc, task_num):
print()
acc = acc[:task_num+1,:task_num+1]
avg_acc = np.mean(acc[acc.shape[0] - 1, :])
gem_bwt = sum(acc[-1] - np.diag(acc)) / (len(acc[-1]) - 1)
print('ACC: {:5.4f}% || BWT: {:5.2f}% '.format(avg_acc, gem_bwt))
print()
def make_directories(args):
uid = uuid.uuid4().hex
if args.checkpoint is None:
os.mkdir('checkpoints')
args.checkpoint = os.path.join('./checkpoints/',uid)
os.mkdir(args.checkpoint)
else:
if not os.path.exists(args.checkpoint):
os.mkdir(args.checkpoint)
args.checkpoint = os.path.join(args.checkpoint, uid)
os.mkdir(args.checkpoint)
def some_sanity_checks(args):
# Making sure the chosen experiment matches with the number of tasks performed in the paper:
datasets_tasks = {}
datasets_tasks['mnist5']=[5]
datasets_tasks['pmnist']=[10,20,30,40]
datasets_tasks['cifar100']=[20]
datasets_tasks['miniimagenet']=[20]
datasets_tasks['multidatasets']=[5]
if not args.ntasks in datasets_tasks[args.experiment]:
raise Exception("Chosen number of tasks ({}) does not match with {} experiment".format(args.ntasks,args.experiment))
    # Making sure the memory usage flags are consistent:
if args.use_memory == 'yes' and not args.samples > 0:
raise Exception("Flags required to use memory: --use_memory yes --samples n where n>0")
if args.use_memory == 'no' and args.samples > 0:
raise Exception("Flags required to use memory: --use_memory yes --samples n where n>0")
def save_code(args):
cwd = os.getcwd()
des = os.path.join(args.checkpoint, 'code') + '/'
if not os.path.exists(des):
os.mkdir(des)
def get_folder(folder):
return os.path.join(cwd,folder)
folders = [get_folder(item) for item in ['dataloaders', 'networks', 'configs', 'main.py', 'acl.py', 'utils.py']]
for folder in folders:
call('cp -rf {} {}'.format(folder, des),shell=True)
def print_time():
from datetime import datetime
# datetime object containing current date and time
now = datetime.now()
# dd/mm/YY H:M:S
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
print("Job finished at =", dt_string)
| Adversarial-Continual-Learning-main | ACL-resnet/src/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os,argparse,time
import numpy as np
from omegaconf import OmegaConf
from copy import deepcopy
import torch
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import utils
tstart=time.time()
# Arguments
parser = argparse.ArgumentParser(description='Adversarial Continual Learning...')
# Load the config file
parser.add_argument('--config', type=str, default='./configs/config_mnist5.yml')
flags = parser.parse_args()
args = OmegaConf.load(flags.config)
print()
########################################################################################################################
# Args -- Experiment
if args.experiment=='pmnist':
from dataloaders import pmnist as datagenerator
elif args.experiment=='mnist5':
from dataloaders import mnist5 as datagenerator
elif args.experiment=='cifar100':
from dataloaders import cifar100 as datagenerator
elif args.experiment=='miniimagenet':
from dataloaders import miniimagenet as datagenerator
elif args.experiment=='multidatasets':
from dataloaders import mulitidatasets as datagenerator
else:
raise NotImplementedError
from acl import ACL as approach
# Args -- Network
if args.experiment == 'mnist5' or args.experiment == 'pmnist':
from networks import mlp_acl as network
elif args.experiment == 'cifar100' or args.experiment == 'miniimagenet' or args.experiment == 'multidatasets':
if args.arch == 'alexnet':
from networks import alexnet_acl as network
elif args.arch == 'resnet':
from networks import resnet_acl as network
else:
raise NotImplementedError
else:
raise NotImplementedError
########################################################################################################################
def run(args, run_id):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(args.seed)
# Faster run but not deterministic:
# torch.backends.cudnn.benchmark = True
# To get deterministic results that match with paper at cost of lower speed:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Data loader
print('Instantiate data generators and model...')
dataloader = datagenerator.DatasetGen(args)
args.taskcla, args.inputsize = dataloader.taskcla, dataloader.inputsize
if args.experiment == 'multidatasets': args.lrs = dataloader.lrs
# Model
net = network.Net(args)
net = net.to(args.device)
net.print_model_size()
# print (net)
# Approach
appr=approach(net,args,network=network)
# Loop tasks
acc=np.zeros((len(args.taskcla),len(args.taskcla)),dtype=np.float32)
lss=np.zeros((len(args.taskcla),len(args.taskcla)),dtype=np.float32)
for t,ncla in args.taskcla:
print('*'*250)
dataset = dataloader.get(t)
print(' '*105, 'Dataset {:2d} ({:s})'.format(t+1,dataset[t]['name']))
print('*'*250)
# Train
appr.train(t,dataset[t])
print('-'*250)
print()
for u in range(t+1):
# Load previous model and replace the shared module with the current one
test_model = appr.load_model(u)
test_res = appr.test(dataset[u]['test'], u, model=test_model)
print('>>> Test on task {:2d} - {:15s}: loss={:.3f}, acc={:5.1f}% <<<'.format(u, dataset[u]['name'],
test_res['loss_t'],
test_res['acc_t']))
acc[t, u] = test_res['acc_t']
lss[t, u] = test_res['loss_t']
# Save
print()
print('Saved accuracies at '+os.path.join(args.checkpoint,args.output))
np.savetxt(os.path.join(args.checkpoint,args.output),acc,'%.6f')
# Extract embeddings to plot in tensorboard for miniimagenet
if args.tsne == 'yes' and args.experiment == 'miniimagenet':
appr.get_tsne_embeddings_first_ten_tasks(dataset, model=appr.load_model(t))
appr.get_tsne_embeddings_last_three_tasks(dataset, model=appr.load_model(t))
avg_acc, gem_bwt = utils.print_log_acc_bwt(args.taskcla, acc, lss, output_path=args.checkpoint, run_id=run_id)
return avg_acc, gem_bwt
#######################################################################################################################
def main(args):
utils.make_directories(args)
utils.some_sanity_checks(args)
utils.save_code(args)
print('=' * 100)
print('Arguments =')
for arg in vars(args):
print('\t' + arg + ':', getattr(args, arg))
print('=' * 100)
accuracies, forgetting = [], []
for n in range(args.num_runs):
args.seed = n
args.output = '{}_{}_tasks_seed_{}.txt'.format(args.experiment, args.ntasks, args.seed)
print ("args.output: ", args.output)
print (" >>>> Run #", n)
acc, bwt = run(args, n)
accuracies.append(acc)
forgetting.append(bwt)
print('*' * 100)
print ("Average over {} runs: ".format(args.num_runs))
print ('AVG ACC: {:5.4f}% \pm {:5.4f}'.format(np.array(accuracies).mean(), np.array(accuracies).std()))
print ('AVG BWT: {:5.2f}% \pm {:5.4f}'.format(np.array(forgetting).mean(), np.array(forgetting).std()))
print ("All Done! ")
print('[Elapsed time = {:.1f} min]'.format((time.time()-tstart)/(60)))
utils.print_time()
def test_trained_model(args, final_model_id):
args.seed = 0
print('Instantiate data generators and model...')
dataloader = datagenerator.DatasetGen(args)
args.taskcla, args.inputsize = dataloader.taskcla, dataloader.inputsize
if args.experiment == 'multidatasets': args.lrs = dataloader.lrs
def get_model(final_model_id, test_data_id):
# Load the test model
test_net = network.Net(args)
checkpoint_test = torch.load(os.path.join(args.checkpoint, 'model_{}.pth.tar'.format(test_data_id)))
test_net.load_state_dict(checkpoint_test['model_state_dict'])
# Load your final trained model
net = network.Net(args)
checkpoint = torch.load(os.path.join(args.checkpoint, 'model_{}.pth.tar'.format(final_model_id)))
net.load_state_dict(checkpoint['model_state_dict'])
# # Change the shared module with the final model's shared module
final_shared = deepcopy(net.shared.state_dict())
test_net.shared.load_state_dict(final_shared)
test_net = test_net.to(args.device)
return test_net
for t,ncla in args.taskcla:
print('*'*250)
dataset = dataloader.get(t)
print(' '*105, 'Dataset {:2d} ({:s})'.format(t+1,dataset[t]['name']))
print('*'*250)
# Model
test_model = get_model(final_model_id, test_data_id=t)
# Approach
appr = approach(test_model, args, network=network)
# Test
test_res = appr.inference(dataset[t]['test'], t, model=test_model)
print('>>> Test on task {:2d} - {:15s}: loss={:.3f}, acc={:5.4f}% <<<'.format(t, dataset[t]['name'],
test_res['loss_t'],
test_res['acc_t']))
#######################################################################################################################
if __name__ == '__main__':
main(args)
# test_trained_model(args, final_model_id=4) | Adversarial-Continual-Learning-main | ACL-resnet/src/main.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
from PIL import Image
import os
import os.path
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
import numpy as np
import torch
from torchvision import datasets, transforms
from utils import *
class iCIFAR10(datasets.CIFAR10):
def __init__(self, root, classes, memory_classes, memory, task_num, train, transform=None, target_transform=None, download=True):
super(iCIFAR10, self).__init__(root, transform=transform,
target_transform=target_transform, download=True)
self.train = train # training set or test set
if not isinstance(classes, list):
classes = [classes]
self.class_mapping = {c: i for i, c in enumerate(classes)}
self.class_indices = {}
for cls in classes:
self.class_indices[self.class_mapping[cls]] = []
if self.train:
train_data = []
train_labels = []
train_tt = [] # task module labels
            train_td = [] # discriminator labels
for i in range(len(self.data)):
if self.targets[i] in classes:
train_data.append(self.data[i])
train_labels.append(self.class_mapping[self.targets[i]])
train_tt.append(task_num)
train_td.append(task_num+1)
self.class_indices[self.class_mapping[self.targets[i]]].append(i)
if memory_classes:
for task_id in range(task_num):
for i in range(len(memory[task_id]['x'])):
if memory[task_id]['y'][i] in range(len(memory_classes[task_id])):
train_data.append(memory[task_id]['x'][i])
train_labels.append(memory[task_id]['y'][i])
train_tt.append(memory[task_id]['tt'][i])
train_td.append(memory[task_id]['td'][i])
self.train_data = np.array(train_data)
self.train_labels = train_labels
self.train_tt = train_tt
self.train_td = train_td
if not self.train:
f = self.test_list[0][0]
file = os.path.join(self.root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
else:
entry = pickle.load(fo, encoding='latin1')
self.test_data = entry['data']
if 'labels' in entry:
self.test_labels = entry['labels']
else:
self.test_labels = entry['fine_labels']
fo.close()
self.test_data = self.test_data.reshape((10000, 3, 32, 32))
self.test_data = self.test_data.transpose((0, 2, 3, 1)) # convert to HWC
test_data = []
test_labels = []
test_tt = [] # task module labels
            test_td = [] # discriminator labels
for i in range(len(self.test_data)):
if self.test_labels[i] in classes:
test_data.append(self.test_data[i])
test_labels.append(self.class_mapping[self.test_labels[i]])
test_tt.append(task_num)
test_td.append(task_num + 1)
self.class_indices[self.class_mapping[self.test_labels[i]]].append(i)
self.test_data = np.array(test_data)
self.test_labels = test_labels
self.test_tt = test_tt
self.test_td = test_td
def __getitem__(self, index):
if self.train:
img, target, tt, td = self.train_data[index], self.train_labels[index], self.train_tt[index], self.train_td[index]
else:
img, target, tt, td = self.test_data[index], self.test_labels[index], self.test_tt[index], self.test_td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
try:
img = Image.fromarray(img)
except:
pass
try:
if self.transform is not None:
img = self.transform(img)
except:
pass
try:
if self.target_transform is not None:
target = self.target_transform(target)
except:
pass
return img, target, tt, td
def __len__(self):
if self.train:
return len(self.train_data)
else:
return len(self.test_data)
class iCIFAR100(iCIFAR10):
"""`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
This is a subclass of the `CIFAR10` Dataset.
"""
base_folder = 'cifar-100-python'
url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
meta = {
'filename': 'meta',
'key': 'fine_label_names',
'md5': '7973b15100ade9c7d40fb424638fde48',
}
class DatasetGen(object):
"""docstring for DatasetGen"""
def __init__(self, args):
super(DatasetGen, self).__init__()
self.seed = args.seed
self.batch_size=args.batch_size
self.pc_valid=args.pc_valid
self.root = args.data_dir
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.num_classes = 100
self.num_samples = args.samples
self.inputsize = [3,32,32]
mean=[x/255 for x in [125.3,123.0,113.9]]
std=[x/255 for x in [63.0,62.1,66.7]]
self.transformation = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
self.taskcla = [[t, int(self.num_classes/self.num_tasks)] for t in range(self.num_tasks)]
self.indices = {}
self.dataloaders = {}
self.idx={}
self.num_workers = args.workers
self.pin_memory = True
np.random.seed(self.seed)
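        # Randomly partition the 100 class labels into `ntasks` disjoint, equally sized groups;
        # each group defines the label set of one task.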
task_ids = np.split(np.random.permutation(self.num_classes),self.num_tasks)
self.task_ids = [list(arr) for arr in task_ids]
self.train_set = {}
self.test_set = {}
self.train_split = {}
self.task_memory = {}
for i in range(self.num_tasks):
self.task_memory[i] = {}
self.task_memory[i]['x'] = []
self.task_memory[i]['y'] = []
self.task_memory[i]['tt'] = []
self.task_memory[i]['td'] = []
self.use_memory = args.use_memory
def get(self, task_id):
self.dataloaders[task_id] = {}
sys.stdout.flush()
if task_id == 0:
memory_classes = None
memory=None
else:
memory_classes = self.task_ids
memory = self.task_memory
self.train_set[task_id] = iCIFAR100(root=self.root, classes=self.task_ids[task_id], memory_classes=memory_classes,
memory=memory, task_num=task_id, train=True, download=True, transform=self.transformation)
self.test_set[task_id] = iCIFAR100(root=self.root, classes=self.task_ids[task_id], memory_classes=None,
memory=None, task_num=task_id, train=False,
download=True, transform=self.transformation)
split = int(np.floor(self.pc_valid * len(self.train_set[task_id])))
train_split, valid_split = torch.utils.data.random_split(self.train_set[task_id], [len(self.train_set[task_id]) - split, split])
self.train_split[task_id] = train_split
train_loader = torch.utils.data.DataLoader(train_split, batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory,shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_split, batch_size=int(self.batch_size * self.pc_valid),
num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=True)
test_loader = torch.utils.data.DataLoader(self.test_set[task_id], batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory,shuffle=True)
self.dataloaders[task_id]['train'] = train_loader
self.dataloaders[task_id]['valid'] = valid_loader
self.dataloaders[task_id]['test'] = test_loader
self.dataloaders[task_id]['name'] = 'CIFAR100-{}-{}'.format(task_id,self.task_ids[task_id])
print ("Training set size: {} images of {}x{}".format(len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Validation set size: {} images of {}x{}".format(len(valid_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Train+Val set size: {} images of {}x{}".format(len(valid_loader.dataset)+len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Test set size: {} images of {}x{}".format(len(test_loader.dataset),self.inputsize[1],self.inputsize[1]))
        if self.use_memory == 'yes' and self.num_samples > 0:
self.update_memory(task_id)
return self.dataloaders
def update_memory(self, task_id):
num_samples_per_class = self.num_samples // len(self.task_ids[task_id])
mem_class_mapping = {i: i for i, c in enumerate(self.task_ids[task_id])}
# Looping over each class in the current task
for i in range(len(self.task_ids[task_id])):
            # Build a loader over the task's full training split (note: the split is
            # not filtered by class here; random indices are drawn from all of it)
data_loader = torch.utils.data.DataLoader(self.train_split[task_id], batch_size=1,
num_workers=self.num_workers,
pin_memory=self.pin_memory)
            # Randomly choose num_samples_per_class indices from the split; each picked
            # sample is stored in memory under this class's remapped label
randind = torch.randperm(len(data_loader.dataset))[:num_samples_per_class]
# Adding the selected samples to memory
for ind in randind:
self.task_memory[task_id]['x'].append(data_loader.dataset[ind][0])
self.task_memory[task_id]['y'].append(mem_class_mapping[i])
self.task_memory[task_id]['tt'].append(data_loader.dataset[ind][2])
self.task_memory[task_id]['td'].append(data_loader.dataset[ind][3])
print ('Memory updated by adding {} images'.format(len(self.task_memory[task_id]['x'])))
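# Illustrative sketch (not part of the original file): DatasetGen only reads a few
# fields from `args`, so a plain Namespace with assumed example values is enough to
# drive it.
#
#     from argparse import Namespace
#     args = Namespace(seed=0, batch_size=64, pc_valid=0.15, data_dir='../data/',
#                      latent_dim=128, ntasks=10, samples=0, workers=4,
#                      use_memory='no')
#     datagen = DatasetGen(args)
#     loaders = datagen.get(0)                        # build loaders for task 0
#     x, y, tt, td = next(iter(loaders[0]['train']))  # images, labels, task-id targets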
| Adversarial-Continual-Learning-main | ACL-resnet/src/dataloaders/cifar100.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# https://github.com/pytorch/vision/blob/8635be94d1216f10fb8302da89233bd86445e449/torchvision/datasets/utils.py
import os
import os.path
import hashlib
import gzip
import errno
import tarfile
import zipfile
import numpy as np
import torch
import codecs
from torch.utils.model_zoo import tqdm
def gen_bar_updater():
pbar = tqdm(total=None)
def bar_update(count, block_size, total_size):
if pbar.total is None and total_size:
pbar.total = total_size
progress_bytes = count * block_size
pbar.update(progress_bytes - pbar.n)
return bar_update
def calculate_md5(fpath, chunk_size=1024 * 1024):
md5 = hashlib.md5()
with open(fpath, 'rb') as f:
for chunk in iter(lambda: f.read(chunk_size), b''):
md5.update(chunk)
return md5.hexdigest()
def check_md5(fpath, md5, **kwargs):
return md5 == calculate_md5(fpath, **kwargs)
def check_integrity(fpath, md5=None):
if not os.path.isfile(fpath):
return False
if md5 is None:
return True
return check_md5(fpath, md5)
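# Illustrative sketch: verify a previously downloaded CIFAR-100 archive against the
# checksum listed for it earlier in this document (the local path is an assumed example).
#
#     ok = check_integrity("./data/cifar-100-python.tar.gz",
#                          md5="eb9058c3a382ffc7106e4002c42a8d85")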
def makedir_exist_ok(dirpath):
"""
Python2 support for os.makedirs(.., exist_ok=True)
"""
try:
os.makedirs(dirpath)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
def download_url(url, root, filename=None, md5=None):
"""Download a file from a url and place it in root.
Args:
url (str): URL to download file from
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the basename of the URL
md5 (str, optional): MD5 checksum of the download. If None, do not check
"""
from six.moves import urllib
root = os.path.expanduser(root)
if not filename:
filename = os.path.basename(url)
fpath = os.path.join(root, filename)
makedir_exist_ok(root)
# downloads file
if check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
try:
print('Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater()
)
except (urllib.error.URLError, IOError) as e:
if url[:5] == 'https':
url = url.replace('https:', 'http:')
print('Failed download. Trying https -> http instead.'
' Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater()
)
else:
raise e
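# Illustrative sketch: fetch the CIFAR-100 archive with the URL, filename, and MD5
# used by the dataset class above; the destination directory is an assumed example.
#
#     download_url("https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz",
#                  root="./data",
#                  filename="cifar-100-python.tar.gz",
#                  md5="eb9058c3a382ffc7106e4002c42a8d85")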
def list_dir(root, prefix=False):
"""List all directories at a given root
Args:
root (str): Path to directory whose folders need to be listed
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the directories found
"""
root = os.path.expanduser(root)
directories = list(
filter(
lambda p: os.path.isdir(os.path.join(root, p)),
os.listdir(root)
)
)
if prefix is True:
directories = [os.path.join(root, d) for d in directories]
return directories
def list_files(root, suffix, prefix=False):
"""List all files ending with a suffix at a given root
Args:
root (str): Path to directory whose folders need to be listed
suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
It uses the Python "str.endswith" method and is passed directly
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the files found
"""
root = os.path.expanduser(root)
files = list(
filter(
lambda p: os.path.isfile(os.path.join(root, p)) and p.endswith(suffix),
os.listdir(root)
)
)
if prefix is True:
files = [os.path.join(root, d) for d in files]
return files
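# Illustrative sketch: the directory and suffixes below are assumed example values.
#
#     subdirs = list_dir("./data", prefix=True)                    # paths prefixed with root
#     images = list_files("./data/images", (".png", ".jpg"), prefix=True)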
def download_file_from_google_drive(file_id, root, filename=None, md5=None):
"""Download a Google Drive file from and place it in root.
Args:
file_id (str): id of file to be downloaded
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the id of the file.
md5 (str, optional): MD5 checksum of the download. If None, do not check
"""
# Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
import requests
url = "https://docs.google.com/uc?export=download"
root = os.path.expanduser(root)
if not filename:
filename = file_id
fpath = os.path.join(root, filename)
makedir_exist_ok(root)
if os.path.isfile(fpath) and check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
session = requests.Session()
response = session.get(url, params={'id': file_id}, stream=True)
token = _get_confirm_token(response)
if token:
params = {'id': file_id, 'confirm': token}
response = session.get(url, params=params, stream=True)
_save_response_content(response, fpath)
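# Illustrative sketch: the file id, directory, and filename below are placeholders only,
# not a reference to any real file.
#
#     download_file_from_google_drive(file_id="FILE_ID_PLACEHOLDER",
#                                     root="./data",
#                                     filename="archive.zip")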
def _get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def _save_response_content(response, destination, chunk_size=32768):
with open(destination, "wb") as f:
pbar = tqdm(total=None)
progress = 0
for chunk in response.iter_content(chunk_size):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
progress += len(chunk)
pbar.update(progress - pbar.n)
pbar.close()
def _is_tar(filename):
return filename.endswith(".tar")
def _is_targz(filename):
return filename.endswith(".tar.gz")
def _is_gzip(filename):
return filename.endswith(".gz") and not filename.endswith(".tar.gz")
def _is_zip(filename):
return filename.endswith(".zip")
def extract_archive(from_path, to_path=None, remove_finished=False):
if to_path is None:
to_path = os.path.dirname(from_path)
if _is_tar(from_path):
with tarfile.open(from_path, 'r') as tar:
tar.extractall(path=to_path)
elif _is_targz(from_path):
with tarfile.open(from_path, 'r:gz') as tar:
tar.extractall(path=to_path)
elif _is_gzip(from_path):
to_path = os.path.join(to_path, os.path.splitext(os.path.basename(from_path))[0])
with open(to_path, "wb") as out_f, gzip.GzipFile(from_path) as zip_f:
out_f.write(zip_f.read())
elif _is_zip(from_path):
with zipfile.ZipFile(from_path, 'r') as z:
z.extractall(to_path)
else:
raise ValueError("Extraction of {} not supported".format(from_path))
if remove_finished:
os.remove(from_path)
def download_and_extract_archive(url, download_root, extract_root=None, filename=None,
md5=None, remove_finished=False):
download_root = os.path.expanduser(download_root)
if extract_root is None:
extract_root = download_root
if not filename:
filename = os.path.basename(url)
download_url(url, download_root, filename, md5)
archive = os.path.join(download_root, filename)
print("Extracting {} to {}".format(archive, extract_root))
extract_archive(archive, extract_root, remove_finished)
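# Illustrative sketch: download and unpack CIFAR-100 in one call, using the URL and
# MD5 listed earlier; the download root is an assumed example.
#
#     download_and_extract_archive(
#         url="https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz",
#         download_root="./data",
#         md5="eb9058c3a382ffc7106e4002c42a8d85")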
def iterable_to_str(iterable):
return "'" + "', '".join([str(item) for item in iterable]) + "'"
def verify_str_arg(value, arg=None, valid_values=None, custom_msg=None):
if not isinstance(value, torch._six.string_classes):
if arg is None:
msg = "Expected type str, but got type {type}."
else:
msg = "Expected type str for argument {arg}, but got type {type}."
msg = msg.format(type=type(value), arg=arg)
raise ValueError(msg)
if valid_values is None:
return value
if value not in valid_values:
if custom_msg is not None:
msg = custom_msg
else:
msg = ("Unknown value '{value}' for argument {arg}. "
"Valid values are {{{valid_values}}}.")
msg = msg.format(value=value, arg=arg,
valid_values=iterable_to_str(valid_values))
raise ValueError(msg)
return value
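# Illustrative sketch: validate a string argument against an allowed set; the argument
# name and values are assumed examples.
#
#     split = verify_str_arg("train", arg="split", valid_values=("train", "test"))
#     # verify_str_arg("val", arg="split", valid_values=("train", "test"))  -> ValueError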
def get_int(b):
return int(codecs.encode(b, 'hex'), 16)
def open_maybe_compressed_file(path):
"""Return a file object that possibly decompresses 'path' on the fly.
Decompression occurs when argument `path` is a string and ends with '.gz' or '.xz'.
"""
if not isinstance(path, torch._six.string_classes):
return path
if path.endswith('.gz'):
import gzip
return gzip.open(path, 'rb')
if path.endswith('.xz'):
import lzma
return lzma.open(path, 'rb')
return open(path, 'rb')
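# Illustrative sketch: both calls return binary file objects; the paths are assumed
# examples.
#
#     f = open_maybe_compressed_file("train-labels-idx1-ubyte.gz")  # gzip-decompressed stream
#     g = open_maybe_compressed_file("train-labels-idx1-ubyte")     # plain open(..., 'rb')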
def read_sn3_pascalvincent_tensor(path, strict=True):
"""Read a SN3 file in "Pascal Vincent" format (Lush file 'libidx/idx-io.lsh').
Argument may be a filename, compressed filename, or file object.
"""
# typemap
if not hasattr(read_sn3_pascalvincent_tensor, 'typemap'):
read_sn3_pascalvincent_tensor.typemap = {
8: (torch.uint8, np.uint8, np.uint8),
9: (torch.int8, np.int8, np.int8),
11: (torch.int16, np.dtype('>i2'), 'i2'),
12: (torch.int32, np.dtype('>i4'), 'i4'),
13: (torch.float32, np.dtype('>f4'), 'f4'),
14: (torch.float64, np.dtype('>f8'), 'f8')}
# read
with open_maybe_compressed_file(path) as f:
data = f.read()
# parse
magic = get_int(data[0:4])
nd = magic % 256
ty = magic // 256
assert nd >= 1 and nd <= 3
assert ty >= 8 and ty <= 14
m = read_sn3_pascalvincent_tensor.typemap[ty]
s = [get_int(data[4 * (i + 1): 4 * (i + 2)]) for i in range(nd)]
parsed = np.frombuffer(data, dtype=m[1], offset=(4 * (nd + 1)))
assert parsed.shape[0] == np.prod(s) or not strict
return torch.from_numpy(parsed.astype(m[2], copy=False)).view(*s)
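# Worked example of the header parsing above, assuming an MNIST-style IDX image file:
# its magic number is 0x00000803, so nd = 0x803 % 256 = 3 dimensions and
# ty = 0x803 // 256 = 8 (uint8); the next three big-endian int32 values give the
# shape (num_images, rows, cols).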
def read_label_file(path):
with open(path, 'rb') as f:
x = read_sn3_pascalvincent_tensor(f, strict=False)
assert(x.dtype == torch.uint8)
assert(x.ndimension() == 1)
return x.long()
def read_image_file(path):
with open(path, 'rb') as f:
x = read_sn3_pascalvincent_tensor(f, strict=False)
assert(x.dtype == torch.uint8)
assert(x.ndimension() == 3)
    return x
| Adversarial-Continual-Learning-main | ACL-resnet/src/dataloaders/utils.py |