python_code | repo_name | file_path
---|---|---|
import random
import numpy as np
import pytorch_lightning as pl
import torch
import wandb
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.utilities.seed import seed_everything
from src.lightning.data_modules.receptacle_data_module import \
ReceptacleDataModule
from src.lightning.modules.linear_probe_module import LinearProbeModule
from src.lightning.custom_callbacks import ConfusionLogger, ReceptacleImagePredictionLogger
class LinearProbeTrainer(object):
def __init__(self, conf):
self.conf = conf
seed_everything(self.conf.seed)
def run(self):
# Init our data pipeline
dm = ReceptacleDataModule(self.conf.batch_size, self.conf.data_path, self.conf.task)
# To access the train/val/test dataloaders we need to call prepare_data and setup.
dm.prepare_data()
dm.setup()
# Init our model
model = LinearProbeModule(self.conf)
wandb_logger = WandbLogger(project=self.conf.project_name,
name=self.conf.experiment_name,
job_type='train')
# defining callbacks
checkpoint_callback = ModelCheckpoint(dirpath=self.conf.checkpoint_path,
filename='model/model-{epoch}-{val_acc:.2f}',
verbose=True,
monitor='val_loss',
mode='min',
every_n_val_epochs=5)
learning_rate_callback = LearningRateMonitor(logging_interval='epoch')
input_callback = ReceptacleImagePredictionLogger()
confusion_callback = ConfusionLogger(self.conf.classes)
# set up the trainer
trainer = pl.Trainer(max_epochs=self.conf.epochs,
check_val_every_n_epoch=5,
progress_bar_refresh_rate=self.conf.progress_bar_refresh_rate,
gpus=self.conf.gpus,
logger=wandb_logger,
callbacks=[learning_rate_callback,
checkpoint_callback,
input_callback,
confusion_callback],
checkpoint_callback=True)
# Train the model
trainer.fit(model, dm)
# Evaluate the model on the held out test set
trainer.test()
# Close wandb run
wandb.finish()
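# Hypothetical usage sketch (illustrative only, not part of the original file): the
# trainer expects a config object exposing at least the attributes read above (seed,
# batch_size, data_path, task, project_name, experiment_name, checkpoint_path,
# classes, epochs, progress_bar_refresh_rate, gpus); the wrapped LinearProbeModule
# and data module read further fields, so the exact config schema is experiment-specific.
#
#     conf = some_experiment_config()  # e.g. an argparse.Namespace or OmegaConf object (hypothetical helper)
#     trainer = LinearProbeTrainer(conf)
#     trainer.run()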
| CSR-main | src/lightning/trainers/linear_probe_trainer.py |
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
import wandb
from src.models.backbones import FeatureLearner
from torch.optim import SGD, Adam
from torch.optim.lr_scheduler import CosineAnnealingLR
from torchmetrics import Accuracy, ConfusionMatrix
class ReceptacleModule(pl.LightningModule):
def __init__(self, conf):
super().__init__()
self.conf = conf
self.encoder = FeatureLearner(
in_channels=conf.in_channels,
channel_width=conf.channel_width,
pretrained=conf.pretrained,
num_classes=conf.num_classes,
backbone_str=conf.backbone)
self.train_acc = Accuracy()
self.val_acc = Accuracy()
self.test_acc = Accuracy()
self.val_confmat = ConfusionMatrix(num_classes=len(self.conf.classes))
self.test_confmat = ConfusionMatrix(num_classes=len(self.conf.classes))
self.val_misclass = {}
self.save_hyperparameters()
# will be used during inference
def forward(self, x):
return self.encoder(x)
# logic for a single training step
def training_step(self, batch, batch_idx):
x_dict, target = batch
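# the network input is 5 channels: the 3-channel RGB image concatenated with the two
# single-channel object masks along the channel dimension (hence in_channels=5 for the backbones)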
x = torch.cat((x_dict['image'], x_dict['mask_1'], x_dict['mask_2']), 1)
pred = self(x)
loss = F.cross_entropy(pred, target)
# training metrics
acc = self.train_acc(torch.argmax(pred, dim=1), target)
self.log('train_loss', loss, on_step=True, on_epoch=True, logger=True)
self.log('train_acc', acc, on_step=True, on_epoch=True, logger=True)
return loss
# logic for a single validation step
def validation_step(self, batch, batch_idx):
x_dict, target = batch
x = torch.cat((x_dict['image'], x_dict['mask_1'], x_dict['mask_2']), 1)
pred = self(x)
loss = F.cross_entropy(pred, target)
# validation metrics
flat_preds = torch.argmax(pred, dim=1)
acc = self.val_acc(flat_preds, target)
self.val_confmat(flat_preds, target)
misclass_indicator = flat_preds != target
indices = torch.arange(x.shape[0])
self.val_misclass[batch_idx] = [indices[misclass_indicator],
flat_preds[misclass_indicator], target[misclass_indicator]]
self.log('val_loss', loss, on_epoch=True)
self.log('val_acc', acc, on_epoch=True)
return loss
# logic for a single testing step
def test_step(self, batch, batch_idx):
x_dict, target = batch
x = torch.cat((x_dict['image'], x_dict['mask_1'], x_dict['mask_2']), 1)
pred = self(x)
loss = F.cross_entropy(pred, target)
# test metrics
flat_preds = torch.argmax(pred, dim=1)
acc = self.test_acc(flat_preds, target)
self.test_confmat(flat_preds, target)
self.log('test_loss', loss, prog_bar=True)
self.log('test_acc', acc, prog_bar=True)
return loss
def configure_optimizers(self):
optimizer = None
if self.conf.optimizer == 'SGD':
optimizer = SGD(self.parameters(), lr=self.conf.lr,
momentum=0.9, weight_decay=self.conf.weight_decay)
elif self.conf.optimizer == 'Adam':
optimizer = Adam(self.parameters(), lr=self.conf.lr,
weight_decay=self.conf.weight_decay)
else:
raise NotImplementedError('Optimizer not supported, need to add it.')
scheduler = None
if self.conf.scheduler == 'CosineAnnealingLR':
scheduler = CosineAnnealingLR(
optimizer, T_max=self.conf.epochs, last_epoch=-1)
else:
raise NotImplementedError('Scheduler not supported, need to add it.')
lr_scheduler = {'scheduler': scheduler, 'monitor': 'val_acc'}
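# note: the 'monitor' key is only consulted by metric-driven schedulers such as
# ReduceLROnPlateau; CosineAnnealingLR steps on a fixed schedule, so it has no effect here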
return [optimizer], [lr_scheduler]
| CSR-main | src/lightning/modules/receptacle_module.py |
| CSR-main | src/lightning/modules/__init__.py |
import os
from src.lightning.modules.sim_siam_module import SimSiamModule
from src.lightning.modules.moco2_module_old import MocoV2
from src.shared.utils import check_none_or_empty, load_lightning_inference, load_lightning_train
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
import wandb
from src.models.backbones import FeatureLearner, FeedForward
from torch.optim import SGD, Adam
from torch.optim.lr_scheduler import CosineAnnealingLR
from torchmetrics import Accuracy, ConfusionMatrix
from torch.nn import Linear, modules
class LinearProbeModule(pl.LightningModule):
def __init__(self, conf):
super().__init__()
self.conf = conf
assert conf.num_classes != 0
module = None
self.encoder = None
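# the backbone is obtained in one of three ways:
#   1) conf.module == 'MocoV2': load a full MocoV2 checkpoint and reuse its query-encoder backbone
#   2) conf.module == 'FeatureLearner': load a raw checkpoint, strip the 'encoder.' prefix from the
#      keys, and load the result into the query backbone of a freshly constructed MocoV2
#   3) no valid load_path: fall back to ImageNet-pretrained or randomly initialized weights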
if not check_none_or_empty(conf.load_path) and os.path.exists(conf.load_path):
if conf.module == 'MocoV2':
module = None
if self.conf.freeze:
assert False
module = load_lightning_inference(conf.load_path, MocoV2)
else:
module = load_lightning_train(conf.load_path, MocoV2)
self.encoder = module.encoder_q[0]
elif conf.module == 'FeatureLearner':
state_dict = torch.load(conf.load_path)['state_dict']
for k in list(state_dict.keys()):
# retain only encoder_q up to before the embedding layer
if k.startswith('encoder.resnet'):# and not k.startswith('encoder.resnet.fc'):
# remove prefix
state_dict[k[len("encoder."):]] = state_dict[k]
# delete renamed or unused k
del state_dict[k]
module = MocoV2(
in_channels=conf.in_channels,
channel_width=conf.channel_width,
pretrained=conf.pretrained,
backbone_str=conf.backbone)
module.encoder_q[0].load_state_dict(state_dict)
if conf.freeze:
module.eval()
module.freeze()
self.encoder = module.encoder_q[0]
# self.encoder = FeatureLearner(in_channels=conf.in_channels)
# self.encoder.load_state_dict(state_dict)
# if conf.freeze:
# self.encoder.eval()
else:
raise ValueError('Unsupported module type')
else:
if conf.pretrained:
print('[WARNING]: using ImageNet features')
else:
print('[WARNING]: using random features')
module = MocoV2(
in_channels=conf.in_channels,
channel_width=conf.channel_width,
pretrained=conf.pretrained,
backbone_str=conf.backbone)
if conf.freeze:
module.eval()
module.freeze()
self.encoder = module.encoder_q[0]
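# linear probe head: a single fully-connected layer mapping the 512-d backbone features to
# class logits; when conf.freeze is set, only this layer receives gradient updates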
self.linear = Linear(512, conf.num_classes)
self.train_acc = Accuracy()
self.val_acc = Accuracy()
self.test_acc = Accuracy()
self.val_confmat = ConfusionMatrix(num_classes=len(self.conf.classes))
self.test_confmat = ConfusionMatrix(num_classes=len(self.conf.classes))
self.val_misclass = {}
self.save_hyperparameters()
# will be used during inference
def forward(self, x):
return self.linear(self.encoder(x))
# logic for a single training step
def training_step(self, batch, batch_idx):
x_dict, target = batch
x = torch.cat((x_dict['image'], x_dict['mask_1'], x_dict['mask_2']), 1)
pred = self(x)
loss = F.cross_entropy(pred, target)
# training metrics
acc = self.train_acc(torch.argmax(pred, dim=1), target)
self.log('train_loss', loss, on_step=True, on_epoch=True, logger=True)
self.log('train_acc', acc, on_step=True, on_epoch=True, logger=True)
return loss
# logic for a single validation step
def validation_step(self, batch, batch_idx):
x_dict, target = batch
x = torch.cat((x_dict['image'], x_dict['mask_1'], x_dict['mask_2']), 1)
pred = self(x)
loss = F.cross_entropy(pred, target)
# validation metrics
flat_preds = torch.argmax(pred, dim=1)
acc = self.val_acc(flat_preds, target)
self.val_confmat(flat_preds, target)
misclass_indicator = flat_preds != target
indices = torch.arange(x.shape[0])
self.val_misclass[batch_idx] = [indices[misclass_indicator],
flat_preds[misclass_indicator], target[misclass_indicator]]
self.log('val_loss', loss, on_epoch=True)
self.log('val_acc', acc, on_epoch=True)
return loss
# logic for a single testing step
def test_step(self, batch, batch_idx):
x_dict, target = batch
x = torch.cat((x_dict['image'], x_dict['mask_1'], x_dict['mask_2']), 1)
pred = self(x)
loss = F.cross_entropy(pred, target)
# test metrics
flat_preds = torch.argmax(pred, dim=1)
acc = self.test_acc(flat_preds, target)
self.test_confmat(flat_preds, target)
self.log('test_loss', loss, prog_bar=True)
self.log('test_acc', acc, prog_bar=True)
return loss
def configure_optimizers(self):
optimizer = None
if self.conf.optimizer == 'SGD':
optimizer = SGD(self.parameters(), lr=self.conf.lr,
momentum=0.9, weight_decay=self.conf.weight_decay)
elif self.conf.optimizer == 'Adam':
optimizer = Adam(self.parameters(), lr=self.conf.lr,
weight_decay=self.conf.weight_decay)
else:
raise NotImplementedError('Optimizer not supported, need to add it.')
scheduler = None
if self.conf.scheduler == 'CosineAnnealingLR':
scheduler = CosineAnnealingLR(
optimizer, T_max=self.conf.epochs, last_epoch=-1)
else:
raise NotImplementedError('Scheduler not supported, need to add it.')
lr_scheduler = {'scheduler': scheduler, 'monitor': 'val_acc'}
return [optimizer], [lr_scheduler]
| CSR-main | src/lightning/modules/linear_probe_module.py |
"""
Adapted from: https://github.com/facebookresearch/moco
Original work is: Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
This implementation is: Copyright (c) PyTorch Lightning, Inc. and its affiliates. All Rights Reserved
This implementation is licensed under Attribution-NonCommercial 4.0 International;
You may not use this file except in compliance with the License.
You may obtain a copy of the License from the LICENSE file present in this folder.
"""
from argparse import ArgumentParser
from typing import Union
import pytorch_lightning as pl
import torch
import torchvision
from torchmetrics.functional import accuracy
from src.models.backbones import FeatureLearner, FeedForward
from src.shared.utils import my_shuffle_evaluate
from torch import nn
from torch.nn import functional as F
from torch.optim.lr_scheduler import CosineAnnealingLR
class MocoV2(pl.LightningModule):
"""
PyTorch Lightning implementation of `Moco <https://arxiv.org/abs/2003.04297>`_
Paper authors: Xinlei Chen, Haoqi Fan, Ross Girshick, Kaiming He.
Code adapted from `facebookresearch/moco <https://github.com/facebookresearch/moco>`_ to Lightning by:
- `William Falcon <https://github.com/williamFalcon>`_
Further modifications by:
- `Samir Gadre <https://github.com/sagadre>`_
"""
def __init__(
self,
in_channels=5,
emb_dim: int = 512,
num_negatives: int = 1024, # 2048, # 8192, #16384, 65536,
encoder_momentum: float = 0.999,
softmax_temperature: float = 0.07,
learning_rate: float = 0.03,
momentum: float = 0.9,
weight_decay: float = 1e-4,
data_dir: str = './',
batch_size: int = 512,
use_mlp: bool = True,
num_workers: int = 8,
*args,
**kwargs
):
"""
Args:
in_channels: number of input channels (3 RGB channels plus two object masks)
emb_dim: feature dimension (default: 512)
num_negatives: queue size; number of negative keys (default: 1024)
encoder_momentum: moco momentum of updating key encoder (default: 0.999)
softmax_temperature: softmax temperature (default: 0.07)
learning_rate: the learning rate
momentum: optimizer momentum
weight_decay: optimizer weight decay
datamodule: the DataModule (train, val, test dataloaders)
data_dir: the directory to store data
batch_size: batch size
use_mlp: add an mlp to the encoders
num_workers: workers for the loaders
"""
super().__init__()
self.save_hyperparameters()
# create the encoders
# num_classes is the output fc dimension
self.emb_dim = emb_dim
self.encoder_q, self.encoder_k = self.init_encoders()
# if use_mlp: # hack: brute-force replacement
# dim_mlp = self.hparams.emb_dim
# self.encoder_q.fc = nn.Sequential(
# nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_q.fc)
# self.encoder_k.fc = nn.Sequential(
# nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_k.fc)
for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
param_k.data.copy_(param_q.data) # initialize
param_k.requires_grad = False # not update by gradient
# create the queue
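# two independent negative queues are maintained: one for self (node) features and one for
# relational (edge) features; each stores num_negatives L2-normalized keys as the columns of
# an (emb_dim x num_negatives) buffer and keeps its own write pointer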
self.register_buffer("queue_edge", torch.randn(emb_dim, num_negatives))
self.queue_edge = nn.functional.normalize(self.queue_edge, dim=0)
self.register_buffer("queue_node", torch.randn(emb_dim, num_negatives))
self.queue_node = nn.functional.normalize(self.queue_node, dim=0)
self.register_buffer(
"queue_edge_ptr", torch.zeros(1, dtype=torch.long))
self.register_buffer(
"queue_node_ptr", torch.zeros(1, dtype=torch.long))
def init_encoders(self):
"""
Override to add your own encoders
"""
backbone_q = FeatureLearner(
in_channels=5,
channel_width=64,
pretrained=False,
num_classes=self.hparams.emb_dim,
backbone_str='resnet18')
backbone_k = FeatureLearner(
in_channels=5,
channel_width=64,
pretrained=False,
num_classes=self.hparams.emb_dim,
backbone_str='resnet18')
projection_q = FeedForward(
[self.emb_dim, self.emb_dim//2, self.emb_dim])
projection_k = FeedForward(
[self.emb_dim, self.emb_dim//2, self.emb_dim])
encoder_q = nn.Sequential(backbone_q, projection_q)
encoder_k = nn.Sequential(backbone_k, projection_k)
return encoder_q, encoder_k
@torch.no_grad()
def _momentum_update_key_encoder(self):
"""
Momentum update of the key encoder
"""
for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
em = self.hparams.encoder_momentum
param_k.data = param_k.data * em + param_q.data * (1. - em)
@torch.no_grad()
def _dequeue_and_enqueue(self, k_node, k_edge):
# gather keys before updating queue
if self.trainer.use_ddp or self.trainer.use_ddp2:
k_node = concat_all_gather(k_node)
k_edge = concat_all_gather(k_edge)
batch_size_node = k_node.shape[0]
batch_size_edge = k_edge.shape[0]
ptr_node = int(self.queue_node_ptr)
ptr_edge = int(self.queue_edge_ptr)
# if self.hparams.num_negatives % batch_size != 0:
# assert self.hparams.num_negatives % batch_size == 0 # for simplicity
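# unlike the original MoCo enqueue (which assumes the batch size divides the queue size), the
# node/edge split makes the per-call batch size variable, so writes wrap around the circular
# buffer explicitly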
if batch_size_node != 0:
# replace the keys at ptr (dequeue and enqueue)
if ptr_node + batch_size_node > self.hparams.num_negatives:
margin = self.hparams.num_negatives - ptr_node
self.queue_node[:, ptr_node:] = k_node.T[:, :margin]
self.queue_node[:, :batch_size_node -
margin] = k_node.T[:, margin:batch_size_node]
else:
self.queue_node[:, ptr_node:ptr_node +
batch_size_node] = k_node.T
# move pointer
ptr_node = (
ptr_node + batch_size_node) % self.hparams.num_negatives
self.queue_node_ptr[0] = ptr_node
if batch_size_edge != 0:
if ptr_edge + batch_size_edge > self.hparams.num_negatives:
margin = self.hparams.num_negatives - ptr_edge
self.queue_edge[:, ptr_edge:] = k_edge.T[:, :margin]
self.queue_edge[:, :batch_size_edge -
margin] = k_edge.T[:, margin:batch_size_edge]
else:
self.queue_edge[:, ptr_edge:ptr_edge +
batch_size_edge] = k_edge.T
# move pointer
ptr_edge = (
ptr_edge + batch_size_edge) % self.hparams.num_negatives
self.queue_edge_ptr[0] = ptr_edge
@torch.no_grad()
def _batch_shuffle_ddp(self, k): # pragma: no cover
"""
Batch shuffle, for making use of BatchNorm.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = k.shape[0]
k_gather = concat_all_gather(k)
batch_size_all = k_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# random shuffle index
idx_shuffle = torch.randperm(batch_size_all).cuda()
# broadcast to all gpus
torch.distributed.broadcast(idx_shuffle, src=0)
# index for restoring
idx_unshuffle = torch.argsort(idx_shuffle)
# shuffled index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]
return k_gather[idx_this], idx_unshuffle
@torch.no_grad()
def _batch_unshuffle_ddp(self, x, idx_unshuffle): # pragma: no cover
"""
Undo batch shuffle.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# restored index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this]
def forward(self,
img_q,
img_k,
is_self_feature,
# queue_identifier,
update_queue=True):
"""
Input:
im_q: a batch of query images
im_k: a batch of key images
Output:
logits, targets
"""
# compute query features
q = self.encoder_q(img_q) # queries: NxC
q = nn.functional.normalize(q, dim=1)
# compute key features
with torch.no_grad(): # no gradient to keys
if update_queue:
self._momentum_update_key_encoder() # update the key encoder
# shuffle for making use of BN
if self.trainer.use_ddp or self.trainer.use_ddp2:
img_k, idx_unshuffle = self._batch_shuffle_ddp(img_k)
k = self.encoder_k(img_k) # keys: NxC
k = nn.functional.normalize(k, dim=1)
# undo shuffle
if self.trainer.use_ddp or self.trainer.use_ddp2:
k = self._batch_unshuffle_ddp(k, idx_unshuffle)
# split keys and queries into two streams for edge and self features
k_node = k[is_self_feature]
q_node = q[is_self_feature]
k_edge = k[~is_self_feature]
q_edge = q[~is_self_feature]
logits_nodes, labels_nodes, logits_edges, labels_edges = None, None, None, None
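# InfoNCE logits: l_pos is the per-sample similarity q.k with the matching key (Nx1), l_neg
# compares q against every key in the queue (NxK); concatenating puts the positive at column 0,
# so the cross-entropy target is always label 0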
if k_node.shape[0] != 0:
l_pos_nodes = torch.einsum(
'nc,nc->n', [q_node, k_node]).unsqueeze(-1)
l_neg_nodes = torch.einsum(
'nc,ck->nk', [q_node, self.queue_node.clone().detach()])
logits_nodes = torch.cat([l_pos_nodes, l_neg_nodes], dim=1) / \
self.hparams.softmax_temperature
labels_nodes = torch.zeros(
logits_nodes.shape[0], dtype=torch.long).type_as(logits_nodes)
if k_edge.shape[0] != 0:
l_pos_edges = torch.einsum(
'nc,nc->n', [q_edge, k_edge]).unsqueeze(-1)
l_neg_edges = torch.einsum(
'nc,ck->nk', [q_edge, self.queue_edge.clone().detach()])
logits_edges = torch.cat([l_pos_edges, l_neg_edges], dim=1) / \
self.hparams.softmax_temperature
labels_edges = torch.zeros(
logits_edges.shape[0], dtype=torch.long).type_as(logits_edges)
# dequeue and enqueue
if update_queue:
self._dequeue_and_enqueue(k_node, k_edge)
return logits_nodes, labels_nodes, logits_edges, labels_edges
def training_step(self, batch, batch_idx):
return self._step_helper(batch, batch_idx, True)
def validation_step(self, batch, batch_idx):
return self._step_helper(batch, batch_idx, False)
def validation_epoch_end(self, outputs):
def mean(res, key):
# recursive mean for multilevel dicts
return torch.stack([x[key] if isinstance(x, dict) else mean(x, key) for x in res]).mean()
log = {}
for k in outputs[0]:
log[k] = mean(outputs, k)
self.log_dict(log)
def configure_optimizers(self):
optimizer = torch.optim.SGD(
self.parameters(),
self.hparams.learning_rate,
momentum=self.hparams.momentum,
weight_decay=self.hparams.weight_decay
)
scheduler = CosineAnnealingLR(
optimizer, T_max=100, last_epoch=-1)
lr_scheduler = {'scheduler': scheduler, 'monitor': 'val_acc'}
return [optimizer], [lr_scheduler]
def _step_helper(self, batch, batch_idx, is_train):
prefix = 'val'
if is_train:
prefix = 'train'
q_dict, k_dict = batch
img_q = torch.cat(
(q_dict['image'], q_dict['mask_1'], q_dict['mask_2']), 1)
img_k = torch.cat(
(k_dict['image'], k_dict['mask_1'], k_dict['mask_2']), 1)
logits_node, labels_node, logits_edge, labels_edge = self(
img_q=img_q,
img_k=img_k,
is_self_feature=q_dict['is_self_feature'],
# queue_identifier=q_dict['queue_identifier'],
update_queue=is_train)
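# default values logged when a batch happens to contain no node (or edge) pairs; the 13.
# appears to be an arbitrary placeholder rather than a tuned constant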
loss_node = torch.tensor(13., device=self.device)
loss_edge = torch.tensor(13., device=self.device)
acc1_node = torch.tensor(0., device=self.device)
acc5_node = torch.tensor(0., device=self.device)
acc1_edge = torch.tensor(0., device=self.device)
acc5_edge = torch.tensor(0., device=self.device)
if logits_node is not None:
loss_node = F.cross_entropy(
logits_node.float(), labels_node.long())
dist = F.softmax(logits_node, 1).detach()
target = labels_node.long().detach()
acc1_node = accuracy(
dist, target, top_k=1)
acc5_node = accuracy(
dist, target, top_k=5)
if logits_edge is not None:
loss_edge = F.cross_entropy(
logits_edge.float(), labels_edge.long())
dist = F.softmax(logits_edge, 1)
target = labels_edge.long().detach()
acc1_edge = accuracy(
dist, target, top_k=1)
acc5_edge = accuracy(
dist, target, top_k=5)
loss_total = loss_node + loss_edge
log = {f'{prefix}_loss': loss_total, # NOTE: DO NOT CHANGE THIS KEY IT IS USED FOR MONITOR
f'{prefix}_loss_node': loss_node,
f'{prefix}_loss_edge': loss_edge,
f'{prefix}_acc1_node': acc1_node,
f'{prefix}_acc5_node': acc5_node,
f'{prefix}_acc1_edge': acc1_edge,
f'{prefix}_acc5_edge': acc5_edge}
if is_train:
self.log_dict(log)
return loss_total
# case where we are taking a val step, return a dict for agg
return log
# utils
@torch.no_grad()
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [torch.ones_like(tensor) for _ in range(
torch.distributed.get_world_size())]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
| CSR-main | src/lightning/modules/moco2_module.py |
"""
Adapted from: https://github.com/facebookresearch/moco
Original work is: Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
This implementation is: Copyright (c) PyTorch Lightning, Inc. and its affiliates. All Rights Reserved
This implementation is licensed under Attribution-NonCommercial 4.0 International;
You may not use this file except in compliance with the License.
You may obtain a copy of the License from the LICENSE file present in this folder.
"""
from argparse import ArgumentParser
from typing import Union
import pytorch_lightning as pl
import torch
import torchvision
from torchmetrics.functional import accuracy
from src.models.backbones import FeatureLearner, FeedForward
from src.shared.utils import my_shuffle_evaluate
from torch import nn
from torch.nn import functional as F
from torch.optim.lr_scheduler import CosineAnnealingLR
class MocoV2(pl.LightningModule):
"""
PyTorch Lightning implementation of `Moco <https://arxiv.org/abs/2003.04297>`_
Paper authors: Xinlei Chen, Haoqi Fan, Ross Girshick, Kaiming He.
Code adapted from `facebookresearch/moco <https://github.com/facebookresearch/moco>`_ to Lightning by:
- `William Falcon <https://github.com/williamFalcon>`_
Further modifications by:
- `Samir Gadre <https://github.com/sagadre>`_
"""
def __init__(
self,
in_channels=5,
emb_dim: int = 512,
num_negatives: int = 1024, # 2048, # 8192, #16384, 65536,
encoder_momentum: float = 0.999,
softmax_temperature: float = 0.07,
learning_rate: float = 0.03,
momentum: float = 0.9,
weight_decay: float = 1e-4,
data_dir: str = './',
batch_size: int = 512,
use_mlp: bool = True,
num_workers: int = 8,
*args,
**kwargs
):
"""
Args:
in_channels: number of input channels (3 RGB channels plus two object masks)
emb_dim: feature dimension (default: 512)
num_negatives: queue size; number of negative keys (default: 1024)
encoder_momentum: moco momentum of updating key encoder (default: 0.999)
softmax_temperature: softmax temperature (default: 0.07)
learning_rate: the learning rate
momentum: optimizer momentum
weight_decay: optimizer weight decay
datamodule: the DataModule (train, val, test dataloaders)
data_dir: the directory to store data
batch_size: batch size
use_mlp: add an mlp to the encoders
num_workers: workers for the loaders
"""
super().__init__()
self.save_hyperparameters()
# create the encoders
# num_classes is the output fc dimension
self.emb_dim = emb_dim
self.encoder_q, self.encoder_k = self.init_encoders()
# if use_mlp: # hack: brute-force replacement
# dim_mlp = self.hparams.emb_dim
# self.encoder_q.fc = nn.Sequential(
# nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_q.fc)
# self.encoder_k.fc = nn.Sequential(
# nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_k.fc)
for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
param_k.data.copy_(param_q.data) # initialize
param_k.requires_grad = False # not update by gradient
# create the queue
self.register_buffer("queue", torch.randn(emb_dim, num_negatives))
self.queue = nn.functional.normalize(self.queue, dim=0)
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
def init_encoders(self):
"""
Override to add your own encoders
"""
backbone_q = FeatureLearner(
in_channels=5,
channel_width=64,
pretrained=False,
num_classes=self.hparams.emb_dim,
backbone_str='resnet18')
backbone_k = FeatureLearner(
in_channels=5,
channel_width=64,
pretrained=False,
num_classes=self.hparams.emb_dim,
backbone_str='resnet18')
projection_q = FeedForward(
[self.emb_dim, self.emb_dim//2, self.emb_dim])
projection_k = FeedForward(
[self.emb_dim, self.emb_dim//2, self.emb_dim])
encoder_q = nn.Sequential(backbone_q, projection_q)
encoder_k = nn.Sequential(backbone_k, projection_k)
return encoder_q, encoder_k
@torch.no_grad()
def _momentum_update_key_encoder(self):
"""
Momentum update of the key encoder
"""
for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
em = self.hparams.encoder_momentum
param_k.data = param_k.data * em + param_q.data * (1. - em)
@torch.no_grad()
def _dequeue_and_enqueue(self, keys):
# gather keys before updating queue
if self.trainer.use_ddp or self.trainer.use_ddp2:
keys = concat_all_gather(keys)
batch_size = keys.shape[0]
ptr = int(self.queue_ptr)
assert self.hparams.num_negatives % batch_size == 0  # for simplicity
# replace the keys at ptr (dequeue and enqueue)
self.queue[:, ptr:ptr + batch_size] = keys.T
ptr = (ptr + batch_size) % self.hparams.num_negatives # move pointer
self.queue_ptr[0] = ptr
@torch.no_grad()
def _batch_shuffle_ddp(self, k, shuffle_q, hard_q, hard_k, re_k): # pragma: no cover
"""
Batch shuffle, for making use of BatchNorm.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = k.shape[0]
k_gather = concat_all_gather(k)
hard_q_gather_this = None
hard_k_gather_this = None
re_k_gather_this = None
shuffle_q_gather_this = None
batch_size_all = k_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# random shuffle index
idx_shuffle = torch.randperm(batch_size_all).cuda()
# broadcast to all gpus
torch.distributed.broadcast(idx_shuffle, src=0)
# index for restoring
idx_unshuffle = torch.argsort(idx_shuffle)
# shuffled index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]
if hard_q is not None:
hard_q_gather_this = concat_all_gather(hard_q)[idx_this]
if hard_k is not None:
hard_k_gather_this = concat_all_gather(hard_k)[idx_this]
if re_k is not None:
re_k_gather_this = concat_all_gather(re_k)[idx_this]
if shuffle_q is not None:
shuffle_q_gather_this = concat_all_gather(shuffle_q)[idx_this]
return k_gather[idx_this], shuffle_q_gather_this, hard_q_gather_this, hard_k_gather_this, re_k_gather_this, idx_unshuffle
@torch.no_grad()
def _batch_unshuffle_ddp(self, x, idx_unshuffle): # pragma: no cover
"""
Undo batch shuffle.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# restored index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this]
def forward(self,
img_q,
img_k,
shuffle_img_q=None,
shuffle_img_q_idx=None,
hard_q=None,
hard_q_idx=None,
hard_k=None,
hard_k_idx=None,
re_k=None,
re_k_idx=None,
update_queue=True):
"""
Input:
im_q: a batch of query images
im_k: a batch of key images
Output:
logits, targets
"""
# compute query features
q = self.encoder_q(img_q) # queries: NxC
q = nn.functional.normalize(q, dim=1)
# compute key features
with torch.no_grad(): # no gradient to keys
if update_queue:
self._momentum_update_key_encoder() # update the key encoder
# shuffle for making use of BN
if self.trainer.use_ddp or self.trainer.use_ddp2:
img_k, shuffle_img_q, hard_q, hard_k, re_k, idx_unshuffle = self._batch_shuffle_ddp(
img_k, shuffle_img_q, hard_q, hard_k, re_k)
k = self.encoder_k(img_k) # keys: NxC
k = nn.functional.normalize(k, dim=1)
shuffle_q = self.encoder_k(shuffle_img_q)
shuffle_q = nn.functional.normalize(shuffle_q, dim=1)
h_q = None
# h_k = None
# r_k = None
if hard_q is not None:
# h_q = self.encoder_k(hard_q.view(-1, 5, 224, 224))
h_q = self.encoder_k(hard_q)
h_q = nn.functional.normalize(h_q, dim=1)
# h_q = h_q.view(k.shape[0], -1, self.emb_dim)
# if hard_k is not None:
# h_k = self.encoder_k(hard_k.view(-1, 5, 224, 224))
# h_k = nn.functional.normalize(h_k, dim=1)
# h_k = h_k.view(k.shape[0], -1, self.emb_dim)
# if re_k is not None:
# r_k = self.encoder_k(re_k.view(-1, 5, 224, 224))
# r_k = nn.functional.normalize(re_k, dim=1)
# r_k = r_k.view(k.shape[0], -1, self.emb_dim)
# undo shuffle
if self.trainer.use_ddp or self.trainer.use_ddp2:
k = self._batch_unshuffle_ddp(k, idx_unshuffle)
shuffle_q = self._batch_unshuffle_ddp(shuffle_q, idx_unshuffle)
if h_q is not None:
h_q = self._batch_unshuffle_ddp(h_q, idx_unshuffle)
# if h_k is not None:
# h_k = self._batch_unshuffle_ddp(h_k, idx_unshuffle)
# if r_k is not None:
# r_k = self._batch_unshuffle_ddp(r_k, idx_unshuffle)
# compute logits
# Einstein sum is more intuitive
# positive logits: Nx1
l_pos = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1)
# negative logits: NxK
l_neg = torch.einsum('nc,ck->nk', [q, self.queue.clone().detach()])
l_neg_shuffle = torch.einsum('nc,nc->n', [q, shuffle_q]).unsqueeze(-1)
# l_neg_h_q = torch.einsum('nc,nkc->nk', [q, h_q])
l_neg_h_q = torch.einsum('nc,nc->n', [q, h_q]).unsqueeze(-1)
# l_neg_h_k = torch.einsum('nc,nkc->nk', [q, h_k])
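# l_neg_shuffle and l_neg_h_q are Nx1 similarities against a single hard negative per query
# (the shuffled-scene view and an in-frame negative object pair, respectively); each is paired
# with l_pos below to form a 2-way contrastive problem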
# logits: Nx(1+K) with temperature applied
logits = torch.cat([l_pos, l_neg], dim=1) / \
self.hparams.softmax_temperature
# labels: positive key indicators
labels = torch.zeros(logits.shape[0], dtype=torch.long).type_as(logits)
logits_h_q = torch.cat([l_pos, l_neg_h_q], dim=1) / \
self.hparams.softmax_temperature
logits_shuffle = torch.cat([l_pos, l_neg_shuffle], dim=1) / \
self.hparams.softmax_temperature
# logits_h_k = torch.cat([l_pos, l_neg_h_k], dim=1) / \
# self.hparams.softmax_temperature
labels_h_q = torch.zeros(
logits_h_q.shape[0], dtype=torch.long).type_as(logits)
# labels_h_k = torch.zeros(
# logits_h_k.shape[0], dtype=torch.long).type_as(logits)
labels_shuffle = torch.zeros(
logits_shuffle.shape[0], dtype=torch.long).type_as(logits)
# dequeue and enqueue
if update_queue:
self._dequeue_and_enqueue(k)
# , logits_h_q, labels_h_q, logits_h_k, labels_h_k
return logits, labels, logits_shuffle, labels_shuffle, logits_h_q, labels_h_q
def training_step(self, batch, batch_idx):
return self._step_helper(batch, batch_idx, True)
def validation_step(self, batch, batch_idx):
return self._step_helper(batch, batch_idx, False)
def validation_epoch_end(self, outputs):
def mean(res, key):
# recursive mean for multilevel dicts
return torch.stack([x[key] if isinstance(x, dict) else mean(x, key) for x in res]).mean()
log = {}
for k in outputs[0]:
log[k] = mean(outputs, k)
self.log_dict(log)
def configure_optimizers(self):
optimizer = torch.optim.SGD(
self.parameters(),
self.hparams.learning_rate,
momentum=self.hparams.momentum,
weight_decay=self.hparams.weight_decay
)
scheduler = CosineAnnealingLR(
optimizer, T_max=100, last_epoch=-1)
lr_scheduler = {'scheduler': scheduler, 'monitor': 'val_acc'}
return [optimizer], [lr_scheduler]
def _step_helper(self, batch, batch_idx, is_train):
prefix = 'val'
if is_train:
prefix = 'train'
q_dict, k_dict = batch
img_q = torch.cat(
(q_dict['image'], q_dict['mask_1'], q_dict['mask_2']), 1)
img_k = torch.cat(
(k_dict['image'], k_dict['mask_1'], k_dict['mask_2']), 1)
shuffle_img_q = torch.cat(
(q_dict['shuffle_image'], q_dict['shuffle_mask_1'], q_dict['shuffle_mask_2']), 1)
in_frame_negatives = torch.cat(
(q_dict['image'], q_dict['in_frame_negative_mask_1'], q_dict['in_frame_negative_mask_2']), 1)
logits, labels, logits_shuffle, labels_shuffle, logits_h_q, labels_h_q = self(
img_q=img_q,
img_k=img_k,
shuffle_img_q=shuffle_img_q,
hard_q=in_frame_negatives,
# hard_q_idx=q_dict['padding_in_frame_negatives'],
# hard_k=k_dict['in_frame_negatives'],
# hard_k_idx=k_dict['padding_in_frame_negatives'],
update_queue=is_train)
loss_con = F.cross_entropy(logits.float(), labels.long())
dist = F.softmax(logits, 1).detach()
target = labels.long().detach()
acc1_con = accuracy(
dist, target, top_k=1)
acc5_con = accuracy(
dist, target, top_k=5)
# loss_h_q = F.cross_entropy(
# logits_h_q.float(), labels_h_q.long())
# acc1_h_q, _ = precision_at_k(
# logits_h_q, labels_h_q, top_k=(1, 5))
# loss_h_k = F.cross_entropy(
# logits_h_k.float(), labels_h_k.long())
# acc1_h_k, _ = precision_at_k(
# logits_h_k, labels_h_k, top_k=(1, 5))
shuffle_loss_mask = q_dict['has_shuffle_negative'] > 0.5
in_frame_negative_loss_mask = q_dict['has_in_frame_negative'] > 0.5
loss_shuffle = torch.tensor(13, device=self.device)
loss_in_frame_negative = torch.tensor(13, device=self.device)
if torch.any(shuffle_loss_mask):
# NOTE: this term considers a different number of terms, should be ok as reduction='mean'
# by default. Might want to scale down this loss as less examples so probably trust
# the gradients less
loss_shuffle = F.cross_entropy(
logits_shuffle[shuffle_loss_mask].float(), labels_shuffle[shuffle_loss_mask].long())
if torch.any(in_frame_negative_loss_mask):
loss_in_frame_negative = F.cross_entropy(
logits_h_q[in_frame_negative_loss_mask].float(), labels_h_q[in_frame_negative_loss_mask].long())
# acc1_shuffle, _ = precision_at_k(
# logits_shuffle, labels_shuffle, top_k=(1, 5))
loss_total = loss_con + 0.5 * loss_in_frame_negative # + loss_shuffle # + loss_h_q + loss_h_k
log = {f'{prefix}_loss_con': loss_con,
f'{prefix}_acc1_con': acc1_con,
f'{prefix}_acc5_con': acc5_con,
f'{prefix}_loss_h_q': loss_in_frame_negative,
# f'{prefix}_acc1_h_q': acc1_h_q,
# f'{prefix}_loss_h_k': loss_h_k,
# f'{prefix}_acc1_h_k': acc1_h_k,
f'{prefix}_loss_shuffle': loss_shuffle,
# f'{prefix}_acc1_shuffle': acc1_shuffle,
f'{prefix}_loss': loss_total}
if is_train:
self.log_dict(log)
return loss_total
# case where we are taking a val step, return a dict for agg
return log
# utils
@torch.no_grad()
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [torch.ones_like(tensor) for _ in range(
torch.distributed.get_world_size())]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
| CSR-main | src/lightning/modules/moco2_module_old.py |
from src.shared.utils import worker_init_fn
import pytorch_lightning as pl
import src.dataloaders.augmentations as A
from src.dataloaders.contrastive_dataset import ContrastiveDataset
from src.dataloaders.contrastive_dataset_old import ContrastiveDatasetOld
from src.dataloaders.contrastive_dataset_object import ContrastiveDatasetObject
from src.shared.constants import (COLOR_JITTER_BRIGHTNESS,
COLOR_JITTER_CONTRAST, COLOR_JITTER_HUE,
COLOR_JITTER_SATURATION, DEFAULT_NUM_WORKERS,
GRAYSCALE_PROBABILITY, NORMALIZE_RGB_MEAN,
NORMALIZE_RGB_STD)
from src.shared.data_split import DataSplit
from torch.utils.data import DataLoader
class ContrastiveDataModule(pl.LightningDataModule):
def __init__(self, batch_size, data_dir, train_object_representation, use_old_dataset=False):
super().__init__()
self.data_dir = data_dir
self.batch_size = batch_size
self.train_object_representation = train_object_representation
self.use_old_dataset = use_old_dataset
def prepare_data(self):
pass
def setup(self, stage=None):
# Assign train/val datasets for use in dataloaders
D = None
if self.use_old_dataset:
D = ContrastiveDatasetOld
else:
if self.train_object_representation:
D = ContrastiveDatasetObject
else:
D = ContrastiveDataset
if stage == 'fit' or stage is None:
self.train_set = D(
self.data_dir, A.TrainTransform, DataSplit.TRAIN)
self.val_set = D(
self.data_dir, A.TestTransform, DataSplit.VAL)
# Assign test dataset for use in dataloader(s)
if stage == 'test' or stage is None:
self.test_set = D(
self.data_dir, A.TestTransform, DataSplit.TEST)
def train_dataloader(self):
return DataLoader(self.train_set, batch_size=self.batch_size, shuffle=True, num_workers=DEFAULT_NUM_WORKERS, pin_memory=True, drop_last=True, worker_init_fn=worker_init_fn)
def val_dataloader(self):
return DataLoader(self.val_set, batch_size=self.batch_size, shuffle=False, num_workers=DEFAULT_NUM_WORKERS, pin_memory=True, drop_last=True, worker_init_fn=worker_init_fn)
def test_dataloader(self):
return DataLoader(self.test_set, batch_size=self.batch_size, shuffle=False, num_workers=DEFAULT_NUM_WORKERS, pin_memory=True, worker_init_fn=worker_init_fn)
| CSR-main | src/lightning/data_modules/contrastive_data_module.py |
| CSR-main | src/lightning/data_modules/__init__.py |
from src.dataloaders.pickupable_dataset import PickupableDataset
from src.shared.utils import worker_init_fn
import pytorch_lightning as pl
import src.dataloaders.augmentations as A
from src.dataloaders.contrastive_dataset import ContrastiveDataset
from src.shared.constants import (COLOR_JITTER_BRIGHTNESS,
COLOR_JITTER_CONTRAST, COLOR_JITTER_HUE,
COLOR_JITTER_SATURATION, DEFAULT_NUM_WORKERS,
GRAYSCALE_PROBABILITY, NORMALIZE_RGB_MEAN,
NORMALIZE_RGB_STD)
from src.shared.data_split import DataSplit
from torch.utils.data import DataLoader
class PickupableDataModule(pl.LightningDataModule):
def __init__(self, batch_size, data_dir):
super().__init__()
self.data_dir = data_dir
self.batch_size = batch_size
def prepare_data(self):
pass
def setup(self, stage=None):
# Assign train/val datasets for use in dataloaders
if stage == 'fit' or stage is None:
self.train_set = PickupableDataset(
self.data_dir, A.TrainTransform, DataSplit.TRAIN)
self.val_set = PickupableDataset(
self.data_dir, A.TestTransform, DataSplit.VAL)
# Assign test dataset for use in dataloader(s)
if stage == 'test' or stage is None:
self.test_set = PickupableDataset(
self.data_dir, A.TestTransform, DataSplit.VAL)
def train_dataloader(self):
return DataLoader(self.train_set, batch_size=self.batch_size, shuffle=True, num_workers=DEFAULT_NUM_WORKERS, pin_memory=True, drop_last=True, worker_init_fn=worker_init_fn)
def val_dataloader(self):
return DataLoader(self.val_set, batch_size=self.batch_size, shuffle=False, num_workers=DEFAULT_NUM_WORKERS, pin_memory=True, drop_last=True, worker_init_fn=worker_init_fn)
def test_dataloader(self):
return DataLoader(self.test_set, batch_size=self.batch_size, shuffle=False, num_workers=DEFAULT_NUM_WORKERS, pin_memory=True, worker_init_fn=worker_init_fn)
| CSR-main | src/lightning/data_modules/pickupable_data_module.py |
import pytorch_lightning as pl
import src.dataloaders.augmentations as A
from src.dataloaders.receptacle_dataset import ReceptacleDataset
from src.shared.constants import (COLOR_JITTER_BRIGHTNESS,
COLOR_JITTER_CONTRAST, COLOR_JITTER_HUE,
COLOR_JITTER_SATURATION, DEFAULT_NUM_WORKERS,
GRAYSCALE_PROBABILITY, NORMALIZE_RGB_MEAN,
NORMALIZE_RGB_STD, ROTATIONS)
from src.shared.data_split import DataSplit
from torch.utils.data import DataLoader
class ReceptacleDataModule(pl.LightningDataModule):
def __init__(self, batch_size, data_dir, task, drop_last=True):
super().__init__()
self.data_dir = data_dir
self.batch_size = batch_size
self.drop_last = drop_last
self.task = task
def prepare_data(self):
pass
def setup(self, stage=None):
# Assign train/val datasets for use in dataloaders
if stage == 'fit' or stage is None:
self.train_set = ReceptacleDataset(
self.data_dir, A.TrainTransform, DataSplit.TRAIN, task=self.task)
self.val_set = ReceptacleDataset(
self.data_dir, A.TestTransform, DataSplit.VAL, task=self.task)
# Assign test dataset for use in dataloader(s)
if stage == 'test' or stage is None:
self.test_set = ReceptacleDataset(
self.data_dir, A.TestTransform, DataSplit.TEST, task=self.task)
def train_dataloader(self):
return DataLoader(self.train_set, batch_size=self.batch_size, shuffle=True, num_workers=DEFAULT_NUM_WORKERS, pin_memory=True, drop_last=self.drop_last)
def val_dataloader(self):
return DataLoader(self.val_set, batch_size=self.batch_size, num_workers=DEFAULT_NUM_WORKERS, pin_memory=True, drop_last=self.drop_last)
def test_dataloader(self):
return DataLoader(self.test_set, batch_size=self.batch_size, num_workers=DEFAULT_NUM_WORKERS, pin_memory=True, drop_last=self.drop_last)
| CSR-main | src/lightning/data_modules/receptacle_data_module.py |
import itertools
import json
import os
import random
import numpy as np
import torch
from PIL import Image
from src.shared.constants import CLASSES_TO_IGNORE, DATALOADER_BOX_FRAC_THRESHOLD, IMAGE_SIZE
from src.shared.data_split import DataSplit
from src.shared.utils import get_box
from torch.utils.data import Dataset
from torchvision import transforms as T
class ContrastiveDatasetOld(Dataset):
def __init__(self, root_dir, transform, data_split: DataSplit, max_in_frame_negatives=2, relational=False, query_negatives=False, key_negatives=False, shuffle_pickup_only=True):
# set the root directory
self.root_dir = root_dir
self.shuffle_pickup_only = shuffle_pickup_only
self.toTensor = T.ToTensor()
self.relational = relational
# we are going to mine hard negatives from the same frame (different relationships).
# Because the number of such examples can be very large and differs from frame to
# frame, we set max_in_frame_negatives, which caps the number of these kinds
# of negatives sampled for each input data point
self.max_in_frame_negatives = max_in_frame_negatives
# set the dataset root, this is dependent on whether we are loading train or test data
self.labels_filepath = os.path.join(
root_dir, f'{data_split.name.lower()}.json')
self.boxes_filepath = os.path.join(
root_dir, f'{data_split.name.lower()}_boxes.json')
self.boxes_shuffle_filepath = os.path.join(
root_dir, f'{data_split.name.lower()}_boxes_shuffle.json')
# save the data augmentations that are to be applied to the images
self.transform = transform
assert self.transform is not None
# load all of the ground truth actions into memory
self.data_refs = None
with open(self.labels_filepath) as f:
self.data_refs = json.load(f)
self.boxes = None
with open(self.boxes_filepath) as f:
self.boxes = json.load(f)
self.boxes_shuffle = None
with open(self.boxes_shuffle_filepath) as f:
self.boxes_shuffle = json.load(f)
self.relations = []
self.data = []
self.edge_histogram = {}
self.class_histogram = {}
c = 0
s = set()
for r in self.data_refs:
s.add(r['first_name'])
s.add(r['second_name'])
self.__set_fixed_dataset(self.data_refs, self.relational)
if self.relational:
print(f'dataset size: {len(self.relations)}')
else:
print(f'dataset size: {len(self.data)}')
def __set_fixed_dataset(self, data_refs, relational):
positives = {}
self.class_histogram = {}
for i, entry in enumerate(data_refs):
part_1 = entry['first_name']
part_2 = entry['second_name']
room_id = entry['room_id']
traj_id = entry['trajectory_id']
timestep = entry['timestep']
has_shuffle_negatives = entry['has_shuffle_negatives']
if part_1 == part_2:
assert not has_shuffle_negatives
if has_shuffle_negatives and self.shuffle_pickup_only:
with open(f'{self.root_dir}/{room_id}_{traj_id}_{timestep}_shuffle.txt', 'r') as f:
contents = f.readline()
if 'open' in contents:
has_shuffle_negatives = False
entry['has_shuffle_negatives'] = False
c_1 = part_1.split('_')[0]
c_2 = part_2.split('_')[0]
# do some area filtering in the dataset
top, bottom = self.boxes[f'{room_id}_{traj_id}_{entry["timestep"]}'][part_1]
area = (bottom[0] - top[0]) * (bottom[1] - top[1])
if area / (IMAGE_SIZE * IMAGE_SIZE) < DATALOADER_BOX_FRAC_THRESHOLD:
continue
top, bottom = self.boxes[f'{room_id}_{traj_id}_{entry["timestep"]}'][part_2]
area = (bottom[0] - top[0]) * (bottom[1] - top[1])
if area / (IMAGE_SIZE * IMAGE_SIZE) < DATALOADER_BOX_FRAC_THRESHOLD:
continue
hit_ignore_class = False
for c in CLASSES_TO_IGNORE:
if c in c_1 or c in c_2:
hit_ignore_class = True
break
if hit_ignore_class:
continue
if c_1 in self.class_histogram:
self.class_histogram[c_1] += 1
else:
self.class_histogram[c_1] = 1
if c_2 in self.class_histogram:
self.class_histogram[c_2] += 1
else:
self.class_histogram[c_2] = 1
key = f'{part_1},{part_2},{room_id},{traj_id}'
class_key = f'{c_1},{c_2}'
if key in positives:
positives[key].append((i, has_shuffle_negatives, class_key))
else:
positives[key] = [(i, has_shuffle_negatives, class_key)]
self.relations = []
self.data = []
self.edge_histogram = {}
for p in positives:
# if len(positives[p]) == 1 and not positives[p][0][1]:
# continue
if len(positives[p]) < 2:
continue
w_sh = [e[0] for e in positives[p] if e[1]]
wo_sh = [e[0] for e in positives[p] if not e[1]]
# prioritize samples with rearrangement negatives
positive_pairs = list(itertools.product(w_sh, w_sh))
np.random.shuffle(positive_pairs)
positive_negative_pairs = list(itertools.product(w_sh, wo_sh))
np.random.shuffle(positive_negative_pairs)
negative_positive_pairs = list(itertools.product(wo_sh, w_sh))
np.random.shuffle(negative_positive_pairs)
negative_pairs = list(itertools.product(wo_sh, wo_sh))
np.random.shuffle(negative_pairs)
prelim = positive_pairs + positive_negative_pairs + \
negative_positive_pairs + negative_pairs
tmp = []
for t in prelim:
if t[0] != t[1]:
tmp.append(t)
assert len(tmp) == len(set(tmp))
if relational:
self.relations.append(tmp)
else:
s = tmp
if positives[p][0][2] in self.edge_histogram:
self.edge_histogram[positives[p][0][2]] += len(s)
else:
self.edge_histogram[positives[p][0][2]] = len(s)
self.data += s
def __len__(self):
if self.relational:
return len(self.relations)
return len(self.data)
def __getitem__(self, idx):
# look up the pre-generated pair of observation indices for this example (or sample one at random in the relational setting)
key1, key2 = None, None
if self.relational:
key1, key2 = random.choice(self.relations[idx])
else:
key1, key2 = self.data[idx]
lookup_pair = [self.data_refs[key1], self.data_refs[key2]]
data_pair = []
for i in range(2):
entry = lookup_pair[i]
# load image
room_id = entry['room_id']
trajectory_id = entry['trajectory_id']
timestep = entry['timestep']
first_object = entry['first_name']
second_object = entry['second_name']
im = Image.open(os.path.join(
self.root_dir, f'{room_id}_{trajectory_id}_{timestep}.png'))
# load masks
m1 = get_box(
self.boxes[f'{room_id}_{trajectory_id}_{timestep}'][first_object])
m2 = get_box(
self.boxes[f'{room_id}_{trajectory_id}_{timestep}'][second_object])
data = {'mask_1': m1, 'mask_2': m2, 'image': im, 'room_id': room_id,
'trajectory_id': trajectory_id, 'timestep': timestep, 'self': int(first_object==second_object)}
if entry['has_shuffle_negatives']:
data['shuffle_image'] = Image.open(os.path.join(
self.root_dir, f'{room_id}_{trajectory_id}_{timestep}_shuffle.png'))
data['shuffle_mask_1'] = get_box(
self.boxes_shuffle[f'{room_id}_{trajectory_id}_{timestep}'][first_object])
data['shuffle_mask_2'] = get_box(
self.boxes_shuffle[f'{room_id}_{trajectory_id}_{timestep}'][second_object])
data['has_shuffle_negative'] = 1
else:
data['shuffle_image'] = im.copy()
data['shuffle_mask_1'] = torch.zeros(1, IMAGE_SIZE, IMAGE_SIZE)
data['shuffle_mask_2'] = torch.zeros(1, IMAGE_SIZE, IMAGE_SIZE)
data['has_shuffle_negative'] = 0
if len(entry['in_frame_negatives']) != 0:
negative_choice = entry['in_frame_negatives'][np.random.randint(
0, high=len(entry['in_frame_negatives']))]
data['in_frame_negative_mask_1'] = get_box(
self.boxes[f'{room_id}_{trajectory_id}_{timestep}'][negative_choice[0]])
data['in_frame_negative_mask_2'] = get_box(
self.boxes[f'{room_id}_{trajectory_id}_{timestep}'][negative_choice[1]])
data['has_in_frame_negative'] = 1
else:
data['in_frame_negative_mask_1'] = torch.zeros(
1, IMAGE_SIZE, IMAGE_SIZE)
data['in_frame_negative_mask_2'] = torch.zeros(
1, IMAGE_SIZE, IMAGE_SIZE)
data['has_in_frame_negative'] = 0
self.transform(data)
data_pair.append(data)
# create dict and return
return data_pair[0], data_pair[1]
| CSR-main | src/dataloaders/contrastive_dataset_old.py |
import json
import os
import torch
from src.shared.data_split import DataSplit
from torch.utils.data import Dataset
class AverageFeatureDataset(Dataset):
def __init__(self, root_dir, data_split: DataSplit):
# set the root directory
self.feat_dir = os.path.join(root_dir, f'{data_split.name.lower()}_avg_feat')
# set the dataset root, this is dependent on whether we are loading train or test data
self.data_split = data_split
subset_labels_filepath = os.path.join(
root_dir, f'{data_split.name.lower()}_subset.json')
self.data = None
with open(subset_labels_filepath, 'r') as f:
self.data = json.load(f)
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
# look up the precomputed average feature for this example and its receptacle label
entry = self.data[idx]
# get the label
label = entry['receptacle']
# load image
room_id = entry['room_id']
trajectory_id = entry['trajectory_id']
first_object = entry['first_name']
second_object = entry['second_name']
data = torch.load(os.path.join(self.feat_dir, f'{first_object}_{second_object}_{room_id}_{trajectory_id}.pt'), map_location='cpu')
# create dict and return
return data, label
| CSR-main | src/dataloaders/average_feature_dataset.py |
import itertools
import json
import os
import random
import numpy as np
import torch
from PIL import Image
from src.shared.constants import CLASSES_TO_IGNORE, DATALOADER_BOX_FRAC_THRESHOLD, IMAGE_SIZE
from src.shared.data_split import DataSplit
from src.shared.utils import get_box
from torch.utils.data import Dataset
from torchvision import transforms as T
class ContrastiveDataset(Dataset):
def __init__(self, root_dir, transform, data_split: DataSplit, balance_class=False, balance_instance=False, balance_self=False):
# set the root directory
self.root_dir = root_dir
# set the dataset root, this is dependent on whether we are loading train or test data
self.labels_filepath = os.path.join(
root_dir, f'{data_split.name.lower()}.json')
self.boxes_filepath = os.path.join(
root_dir, f'{data_split.name.lower()}_boxes.json')
# save the data augmentations that are to be applied to the images
self.transform = transform
assert self.transform is not None
# load all of the ground truth actions into memory
self.data_refs = None
with open(self.labels_filepath) as f:
self.data_refs = json.load(f)
self.boxes = None
with open(self.boxes_filepath) as f:
self.boxes = json.load(f)
# self.data = []
self.nested_dict_node = {}
self.nested_dict_edge = {}
self.__set_fixed_dataset(self.data_refs)
# print(f'dataset size: {len(self.data)}')
def __set_fixed_dataset(self, data_refs):
nested_dict_node = {}
nested_dict_edge = {}
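# nested_dict_{node,edge}[class_key][instance_key] collects the indices of every observation of
# the same object (node) or the same ordered object pair (edge); instances with at least two
# observations are later expanded into all ordered pairs, which serve as contrastive positives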
for i, entry in enumerate(data_refs):
name_1 = entry['first_name']
name_2 = entry['second_name']
room_id = entry['room_id']
traj_id = entry['trajectory_id']
c_1 = name_1.split('_')[0]
c_2 = name_2.split('_')[0]
# do some area filtering in the dataset
top, bottom = self.boxes[f'{room_id}_{traj_id}_{entry["timestep"]}'][name_1]
area = (bottom[0] - top[0]) * (bottom[1] - top[1])
if area / (IMAGE_SIZE * IMAGE_SIZE) < DATALOADER_BOX_FRAC_THRESHOLD:
continue
top, bottom = self.boxes[f'{room_id}_{traj_id}_{entry["timestep"]}'][name_2]
area = (bottom[0] - top[0]) * (bottom[1] - top[1])
if area / (IMAGE_SIZE * IMAGE_SIZE) < DATALOADER_BOX_FRAC_THRESHOLD:
continue
hit_ignore_class = False
for c in CLASSES_TO_IGNORE:
if c in c_1 or c in c_2:
hit_ignore_class = True
break
if hit_ignore_class:
continue
instance_key = None
class_key = f'{c_1},{c_2}'
if name_1 == name_2:
instance_key = f'{name_1},{name_2},{room_id},{traj_id}' # name_1
if class_key in nested_dict_node:
if instance_key in nested_dict_node[class_key]:
nested_dict_node[class_key][instance_key].add(i)
else:
nested_dict_node[class_key][instance_key] = set([i])
else:
nested_dict_node[class_key] = {}
nested_dict_node[class_key][instance_key] = set([i])
else:
instance_key = f'{name_1},{name_2},{room_id},{traj_id}'
if class_key in nested_dict_edge:
if instance_key in nested_dict_edge[class_key]:
nested_dict_edge[class_key][instance_key].add(i)
else:
nested_dict_edge[class_key][instance_key] = set([i])
else:
nested_dict_edge[class_key] = {}
nested_dict_edge[class_key][instance_key] = set([i])
for c in nested_dict_node:
keys_to_del = []
for inst in nested_dict_node[c]:
if len(nested_dict_node[c][inst]) < 2:
keys_to_del.append(inst)
else:
nested_dict_node[c][inst] = list(
itertools.permutations(nested_dict_node[c][inst], 2))
for k in keys_to_del:
del nested_dict_node[c][k]
for c in nested_dict_edge:
keys_to_del = []
for inst in nested_dict_edge[c]:
if len(nested_dict_edge[c][inst]) < 2:
keys_to_del.append(inst)
else:
nested_dict_edge[c][inst] = list(
itertools.permutations(nested_dict_edge[c][inst], 2))
for k in keys_to_del:
del nested_dict_edge[c][k]
keys_to_del = []
for c in nested_dict_node:
if len(nested_dict_node[c]) == 0:
keys_to_del.append(c)
for k in keys_to_del:
del nested_dict_node[k]
keys_to_del = []
for c in nested_dict_edge:
if len(nested_dict_edge[c]) == 0:
keys_to_del.append(c)
for k in keys_to_del:
del nested_dict_edge[k]
self.nested_dict_node = nested_dict_node
self.nested_dict_edge = nested_dict_edge
def __len__(self):
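# samples are drawn on the fly in __getitem__, so the epoch length is a fixed nominal value
# rather than the number of stored pairs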
return 800000
def __getitem__(self, idx):
# alternate between edge (object-pair) and node (self) samples, then randomly sample a class, an instance, and a pair of observations
data_bank = None
if idx % 2 == 0:
data_bank = self.nested_dict_edge
else:
data_bank = self.nested_dict_node
# sample class
sampled_class = random.choice(list(data_bank.keys()))
# sample instance
sampled_instance = random.choice(list(data_bank[sampled_class].keys()))
# sample pair
key1, key2 = random.choice(list(data_bank[sampled_class][sampled_instance]))
lookup_pair = [self.data_refs[key1], self.data_refs[key2]]
data_pair = []
for i in range(2):
entry = lookup_pair[i]
# load image
room_id = entry['room_id']
trajectory_id = entry['trajectory_id']
timestep = entry['timestep']
first_object = entry['first_name']
second_object = entry['second_name']
im = Image.open(os.path.join(
self.root_dir, f'{room_id}_{trajectory_id}_{timestep}.png'))
# load masks
m1 = get_box(
self.boxes[f'{room_id}_{trajectory_id}_{timestep}'][first_object])
m2 = get_box(
self.boxes[f'{room_id}_{trajectory_id}_{timestep}'][second_object])
is_self_feature = first_object == second_object
queue_identifier = None
if is_self_feature:
queue_identifier = abs(hash(first_object)) % (10 ** 8)
else:
queue_identifier = abs(
hash(f'{first_object},{second_object},{room_id},{trajectory_id}')) % (10 ** 8)
data = {'mask_1': m1, 'mask_2': m2, 'image': im, 'room_id': room_id,
'trajectory_id': trajectory_id, 'timestep': timestep,
'is_self_feature': is_self_feature,
'queue_identifier': queue_identifier}
self.transform(data)
data_pair.append(data)
# create dict and return
return data_pair[0], data_pair[1]
| CSR-main | src/dataloaders/contrastive_dataset.py |
import json
import os
import random
import shutil
from typing import Any, Dict, List, cast
import numpy as np
import src.dataloaders.augmentations as A
from src.shared.constants import IMAGE_SIZE
from src.shared.utils import compute_3d_dist
from src.simulation.constants import ROOMR_CONTROLLER_COMMIT_ID
from src.simulation.environment import (RearrangeTaskSpec,
RearrangeTHOREnvironment)
from src.simulation.rearrange_utils import load_rearrange_data_from_path
from tqdm import tqdm
def get_waypoint(env, ip, reachable, name_to_meta):
event = env.controller.step(
action="GetInteractablePoses",
objectId=ip["objectId"],
positions=reachable,
horizons=[-30, 0, 30],
rotations=[0, 90, 180, 270],
standings=[True]
)
obj_loc = name_to_meta[ip["name"]]["position"]
possible_waypoints = event.metadata['actionReturn']
if len(possible_waypoints) == 0:
return None
dists = [compute_3d_dist(obj_loc, w) for w in possible_waypoints]
return possible_waypoints[np.argmin(dists)]
def get_rearrange_task_spec(roomr_data, floor_plan, index, stage):
rearrangement_args = roomr_data[floor_plan][index]
task_spec = RearrangeTaskSpec(scene=rearrangement_args['scene'],
agent_position=rearrangement_args['agent_position'],
agent_rotation=rearrangement_args['agent_rotation'],
openable_data=rearrangement_args['openable_data'],
starting_poses=rearrangement_args['starting_poses'],
target_poses=rearrangement_args['target_poses'],
stage=stage,
runtime_sample=stage == 'train',)
return task_spec
def find_meta(roomr_dirpath='/home/samirg/datasets/roomr/', stage='train', dump_dirpath='/home/samirg/datasets/roomr_meta2'):
data = load_rearrange_data_from_path(
stage, roomr_dirpath)
if not os.path.exists(dump_dirpath):
os.mkdir(dump_dirpath)
meta_filepath = os.path.join(dump_dirpath, f'{stage}.json')
env = RearrangeTHOREnvironment(force_cache_reset=stage != 'train', controller_kwargs={
'commit_id': ROOMR_CONTROLLER_COMMIT_ID,
'height': IMAGE_SIZE,
'width': IMAGE_SIZE,
'visibilityDistance': 1.5,
'rotateStepDegrees': 90,
'quality': "Very Low"})
moved_dict = {}
for scene_name in tqdm(data):
for num, rearrangement_args in enumerate(data[scene_name]):
assert num == rearrangement_args['index']
room_instance = f'{scene_name}_{num}'
moved_dict[room_instance] = {'objects': {}}
task_spec = RearrangeTaskSpec(scene=rearrangement_args['scene'],
agent_position=rearrangement_args['agent_position'],
agent_rotation=rearrangement_args['agent_rotation'],
openable_data=rearrangement_args['openable_data'],
starting_poses=rearrangement_args['starting_poses'],
target_poses=rearrangement_args['target_poses'],
stage=stage,
runtime_sample=stage == 'train',)
# scene description that we are trying to recover
env.reset(task_spec)
walkthrough_reachable = env.controller.step(
"GetReachablePositions").metadata["actionReturn"]
walkthrough_name_to_meta = {
e['name']: e for e in env.controller.last_event.metadata['objects']}
walkthrough_id_to_name = {e['objectId']: e['name']
for e in env.controller.last_event.metadata['objects']}
env.shuffle()
unshuffle_reachable = env.controller.step(
"GetReachablePositions").metadata["actionReturn"]
unshuffle_name_to_meta = {
e['name']: e for e in env.controller.last_event.metadata['objects']}
unshuffle_id_to_name = {e['objectId']: e['name']
for e in env.controller.last_event.metadata['objects']}
ips, gps, cps = env.poses
pose_diffs = cast(
List[Dict[str, Any]], env.compare_poses(
goal_pose=gps, cur_pose=cps)
)
# positions that are reachable before and after the shuffle
reachable = [
x for x in walkthrough_reachable if x in unshuffle_reachable]
            # ground-truth record of what has moved
pose_indices = []
for i in range(len(pose_diffs)):
shuffled_object_detected = False
if pose_diffs[i]['iou'] is not None and pose_diffs[i]['iou'] < 0.5:
from_receptacle = None
to_receptacle = None
shuffled_object_detected = True
if walkthrough_name_to_meta[ips[i]["name"]]['parentReceptacles'] is not None:
from_receptacle = [walkthrough_id_to_name[e]
for e in walkthrough_name_to_meta[ips[i]["name"]]['parentReceptacles']]
else:
print(
f'warning! no from receptacle for {ips[i]["name"]} in {room_instance}')
if unshuffle_name_to_meta[ips[i]["name"]]['parentReceptacles'] is not None:
to_receptacle = [unshuffle_id_to_name[e]
for e in unshuffle_name_to_meta[ips[i]["name"]]['parentReceptacles']]
else:
print(
f'warning! no to receptacle for {ips[i]["name"]} in {room_instance}')
moved_dict[room_instance]['objects'][ips[i]["name"]] = {
"has_opened": False}
moved_dict[room_instance]['objects'][ips[i]
["name"]]["from"] = from_receptacle
moved_dict[room_instance]['objects'][ips[i]
["name"]]["to"] = to_receptacle
moved_dict[room_instance]['objects'][ips[i]["name"]
]["position_dist"] = pose_diffs[i]["position_dist"]
moved_dict[room_instance]['objects'][ips[i]["name"]
]["rotation_dist"] = pose_diffs[i]["rotation_dist"]
if pose_diffs[i]['openness_diff'] is not None and pose_diffs[i]['openness_diff'] >= 0.2:
shuffled_object_detected = True
moved_dict[room_instance]['objects'][ips[i]["name"]] = {
"has_opened": True}
moved_dict[room_instance]['objects'][ips[i]["name"]
]["openness_diff"] = pose_diffs[i]["openness_diff"]
if shuffled_object_detected:
waypoint = get_waypoint(
env, ips[i], reachable, unshuffle_name_to_meta)
moved_dict[room_instance]['objects'][ips[i]
["name"]]['unshuffle_waypoint'] = waypoint
pose_indices.append(i)
moved_dict[room_instance]["position_diff_count"] = rearrangement_args['position_diff_count']
moved_dict[room_instance]["open_diff_count"] = rearrangement_args['open_diff_count']
            # kind of a hack: reset and then find the walkthrough waypoints
env.reset(task_spec)
for i in pose_indices:
waypoint = get_waypoint(
env, gps[i], reachable, walkthrough_name_to_meta)
moved_dict[room_instance]['objects'][gps[i]
["name"]]['walkthrough_waypoint'] = waypoint
# if stage != 'train':
# assert rearrangement_args['position_diff_count'] + rearrangement_args['open_diff_count'] == len(moved_dict[room_instance]['objects'])
with open(meta_filepath, 'w') as f:
json.dump(moved_dict, f, indent=4)
def find_waypoint_plan(start_location, instance_data, has_shuffled):
def are_same(p1, p2, tol=0.001):
sub_keys = ['x', 'y', 'z', 'rotation', 'horizon']
for k in sub_keys:
if abs(p1[k] - p2[k]) > tol:
return False
return p1['standing'] == p2['standing']
    # given a set of waypoints, compute a reasonable sequence in which to visit them
all_points = []
for k in instance_data['objects']:
if instance_data['objects'][k]['walkthrough_waypoint'] is not None and instance_data['objects'][k]['unshuffle_waypoint'] is not None:
walkthrough_waypoint = instance_data['objects'][k]['walkthrough_waypoint']
unshuffle_waypoint = instance_data['objects'][k]['unshuffle_waypoint']
if len(all_points):
if not any([are_same(p, walkthrough_waypoint) for p in all_points]):
all_points.append(walkthrough_waypoint)
else:
all_points.append(walkthrough_waypoint)
if len(all_points):
if not any([are_same(p, unshuffle_waypoint) for p in all_points]):
all_points.append(unshuffle_waypoint)
else:
all_points.append(unshuffle_waypoint)
sequence = []
# greedy algo to determine a sequence of waypoints
while len(all_points) != 0:
dists = [compute_3d_dist(start_location, w) for w in all_points]
sequence.append(all_points[np.argmin(dists)])
all_points.remove(all_points[np.argmin(dists)])
if has_shuffled:
# random.shuffle(sequence)
sequence = sequence[::-1]
return sequence
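# Illustrative sketch (not part of the original file): the loop above always
# measures distance from the fixed start location, so (ignoring exact ties) the
# visit order it produces is the waypoints sorted by distance from the start.
# The hypothetical helper below shows that equivalent ordering with a plain
# Euclidean distance so it runs without the simulator.
def _order_by_distance_from_start(start, waypoints):
    """Return waypoints sorted by straight-line distance from `start`."""
    def _dist(a, b):
        return sum((a[k] - b[k]) ** 2 for k in ('x', 'y', 'z')) ** 0.5
    return sorted(waypoints, key=lambda w: _dist(start, w))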
| CSR-main | src/dataloaders/roomr_dataset_utils.py |
import itertools
import json
import os
import random
import numpy as np
import torch
from PIL import Image
from src.shared.constants import CLASSES_TO_IGNORE, DATALOADER_BOX_FRAC_THRESHOLD, IMAGE_SIZE
from src.shared.data_split import DataSplit
from src.shared.utils import get_box
from torch.utils.data import Dataset
from torchvision import transforms as T
class ContrastiveDatasetObject(Dataset):
def __init__(self, root_dir, transform, data_split: DataSplit, balance_class=False, balance_instance=False, balance_self=False):
# set the root directory
self.root_dir = root_dir
# set the dataset root, this is dependent on whether we are loading train or test data
self.labels_filepath = os.path.join(
root_dir, f'{data_split.name.lower()}.json')
self.boxes_filepath = os.path.join(
root_dir, f'{data_split.name.lower()}_boxes.json')
# save the data augmentations that are to be applied to the images
self.transform = transform
assert self.transform is not None
# load all of the ground truth actions into memory
self.data_refs = None
with open(self.labels_filepath) as f:
self.data_refs = json.load(f)
self.boxes = None
with open(self.boxes_filepath) as f:
self.boxes = json.load(f)
# self.data = []
self.nested_dict_node = {}
self.__set_fixed_dataset(self.data_refs)
# print(f'dataset size: {len(self.data)}')
def __set_fixed_dataset(self, data_refs):
nested_dict_node = {}
for i, entry in enumerate(data_refs):
name_1 = entry['first_name']
name_2 = entry['second_name']
room_id = entry['room_id']
traj_id = entry['trajectory_id']
c_1 = name_1.split('_')[0]
c_2 = name_2.split('_')[0]
# do some area filtering in the dataset
top, bottom = self.boxes[f'{room_id}_{traj_id}_{entry["timestep"]}'][name_1]
area = (bottom[0] - top[0]) * (bottom[1] - top[1])
if area / (IMAGE_SIZE * IMAGE_SIZE) < DATALOADER_BOX_FRAC_THRESHOLD:
continue
top, bottom = self.boxes[f'{room_id}_{traj_id}_{entry["timestep"]}'][name_2]
area = (bottom[0] - top[0]) * (bottom[1] - top[1])
if area / (IMAGE_SIZE * IMAGE_SIZE) < DATALOADER_BOX_FRAC_THRESHOLD:
continue
hit_ignore_class = False
for c in CLASSES_TO_IGNORE:
if c in c_1 or c in c_2:
hit_ignore_class = True
break
if hit_ignore_class:
continue
instance_key = None
class_key = f'{c_1},{c_2}'
if name_1 == name_2:
instance_key = name_1
if class_key in nested_dict_node:
if instance_key in nested_dict_node[class_key]:
nested_dict_node[class_key][instance_key].add(i)
else:
nested_dict_node[class_key][instance_key] = set([i])
else:
nested_dict_node[class_key] = {}
nested_dict_node[class_key][instance_key] = set([i])
for c in nested_dict_node:
keys_to_del = []
for inst in nested_dict_node[c]:
if len(nested_dict_node[c][inst]) < 2:
keys_to_del.append(inst)
else:
nested_dict_node[c][inst] = list(
itertools.permutations(nested_dict_node[c][inst], 2))
for k in keys_to_del:
del nested_dict_node[c][k]
keys_to_del = []
for c in nested_dict_node:
if len(nested_dict_node[c]) == 0:
keys_to_del.append(c)
for k in keys_to_del:
del nested_dict_node[k]
self.nested_dict_node = nested_dict_node
def __len__(self):
return 800000
def __getitem__(self, idx):
data_bank = self.nested_dict_node
# sample class
sampled_class = random.choice(list(data_bank.keys()))
# sample instance
sampled_instance = random.choice(list(data_bank[sampled_class].keys()))
# sample pair
key1, key2 = random.choice(
list(data_bank[sampled_class][sampled_instance]))
lookup_pair = [self.data_refs[key1], self.data_refs[key2]]
data_pair = []
for i in range(2):
entry = lookup_pair[i]
# load image
room_id = entry['room_id']
trajectory_id = entry['trajectory_id']
timestep = entry['timestep']
first_object = entry['first_name']
second_object = entry['second_name']
im = Image.open(os.path.join(
self.root_dir, f'{room_id}_{trajectory_id}_{timestep}.png'))
# load masks
m1 = get_box(
self.boxes[f'{room_id}_{trajectory_id}_{timestep}'][first_object])
m2 = get_box(
self.boxes[f'{room_id}_{trajectory_id}_{timestep}'][second_object])
assert first_object == second_object
queue_identifier = abs(hash(first_object)) % (10 ** 8)
data = {'mask_1': m1, 'mask_2': m2, 'image': im, 'room_id': room_id,
'trajectory_id': trajectory_id, 'timestep': timestep,
'is_self_feature': True,
'queue_identifier': queue_identifier}
self.transform(data)
data_pair.append(data)
# create dict and return
return data_pair[0], data_pair[1]
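# Illustrative sketch (not part of the original file): __set_fixed_dataset above
# keeps a detection only when its box covers at least DATALOADER_BOX_FRAC_THRESHOLD
# of the image. The hypothetical helper below states that check in isolation for a
# box given as ((top_x, top_y), (bottom_x, bottom_y)) corners.
def _box_passes_area_filter(top, bottom, image_size=IMAGE_SIZE,
                            frac_threshold=DATALOADER_BOX_FRAC_THRESHOLD):
    """Return True when the box covers at least `frac_threshold` of the image."""
    area = (bottom[0] - top[0]) * (bottom[1] - top[1])
    return area / float(image_size * image_size) >= frac_threshold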
| CSR-main | src/dataloaders/contrastive_dataset_object.py |
import json
import os
import random
from src.shared.utils import get_box
from src.shared.constants import CLASSES_TO_IGNORE
from PIL import Image
from src.shared.data_split import DataSplit
from torch.utils.data import Dataset
class ReceptacleDataset(Dataset):
def __init__(self, root_dir, transform, data_split: DataSplit, dump_data_subset: bool = False, load_data_subset: bool = True, balance: bool = True, task: str = 'receptacle'):
# set the root directory
self.root_dir = root_dir
self.task = task
assert self.task in {'receptacle', 'sibling', 'combined'}
self.dump_data_subset = dump_data_subset
self.load_data_subset = load_data_subset
self.balance = balance
# set the dataset root, this is dependent on whether we are loading train or test data
self.labels_filepath = None
self.data_split = data_split
        suffix = ''
        if load_data_subset:
            suffix = '_subset'
        self.labels_filepath = os.path.join(
            root_dir, f'{data_split.name.lower()}{suffix}.json')
self.boxes_filepath = os.path.join(
root_dir, f'{data_split.name.lower()}_boxes.json')
# save the data augmentations that are to be applied to the images
self.transform = transform
# load all of the ground truth actions into memory
data_references_raw = None
with open(self.labels_filepath, 'r') as f:
data_references_raw = json.load(f)
self.boxes = None
with open(self.boxes_filepath) as f:
self.boxes = json.load(f)
self.data = []
self.__set_fixed_dataset(data_references_raw)
def __set_fixed_dataset(self, data_references_raw):
data_references = {'on': [], 'under': [], 'unrelated': [], 'sibling': []}
for entry in data_references_raw:
part_1 = entry['first_name']
part_2 = entry['second_name']
hit_ignore_class = False
for c in CLASSES_TO_IGNORE:
if c in part_1 or c in part_2:
hit_ignore_class = True
break
if hit_ignore_class:
continue
if entry['receptacle'] == 1:
assert(entry['receptacle_sibling'] != 1)
data_references['on'].append(entry)
elif entry['receptacle'] == 2:
assert(entry['receptacle_sibling'] != 1)
data_references['under'].append(entry)
elif (entry['receptacle_sibling'] == 1) and (entry['first_name'] != entry['second_name']):
data_references['sibling'].append(entry)
elif entry['first_name'] != entry['second_name']:
assert(entry['receptacle_sibling'] != 1)
data_references['unrelated'].append(entry)
assert len(data_references['on']) == len(data_references['under'])
        samples_per_category = min(len(data_references['on']),
                                   len(data_references['unrelated']),
                                   len(data_references['sibling']))
# balance the dataset with random unrelated samples
self.data += random.sample(
data_references['unrelated'], samples_per_category)
if self.task == 'sibling' or self.task == 'combined':
self.data += random.sample(
data_references['sibling'], samples_per_category)
if self.task == 'receptacle' or self.task == 'combined':
self.data += random.sample(
data_references['on'], samples_per_category)
self.data += random.sample(
data_references['under'], samples_per_category)
if self.dump_data_subset:
with open(os.path.join(self.root_dir, f'{self.data_split.name.lower()}_subset.json'), 'w') as f:
json.dump(self.data, f, indent=4)
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
        # get an on, under, sibling, or unrelated example
entry = self.data[idx]
# get the label
label = None
if self.task == 'receptacle':
label = entry['receptacle']
elif self.task == 'sibling':
if entry['receptacle_sibling'] == 1:
label = 1
else:
label = 0
else:
if entry['receptacle_sibling'] == 1:
label = 3
else:
label = entry['receptacle']
# load image
room_id = entry['room_id']
trajectory_id = entry['trajectory_id']
timestep = entry['timestep']
first_object = entry['first_name']
second_object = entry['second_name']
im = Image.open(os.path.join(
self.root_dir, f'{room_id}_{trajectory_id}_{timestep}.png'))
# load masks
m1 = get_box(
self.boxes[f'{room_id}_{trajectory_id}_{timestep}'][first_object])
m2 = get_box(
self.boxes[f'{room_id}_{trajectory_id}_{timestep}'][second_object])
data = {'mask_1': m1, 'mask_2': m2, 'image': im, 'room_id': room_id,
'trajectory_id': trajectory_id, 'timestep': timestep, 'data_id': idx}
# if there are transformations/augmentations apply them
if self.transform is not None:
self.transform(data)
# create dict and return
return data, label
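# Illustrative sketch (not part of the original file): __getitem__ above maps the
# raw annotation onto an integer label depending on the task. The hypothetical
# helper below restates that mapping so it can be checked in isolation; `entry`
# only needs the two keys used here.
def _entry_to_label(entry, task):
    """Reproduce the label logic of ReceptacleDataset.__getitem__."""
    if task == 'receptacle':
        return entry['receptacle']
    if task == 'sibling':
        return 1 if entry['receptacle_sibling'] == 1 else 0
    # 'combined': siblings get their own class, otherwise fall back to the receptacle label
    return 3 if entry['receptacle_sibling'] == 1 else entry['receptacle']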
| CSR-main | src/dataloaders/receptacle_dataset.py |
import random
import torch
from torchvision import transforms as T
from torchvision.transforms import InterpolationMode
from torchvision.transforms import functional as F
from src.shared.constants import (COLOR_JITTER_BRIGHTNESS,
                                  COLOR_JITTER_CONTRAST, COLOR_JITTER_HUE,
                                  COLOR_JITTER_SATURATION,
                                  GRAYSCALE_PROBABILITY, IMAGE_SIZE, NORMALIZE_RGB_MEAN,
                                  NORMALIZE_RGB_STD)
class Compose(object):
def __init__(self, transforms):
super().__init__()
self.transforms = transforms
def __call__(self, data):
for t in self.transforms:
t(data)
return data
class ToZeroOne(object):
def __init__(self):
super().__init__()
self.toTensor = T.ToTensor()
def __call__(self, data):
if 'image' in data and type(data['image']) != torch.Tensor:
data['image'] = self.toTensor(data['image'])
if 'shuffle_image' in data:
data['shuffle_image'] = self.toTensor(data['shuffle_image'])
# if 'mask_1' in data:
# data['mask_1'] = self.toTensor(data['mask_1'])
# if 'mask_2' in data:
# data['mask_2'] = self.toTensor(data['mask_2'])
class Normalize(object):
"""ImageNet RGB normalization."""
def __init__(self, mean, std):
super().__init__()
self.normalize = T.Normalize(mean=mean, std=std)
def __call__(self, data):
if 'image' in data:
data['image'] = self.normalize(data['image'])
if 'shuffle_image' in data:
data['shuffle_image'] = self.normalize(data['shuffle_image'])
class ColorJitter(object):
"""[summary]
Args:
object ([type]): [description]
"""
def __init__(self, brightness, contrast, saturation, hue):
super().__init__()
self.colorJitter = T.ColorJitter(brightness, contrast, saturation, hue)
def __call__(self, data):
"""[summary]
Args:
curr_image ([type]): [description]
next_image ([type]): [description]
Returns:
[type]: [description]
"""
if 'image' in data:
data['image'] = self.colorJitter(data['image'])
if 'shuffle_image' in data:
data['shuffle_image'] = self.colorJitter(data['shuffle_image'])
class RandomGrayscale(object):
"""[summary]
Args:
object ([type]): [description]
"""
def __init__(self, p):
super().__init__()
self.grayscale = T.RandomGrayscale(p=p)
def __call__(self, data):
if 'image' in data:
data['image'] = self.grayscale(data['image'])
if 'shuffle_image' in data:
data['shuffle_image'] = self.grayscale(data['shuffle_image'])
class Rotate:
"""Rotate by one of the given angles."""
def __init__(self, angles):
self.angles = angles
def __call__(self, data):
angle = random.choice(self.angles)
if 'image' in data:
data['image'] = F.rotate(data['image'], angle)
if 'mask_1' in data:
data['mask_1'] = F.rotate(data['mask_1'], angle)
if 'mask_2' in data:
data['mask_2'] = F.rotate(data['mask_2'], angle)
if 'shuffle_image' in data:
data['shuffle_image'] = F.rotate(data['shuffle_image'], angle)
if 'shuffle_mask_1' in data:
data['shuffle_mask_1'] = F.rotate(data['shuffle_mask_1'], angle)
if 'shuffle_mask_2' in data:
data['shuffle_mask_2'] = F.rotate(data['shuffle_mask_2'], angle)
class Blur:
def __init__(self) -> None:
pass
def __call__(self, data):
if 'image' in data:
data['image'] = F.gaussian_blur(data['image'], 3)
class Resize:
"""Rotate by one of the given angles."""
def __init__(self, size):
self.size = size
self.interp = InterpolationMode.BILINEAR
def __call__(self, data):
if 'image' in data:
data['image'] = F.resize(data['image'], (self.size, self.size), interpolation=self.interp)
if 'mask_1' in data:
data['mask_1'] = F.resize(data['mask_1'], (self.size, self.size), interpolation=self.interp)
if 'mask_2' in data:
data['mask_2'] = F.resize(data['mask_2'], (self.size, self.size), interpolation=self.interp)
if 'shuffle_image' in data:
data['shuffle_image'] = F.resize(data['shuffle_image'], (self.size, self.size), interpolation=self.interp)
if 'shuffle_mask_1' in data:
data['shuffle_mask_1'] = F.resize(data['shuffle_mask_1'], (self.size, self.size), interpolation=self.interp)
if 'shuffle_mask_2' in data:
data['shuffle_mask_2'] = F.resize(data['shuffle_mask_2'], (self.size, self.size), interpolation=self.interp)
TrainTransform = Compose([
ColorJitter(
COLOR_JITTER_BRIGHTNESS,
COLOR_JITTER_CONTRAST,
COLOR_JITTER_SATURATION,
COLOR_JITTER_HUE),
RandomGrayscale(GRAYSCALE_PROBABILITY),
ToZeroOne(),
Normalize(NORMALIZE_RGB_MEAN, NORMALIZE_RGB_STD),
])
TestTransform = Compose([
ToZeroOne(),
Normalize(NORMALIZE_RGB_MEAN, NORMALIZE_RGB_STD),
])
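# Illustrative sketch (not part of the original file): every transform above mutates
# the `data` dict in place and Compose simply chains them. The hypothetical helper
# below shows typical usage of TestTransform on a dummy dict (PIL is imported
# locally to keep the sketch self-contained).
def _example_apply_test_transform(image_size=IMAGE_SIZE):
    """Build a dummy data dict, run TestTransform, and return the image shape."""
    from PIL import Image
    data = {
        'image': Image.new('RGB', (image_size, image_size)),
        'mask_1': torch.zeros(1, image_size, image_size),
        'mask_2': torch.zeros(1, image_size, image_size),
    }
    TestTransform(data)  # converts the image to a normalized tensor in place
    return data['image'].shape  # torch.Size([3, image_size, image_size])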
RealWorldTestTransfrom = Compose([
Resize(IMAGE_SIZE),
ToZeroOne(),
Normalize(NORMALIZE_RGB_MEAN, NORMALIZE_RGB_STD),
])
RealWorldFigTransfrom = Compose([
Resize(IMAGE_SIZE),
ToZeroOne(),
]) | CSR-main | src/dataloaders/augmentations.py |
import sys
import cv2
import numpy as np
import torch
import torchvision.transforms as T
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.engine import DefaultPredictor
from detectron2.utils.logger import setup_logger
from PIL import Image, ImageDraw
from src.shared.constants import CLASSES_TO_IGNORE, IMAGE_SIZE
from src.shared.utils import get_device
from src.simulation.constants import OMNI_CATEGORIES, OMNI_TO_ITHOR
from src.simulation.utils import compute_iou
from torchvision.models.detection.mask_rcnn import maskrcnn_resnet50_fpn
from torchvision.ops import nms
class GtBoxModule(object):
def __init__(self, box_conf_threshold, box_frac_threshold, model_type, model_path, device_num, moved_detection_counts, get_roi_features, debug) -> None:
super().__init__()
self.model_types = {'alfred', 'ithor',
'retinanet', 'maskrcnn', 'lvis', 'rpn'}
if model_type not in self.model_types:
raise ValueError('Unsupported model type')
self.transform = T.Compose([T.ToTensor()])
self.debug = debug
self.box_conf_threshold = box_conf_threshold
self.box_frac_threshold = box_frac_threshold
self.model = None
self.model_type = model_type
self.moved_detection_counts = moved_detection_counts
self.get_roi_features = get_roi_features
if get_roi_features:
setup_logger()
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(
'COCO-Detection/faster_rcnn_R_50_DC5_1x.yaml'))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = box_conf_threshold
cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.01
if device_num < 0:
cfg.MODEL.DEVICE = 'cpu'
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1235
cfg.MODEL.WEIGHTS = model_path
cfg.INPUT.MIN_SIZE_TEST = 300
self.cfg = cfg
self.model = DefaultPredictor(cfg)
def reset(self):
for o in self.moved_detection_counts:
self.moved_detection_counts[o]['count'] = 0
def get_boxes(self, event):
step_instances = []
boxes = {}
interaction_points = {}
areas = {}
count = 0
boxes_for_detectron = []
for o in event.metadata['objects']:
object_id = o['objectId']
object_name = o['name']
if event.instance_detections2D is not None and object_id in event.instance_detections2D and o['visible']:
if o['objectType'] in CLASSES_TO_IGNORE:
continue
top = (event.instance_detections2D[object_id][0],
event.instance_detections2D[object_id][1])
bottom = (event.instance_detections2D[object_id][2] - 1,
event.instance_detections2D[object_id][3] - 1)
area = (bottom[0] - top[0]) * (bottom[1] - top[1])
if area / (IMAGE_SIZE * IMAGE_SIZE) < self.box_frac_threshold:
continue
step_instances.append(object_name)
boxes_for_detectron.append(event.instance_detections2D[object_id])
box = Image.new('L', (IMAGE_SIZE, IMAGE_SIZE))
tmp = ImageDraw.Draw(box)
tmp.rectangle([top, bottom], fill="white")
trans = T.ToTensor()
boxes[count] = trans(box)
mask_idx = event.instance_masks[object_id].nonzero()
idx = np.random.choice(range(mask_idx[0].shape[0]))
y = float(mask_idx[0][idx]) / IMAGE_SIZE
x = float(mask_idx[1][idx]) / IMAGE_SIZE
interaction_points[count] = {'x': x, 'y': y}
areas[count] = area
count += 1
assert count == len(step_instances)
feats = None
if self.get_roi_features:
img = event.frame.copy()
inputs = [{"image": torch.as_tensor(img.astype("float32")).permute(2, 0, 1), "height": 224, "width": 224}]
with torch.no_grad():
images = self.model.model.preprocess_image(inputs) # don't forget to preprocess
features = self.model.model.backbone(images.tensor) # set of cnn features
proposals, _ = self.model.model.proposal_generator(images, features, None) # RPN
dev = proposals[0].proposal_boxes.tensor.get_device()
proposals[0].proposal_boxes.tensor = torch.tensor(boxes_for_detectron).float().to(dev)
features_ = [features[f] for f in self.model.model.roi_heads.box_in_features]
box_features = self.model.model.roi_heads.box_pooler(features_, [x.proposal_boxes for x in proposals])
                feats = self.model.model.roi_heads.box_head(box_features)  # pooled features for the provided GT boxes
if feats is not None:
assert feats.shape[0] == len(step_instances)
return step_instances, boxes, interaction_points, areas, feats
class PredBoxModule(object):
def __init__(self, box_conf_threshold, box_frac_threshold, model_type, model_path, device_num, moved_detection_counts, get_roi_features, debug) -> None:
super().__init__()
self.model_types = {'alfred', 'ithor',
'retinanet', 'maskrcnn', 'lvis', 'rpn'}
if model_type not in self.model_types:
raise ValueError('Unsupported model type')
self.transform = T.Compose([T.ToTensor()])
self.debug = debug
self.box_conf_threshold = box_conf_threshold
self.box_frac_threshold = box_frac_threshold
self.model = None
self.model_type = model_type
self.moved_detection_counts = moved_detection_counts
self.get_roi_features = get_roi_features
self._init_model(model_type, model_path,
box_conf_threshold, device_num)
def reset(self):
for o in self.moved_detection_counts:
self.moved_detection_counts[o]['count'] = 0
def get_boxes(self, event):
# get the pred boxes
boxes = None
img = event.frame.copy()
feats = None
if self.get_roi_features:
img = event.frame.copy()
inputs = [{"image": torch.as_tensor(img.astype("float32")).permute(2, 0, 1), "height": 224, "width": 224}]
with torch.no_grad():
images = self.model.model.preprocess_image(inputs) # don't forget to preprocess
features = self.model.model.backbone(images.tensor) # set of cnn features
proposals, _ = self.model.model.proposal_generator(images, features, None) # RPN
features_ = [features[f] for f in self.model.model.roi_heads.box_in_features]
box_features = self.model.model.roi_heads.box_pooler(features_, [x.proposal_boxes for x in proposals])
box_features = self.model.model.roi_heads.box_head(box_features) # features of all 1k candidates
predictions = self.model.model.roi_heads.box_predictor(box_features)
pred_instances, pred_inds = self.model.model.roi_heads.box_predictor.inference(predictions, proposals)
pred_instances = self.model.model.roi_heads.forward_with_given_boxes(features, pred_instances)
# output boxes, masks, scores, etc
pred_instances = self.model.model._postprocess(pred_instances, inputs, images.image_sizes) # scale box to orig size
# features of the proposed boxes
feats = box_features[pred_inds]
boxes = pred_instances[0]['instances'].pred_boxes
ithor_idx = []
tmp = []
for i in range(len(pred_instances[0]['instances'])):
omni_cat = OMNI_CATEGORIES[pred_instances[0]['instances'][i].pred_classes]
if omni_cat in OMNI_TO_ITHOR:
ithor_idx.append(i)
tmp.append(omni_cat)
boxes = boxes[ithor_idx]
feats = feats[ithor_idx]
else:
if self.model_type == 'ithor' or self.model_type == 'retinanet' or self.model_type == 'maskrcnn' or self.model_type == 'lvis':
outputs = self.model(img)
boxes = outputs['instances'].pred_boxes
ithor_idx = []
tmp = []
if self.model_type == 'ithor':
for i in range(len(outputs['instances'])):
omni_cat = OMNI_CATEGORIES[outputs['instances'][i].pred_classes]
if omni_cat in OMNI_TO_ITHOR:
ithor_idx.append(i)
tmp.append(omni_cat)
boxes = boxes[ithor_idx]
elif self.model_type == 'rpn':
outputs = self.model(img)
idx = torch.sigmoid(
outputs['proposals'].objectness_logits) > self.box_conf_threshold
boxes = outputs['proposals'][idx].proposal_boxes
gt_step_instances = []
gt_boxes = {}
gt_interaction_points = {}
pred_boxes = {}
pred_interaction_points = {}
pred_areas = {}
count = 0
feature_idx = []
for i in range(len(boxes)):
box = boxes[i].tensor[0]
top = (box[0], box[1])
bottom = (box[2], box[3])
area = (bottom[0] - top[0]) * (bottom[1] - top[1])
if area / (IMAGE_SIZE * IMAGE_SIZE) < self.box_frac_threshold:
continue
if self.model_type == 'ithor':
pass
if feats is not None:
feature_idx.append(i)
box = Image.new('L', (IMAGE_SIZE, IMAGE_SIZE))
tmp = ImageDraw.Draw(box)
tmp.rectangle([top, bottom], fill="white")
trans = T.ToTensor()
pred_boxes[count] = trans(box)
pred_areas[count] = area
count += 1
for o in event.metadata['objects']:
object_id = o['objectId']
object_name = o['name']
if event.instance_detections2D is not None and object_id in event.instance_detections2D and o['visible']:
if o['objectType'] in CLASSES_TO_IGNORE:
continue
top = (event.instance_detections2D[object_id][0],
event.instance_detections2D[object_id][1])
bottom = (event.instance_detections2D[object_id][2] - 1,
event.instance_detections2D[object_id][3] - 1)
area = (bottom[0] - top[0]) * (bottom[1] - top[1])
# if area / (IMAGE_SIZE * IMAGE_SIZE) < self.box_frac_threshold:
# continue
gt_step_instances.append(object_name)
box = Image.new('L', (IMAGE_SIZE, IMAGE_SIZE))
tmp = ImageDraw.Draw(box)
tmp.rectangle([top, bottom], fill="white")
trans = T.ToTensor()
gt_boxes[object_name] = trans(box)
mask_idx = event.instance_masks[object_id].nonzero()
idx = np.random.choice(range(mask_idx[0].shape[0]))
y = float(mask_idx[0][idx]) / IMAGE_SIZE
x = float(mask_idx[1][idx]) / IMAGE_SIZE
gt_interaction_points[object_name] = {'x': x, 'y': y}
        # NOTE: implementation detail for finding a control point for each predicted detection:
        # we look for overlap with the GT boxes; this is used to implement picking up
        # objects by box
class_maps = {}
        step_instances = []  # NOTE: keep track of the matching gt box name for metrics
for pred_box_id in pred_boxes:
max_iou = 0.0
class_maps[pred_box_id] = None
pred_interaction_points[pred_box_id] = {'x': 0.0, 'y': 0.0}
step_instance = 'None'
for gt_box_id in gt_boxes:
computed_iou = compute_iou(
pred_boxes[pred_box_id].long(), gt_boxes[gt_box_id].long()).item()
if computed_iou > max_iou:
max_iou = computed_iou
pred_interaction_points[pred_box_id] = gt_interaction_points[gt_box_id]
class_maps[pred_box_id] = gt_box_id
step_instance = gt_box_id
step_instances.append(step_instance)
for i in step_instances:
if i in self.moved_detection_counts:
self.moved_detection_counts[i]['count'] += 1
if feats is not None:
feats = feats[feature_idx]
assert feats.shape[0] == len(step_instances)
return step_instances, pred_boxes, pred_interaction_points, pred_areas, feats
def _init_model(self, model_type: str, model_path, box_conf_threshold: float, device_num: int):
if model_type == 'alfred':
self.model = maskrcnn_resnet50_fpn(num_classes=119)
d = torch.load(model_path, map_location=get_device(device_num))
self.model.load_state_dict(d)
self.model.eval()
elif model_type == 'ithor':
setup_logger()
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(
'COCO-Detection/faster_rcnn_R_50_DC5_1x.yaml'))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = box_conf_threshold
cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.01
if device_num < 0:
cfg.MODEL.DEVICE = 'cpu'
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1235
cfg.MODEL.WEIGHTS = model_path
cfg.INPUT.MIN_SIZE_TEST = 300
self.cfg = cfg
self.model = DefaultPredictor(cfg)
elif model_type == 'retinanet':
setup_logger()
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(
"COCO-Detection/retinanet_R_50_FPN_3x.yaml"))
cfg.MODEL.RETINANET.SCORE_THRESH_TEST = box_conf_threshold
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
"COCO-Detection/retinanet_R_50_FPN_3x.yaml")
cfg.INPUT.FORMAT = 'RGB'
if device_num < 0:
cfg.MODEL.DEVICE = 'cpu'
self.cfg = cfg
self.model = DefaultPredictor(cfg)
elif model_type == 'maskrcnn':
setup_logger()
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(
"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = box_conf_threshold
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
cfg.INPUT.FORMAT = 'RGB'
self.cfg = cfg
if device_num < 0:
cfg.MODEL.DEVICE = 'cpu'
self.model = DefaultPredictor(cfg)
elif model_type == 'lvis':
setup_logger()
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(
"LVISv0.5-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = box_conf_threshold
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
"LVISv0.5-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml")
cfg.INPUT.FORMAT = 'RGB'
self.cfg = cfg
if device_num < 0:
cfg.MODEL.DEVICE = 'cpu'
self.model = DefaultPredictor(cfg)
elif model_type == 'rpn':
setup_logger()
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(
"COCO-Detection/rpn_R_50_FPN_1x.yaml"))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
"COCO-Detection/rpn_R_50_FPN_1x.yaml")
cfg.INPUT.FORMAT = 'RGB'
# low threshold means more pruning
cfg.MODEL.RPN.NMS_THRESH = 0.01
self.cfg = cfg
if device_num < 0:
cfg.MODEL.DEVICE = 'cpu'
self.model = DefaultPredictor(cfg)
else:
raise ValueError('Unsupported model type')
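# Illustrative sketch (not part of the original file): PredBoxModule.get_boxes above
# matches each predicted box to the ground-truth box with the highest IoU so it can
# borrow that box's interaction point. The torch-only stand-ins below show the same
# greedy max-IoU matching on binary masks; they are hypothetical helpers, not the
# repo's compute_iou.
def _toy_mask_iou(mask_a, mask_b):
    """IoU of two boolean (or {0, 1} integer) masks of identical shape."""
    inter = (mask_a & mask_b).sum().item()
    union = (mask_a | mask_b).sum().item()
    return inter / union if union > 0 else 0.0
def _toy_match_pred_to_gt(pred_masks, gt_masks):
    """Map each pred key to the gt key with the highest IoU (None if all IoUs are 0)."""
    matches = {}
    for p_key, p_mask in pred_masks.items():
        best_key, best_iou = None, 0.0
        for g_key, g_mask in gt_masks.items():
            iou = _toy_mask_iou(p_mask, g_mask)
            if iou > best_iou:
                best_key, best_iou = g_key, iou
        matches[p_key] = best_key
    return matches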
| CSR-main | src/simulation/module_box.py |
import itertools
import json
import os
import random
import numpy as np
import src.dataloaders.augmentations as A
import torch
import torch.nn.functional as F
from ai2thor.controller import Controller
from PIL import Image, ImageDraw
from scipy.optimize import linear_sum_assignment
from sklearn.metrics.cluster import adjusted_rand_score, rand_score
from src.lightning.modules.moco2_module import MocoV2
from src.shared.constants import CLASSES_TO_IGNORE, IMAGE_SIZE
from src.shared.utils import (check_none_or_empty, load_lightning_inference,
render_adj_matrix)
class AgentObjNavExpert(object):
def __init__(
self,
scene_name='FloorPlan1',
image_size=IMAGE_SIZE,
visibility_distance=1.5,
random_start=True,
trajectory=None,
vision_model_path=None,
rotation_step_degrees=90,
dump_dir='./',
box_frac_threshold=0.008,
cos_sim_match_threshold=0.4,
debug=False
) -> None:
super().__init__()
self.controller = Controller(
renderDepthImage=True,
renderInstanceSegmentation=True,
width=image_size,
height=image_size,
visibilityDistance=visibility_distance,
rotateStepDegrees=rotation_step_degrees
)
self.model = None
if not check_none_or_empty(vision_model_path):
self.model = load_lightning_inference(
vision_model_path, MocoV2).encoder_q
self.reset(
scene_name,
image_size,
visibility_distance,
random_start,
trajectory,
rotation_step_degrees,
dump_dir,
box_frac_threshold,
cos_sim_match_threshold,
debug
)
def reset(
self,
scene_name,
image_size,
visibilityDistance,
random_start,
trajectory,
rotation_step_degrees,
dump_dir,
box_frac_threshold,
cos_sim_match_threshold,
debug
):
self.debug = debug
self.thor_state = None
self.box_frac_threshold = box_frac_threshold
self.cos_sim_match_threshold = cos_sim_match_threshold
if not os.path.exists(dump_dir):
os.mkdir(dump_dir)
self.dump_dir = dump_dir
self.image_size = image_size
self.rotation_step_degrees = rotation_step_degrees
self.instance_map = {}
self.gt_adjacency_matrix = np.zeros((0, 0))
self.gt_assignments = []
self.feature_bank = None
self.feature_match_counts = None
self.assignments = []
self.cluster_meta = {}
self.correct_assignments = 0
self.total_assignments = 0
if random_start and trajectory is not None:
raise ValueError(
'cannot set `random_start=True` and also pass a predefined `trajectory`')
self.controller.reset(scene=scene_name,
width=image_size,
height=image_size,
visibilityDistance=visibilityDistance,
rotateStepDegrees=rotation_step_degrees)
self.room_id = scene_name.split('FloorPlan')[1]
event = self.controller.step(action="GetReachablePositions")
assert event.metadata["lastActionSuccess"]
self.reachable_spots = event.metadata["actionReturn"]
self.steps = 0
if random_start:
i = random.randint(0, len(self.reachable_spots)-1)
rot_y = random.choice(
[i for i in range(0, 360, int(self.rotation_step_degrees))])
event = self.controller.step(
action='Teleport',
position=self.reachable_spots[i],
rotation=dict(x=0, y=rot_y, z=0),
horizon=random.choice([-30, 0, 30, 60]),
standing=True
)
assert event.metadata["lastActionSuccess"]
self.rollout = []
self.replay = False
if trajectory is not None:
event = self.controller.step(
action='Teleport',
position=trajectory[0]['position'],
rotation=trajectory[0]['rotation'],
horizon=trajectory[0]['horizon'],
standing=True
)
assert event.metadata["lastActionSuccess"]
self.rollout = trajectory
self.replay = True
def get_not_visible_object_ids(self):
event = self.controller.step(
action='Done',
)
assert event.metadata["lastActionSuccess"]
objs = []
for o in event.metadata['objects']:
if not o['visible']:
objs.append(o['objectId'])
return objs
def get_action(self, object_id):
action = None
event = self.controller.step(
action='ObjectNavExpertAction',
objectId=object_id
)
if not event.metadata["lastActionSuccess"]:
raise ValueError('ObjectNavExpertAction failed')
action = event.metadata["actionReturn"]
return action
def get_state(self):
if self.thor_state is None:
event = self.controller.step(
action='Done',
)
assert event.metadata["lastActionSuccess"]
self.thor_state = event
return self.thor_state
def take_action(self, object_id):
action = None
event = None
if not self.replay:
pose = self.get_pose()
self.rollout.append(pose)
action = self.get_action(object_id)
if action is None:
return action
event = self.controller.step(
action=action
)
else:
if self.steps + 1 == len(self.rollout):
return None
action = "Teleport"
event = self.controller.step(
action="Teleport",
position=self.rollout[self.steps+1]['position'],
rotation=self.rollout[self.steps+1]['rotation'],
horizon=int(self.rollout[self.steps+1]['horizon']),
standing=True
)
if not event.metadata["lastActionSuccess"]:
raise ValueError(f'{action} failed')
self.thor_state = event
self.steps += 1
return action
def move(self, object_ids):
event = self.controller.step(
action='Done',
)
assert event.metadata["lastActionSuccess"]
include_set = set(object_ids)
excluded_ids = []
for obj in event.metadata['objects']:
if obj['objectType'] not in include_set:
excluded_ids.append(obj['objectId'])
else:
print(obj['objectId'])
event = self.controller.step(action="InitialRandomSpawn",
randomSeed=0,
forceVisible=False,
numPlacementAttempts=30,
placeStationary=True,
excludedObjectIds=excluded_ids
)
assert event.metadata["lastActionSuccess"]
def get_pose(self):
event = self.controller.step(
action='Done',
)
assert event.metadata["lastActionSuccess"]
position = event.metadata["agent"]["position"]
rotation = event.metadata["agent"]["rotation"]
horizon = round(event.metadata["agent"]["cameraHorizon"], 2)
return {'position': position, 'rotation': rotation, 'horizon': horizon}
def dump_observation(self):
im = Image.fromarray(self.get_state().frame)
im.save(f'{self.dump_dir}/{self.room_id}_1_{self.steps}.png', 'PNG')
with open(os.path.join(self.dump_dir, f'{self.room_id}_1_{self.steps}.json'), 'w') as f:
json.dump(self.thor_state.metadata, f, indent=4)
def dump_clusters(self):
assert self.feature_bank.shape[1] == len(self.cluster_meta)
with open(f'{self.dump_dir}/cluster_meta_{self.room_id}_1_{self.steps}.json', 'w') as f:
json.dump(self.cluster_meta, f, indent=4)
torch.save(self.feature_bank, os.path.join(
self.dump_dir, f'cluster_{self.room_id}_1_{self.steps}.pt'))
def update_clusters(self):
event = self.get_state()
im = Image.fromarray(event.frame)
step_instances = []
new_count = 0
boxes = {}
for o in event.metadata['objects']:
            object_id = o['objectId']
            object_name = o['name']
            if object_id in event.instance_detections2D and o['visible']:
                if o['objectType'] in CLASSES_TO_IGNORE:
                    continue
                top = (event.instance_detections2D[object_id][0],
                       event.instance_detections2D[object_id][1])
                bottom = (event.instance_detections2D[object_id][2] - 1,
                          event.instance_detections2D[object_id][3] - 1)
area = (bottom[0] - top[0]) * (bottom[1] - top[1])
if area / (self.image_size * self.image_size) < self.box_frac_threshold:
continue
if object_name not in self.instance_map:
self.instance_map[object_name] = len(self.instance_map)
self.gt_assignments.append([])
new_count += 1
step_instances.append(object_name)
self.gt_assignments[self.instance_map[object_name]].append(
object_name)
box = Image.new('L', (self.image_size, self.image_size))
tmp = ImageDraw.Draw(box)
tmp.rectangle([top, bottom], fill="white")
# if self.debug:
box.save(
f'{self.dump_dir}/{self.room_id}_1_{self.steps}_{object_name}_box.png', 'PNG')
boxes[object_name] = box
if new_count > 0:
# update gt adjacency matrix
d_old = self.gt_adjacency_matrix.shape[0]
d_new = d_old + new_count
new_gt_adjacency_matrx = np.zeros((d_new, d_new))
new_gt_adjacency_matrx[:d_old, :d_old] = self.gt_adjacency_matrix
self.gt_adjacency_matrix = new_gt_adjacency_matrx
# fill in the gt adjacency matrix
step_pairs = list(itertools.product(step_instances, repeat=2))
for p in step_pairs:
i = self.instance_map[p[0]]
j = self.instance_map[p[1]]
self.gt_adjacency_matrix[i, j] = 1
self.gt_adjacency_matrix[j, i] = 1
if len(step_instances) == 0:
            # no detections in this step, so there is nothing to update
return
# run inference on the self-features
query_features = []
for s in step_instances:
data = {'mask_1': boxes[s].copy(),
'mask_2': boxes[s].copy(),
'image': im.copy()}
# if there are transformations/augmentations apply them
A.TestTransform(data)
x = torch.cat((data['image'], data['mask_1'],
data['mask_2']), 0).unsqueeze(0)
feat = self.model(x)
query_features.append(F.normalize(feat, dim=1))
query_features = torch.cat(tuple(query_features), 0)
# start with all features being unmatched with the history
unmatched_queries = set(
[i for i in range(query_features.shape[0])])
if self.feature_bank is None:
            # if there is no feature bank yet, initialize it from the current features
self.feature_bank = torch.transpose(query_features, 0, 1)
# keep track of the number of matches per feature in the bank for weighted averages
self.feature_match_counts = torch.ones(
self.feature_bank.shape[1])
# initialize the pred assignments
self.assignments = [[s] for s in step_instances]
# create data structure to keep track of cluster to instance name matching (for metrics)
self.cluster_meta = {i: {s: 1, 'representative': s}
for i, s in enumerate(step_instances)}
            # for the first step all assignments are correct assignments (assuming GT boxes)
self.total_assignments += len(step_instances)
self.correct_assignments += len(step_instances)
else:
            # create a similarity (reward) matrix between the current observation and the feature bank
sim = torch.matmul(query_features, self.feature_bank)
# hungarian matching to get the maximal assignment
query_idx, history_idx = linear_sum_assignment(
sim.numpy(), maximize=True)
assert len(query_idx) == len(history_idx)
# add the number of queries (denom for a metric)
self.total_assignments += query_features.shape[0]
            # get the identities of the clusters before updating
prev_representatives = set(
[self.cluster_meta[i]['representative'] for i in self.cluster_meta])
for i in range(len(query_idx)):
cluster_number = history_idx[i]
if sim[query_idx[i], history_idx[i]] > self.cos_sim_match_threshold:
# considered a match if the sim is greater than the threshold
# remove from the unmatched queries set
unmatched_queries.remove(query_idx[i])
# weighted average to integrate the query feature into the history
self.feature_bank[:, cluster_number] = self.feature_bank[:, cluster_number] * \
self.feature_match_counts[cluster_number] + \
query_features[query_idx[i]]
self.feature_match_counts[cluster_number] += 1
self.feature_bank[:,
cluster_number] /= self.feature_match_counts[cluster_number]
# renormalize
self.feature_bank[:, cluster_number] = F.normalize(
self.feature_bank[:, cluster_number], dim=0)
# add the gt label of the assignment to this cluster for metrics
assigned_label = step_instances[query_idx[i]]
self.assignments[cluster_number].append(assigned_label)
# find the current representative of the cluster
representative_label = self.cluster_meta[cluster_number]['representative']
if assigned_label in self.cluster_meta[cluster_number]:
self.cluster_meta[cluster_number][assigned_label] += 1
if assigned_label == representative_label:
                            # we are assigning the feature to a cluster with the same gt label, which is good
self.correct_assignments += 1
else:
# here we are adding to a cluster that has never before seen this instance, not good
self.cluster_meta[cluster_number][assigned_label] = 1
if self.cluster_meta[cluster_number][representative_label] <= self.cluster_meta[cluster_number][assigned_label]:
# update the gt label identity of the cluster for purposes of metrics
# NOTE: this is fine to do in the loop as the linear assignment ensures each cluster_number is unique for the update
self.cluster_meta[cluster_number]['representative'] = assigned_label
# get the queries that have not matched
unmatched_queries = list(unmatched_queries)
for u in unmatched_queries:
if step_instances[u] not in prev_representatives:
# case where we correctly assign a new cluster for this instance
self.correct_assignments += 1
for u in unmatched_queries:
# add a cluster for each new unmatched query
self.assignments.append([step_instances[u]])
self.cluster_meta[len(self.cluster_meta)] = {
step_instances[u]: 1, 'representative': step_instances[u]}
# append features to the feature bank
new_features = torch.transpose(
query_features[unmatched_queries], 0, 1)
self.feature_bank = torch.cat(
(self.feature_bank, new_features), 1)
self.feature_match_counts = torch.cat((self.feature_match_counts, torch.ones(
len(unmatched_queries))), 0)
def rand_metrics(self):
gt_labels = []
for i, c in enumerate(self.gt_assignments):
gt_labels += [i] * len(c)
pred_labels = []
for i, c in enumerate(self.assignments):
pred_labels += [i] * len(c)
return rand_score(gt_labels, pred_labels), adjusted_rand_score(gt_labels, pred_labels)
def atomic_mistake_metric(self):
if self.total_assignments == 0:
return 1.0
return self.correct_assignments / float(self.total_assignments)
def num_objects_mape_metric(self):
if self.total_assignments == 0:
return 0.0
return abs(len(self.gt_assignments) - len(self.assignments)) / float(len(self.gt_assignments))
def dump_gt_adjacency_matrix(self):
row_labels = [k for k, _ in sorted(
self.instance_map.items(), key=lambda item: item[1])]
mat = render_adj_matrix(self.gt_adjacency_matrix, row_labels)
sim_mat = Image.fromarray(mat, 'RGB')
sim_mat.save(
f'{self.dump_dir}/{self.room_id}_1_{self.steps-1}_adj.png')
sim_mat.close()
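# Illustrative sketch (not part of the original file): update_clusters above keeps a
# bank of L2-normalized instance features, Hungarian-matches new detections to the
# bank on cosine similarity, and folds matched queries into their cluster with a
# running weighted average before re-normalizing. The hypothetical helpers below
# show those two steps in isolation (CPU tensors assumed).
def _match_queries_to_bank(query_feats, feature_bank, sim_threshold):
    """Return (query_idx, bank_idx) pairs whose similarity exceeds the threshold."""
    sim = torch.matmul(query_feats, feature_bank)
    q_idx, b_idx = linear_sum_assignment(sim.numpy(), maximize=True)
    return [(q, b) for q, b in zip(q_idx, b_idx) if sim[q, b] > sim_threshold]
def _fold_query_into_cluster(feature_bank, match_counts, cluster_idx, query_feat):
    """Weighted-average a query feature into column `cluster_idx` of the bank."""
    col = feature_bank[:, cluster_idx] * match_counts[cluster_idx] + query_feat
    match_counts[cluster_idx] += 1
    feature_bank[:, cluster_idx] = F.normalize(col / match_counts[cluster_idx], dim=0)
    return feature_bank, match_counts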
| CSR-main | src/simulation/agent_obj_nav_expert.py |
from src.simulation.module_planner import PlannerModule
from src.simulation.environment import RearrangeTHOREnvironment
from typing import Any, Dict
import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score, rand_score
def rand_metrics(assignments, gt_assignments):
gt_labels = []
for i, c in enumerate(gt_assignments):
gt_labels += [i] * len(c)
pred_labels = []
for i, c in enumerate(assignments):
pred_labels += [i] * len(c)
return rand_score(gt_labels, pred_labels), adjusted_rand_score(gt_labels, pred_labels)
def atomic_mistake_metric(correct_assignments, total_assignments):
if total_assignments == 0:
return 1.0
return correct_assignments / float(total_assignments)
def num_objects_mape_metric(assignments, gt_assignments, total_assignments):
if total_assignments == 0:
return 0.0
return abs(len(gt_assignments) - len(assignments)) / float(len(gt_assignments))
def rearrangement_metrics(env: RearrangeTHOREnvironment, planner: PlannerModule, roomr_metadata: Dict, with_error: bool) -> Dict[str, Any]:
# modified from: https://github.com/allenai/ai2thor-rearrangement/blob/94d27845b1716cb9be3c77424f56c960905b1daf/rearrange/tasks.py
if not env.shuffle_called:
return {}
ips, gps, cps = env.poses
end_energies = env.pose_difference_energy(gps, cps)
start_energy = env.start_energies.sum()
end_energy = end_energies.sum()
start_misplaceds = env.start_energies > 0.0
end_misplaceds = end_energies > 0.0
num_broken = sum(cp["broken"] for cp in cps)
num_initially_misplaced = start_misplaceds.sum()
num_fixed = num_initially_misplaced - \
(start_misplaceds & end_misplaceds).sum()
num_newly_misplaced = (
end_misplaceds & np.logical_not(start_misplaceds)).sum()
prop_fixed = (
1.0 if num_initially_misplaced == 0 else num_fixed / num_initially_misplaced
)
metrics = {
"start_energy": float(start_energy),
"end_energy": float(end_energy),
"success": float(end_energy == 0),
"prop_fixed": float(prop_fixed),
"prop_fixed_strict": float((num_newly_misplaced == 0) * prop_fixed),
"num_misplaced": int(end_misplaceds.sum()),
"num_newly_misplaced": int(num_newly_misplaced.sum()),
"num_initially_misplaced": int(num_initially_misplaced),
"num_fixed": int(num_fixed.sum()),
"num_broken": int(num_broken),
}
try:
change_energies = env.pose_difference_energy(ips, cps)
change_energy = change_energies.sum()
changeds = change_energies > 0.0
metrics["change_energy"] = float(change_energy)
metrics["num_changed"] = int(changeds.sum())
except AssertionError as _:
pass
if num_initially_misplaced > 0:
metrics["prop_misplaced"] = float(
end_misplaceds.sum() / num_initially_misplaced)
if start_energy > 0:
metrics["energy_prop"] = float(end_energy / start_energy)
_, ars_un = rand_metrics(planner.scene_module_unshuffle.assignments,
planner.scene_module_unshuffle.gt_assignments)
_, ars_w = rand_metrics(planner.scene_module_walkthrough.assignments,
planner.scene_module_walkthrough.gt_assignments)
amm_un = atomic_mistake_metric(
planner.scene_module_unshuffle.correct_assignments, planner.scene_module_unshuffle.total_assignments)
amm_w = atomic_mistake_metric(planner.scene_module_walkthrough.correct_assignments,
planner.scene_module_walkthrough.total_assignments)
mape_un = num_objects_mape_metric(planner.scene_module_unshuffle.assignments,
planner.scene_module_unshuffle.gt_assignments, planner.scene_module_unshuffle.total_assignments)
mape_w = num_objects_mape_metric(planner.scene_module_walkthrough.assignments,
planner.scene_module_walkthrough.gt_assignments, planner.scene_module_walkthrough.total_assignments)
metrics['adjusted_rand_unshuffle'] = ars_un
metrics['adjusted_rand_walkthrough'] = ars_w
metrics['atomic_success_unshuffle'] = amm_un
metrics['atomic_success_walkthrough'] = amm_w
metrics['mape_unshuffle'] = mape_un
metrics['mape_walkthrough'] = mape_w
assert len(planner.box_stats_walkthrough) == len(planner.box_stats_unshuffle)
metrics['object_count'] = len(planner.box_stats_walkthrough)
metrics['objects_detected_walkthrough'] = []
metrics['objects_detected_unshuffle'] = []
metrics['objects_undetected_either'] = []
for d in planner.box_stats_walkthrough:
if planner.box_stats_walkthrough[d]['count'] > 0:
metrics['objects_detected_walkthrough'].append(d)
else:
metrics['objects_undetected_either'].append(d)
for d in planner.box_stats_unshuffle:
if planner.box_stats_unshuffle[d]['count'] > 0:
metrics['objects_detected_unshuffle'].append(d)
else:
metrics['objects_undetected_either'].append(d)
metrics['objects_undetected_either'] = list(set(metrics['objects_undetected_either']))
# task_info = metrics["task_info"]
# task_info["scene"] = env.scene
# task_info["index"] = env.current_task_spec.metrics.get(
# "index")
# task_info["stage"] = env.current_task_spec.stage
# del metrics["task_info"]
# if self.task_spec_in_metrics:
# task_info["task_spec"] = {
# **env.current_task_spec.__dict__}
# task_info["poses"] = env.poses
# task_info["gps_vs_cps"] = env.compare_poses(gps, cps)
# task_info["ips_vs_cps"] = env.compare_poses(ips, cps)
# task_info["gps_vs_ips"] = env.compare_poses(gps, ips)
# task_info["unshuffle_actions"] = self.actions_taken
# task_info["unshuffle_action_successes"] = self.actions_taken_success
# task_info["unique_id"] = env.current_task_spec.unique_id
# if metrics_from_walkthrough is not None:
# mes = {**metrics_from_walkthrough}
# task_info["walkthrough_actions"] = mes["task_info"]["walkthrough_actions"]
# task_info["walkthrough_action_successes"] = mes["task_info"][
# "walkthrough_action_successes"
# ]
# del mes[
# "task_info"
# ] # Otherwise already summarized by the unshuffle task info
# metrics = {
# "task_info": task_info,
# "ep_length": metrics["ep_length"] + mes["walkthrough/ep_length"],
# **{f"unshuffle/{k}": v for k, v in metrics.items()},
# **mes,
# }
# else:
# metrics = {
# "task_info": task_info,
# **{f"unshuffle/{k}": v for k, v in metrics.items()},
# }
# precision metrics for the assignments
return metrics
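# Illustrative sketch (not part of the original file): the clustering metrics above
# flatten per-cluster membership lists into label vectors. The hypothetical example
# below runs them on toy assignments in which one sighting lands in the wrong cluster.
def _example_clustering_metrics():
    gt_assignments = [['Mug_1', 'Mug_1'], ['Bowl_2']]
    assignments = [['Mug_1'], ['Mug_1', 'Bowl_2']]  # second Mug_1 sighting mis-clustered
    rs, ars = rand_metrics(assignments, gt_assignments)
    acc = atomic_mistake_metric(correct_assignments=2, total_assignments=3)
    mape = num_objects_mape_metric(assignments, gt_assignments, total_assignments=3)
    return rs, ars, acc, mape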
| CSR-main | src/simulation/metrics.py |
# based on https://github.com/allenai/ai2thor-rearrangement/blob/main/rearrange/expert.py#L41
import copy
from typing import (
Dict,
Tuple,
Any,
Optional,
Union,
List,
Sequence,
)
import ai2thor.controller
import ai2thor.server
import networkx as nx
from torch.distributions.utils import lazy_property
from allenact.utils.system import get_logger
from allenact_plugins.ithor_plugin.ithor_util import round_to_factor
from src.simulation.constants import STEP_SIZE
AgentLocKeyType = Tuple[float, float, int, int]
class ShortestPathNavigatorTHOR:
"""Tracks shortest paths in AI2-THOR environments.
Assumes 90 degree rotations and fixed step sizes.
# Attributes
controller : The AI2-THOR controller in which shortest paths are computed.
"""
def __init__(
self,
controller: ai2thor.controller.Controller,
grid_size: float = STEP_SIZE,
include_move_left_right: bool = False,
):
"""Create a `ShortestPathNavigatorTHOR` instance.
# Parameters
controller : An AI2-THOR controller which represents the environment in which shortest paths should be
computed.
grid_size : The distance traveled by an AI2-THOR agent when taking a single navigational step.
include_move_left_right : If `True` the navigational actions will include `MoveLeft` and `MoveRight`, otherwise
            they will not.
"""
self._cached_graphs: Dict[str, nx.DiGraph] = {}
self._current_scene: Optional[nx.DiGraph] = None
self._current_graph: Optional[nx.DiGraph] = None
self._grid_size = grid_size
self.controller = controller
self._include_move_left_right = include_move_left_right
@lazy_property
def nav_actions_set(self) -> frozenset:
"""Navigation actions considered when computing shortest paths."""
nav_actions = [
"LookUp",
"LookDown",
"RotateLeft",
"RotateRight",
"MoveAhead",
]
if self._include_move_left_right:
nav_actions.extend(["MoveLeft", "MoveRight"])
return frozenset(nav_actions)
@property
def scene_name(self) -> str:
"""Current ai2thor scene."""
return self.controller.last_event.metadata["sceneName"]
@property
def last_action_success(self) -> bool:
"""Was the last action taken by the agent a success?"""
return self.controller.last_event.metadata["lastActionSuccess"]
@property
def last_event(self) -> ai2thor.server.Event:
"""Last event returned by the controller."""
return self.controller.last_event
def on_reset(self):
"""Function that must be called whenever the AI2-THOR controller is
reset."""
self._current_scene = None
@property
def graph(self) -> nx.DiGraph:
"""A directed graph representing the navigation graph of the current
scene."""
if self._current_scene == self.scene_name:
return self._current_graph
if self.scene_name not in self._cached_graphs:
g = nx.DiGraph()
points = self.reachable_points_with_rotations_and_horizons()
for p in points:
self._add_node_to_graph(g, self.get_key(p))
self._cached_graphs[self.scene_name] = g
self._current_scene = self.scene_name
self._current_graph = self._cached_graphs[self.scene_name].copy()
return self._current_graph
def reachable_points_with_rotations_and_horizons(
self,
) -> List[Dict[str, Union[float, int]]]:
"""Get all the reaachable positions in the scene along with possible
rotation/horizons."""
self.controller.step(action="GetReachablePositions")
assert self.last_action_success
points_slim = self.last_event.metadata["actionReturn"]
points = []
for r in [0, 90, 180, 270]:
for horizon in [0]:#[-30, 0, 30]:
for p in points_slim:
p = copy.copy(p)
p["rotation"] = r
p["horizon"] = horizon
points.append(p)
return points
@staticmethod
def location_for_key(key, y_value=0.0) -> Dict[str, Union[float, int]]:
"""Return a agent location dictionary given a graph node key."""
x, z, rot, hor = key
loc = dict(x=x, y=y_value, z=z, rotation=rot, horizon=hor)
return loc
@staticmethod
def get_key(input_dict: Dict[str, Any], ndigits: int = 2) -> AgentLocKeyType:
"""Return a graph node key given an input agent location dictionary."""
if "x" in input_dict:
x = input_dict["x"]
z = input_dict["z"]
rot = input_dict["rotation"]
hor = input_dict["horizon"]
else:
x = input_dict["position"]["x"]
z = input_dict["position"]["z"]
rot = input_dict["rotation"]["y"]
hor = input_dict["cameraHorizon"]
return (
round(x, ndigits),
round(z, ndigits),
round_to_factor(rot, 30) % 360,
round_to_factor(hor, 30) % 360,
)
def update_graph_with_failed_action(self, failed_action: str):
"""If an action failed, update the graph to let it know this happened
so it won't try again."""
if (
self.scene_name not in self._cached_graphs
or failed_action not in self.nav_actions_set
):
return
source_key = self.get_key(self.last_event.metadata["agent"])
self._check_contains_key(source_key)
edge_dict = self.graph[source_key]
to_remove_key = None
for target_key in self.graph[source_key]:
if edge_dict[target_key]["action"] == failed_action:
to_remove_key = target_key
break
if to_remove_key is not None:
self.graph.remove_edge(source_key, to_remove_key)
def _add_from_to_edge(
self, g: nx.DiGraph, s: AgentLocKeyType, t: AgentLocKeyType,
):
"""Add an edge to the graph."""
def ae(x, y):
return abs(x - y) < 0.001
s_x, s_z, s_rot, s_hor = s
t_x, t_z, t_rot, t_hor = t
l1_dist = round(abs(s_x - t_x) + abs(s_z - t_z), 2)
angle_dist = (round_to_factor(t_rot - s_rot, 90) % 360) // 90
horz_dist = (round_to_factor(t_hor - s_hor, 30) % 360) // 30
        # If source and target differ by more than one action, there is no single-action edge
if sum(x != 0 for x in [l1_dist, angle_dist, horz_dist]) != 1:
return
grid_size = self._grid_size
action = None
if angle_dist != 0:
if angle_dist == 1:
action = "RotateRight"
elif angle_dist == 3:
action = "RotateLeft"
elif horz_dist != 0:
if horz_dist == 11:
action = "LookUp"
elif horz_dist == 1:
action = "LookDown"
elif ae(l1_dist, grid_size):
if s_rot == 0:
forward = round((t_z - s_z) / grid_size)
right = round((t_x - s_x) / grid_size)
elif s_rot == 90:
forward = round((t_x - s_x) / grid_size)
right = -round((t_z - s_z) / grid_size)
elif s_rot == 180:
forward = -round((t_z - s_z) / grid_size)
right = -round((t_x - s_x) / grid_size)
elif s_rot == 270:
forward = -round((t_x - s_x) / grid_size)
right = round((t_z - s_z) / grid_size)
else:
raise NotImplementedError(
f"source rotation == {s_rot} unsupported.")
if forward > 0:
g.add_edge(s, t, action="MoveAhead")
elif self._include_move_left_right:
if forward < 0:
                    # Allowing MoveBack results in some really unintuitive
                    # expert trajectories (i.e. moving backwards to the goal
                    # and then rotating), so for now it is disabled.
pass # g.add_edge(s, t, action="MoveBack")
elif right > 0:
g.add_edge(s, t, action="MoveRight")
elif right < 0:
g.add_edge(s, t, action="MoveLeft")
if action is not None:
g.add_edge(s, t, action=action)
@lazy_property
def possible_neighbor_offsets(self) -> Tuple[AgentLocKeyType, ...]:
"""Offsets used to generate potential neighbors of a node."""
grid_size = round(self._grid_size, 2)
offsets = []
for rot_diff in [-90, 0, 90]:
for horz_diff in [-30, 0, 30, 60]:
for x_diff in [-grid_size, 0, grid_size]:
for z_diff in [-grid_size, 0, grid_size]:
if (rot_diff != 0) + (horz_diff != 0) + (x_diff != 0) + (
z_diff != 0
) == 1:
offsets.append(
(x_diff, z_diff, rot_diff, horz_diff))
return tuple(offsets)
def _add_node_to_graph(self, graph: nx.DiGraph, s: AgentLocKeyType):
"""Add a node to the graph along with any adjacent edges."""
if s in graph:
return
existing_nodes = set(graph.nodes())
graph.add_node(s)
for x_diff, z_diff, rot_diff, horz_diff in self.possible_neighbor_offsets:
t = (
s[0] + x_diff,
s[1] + z_diff,
(s[2] + rot_diff) % 360,
(s[3] + horz_diff) % 360,
)
if t in existing_nodes:
self._add_from_to_edge(graph, s, t)
self._add_from_to_edge(graph, t, s)
def _check_contains_key(self, key: AgentLocKeyType, add_if_not=True) -> bool:
"""Check if a node key is in the graph.
# Parameters
key : The key to check.
add_if_not : If the key doesn't exist and this is `True`, the key will be added along with
edges to any adjacent nodes.
"""
key_in_graph = key in self.graph
if not key_in_graph:
get_logger().debug(
"{} was not in the graph for scene {}.".format(
key, self.scene_name)
)
if add_if_not:
self._add_node_to_graph(self.graph, key)
if key not in self._cached_graphs[self.scene_name]:
self._add_node_to_graph(
self._cached_graphs[self.scene_name], key)
return key_in_graph
def shortest_state_path(
self, source_state_key: AgentLocKeyType, goal_state_key: AgentLocKeyType
) -> Optional[Sequence[AgentLocKeyType]]:
"""Get the shortest path between node keys."""
self._check_contains_key(source_state_key)
self._check_contains_key(goal_state_key)
# noinspection PyBroadException
path = nx.shortest_path(
G=self.graph, source=source_state_key, target=goal_state_key
)
return path
def action_transitioning_between_keys(self, s: AgentLocKeyType, t: AgentLocKeyType):
"""Get the action that takes the agent from node s to node t."""
self._check_contains_key(s)
self._check_contains_key(t)
if self.graph.has_edge(s, t):
return self.graph.get_edge_data(s, t)["action"]
else:
return None
def shortest_path_next_state(
self, source_state_key: AgentLocKeyType, goal_state_key: AgentLocKeyType
):
"""Get the next node key on the shortest path from the source to the
goal."""
if source_state_key == goal_state_key:
raise RuntimeError(
"called next state on the same source and goal state")
state_path = self.shortest_state_path(source_state_key, goal_state_key)
return state_path[1]
def shortest_path_next_action(
self, source_state_key: AgentLocKeyType, goal_state_key: AgentLocKeyType
):
"""Get the next action along the shortest path from the source to the
goal."""
next_state_key = self.shortest_path_next_state(
source_state_key, goal_state_key)
return self.graph.get_edge_data(source_state_key, next_state_key)["action"]
def shortest_path_next_action_multi_target(
self,
source_state_key: AgentLocKeyType,
goal_state_keys: Sequence[AgentLocKeyType],
):
"""Get the next action along the shortest path from the source to the
closest goal."""
self._check_contains_key(source_state_key)
terminal_node = (-1.0, -1.0, -1, -1)
self.graph.add_node(terminal_node)
for gsk in goal_state_keys:
self._check_contains_key(gsk)
self.graph.add_edge(gsk, terminal_node, action=None)
next_state_key = self.shortest_path_next_state(
source_state_key, terminal_node)
action = self.graph.get_edge_data(
source_state_key, next_state_key)["action"]
self.graph.remove_node(terminal_node)
return action
def shortest_path_length(
self, source_state_key: AgentLocKeyType, goal_state_key: AgentLocKeyType
):
"""Get the path shorest path length between the source and the goal."""
self._check_contains_key(source_state_key)
self._check_contains_key(goal_state_key)
try:
return nx.shortest_path_length(self.graph, source_state_key, goal_state_key)
except nx.NetworkXNoPath as _:
return float("inf")
| CSR-main | src/simulation/shortest_path_navigator.py |
import os
import numpy as np
import src.dataloaders.augmentations as A
import torch
import torch.nn.functional as F
import torchvision.transforms as T
from PIL import Image, ImageDraw
from pytorch_lightning import seed_everything
from src.dataloaders.roomr_dataset_utils import get_rearrange_task_spec
from src.shared.constants import CLASSES_TO_IGNORE, IMAGE_SIZE
from src.simulation.constants import ROOMR_CONTROLLER_COMMIT_ID
from src.simulation.environment import RearrangeTHOREnvironment
from src.simulation.module_box import GtBoxModule
from src.simulation.module_exploration import GtExplorationModule
from src.simulation.module_state_graph import StateGraphModule
from src.simulation.rearrange_utils import (load_rearrange_data_from_path,
load_rearrange_meta_from_path)
from src.simulation.rearrangement_args import RearrangementArgs
class AgentDataGen(object):
def __init__(
self,
rearrangement_args: RearrangementArgs
) -> None:
super().__init__()
if not os.path.exists(rearrangement_args.dump_dir):
os.mkdir(rearrangement_args.dump_dir)
self.dump_dir = rearrangement_args.dump_dir
self.env = None
self.roomr_metadata = load_rearrange_meta_from_path(
rearrangement_args.data_split, rearrangement_args.roomr_meta_dir)
self.reset(rearrangement_args=rearrangement_args)
def reset(self, rearrangement_args=None):
seed_everything(0)
if rearrangement_args is not None:
self.rearrangement_args = rearrangement_args
# initialize modules based on flags
self.box_module = None
self.exploration_module = None
# create env with basic controller
if self.env is None:
self.env = RearrangeTHOREnvironment(
force_cache_reset=False,
controller_kwargs={
'commit_id': ROOMR_CONTROLLER_COMMIT_ID,
'height': IMAGE_SIZE,
'width': IMAGE_SIZE,
'renderInstanceSegmentation': self.rearrangement_args.render_instance_segmentation,
'renderDepthImage': False,
'visibilityDistance': self.rearrangement_args.visibility_distance,
'quality': "Very Low"})
# BOX MODULE
if self.rearrangement_args.use_gt_boxes:
box_frac_threshold = self.rearrangement_args.box_frac_threshold
self.box_module = GtBoxModule(box_frac_threshold)
else:
raise NotImplementedError()
# EXPLORATION MODULE
if self.rearrangement_args.use_gt_exploration:
split = self.rearrangement_args.data_split
data = load_rearrange_data_from_path(
split, self.rearrangement_args.roomr_dir)
room_id = self.rearrangement_args.room_id
dump_dir = self.rearrangement_args.dump_dir
floor_plan = 'FloorPlan' + str(room_id)
instance_id = self.rearrangement_args.instance_id
exploration_strategy = self.rearrangement_args.gt_exploration_strategy
num_steps = self.rearrangement_args.num_steps
rotation_degrees = self.rearrangement_args.rotation_degrees
task_spec = get_rearrange_task_spec(
data, floor_plan, instance_id, split)
metadata = self.roomr_metadata[f'{floor_plan}_{instance_id}']
self.exploration_module = GtExplorationModule(
self.env, task_spec, exploration_strategy, metadata, num_steps, rotation_degrees, room_id, instance_id, dump_dir)
else:
raise NotImplementedError()
# BETWEEN TRAJECTORY CORRESPONDENCE MODULE
if self.rearrangement_args.use_gt_object_matching:
room_id = self.rearrangement_args.room_id
dump_dir = self.rearrangement_args.dump_dir
instance_id = self.rearrangement_args.instance_id
else:
raise NotImplementedError()
assert self.env.controller == self.exploration_module.navi.controller
def walkthrough_pipeline(self):
self.explore_shared(True)
def explore_shared(self, from_walkthrough):
# initial state and initialize the representation
thor_state = self.exploration_module.env.last_event
if self.rearrangement_args.debug:
self.exploration_module.dump_observation()
        assert thor_state is not None
while True:
thor_state, _ = self.exploration_module.take_action()
if thor_state is None:
break
if self.rearrangement_args.debug:
self.exploration_module.dump_observation()
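if __name__ == "__main__":
    # Illustrative sketch only (not part of the pipeline): the RoomR paths
    # below are placeholders and must point to a local RoomR download for the
    # generator to actually run.
    args = RearrangementArgs(
        room_id=1,
        instance_id=0,
        data_split='val',
        roomr_dir='/path/to/roomr',
        roomr_meta_dir='/path/to/roomr_meta',
        dump_dir='./tmp',
        debug=True)
    gen = AgentDataGen(args)
    gen.walkthrough_pipeline()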
| CSR-main | src/simulation/agent_data_gen.py |
from dataclasses import dataclass
@dataclass
class RearrangementArgs(object):
instance_id: int = -1
room_id: int = -1
    num_steps: int = 250
render_instance_segmentation: bool = False
visibility_distance: float = 20.0
rotation_degrees: int = 30
box_frac_threshold: float = 0.008
box_conf_threshold: float = 0.5
cos_sim_match_threshold: float = 0.5
cos_sim_object_threshold: float = 0.5
cos_sim_moved_threshold: float = 0.65
averaging_strategy: str = 'weighted'
gt_exploration_strategy: str = 'waypoint'
# inference models
boxes_model_path: str = ''
boxes_model_type: str = 'maskrcnn'
exploration_model_path: str = ''
exploration_cache_dir: str = ''
are_close_model_path: str = ''
relation_tracking_model_path: str = ''
object_tracking_model_path: str = ''
    device_relation_tracking: int = -1  # -1 indicates CPU
# flags to toggle using gt
use_gt_boxes: bool = True
use_gt_exploration: bool = True
use_gt_are_close: bool = True
use_gt_relation_tracking: bool = True
use_gt_object_matching: bool = True
use_roi_feature_within_traj: bool = False
use_roi_feature_between_traj: bool = False
use_box_within_traj: bool = False
# debug options
debug: bool = False
dump_dir: str = './tmp'
roomr_dir: str = ''
roomr_meta_dir: str = ''
data_split: str = 'val'
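if __name__ == "__main__":
    # Minimal usage sketch: fields are plain dataclass attributes that can be
    # overridden per episode; the ids below are arbitrary examples.
    args = RearrangementArgs(room_id=301, instance_id=2, use_gt_boxes=True)
    print(args)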
| CSR-main | src/simulation/rearrangement_args.py |
import json
import os
import random
from time import time
from typing import Dict
from PIL import Image
from ai2thor.controller import RECEPTACLE_OBJECTS
from src.dataloaders.roomr_dataset_utils import find_waypoint_plan
from src.models.exploration_model import StatefulExplorationModel
from src.shared.utils import check_none_or_empty, compute_3d_dist
from src.simulation.constants import EXPLORATION_ACTION_ORDER, OPENABLE_OBJECTS, PICKUPABLE_OBJECTS, REARRANGE_SIM_OBJECTS_COLOR_LOOKUP
from src.simulation.environment import (RearrangeTaskSpec,
RearrangeTHOREnvironment)
from src.simulation.shortest_path_navigator import ShortestPathNavigatorTHOR
from src.simulation.utils import get_agent_map_data
from allenact.embodiedai.mapping.mapping_utils.point_cloud_utils import \
depth_frame_to_camera_space_xyz, camera_space_xyz_to_world_xyz
import torch
import trimesh
class GtExplorationModule(object):
def __init__(self,
env: RearrangeTHOREnvironment,
task_spec: RearrangeTaskSpec,
exploration_strategy: str,
task_metadata: Dict,
num_steps: int,
rotation_degrees: int,
room_id: int,
instance_id: int,
dump_dir: str) -> None:
super().__init__()
self.env = env
self.task_spec = task_spec
self.exploration_strategy = exploration_strategy
self.task_metadata = task_metadata
self.num_steps = num_steps
self.rotation_degrees = rotation_degrees
self.room_id = room_id
self.instance_id = instance_id
self.dump_dir = dump_dir
self.reset()
def reset(self, shuffle=False):
self.env.reset(self.task_spec,
force_axis_aligned_start=True)
if shuffle:
self.env.shuffle()
self.navi = ShortestPathNavigatorTHOR(self.env.controller)
self.navi_reachable_spots = self.navi.reachable_points_with_rotations_and_horizons()
self.env.controller.step('Done')
self.steps = 0
self.turn_count = 0
self.rollout = []
self.last_action = None
self.turn_direction = None
self.targets = None
self.target_key = None
if self.exploration_strategy == 'fps':
self.targets = [self.get_pose()['position']]
self.target_key = self._get_fps_target_key()
elif self.exploration_strategy == 'waypoint':
self.targets = find_waypoint_plan(
self.get_pose()['position'], self.task_metadata, self.env.shuffle_called)
if len(self.targets):
self.target_key = self.navi.get_key(self.targets.pop(0))
else:
self.target_key = None
else:
            raise ValueError(f'unknown exploration_strategy: {self.exploration_strategy}')
def take_action(self):
if self.target_key is None:
return None, False
curr_key = self.navi.get_key(self.navi.last_event.metadata["agent"])
self.last_action = None
max_retries = 50
retries = 0
        while True:
event = None
update_state_graph = False
assert retries < max_retries
if self.turn_direction is not None:
self.last_action = self.turn_direction
event = self.navi.controller.step(
action=self.last_action, degrees=self.rotation_degrees, **self.env.physics_step_kwargs)
self.turn_count += self.rotation_degrees
assert event.metadata['lastActionSuccess']
else:
self.last_action = self.navi.shortest_path_next_action(
curr_key, self.target_key)
if self.last_action == 'RotateRight' or self.last_action == 'RotateLeft':
self.turn_direction = self.last_action
event = self.navi.controller.step(
action=self.last_action, degrees=self.rotation_degrees, **self.env.physics_step_kwargs)
self.turn_count += self.rotation_degrees
else:
event = self.navi.controller.step(
action=self.last_action, **self.env.physics_step_kwargs)
update_state_graph = True
if not event.metadata['lastActionSuccess']:
self.navi.update_graph_with_failed_action(self.last_action)
retries += 1
continue
self.steps += 1
if self.turn_count == 90:
self.turn_direction = None
self.turn_count = 0
update_state_graph = True
# # turns are decomposed into 3 subactions but only want to increment steps once
# self.steps += 1
self.rollout.append(self.get_pose())
if self.steps == self.num_steps:
self.target_key = None
elif self.at_goal():
if self.exploration_strategy == 'fps':
self.targets.append(self.get_pose()['position'])
                    self.target_key = self._get_fps_target_key()
elif self.exploration_strategy == 'waypoint':
if len(self.targets) != 0:
# print('hit a target!')
self.target_key = self.navi.get_key(
self.targets.pop(0))
else:
self.target_key = None
return event, True
return event, update_state_graph
def dump_observation(self):
prefix = 'walkthrough'
if self.env.shuffle_called:
prefix = 'unshuffle'
im = Image.fromarray(self.env.last_event.frame)
im.save(
f'{self.dump_dir}/{prefix}_{self.room_id}_{self.instance_id}_{self.steps}.png', 'PNG')
with open(os.path.join(self.dump_dir, f'{prefix}_{self.room_id}_{self.instance_id}_{self.steps}.json'), 'w') as f:
json.dump(self.env.last_event.metadata, f, indent=4)
def at_goal(self):
return self.navi.get_key(self.navi.last_event.metadata["agent"]) == self.target_key
def get_pose(self):
event = self.navi.controller.step(
action='Done', **self.env.physics_step_kwargs
)
assert event.metadata["lastActionSuccess"]
position = event.metadata["agent"]["position"]
rotation = event.metadata["agent"]["rotation"]
horizon = round(event.metadata["agent"]["cameraHorizon"], 2)
return {'position': position, 'rotation': rotation, 'horizon': horizon}
def _get_random_target_key(self):
target_index = random.randint(0, len(self.navi_reachable_spots)-1)
return self.navi.get_key(self.navi_reachable_spots[target_index])
def _get_fps_target_key(self):
avg = {'x': 0., 'y': 0., 'z': 0.}
for t in self.targets:
avg['x'] += t['x']
avg['y'] += t['y']
avg['z'] += t['z']
avg['x'] /= len(self.targets)
avg['y'] /= len(self.targets)
avg['z'] /= len(self.targets)
max_dist = 0.0
max_arg = None
for i, e in enumerate(self.navi_reachable_spots):
d = compute_3d_dist(avg, e)
if d > max_dist:
max_arg = i
max_dist = d
return self.navi.get_key(self.navi_reachable_spots[max_arg])
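# Typical driver loop for an exploration module (illustrative sketch, assuming
# `module` is an initialized GtExplorationModule or ReplayExplorationModule):
#
#     event, update_state_graph = module.take_action()
#     while event is not None:
#         if update_state_graph:
#             module.dump_observation()
#         event, update_state_graph = module.take_action()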
class ReplayExplorationModule(object):
def __init__(self,
env: RearrangeTHOREnvironment,
task_spec: RearrangeTaskSpec,
cache: Dict,
rotation_degrees: int,
room_id: int,
instance_id: int,
dump_dir: str) -> None:
super().__init__()
self.env = env
self.task_spec = task_spec
self.rotation_degrees = rotation_degrees
self.room_id = room_id
self.instance_id = instance_id
self.dump_dir = dump_dir
self.trajectories = cache
self.trajectory = None
self.reset()
def reset(self, shuffle=False):
self.env.reset(self.task_spec,
force_axis_aligned_start=True)
self.trajectory = None
if shuffle:
self.env.shuffle()
self.trajectory = self.trajectories['unshuffle'][:50]
else:
self.trajectory = self.trajectories['walkthrough'][:50]
self.env.controller.step('Done')
self.steps = 0
self.turn_count = 0
self.rollout = []
self.last_action = None
self.turn_direction = None
# for result fig
self.points = None
self.colors = None
def take_action(self):
if self.steps >= len(self.trajectory) and self.turn_count == 0:
return None, False
self.last_action = None
event = None
update_state_graph = False
if self.turn_direction is not None:
self.last_action = self.turn_direction
event = self.env.controller.step(
action=self.last_action, degrees=self.rotation_degrees, **self.env.physics_step_kwargs)
self.turn_count += self.rotation_degrees
if not event.metadata['lastActionSuccess']:
raise ValueError(event.metadata['errorMessage'])
else:
# print(self.trajectory[self.steps])
self.last_action = EXPLORATION_ACTION_ORDER[self.trajectory[self.steps]]
if self.last_action == 'RotateRight' or self.last_action == 'RotateLeft':
self.turn_direction = self.last_action
event = self.env.controller.step(
action=self.last_action, degrees=self.rotation_degrees, **self.env.physics_step_kwargs)
self.turn_count += self.rotation_degrees
else:
event = self.env.controller.step(
action=self.last_action, **self.env.physics_step_kwargs)
update_state_graph = True
# we are replaying a trajectory so it should never fail
if not event.metadata['lastActionSuccess']:
raise ValueError(event.metadata['errorMessage'])
self.steps += 1
if self.turn_count == 90:
self.turn_direction = None
self.turn_count = 0
update_state_graph = True
self.rollout.append(self.get_pose())
# if self.at_goal():
# return event, True
return event, update_state_graph
def dump_observation(self):
prefix = 'walkthrough'
if self.env.shuffle_called:
prefix = 'unshuffle'
im = Image.fromarray(self.env.last_event.frame)
im.save(
f'{self.dump_dir}/{prefix}_{self.room_id}_{self.instance_id}_{self.steps}.png', 'PNG')
with open(os.path.join(self.dump_dir, f'{prefix}_{self.room_id}_{self.instance_id}_{self.steps}.json'), 'w') as f:
json.dump(self.env.last_event.metadata, f, indent=4)
camera_space_xyz = depth_frame_to_camera_space_xyz(
depth_frame=torch.as_tensor(self.env.last_event.depth_frame), mask=None, fov=90
)
x = self.env.last_event.metadata['agent']['position']['x']
y = self.env.last_event.metadata['agent']['position']['y']
z = self.env.last_event.metadata['agent']['position']['z']
world_points = camera_space_xyz_to_world_xyz(
camera_space_xyzs=camera_space_xyz,
camera_world_xyz=torch.as_tensor([x, y, z]),
rotation=self.env.last_event.metadata['agent']['rotation']['y'],
horizon=self.env.last_event.metadata['agent']['cameraHorizon'],
).reshape(3, self.env.controller.height, self.env.controller.width).permute(1, 2, 0)
for mask_instance in self.env.last_event.instance_masks:
detection_category = mask_instance.split('|')[0]
if detection_category in REARRANGE_SIM_OBJECTS_COLOR_LOOKUP:
# if (detection_category in PICKUPABLE_OBJECTS) or\
# (detection_category in OPENABLE_OBJECTS) or\
# (detection_category in RECEPTACLE_OBJECTS):
# register this box in 3D
mask = self.env.last_event.instance_masks[mask_instance]
obj_points = world_points[mask]
obj_colors = torch.as_tensor(REARRANGE_SIM_OBJECTS_COLOR_LOOKUP[detection_category])
obj_colors.unsqueeze_(0)
obj_colors = obj_colors.repeat(obj_points.shape[0], 1)
if self.points is None:
self.points = obj_points
self.colors = obj_colors
else:
self.points = torch.cat((self.points, obj_points), 0)
self.colors = torch.cat((self.colors, obj_colors), 0)
assert self.points.shape == self.colors.shape
rgba_colors = torch.ones(self.colors.shape[0], 4)
rgba_colors[:, :3] = self.colors
ply = trimesh.points.PointCloud(vertices=self.points.numpy(), colors=rgba_colors.numpy())
ply.export(f'{self.dump_dir}/{prefix}_{self.room_id}_{self.instance_id}_{self.steps}.ply')
# top_down_data = get_agent_map_data(self.env.controller)
# top_img = Image.fromarray(top_down_data['frame'])
# top_img.save(os.path.join(
# self.dump_dir, f'{prefix}_{self.room_id}_{self.instance_id}_{self.steps}_top.png'), 'PNG')
return
def at_goal(self):
tmp = self.steps + 1
return tmp == len(self.trajectory)
def get_pose(self):
event = self.env.controller.step(
action='Done', **self.env.physics_step_kwargs
)
assert event.metadata["lastActionSuccess"]
position = event.metadata["agent"]["position"]
rotation = event.metadata["agent"]["rotation"]
horizon = round(event.metadata["agent"]["cameraHorizon"], 2)
return {'position': position, 'rotation': rotation, 'horizon': horizon}
# class PredExplorationModule(object):
# def __init__(self,
# env: RearrangeTHOREnvironment,
# task_spec: RearrangeTaskSpec,
# exploration_model_path: str,
# rotation_degrees: int,
# room_id: int,
# instance_id: int,
# dump_dir: str) -> None:
# super().__init__()
# self.env = env
# self.task_spec = task_spec
# self.rotation_degrees = rotation_degrees
# self.room_id = room_id
# self.instance_id = instance_id
# self.dump_dir = dump_dir
# if not check_none_or_empty(exploration_model_path):
# self.relation_tracking_model = StatefulExplorationModel(exploration_model_path)
# else:
# raise ValueError('exploration_model_path should not be None or empty')
# self.reset()
# def reset(self, shuffle=False):
# self.env.reset(self.task_spec,
# force_axis_aligned_start=True)
# self.trajectory = None
# self.relation_tracking_model.reset()
# if shuffle:
# self.env.shuffle()
# self.trajectory = self.trajectories['unshuffle']
# else:
# self.trajectory = self.trajectories['walkthrough']
# self.navi = ShortestPathNavigatorTHOR(self.env.controller)
# self.navi_reachable_spots = self.navi.reachable_points_with_rotations_and_horizons()
# self.env.controller.step('Done')
# self.turn_count = 0
# self.rollout = []
# self.last_action = None
# self.turn_direction = None
# def take_action(self):
# # curr_key = self.navi.get_key(self.navi.last_event.metadata["agent"])
# self.last_action = None
# event = None
# update_state_graph = False
# if self.turn_direction is not None:
# self.last_action = self.turn_direction
# event = self.navi.controller.step(
# action=self.last_action, degrees=self.rotation_degrees, **self.env.physics_step_kwargs)
# self.turn_count += self.rotation_degrees
# assert event.metadata['lastActionSuccess']
# else:
# # print(self.trajectory[self.steps])
# action = self.relation_tracking_model.get_action(self.env.controller)
# if action is None:
# return None, False
# self.last_action = EXPLORATION_ACTION_ORDER[action]
# if self.last_action == 'RotateRight' or self.last_action == 'RotateLeft':
# self.turn_direction = self.last_action
# event = self.navi.controller.step(
# action=self.last_action, degrees=self.rotation_degrees, **self.env.physics_step_kwargs)
# self.turn_count += self.rotation_degrees
# else:
# event = self.navi.controller.step(
# action=self.last_action, **self.env.physics_step_kwargs)
# update_state_graph = True
# # we are replaying a trajectory so it should never fail
# assert event.metadata['lastActionSuccess']
# if self.turn_count == 90:
# self.turn_direction = None
# self.turn_count = 0
# update_state_graph = True
# self.rollout.append(self.get_pose())
# # if self.at_goal():
# # return event, True
# return event, update_state_graph
# def dump_observation(self):
# prefix = 'walkthrough'
# if self.env.shuffle_called:
# prefix = 'unshuffle'
# im = Image.fromarray(self.env.last_event.frame)
# im.save(
# f'{self.dump_dir}/{prefix}_{self.room_id}_{self.instance_id}_{self.steps}.png', 'PNG')
# with open(os.path.join(self.dump_dir, f'{prefix}_{self.room_id}_{self.instance_id}_{self.steps}.json'), 'w') as f:
# json.dump(self.env.last_event.metadata, f, indent=4)
# def at_goal(self):
# tmp = self.steps + 1
# return tmp == len(self.trajectory)
# def get_pose(self):
# event = self.navi.controller.step(
# action='Done', **self.env.physics_step_kwargs
# )
# assert event.metadata["lastActionSuccess"]
# position = event.metadata["agent"]["position"]
# rotation = event.metadata["agent"]["rotation"]
# horizon = round(event.metadata["agent"]["cameraHorizon"], 2)
# return {'position': position, 'rotation': rotation, 'horizon': horizon}
| CSR-main | src/simulation/module_exploration.py |
import matplotlib.pyplot as plt
from networkx import draw_networkx
from networkx.algorithms.shortest_paths.generic import shortest_path
from networkx.classes.digraph import DiGraph
from PIL import ImageChops
from src.simulation.constants import ACTION_NEGATIONS
from src.simulation.state import State
class StateGraphModule(object):
def __init__(self) -> None:
super().__init__()
self.reset()
def reset(self):
self.graph = DiGraph()
self.pickupable_cluster_to_node = {}
self.openable_cluster_to_node = {}
self.cluster_to_biggest_box_node = {}
self.node_count = 0
self.current_state = None
def find_path(self, src_id, target_id):
if src_id >= self.node_count:
raise ValueError(f'src_id not in graph: {src_id}')
if target_id >= self.node_count:
raise ValueError(f'target_id not in graph: {target_id}')
if src_id == target_id:
return []
return shortest_path(self.graph, source=src_id, target=target_id)
def add_adjoint_node(self, state: State):
node_id = self.node_count
self.graph.add_node(node_id, attr={'state': state})
self.node_count += 1
return node_id
def add_adjoint_edge(self, node_src, node_dest, attr):
self.graph.add_edge(node_src, node_dest, attr=attr)
def add_edge(self, state: State, action: str):
assert abs(state.agent_rotation['y']) % 90.0 < 0.001
if action is None:
# special case where we are adding the first state
self.graph.add_node(0, attr={'state': state})
self.node_count = 1
self.current_state = state
if len(state.pickupable):
for cluster_id in state.pickupable:
if cluster_id in self.pickupable_cluster_to_node:
self.pickupable_cluster_to_node[cluster_id].append(0)
else:
self.pickupable_cluster_to_node[cluster_id] = [0]
if len(state.openable):
for cluster_id in state.openable:
if cluster_id in self.openable_cluster_to_node:
self.openable_cluster_to_node[cluster_id].append(0)
else:
self.openable_cluster_to_node[cluster_id] = [0]
for local_id, cluster_id in enumerate(state.instance_cluster_ids):
if cluster_id not in self.cluster_to_biggest_box_node:
self.cluster_to_biggest_box_node[cluster_id] = (state.boxes[local_id], state.areas[local_id], 0)
else:
if state.areas[local_id] > self.cluster_to_biggest_box_node[cluster_id][1]:
self.cluster_to_biggest_box_node[cluster_id] = (state.boxes[local_id], state.areas[local_id], 0)
return
assert self.node_count > 0
assert self.current_state is not None
if action not in ACTION_NEGATIONS:
raise ValueError(f'action: {action} not supported')
node_id = self.state_to_node_id(state)
if node_id is None:
node_id = self.node_count
self.graph.add_node(node_id, attr={'state': state})
self.node_count += 1
src = self.state_to_node_id(self.current_state)
self.graph.add_edge(src, node_id, attr={'action': action})
negated_action = ACTION_NEGATIONS[action]
self.graph.add_edge(node_id, src, attr={
'action': negated_action})
self.current_state = state
if len(state.pickupable):
for cluster_id in state.pickupable:
if cluster_id in self.pickupable_cluster_to_node:
self.pickupable_cluster_to_node[cluster_id].append(node_id)
else:
self.pickupable_cluster_to_node[cluster_id] = [node_id]
if len(state.openable):
for cluster_id in state.openable:
if cluster_id in self.openable_cluster_to_node:
self.openable_cluster_to_node[cluster_id].append(node_id)
else:
self.openable_cluster_to_node[cluster_id] = [node_id]
for local_id, cluster_id in enumerate(state.instance_cluster_ids):
if cluster_id not in self.cluster_to_biggest_box_node:
self.cluster_to_biggest_box_node[cluster_id] = (state.boxes[local_id], state.areas[local_id], node_id)
else:
if state.areas[local_id] > self.cluster_to_biggest_box_node[cluster_id][1]:
self.cluster_to_biggest_box_node[cluster_id] = (state.boxes[local_id], state.areas[local_id], node_id)
def state_to_node_id(self, state: State):
nodes = self.graph.nodes()
for state_id in nodes:
existing_state = nodes[state_id]['attr']['state']
if self.are_same_agent_pose(state, existing_state):
return state_id
return None
def are_same_agent_pose(self, s1: State, s2: State, pos_thres=0.1, rot_thresh=10., hor_thresh=10.):
keys = ['x', 'y', 'z']
for k in keys:
if abs(s1.agent_position[k] - s2.agent_position[k]) > pos_thres:
return False
if abs(s1.agent_rotation[k] - s2.agent_rotation[k]) > rot_thresh:
return False
        if abs(s1.agent_horizon - s2.agent_horizon) > hor_thresh:
return False
return True
def dump_graph(self, from_walkthrough):
plt.clf()
color = 'blue'
if from_walkthrough:
color = 'green'
options = {
'node_color': color,
'node_size': 200,
'width': 2,
'arrowstyle': '-|>',
'arrowsize': 12,
}
draw_networkx(self.graph, arrows=True, **options)
if from_walkthrough:
plt.savefig('walkthrough.png')
else:
plt.savefig('unshuffle.png')
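if __name__ == "__main__":
    # Minimal sketch of the graph API. Real usage adds nodes from `State`
    # observations via add_edge; here None stands in for a State purely to
    # illustrate path queries between manually added adjoint nodes.
    sg = StateGraphModule()
    a = sg.add_adjoint_node(None)
    b = sg.add_adjoint_node(None)
    sg.add_adjoint_edge(a, b, {'action': 'MoveAhead'})
    print(sg.find_path(a, b))  # -> [0, 1]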
| CSR-main | src/simulation/module_state_graph.py |
# modified from https://github.com/allenai/ai2thor-rearrangement/blob/main/rearrange/constants.py
import colorsys
import random
import numpy as np
random.seed(0)
MAX_HAND_METERS = 0.5
FOV = 90
STEP_SIZE = 0.25
# fmt: off
REARRANGE_SIM_OBJECTS = [
# A
"AlarmClock", "AluminumFoil", "Apple", "AppleSliced", "ArmChair",
"BaseballBat", "BasketBall", "Bathtub", "BathtubBasin", "Bed", "Blinds", "Book", "Boots", "Bottle", "Bowl", "Box",
# B
"Bread", "BreadSliced", "ButterKnife",
# C
"Cabinet", "Candle", "CD", "CellPhone", "Chair", "Cloth", "CoffeeMachine", "CoffeeTable", "CounterTop", "CreditCard",
"Cup", "Curtains",
# D
"Desk", "DeskLamp", "Desktop", "DiningTable", "DishSponge", "DogBed", "Drawer", "Dresser", "Dumbbell",
# E
"Egg", "EggCracked",
# F
"Faucet", "Floor", "FloorLamp", "Footstool", "Fork", "Fridge",
# G
"GarbageBag", "GarbageCan",
# H
"HandTowel", "HandTowelHolder", "HousePlant", "Kettle", "KeyChain", "Knife",
# L
"Ladle", "Laptop", "LaundryHamper", "Lettuce", "LettuceSliced", "LightSwitch",
# M
"Microwave", "Mirror", "Mug",
# N
"Newspaper",
# O
"Ottoman",
# P
"Painting", "Pan", "PaperTowel", "Pen", "Pencil", "PepperShaker", "Pillow", "Plate", "Plunger", "Poster", "Pot",
"Potato", "PotatoSliced",
# R
"RemoteControl", "RoomDecor",
# S
"Safe", "SaltShaker", "ScrubBrush", "Shelf", "ShelvingUnit", "ShowerCurtain", "ShowerDoor", "ShowerGlass",
"ShowerHead", "SideTable", "Sink", "SinkBasin", "SoapBar", "SoapBottle", "Sofa", "Spatula", "Spoon", "SprayBottle",
"Statue", "Stool", "StoveBurner", "StoveKnob",
# T
"TableTopDecor", "TargetCircle", "TeddyBear", "Television", "TennisRacket", "TissueBox", "Toaster", "Toilet",
"ToiletPaper", "ToiletPaperHanger", "Tomato", "TomatoSliced", "Towel", "TowelHolder", "TVStand",
# V
"VacuumCleaner", "Vase",
# W
"Watch", "WateringCan", "Window", "WineBottle",
]
# fmt: on
BIGGER = {
"Book": {"openable": True, "receptacle": False, "pickupable": True},
"Bread": {"openable": False, "receptacle": False, "pickupable": True},
"Potato": {"openable": False, "receptacle": False, "pickupable": True},
"SoapBottle": {"openable": False, "receptacle": False, "pickupable": True},
"Kettle": {"openable": True, "receptacle": False, "pickupable": True},
"Pan": {"openable": False, "receptacle": True, "pickupable": True},
"Plate": {"openable": False, "receptacle": True, "pickupable": True},
"Tomato": {"openable": False, "receptacle": False, "pickupable": True},
"Vase": {"openable": False, "receptacle": False, "pickupable": True},
"WineBottle": {"openable": False, "receptacle": False, "pickupable": True},
"Pot": {"openable": False, "receptacle": True, "pickupable": True},
"Lettuce": {"openable": False, "receptacle": False, "pickupable": True},
"Statue": {"openable": False, "receptacle": False, "pickupable": True},
"SprayBottle": {"openable": False, "receptacle": False, "pickupable": True},
"Box": {"openable": True, "receptacle": True, "pickupable": True},
"Laptop": {"openable": True, "receptacle": False, "pickupable": True},
"Pillow": {"openable": False, "receptacle": False, "pickupable": True},
"WateringCan": {"openable": False, "receptacle": False, "pickupable": True},
"Boots": {"openable": False, "receptacle": False, "pickupable": True},
"BasketBall": {"openable": False, "receptacle": False, "pickupable": True},
"TennisRacket": {"openable": False, "receptacle": False, "pickupable": True},
"TeddyBear": {"openable": False, "receptacle": False, "pickupable": True},
"Cloth": {"openable": False, "receptacle": False, "pickupable": True},
"Dumbbell": {"openable": False, "receptacle": False, "pickupable": True},
"Footstool": {"openable": False, "receptacle": True, "pickupable": True},
"Towel": {"openable": False, "receptacle": False, "pickupable": True},
"HandTowel": {"openable": False, "receptacle": False, "pickupable": True},
"Plunger": {"openable": False, "receptacle": False, "pickupable": True},
"ToiletPaper": {"openable": False, "receptacle": False, "pickupable": True},
"ScrubBrush": {"openable": False, "receptacle": False, "pickupable": True},
}
# fmt: off
OBJECT_TYPES_WITH_PROPERTIES = {
"StoveBurner": {"openable": False, "receptacle": True, "pickupable": False},
"Drawer": {"openable": True, "receptacle": True, "pickupable": False},
"CounterTop": {"openable": False, "receptacle": True, "pickupable": False},
"Cabinet": {"openable": True, "receptacle": True, "pickupable": False},
"StoveKnob": {"openable": False, "receptacle": False, "pickupable": False},
"Window": {"openable": False, "receptacle": False, "pickupable": False},
"Sink": {"openable": False, "receptacle": True, "pickupable": False},
"Floor": {"openable": False, "receptacle": True, "pickupable": False},
"Book": {"openable": True, "receptacle": False, "pickupable": True},
"Bottle": {"openable": False, "receptacle": False, "pickupable": True},
"Knife": {"openable": False, "receptacle": False, "pickupable": True},
"Microwave": {"openable": True, "receptacle": True, "pickupable": False},
"Bread": {"openable": False, "receptacle": False, "pickupable": True},
"Fork": {"openable": False, "receptacle": False, "pickupable": True},
"Shelf": {"openable": False, "receptacle": True, "pickupable": False},
"Potato": {"openable": False, "receptacle": False, "pickupable": True},
"HousePlant": {"openable": False, "receptacle": False, "pickupable": False},
"Toaster": {"openable": False, "receptacle": True, "pickupable": False},
"SoapBottle": {"openable": False, "receptacle": False, "pickupable": True},
"Kettle": {"openable": True, "receptacle": False, "pickupable": True},
"Pan": {"openable": False, "receptacle": True, "pickupable": True},
"Plate": {"openable": False, "receptacle": True, "pickupable": True},
"Tomato": {"openable": False, "receptacle": False, "pickupable": True},
"Vase": {"openable": False, "receptacle": False, "pickupable": True},
"GarbageCan": {"openable": False, "receptacle": True, "pickupable": False},
"Egg": {"openable": False, "receptacle": False, "pickupable": True},
"CreditCard": {"openable": False, "receptacle": False, "pickupable": True},
"WineBottle": {"openable": False, "receptacle": False, "pickupable": True},
"Pot": {"openable": False, "receptacle": True, "pickupable": True},
"Spatula": {"openable": False, "receptacle": False, "pickupable": True},
"PaperTowelRoll": {"openable": False, "receptacle": False, "pickupable": True},
"Cup": {"openable": False, "receptacle": True, "pickupable": True},
"Fridge": {"openable": True, "receptacle": True, "pickupable": False},
"CoffeeMachine": {"openable": False, "receptacle": True, "pickupable": False},
"Bowl": {"openable": False, "receptacle": True, "pickupable": True},
"SinkBasin": {"openable": False, "receptacle": True, "pickupable": False},
"SaltShaker": {"openable": False, "receptacle": False, "pickupable": True},
"PepperShaker": {"openable": False, "receptacle": False, "pickupable": True},
"Lettuce": {"openable": False, "receptacle": False, "pickupable": True},
"ButterKnife": {"openable": False, "receptacle": False, "pickupable": True},
"Apple": {"openable": False, "receptacle": False, "pickupable": True},
"DishSponge": {"openable": False, "receptacle": False, "pickupable": True},
"Spoon": {"openable": False, "receptacle": False, "pickupable": True},
"LightSwitch": {"openable": False, "receptacle": False, "pickupable": False},
"Mug": {"openable": False, "receptacle": True, "pickupable": True},
"ShelvingUnit": {"openable": False, "receptacle": True, "pickupable": False},
"Statue": {"openable": False, "receptacle": False, "pickupable": True},
"Stool": {"openable": False, "receptacle": True, "pickupable": False},
"Faucet": {"openable": False, "receptacle": False, "pickupable": False},
"Ladle": {"openable": False, "receptacle": False, "pickupable": True},
"CellPhone": {"openable": False, "receptacle": False, "pickupable": True},
"Chair": {"openable": False, "receptacle": True, "pickupable": False},
"SideTable": {"openable": False, "receptacle": True, "pickupable": False},
"DiningTable": {"openable": False, "receptacle": True, "pickupable": False},
"Pen": {"openable": False, "receptacle": False, "pickupable": True},
"SprayBottle": {"openable": False, "receptacle": False, "pickupable": True},
"Curtains": {"openable": False, "receptacle": False, "pickupable": False},
"Pencil": {"openable": False, "receptacle": False, "pickupable": True},
"Blinds": {"openable": True, "receptacle": False, "pickupable": False},
"GarbageBag": {"openable": False, "receptacle": False, "pickupable": False},
"Safe": {"openable": True, "receptacle": True, "pickupable": False},
"Painting": {"openable": False, "receptacle": False, "pickupable": False},
"Box": {"openable": True, "receptacle": True, "pickupable": True},
"Laptop": {"openable": True, "receptacle": False, "pickupable": True},
"Television": {"openable": False, "receptacle": False, "pickupable": False},
"TissueBox": {"openable": False, "receptacle": False, "pickupable": True},
"KeyChain": {"openable": False, "receptacle": False, "pickupable": True},
"FloorLamp": {"openable": False, "receptacle": False, "pickupable": False},
"DeskLamp": {"openable": False, "receptacle": False, "pickupable": False},
"Pillow": {"openable": False, "receptacle": False, "pickupable": True},
"RemoteControl": {"openable": False, "receptacle": False, "pickupable": True},
"Watch": {"openable": False, "receptacle": False, "pickupable": True},
"Newspaper": {"openable": False, "receptacle": False, "pickupable": True},
"ArmChair": {"openable": False, "receptacle": True, "pickupable": False},
"CoffeeTable": {"openable": False, "receptacle": True, "pickupable": False},
"TVStand": {"openable": False, "receptacle": True, "pickupable": False},
"Sofa": {"openable": False, "receptacle": True, "pickupable": False},
"WateringCan": {"openable": False, "receptacle": False, "pickupable": True},
"Boots": {"openable": False, "receptacle": False, "pickupable": True},
"Ottoman": {"openable": False, "receptacle": True, "pickupable": False},
"Desk": {"openable": False, "receptacle": True, "pickupable": False},
"Dresser": {"openable": False, "receptacle": True, "pickupable": False},
"Mirror": {"openable": False, "receptacle": False, "pickupable": False},
"DogBed": {"openable": False, "receptacle": True, "pickupable": False},
"Candle": {"openable": False, "receptacle": False, "pickupable": True},
"RoomDecor": {"openable": False, "receptacle": False, "pickupable": False},
"Bed": {"openable": False, "receptacle": True, "pickupable": False},
"BaseballBat": {"openable": False, "receptacle": False, "pickupable": True},
"BasketBall": {"openable": False, "receptacle": False, "pickupable": True},
"AlarmClock": {"openable": False, "receptacle": False, "pickupable": True},
"CD": {"openable": False, "receptacle": False, "pickupable": True},
"TennisRacket": {"openable": False, "receptacle": False, "pickupable": True},
"TeddyBear": {"openable": False, "receptacle": False, "pickupable": True},
"Poster": {"openable": False, "receptacle": False, "pickupable": False},
"Cloth": {"openable": False, "receptacle": False, "pickupable": True},
"Dumbbell": {"openable": False, "receptacle": False, "pickupable": True},
"LaundryHamper": {"openable": True, "receptacle": True, "pickupable": False},
"TableTopDecor": {"openable": False, "receptacle": False, "pickupable": True},
"Desktop": {"openable": False, "receptacle": False, "pickupable": False},
"Footstool": {"openable": False, "receptacle": True, "pickupable": True},
"BathtubBasin": {"openable": False, "receptacle": True, "pickupable": False},
"ShowerCurtain": {"openable": True, "receptacle": False, "pickupable": False},
"ShowerHead": {"openable": False, "receptacle": False, "pickupable": False},
"Bathtub": {"openable": False, "receptacle": True, "pickupable": False},
"Towel": {"openable": False, "receptacle": False, "pickupable": True},
"HandTowel": {"openable": False, "receptacle": False, "pickupable": True},
"Plunger": {"openable": False, "receptacle": False, "pickupable": True},
"TowelHolder": {"openable": False, "receptacle": True, "pickupable": False},
"ToiletPaperHanger": {"openable": False, "receptacle": True, "pickupable": False},
"SoapBar": {"openable": False, "receptacle": False, "pickupable": True},
"ToiletPaper": {"openable": False, "receptacle": False, "pickupable": True},
"HandTowelHolder": {"openable": False, "receptacle": True, "pickupable": False},
"ScrubBrush": {"openable": False, "receptacle": False, "pickupable": True},
"Toilet": {"openable": True, "receptacle": True, "pickupable": False},
"ShowerGlass": {"openable": False, "receptacle": False, "pickupable": False},
"ShowerDoor": {"openable": True, "receptacle": False, "pickupable": False},
"AluminumFoil": {"openable": False, "receptacle": False, "pickupable": True},
"VacuumCleaner": {"openable": False, "receptacle": False, "pickupable": False}
}
# fmt: on
def _get_colors(num_colors):
colors = []
for i in np.arange(0., 360., 360. / num_colors):
hue = i/360.
lightness = (50 + np.random.rand() * 10)/100.
saturation = (90 + np.random.rand() * 10)/100.
colors.append(colorsys.hls_to_rgb(hue, lightness, saturation))
random.shuffle(colors)
return colors
REARRANGE_SIM_OBJECT_COLORS = _get_colors(len(OBJECT_TYPES_WITH_PROPERTIES))
REARRANGE_SIM_OBJECTS_COLOR_LOOKUP = {
p: REARRANGE_SIM_OBJECT_COLORS[i] for i, p in enumerate(OBJECT_TYPES_WITH_PROPERTIES)}
PICKUPABLE_OBJECTS = set(
sorted(
[
object_type
for object_type, properties in OBJECT_TYPES_WITH_PROPERTIES.items()
if properties["pickupable"]
]
)
)
OPENABLE_OBJECTS = set(
sorted(
[
object_type
for object_type, properties in OBJECT_TYPES_WITH_PROPERTIES.items()
if properties["openable"] and not properties["pickupable"]
]
)
)
RECEPTACLE_OBJECTS = set(
sorted(
[
object_type
for object_type, properties in OBJECT_TYPES_WITH_PROPERTIES.items()
if properties["receptacle"] and not properties["pickupable"]
]
)
)
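# Note: the three sets above are derived purely from OBJECT_TYPES_WITH_PROPERTIES.
# An object type that is both pickupable and openable (e.g. "Box") appears only
# in PICKUPABLE_OBJECTS, since OPENABLE_OBJECTS and RECEPTACLE_OBJECTS exclude
# pickupable types.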
MAX_OPEN_RETRIES_REARRANGE = 10
MAX_MOVE_RETRIES_REARRANGE = 150
CONTROLLER_COMMIT_ID = "6f13532966080a051127167c6eb2117e47d96f3a"
# "62bba7e2537fb6aaf2ed19125b9508c8b99bced3"
ROOMR_CONTROLLER_COMMIT_ID = "62bba7e2537fb6aaf2ed19125b9508c8b99bced3"#"f46d5ec42b65fdae9d9a48db2b4fb6d25afbd1fe"
OBJECT_TYPES_TO_NOT_MOVE = {
"Apple",
"Bread",
"Cloth",
"HandTowel",
"KeyChain",
"Lettuce",
"Pillow",
"Potato",
"Tomato",
}
OBJECT_TYPES_THAT_CAN_HAVE_IDENTICAL_MESHES = [
"AluminumFoil",
"CD",
"Dumbbell",
"Ladle",
"Vase",
]
ACTION_NEGATIONS = {
'MoveAhead': 'MoveBack',
'MoveBack': 'MoveAhead',
'RotateRight': 'RotateLeft',
'RotateLeft': 'RotateRight',
'LookDown': 'LookUp',
'LookUp': 'LookDown',
'MoveLeft': 'MoveRight',
'MoveRight': 'MoveLeft'
}
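# Illustrative use of ACTION_NEGATIONS: a recorded action sequence can be
# undone by negating it in reverse order, e.g.
#   [ACTION_NEGATIONS[a] for a in reversed(['MoveAhead', 'RotateRight'])]
#   -> ['RotateLeft', 'MoveBack']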
EXPLORATION_ACTION_ORDER = (
'MoveBack',
'MoveAhead',
'MoveLeft',
'MoveRight',
'RotateRight',
'RotateLeft',
'LookUp',
'LookDown'
# NOTE @samir add look up and look down here and also add it to the environment
)
# OMNI_CATEGORIES is a list of all of the lvis categories plus all of the unique ithor categories
OMNI_CATEGORIES = [
'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock', 'alcohol', 'alligator', 'almond', 'ambulance',
'amplifier', 'anklet', 'antenna', 'apple', 'applesauce', 'apricot', 'apron', 'aquarium', 'arctic_(type_of_shoe)',
'armband', 'armchair', 'armoire', 'armor', 'artichoke', 'trash_can', 'ashtray', 'asparagus', 'atomizer', 'avocado',
'award', 'awning', 'ax', 'baboon', 'baby_buggy', 'basketball_backboard', 'backpack', 'handbag', 'suitcase', 'bagel',
'bagpipe', 'baguet', 'bait', 'ball', 'ballet_skirt', 'balloon', 'bamboo', 'banana', 'Band_Aid', 'bandage', 'bandanna',
'banjo', 'banner', 'barbell', 'barge', 'barrel', 'barrette', 'barrow', 'baseball_base', 'baseball', 'baseball_bat',
'baseball_cap', 'baseball_glove', 'basket', 'basketball', 'bass_horn', 'bat_(animal)', 'bath_mat', 'bath_towel', 'bathrobe',
'bathtub', 'batter_(food)', 'battery', 'beachball', 'bead', 'bean_curd', 'beanbag', 'beanie', 'bear', 'bed', 'bedpan',
'bedspread', 'cow', 'beef_(food)', 'beeper', 'beer_bottle', 'beer_can', 'beetle', 'bell', 'bell_pepper', 'belt', 'belt_buckle',
'bench', 'beret', 'bib', 'Bible', 'bicycle', 'visor', 'billboard', 'binder', 'binoculars', 'bird', 'birdfeeder', 'birdbath',
'birdcage', 'birdhouse', 'birthday_cake', 'birthday_card', 'pirate_flag', 'black_sheep', 'blackberry', 'blackboard', 'blanket',
'blazer', 'blender', 'blimp', 'blinker', 'blouse', 'blueberry', 'gameboard', 'boat', 'bob', 'bobbin', 'bobby_pin', 'boiled_egg',
'bolo_tie', 'deadbolt', 'bolt', 'bonnet', 'book', 'bookcase', 'booklet', 'bookmark', 'boom_microphone', 'boot', 'bottle',
'bottle_opener', 'bouquet', 'bow_(weapon)', 'bow_(decorative_ribbons)', 'bow-tie', 'bowl', 'pipe_bowl', 'bowler_hat', 'bowling_ball',
'box', 'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere', 'bread-bin', 'bread', 'breechcloth', 'bridal_gown',
'briefcase', 'broccoli', 'broach', 'broom', 'brownie', 'brussels_sprouts', 'bubble_gum', 'bucket', 'horse_buggy', 'bull', 'bulldog',
'bulldozer', 'bullet_train', 'bulletin_board', 'bulletproof_vest', 'bullhorn', 'bun', 'bunk_bed', 'buoy', 'burrito', 'bus_(vehicle)',
'business_card', 'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car', 'cabinet', 'locker', 'cake', 'calculator',
'calendar', 'calf', 'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)', 'can', 'can_opener', 'candle', 'candle_holder',
'candy_bar', 'candy_cane', 'walking_cane', 'canister', 'canoe', 'cantaloup', 'canteen', 'cap_(headwear)', 'bottle_cap', 'cape',
'cappuccino', 'car_(automobile)', 'railcar_(part_of_a_train)', 'elevator_car', 'car_battery', 'identity_card', 'card', 'cardigan',
'cargo_ship', 'carnation', 'horse_carriage', 'carrot', 'tote_bag', 'cart', 'carton', 'cash_register', 'casserole', 'cassette', 'cast',
'cat', 'cauliflower', 'cayenne_(spice)', 'CD_player', 'celery', 'cellular_telephone', 'chain_mail', 'chair', 'chaise_longue', 'chalice',
'chandelier', 'chap', 'checkbook', 'checkerboard', 'cherry', 'chessboard', 'chicken_(animal)', 'chickpea', 'chili_(vegetable)', 'chime',
'chinaware', 'crisp_(potato_chip)', 'poker_chip', 'chocolate_bar', 'chocolate_cake', 'chocolate_milk', 'chocolate_mousse', 'choker', 'chopping_board',
'chopstick', 'Christmas_tree', 'slide', 'cider', 'cigar_box', 'cigarette', 'cigarette_case', 'cistern', 'clarinet', 'clasp', 'cleansing_agent',
'cleat_(for_securing_rope)', 'clementine', 'clip', 'clipboard', 'clippers_(for_plants)', 'cloak', 'clock', 'clock_tower', 'clothes_hamper', 'clothespin',
'clutch_bag', 'coaster', 'coat', 'coat_hanger', 'coatrack', 'cock', 'cockroach', 'cocoa_(beverage)', 'coconut', 'coffee_maker', 'coffee_table',
'coffeepot', 'coil', 'coin', 'colander', 'coleslaw', 'coloring_material', 'combination_lock', 'pacifier', 'comic_book', 'compass', 'computer_keyboard',
'condiment', 'cone', 'control', 'convertible_(automobile)', 'sofa_bed', 'cooker', 'cookie', 'cooking_utensil', 'cooler_(for_food)',
'cork_(bottle_plug)', 'corkboard', 'corkscrew', 'edible_corn', 'cornbread', 'cornet', 'cornice', 'cornmeal', 'corset', 'costume', 'cougar',
'coverall', 'cowbell', 'cowboy_hat', 'crab_(animal)', 'crabmeat', 'cracker', 'crape', 'crate', 'crayon', 'cream_pitcher', 'crescent_roll', 'crib',
'crock_pot', 'crossbar', 'crouton', 'crow', 'crowbar', 'crown', 'crucifix', 'cruise_ship', 'police_cruiser', 'crumb', 'crutch', 'cub_(animal)',
'cube', 'cucumber', 'cufflink', 'cup', 'trophy_cup', 'cupboard', 'cupcake', 'hair_curler', 'curling_iron', 'curtain', 'cushion', 'cylinder',
'cymbal', 'dagger', 'dalmatian', 'dartboard', 'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk', 'detergent', 'diaper', 'diary',
'die', 'dinghy', 'dining_table', 'tux', 'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher', 'dishwasher_detergent', 'dispenser',
'diving_board', 'Dixie_cup', 'dog', 'dog_collar', 'doll', 'dollar', 'dollhouse', 'dolphin', 'domestic_ass', 'doorknob', 'doormat', 'doughnut',
'dove', 'dragonfly', 'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit', 'dresser', 'drill', 'drone', 'dropper',
'drum_(musical_instrument)', 'drumstick', 'duck', 'duckling', 'duct_tape', 'duffel_bag', 'dumbbell', 'dumpster', 'dustpan', 'eagle',
'earphone', 'earplug', 'earring', 'easel', 'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater', 'eggplant', 'electric_chair',
'refrigerator', 'elephant', 'elk', 'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan', 'faucet', 'fedora', 'ferret', 'Ferris_wheel',
'ferry', 'fig_(fruit)', 'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)', 'fire_alarm', 'fire_engine', 'fire_extinguisher', 'fire_hose',
'fireplace', 'fireplug', 'first-aid_kit', 'fish', 'fish_(food)', 'fishbowl', 'fishing_rod', 'flag', 'flagpole', 'flamingo', 'flannel', 'flap',
'flash', 'flashlight', 'fleece', 'flip-flop_(sandal)', 'flipper_(footwear)', 'flower_arrangement', 'flute_glass', 'foal', 'folding_chair',
'food_processor', 'football_(American)', 'football_helmet', 'footstool', 'fork', 'forklift', 'freight_car', 'French_toast', 'freshener',
'frisbee', 'frog', 'fruit_juice', 'frying_pan', 'fudge', 'funnel', 'futon', 'gag', 'garbage', 'garbage_truck', 'garden_hose', 'gargle', 'gargoyle',
'garlic', 'gasmask', 'gazelle', 'gelatin', 'gemstone', 'generator', 'giant_panda', 'gift_wrap', 'ginger', 'giraffe', 'cincture', 'glass_(drink_container)',
'globe', 'glove', 'goat', 'goggles', 'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 'goose', 'gorilla', 'gourd', 'grape', 'grater', 'gravestone',
'gravy_boat', 'green_bean', 'green_onion', 'griddle', 'grill', 'grits', 'grizzly', 'grocery_bag', 'guitar', 'gull', 'gun', 'hairbrush', 'hairnet',
'hairpin', 'halter_top', 'ham', 'hamburger', 'hammer', 'hammock', 'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel', 'handcart',
'handcuff', 'handkerchief', 'handle', 'handsaw', 'hardback_book', 'harmonium', 'hat', 'hatbox', 'veil', 'headband', 'headboard', 'headlight',
'headscarf', 'headset', 'headstall_(for_horses)', 'heart', 'heater', 'helicopter', 'helmet', 'heron', 'highchair', 'hinge', 'hippopotamus',
'hockey_stick', 'hog', 'home_plate_(baseball)', 'honey', 'fume_hood', 'hook', 'hookah', 'hornet', 'horse', 'hose', 'hot-air_balloon', 'hotplate',
'hot_sauce', 'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear', 'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate',
'igniter', 'inhaler', 'iPod', 'iron_(for_clothing)', 'ironing_board', 'jacket', 'jam', 'jar', 'jean', 'jeep', 'jelly_bean', 'jersey', 'jet_plane',
'jewel', 'jewelry', 'joystick', 'jumpsuit', 'kayak', 'keg', 'kennel', 'kettle', 'key', 'keycard', 'kilt', 'kimono', 'kitchen_sink', 'kitchen_table',
'kite', 'kitten', 'kiwi_fruit', 'knee_pad', 'knife', 'knitting_needle', 'knob', 'knocker_(on_a_door)', 'koala', 'lab_coat', 'ladder', 'ladle',
'ladybug', 'lamb_(animal)', 'lamb-chop', 'lamp', 'lamppost', 'lampshade', 'lantern', 'lanyard', 'laptop_computer', 'lasagna', 'latch', 'lawn_mower',
'leather', 'legging_(clothing)', 'Lego', 'legume', 'lemon', 'lemonade', 'lettuce', 'license_plate', 'life_buoy', 'life_jacket', 'lightbulb',
'lightning_rod', 'lime', 'limousine', 'lion', 'lip_balm', 'liquor', 'lizard', 'log', 'lollipop', 'speaker_(stero_equipment)', 'loveseat', 'machine_gun',
'magazine', 'magnet', 'mail_slot', 'mailbox_(at_home)', 'mallard', 'mallet', 'mammoth', 'manatee', 'mandarin_orange', 'manger', 'manhole', 'map', 'marker',
'martini', 'mascot', 'mashed_potato', 'masher', 'mask', 'mast', 'mat_(gym_equipment)', 'matchbox', 'mattress', 'measuring_cup', 'measuring_stick',
'meatball', 'medicine', 'melon', 'microphone', 'microscope', 'microwave_oven', 'milestone', 'milk', 'milk_can', 'milkshake', 'minivan', 'mint_candy',
'mirror', 'mitten', 'mixer_(kitchen_tool)', 'money', 'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor', 'motor_scooter', 'motor_vehicle',
'motorcycle', 'mound_(baseball)', 'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 'mushroom', 'music_stool', 'musical_instrument',
'nailfile', 'napkin', 'neckerchief', 'necklace', 'necktie', 'needle', 'nest', 'newspaper', 'newsstand', 'nightshirt', 'nosebag_(for_animals)',
'noseband_(for_animals)', 'notebook', 'notepad', 'nut', 'nutcracker', 'oar', 'octopus_(food)', 'octopus_(animal)', 'oil_lamp', 'olive_oil',
'omelet', 'onion', 'orange_(fruit)', 'orange_juice', 'ostrich', 'ottoman', 'oven', 'overalls_(clothing)', 'owl', 'packet', 'inkpad', 'pad',
'paddle', 'padlock', 'paintbrush', 'painting', 'pajamas', 'palette', 'pan_(for_cooking)', 'pan_(metal_container)', 'pancake', 'pantyhose',
'papaya', 'paper_plate', 'paper_towel', 'paperback_book', 'paperweight', 'parachute', 'parakeet', 'parasail_(sports)', 'parasol', 'parchment',
'parka', 'parking_meter', 'parrot', 'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport', 'pastry', 'patty_(food)', 'pea_(food)',
'peach', 'peanut_butter', 'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'wooden_leg', 'pegboard', 'pelican', 'pen', 'pencil', 'pencil_box',
'pencil_sharpener', 'pendulum', 'penguin', 'pennant', 'penny_(coin)', 'pepper', 'pepper_mill', 'perfume', 'persimmon', 'person', 'pet',
'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano', 'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow', 'pin_(non_jewelry)',
'pineapple', 'pinecone', 'ping-pong_ball', 'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)', 'pitcher_(vessel_for_liquid)', 'pitchfork',
'pizza', 'place_mat', 'plate', 'platter', 'playpen', 'pliers', 'plow_(farm_equipment)', 'plume', 'pocket_watch', 'pocketknife', 'poker_(fire_stirring_tool)',
'pole', 'polo_shirt', 'poncho', 'pony', 'pool_table', 'pop_(soda)', 'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot', 'potato',
'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn', 'pretzel', 'printer', 'projectile_(weapon)', 'projector', 'propeller', 'prune', 'pudding',
'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher', 'puppet', 'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit', 'race_car', 'racket',
'radar', 'radiator', 'radio_receiver', 'radish', 'raft', 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat', 'razorblade', 'reamer_(juicer)',
'rearview_mirror', 'receipt', 'recliner', 'record_player', 'reflector', 'remote_control', 'rhinoceros', 'rib_(food)', 'rifle', 'ring', 'river_boat',
'road_map', 'robe', 'rocking_chair', 'rodent', 'roller_skate', 'Rollerblade', 'rolling_pin', 'root_beer', 'router_(computer_equipment)', 'rubber_band',
'runner_(carpet)', 'plastic_bag', 'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag', 'safety_pin', 'sail', 'salad', 'salad_plate', 'salami',
'salmon_(fish)', 'salmon_(food)', 'salsa', 'saltshaker', 'sandal_(type_of_shoe)', 'sandwich', 'satchel', 'saucepan', 'saucer', 'sausage', 'sawhorse',
'saxophone', 'scale_(measuring_instrument)', 'scarecrow', 'scarf', 'school_bus', 'scissors', 'scoreboard', 'scraper', 'screwdriver', 'scrubbing_brush',
'sculpture', 'seabird', 'seahorse', 'seaplane', 'seashell', 'sewing_machine', 'shaker', 'shampoo', 'shark', 'sharpener', 'Sharpie', 'shaver_(electric)',
'shaving_cream', 'shawl', 'shears', 'sheep', 'shepherd_dog', 'sherbert', 'shield', 'shirt', 'shoe', 'shopping_bag', 'shopping_cart', 'short_pants',
'shot_glass', 'shoulder_bag', 'shovel', 'shower_head', 'shower_cap', 'shower_curtain', 'shredder_(for_paper)', 'signboard', 'silo', 'sink', 'skateboard',
'skewer', 'ski', 'ski_boot', 'ski_parka', 'ski_pole', 'skirt', 'skullcap', 'sled', 'sleeping_bag', 'sling_(bandage)', 'slipper_(footwear)', 'smoothie',
'snake', 'snowboard', 'snowman', 'snowmobile', 'soap', 'soccer_ball', 'sock', 'sofa', 'softball', 'solar_array', 'sombrero', 'soup', 'soup_bowl',
'soupspoon', 'sour_cream', 'soya_milk', 'space_shuttle', 'sparkler_(fireworks)', 'spatula', 'spear', 'spectacles', 'spice_rack', 'spider', 'crawfish',
'sponge', 'spoon', 'sportswear', 'spotlight', 'squid_(food)', 'squirrel', 'stagecoach', 'stapler_(stapling_machine)', 'starfish', 'statue_(sculpture)',
'steak_(food)', 'steak_knife', 'steering_wheel', 'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew', 'stirrer', 'stirrup', 'stool', 'stop_sign',
'brake_light', 'stove', 'strainer', 'strap', 'straw_(for_drinking)', 'strawberry', 'street_sign', 'streetlight', 'string_cheese', 'stylus', 'subwoofer',
'sugar_bowl', 'sugarcane_(plant)', 'suit_(clothing)', 'sunflower', 'sunglasses', 'sunhat', 'surfboard', 'sushi', 'mop', 'sweat_pants', 'sweatband',
'sweater', 'sweatshirt', 'sweet_potato', 'swimsuit', 'sword', 'syringe', 'Tabasco_sauce', 'table-tennis_table', 'table', 'table_lamp', 'tablecloth',
'tachometer', 'taco', 'tag', 'taillight', 'tambourine', 'army_tank', 'tank_(storage_vessel)', 'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)',
'tape_measure', 'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup', 'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth',
'telephone_pole', 'telephoto_lens', 'television_camera', 'television_set', 'tennis_ball', 'tennis_racket', 'tequila', 'thermometer', 'thermos_bottle',
'thermostat', 'thimble', 'thread', 'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer', 'tinfoil', 'tinsel', 'tissue_paper', 'toast_(food)',
'toaster', 'toaster_oven', 'toilet', 'toilet_tissue', 'tomato', 'tongs', 'toolbox', 'toothbrush', 'toothpaste', 'toothpick', 'cover', 'tortilla',
'tow_truck', 'towel', 'towel_rack', 'toy', 'tractor_(farm_equipment)', 'traffic_light', 'dirt_bike', 'trailer_truck', 'train_(railroad_vehicle)',
'trampoline', 'tray', 'trench_coat', 'triangle_(musical_instrument)', 'tricycle', 'tripod', 'trousers', 'truck', 'truffle_(chocolate)', 'trunk',
'vat', 'turban', 'turkey_(food)', 'turnip', 'turtle', 'turtleneck_(clothing)', 'typewriter', 'umbrella', 'underwear', 'unicycle', 'urinal', 'urn',
'vacuum_cleaner', 'vase', 'vending_machine', 'vent', 'vest', 'videotape', 'vinegar', 'violin', 'vodka', 'volleyball', 'vulture', 'waffle',
'waffle_iron', 'wagon', 'wagon_wheel', 'walking_stick', 'wall_clock', 'wall_socket', 'wallet', 'walrus', 'wardrobe', 'washbasin', 'automatic_washer',
'watch', 'water_bottle', 'water_cooler', 'water_faucet', 'water_heater', 'water_jug', 'water_gun', 'water_scooter', 'water_ski', 'water_tower',
'watering_can', 'watermelon', 'weathervane', 'webcam', 'wedding_cake', 'wedding_ring', 'wet_suit', 'wheel', 'wheelchair', 'whipped_cream', 'whistle',
'wig', 'wind_chime', 'windmill', 'window_box_(for_plants)', 'windshield_wiper', 'windsock', 'wine_bottle', 'wine_bucket', 'wineglass', 'blinder_(for_horses)',
'wok', 'wolf', 'wooden_spoon', 'wreath', 'wrench', 'wristband', 'wristlet', 'yacht', 'yogurt', 'yoke_(animal_equipment)', 'zebra', 'zucchini',
'basket_ball', 'blinds', 'butter_knife', 'cd', 'cloth', 'counter_top', 'credit_card', 'curtains', 'house_plant', 'key_chain', 'clothes_hamper_lid',
'light_switch', 'plunger', 'safe', 'shelf', 'shower_door', 'shower_glass', 'sink_basin', 'spray_bottle', 'stove_burner', 'stove_burner', 'side_table',
'tissue_box', 'toilet_paper_hanger', 'television_set_stand', 'window', 'apple_sliced', 'tomato_sliced', 'lettuce_sliced', 'egg_cracked', 'bread_sliced',
'potato_sliced']
# ITHOR_TO_OMNI is a dictionary mapping iTHOR object type names to categories in the omni vocabulary list above
ITHOR_TO_OMNI = {
"AlarmClock": "alarm_clock",
"Apple": "apple",
"ArmChair": "armchair",
"BaseballBat": "baseball_bat",
"Bathtub": "bathtub",
"BathtubBasin": "bathtub",
"BasketBall": "basket_ball",
"Bed": "bed",
"Blinds": "blinds",
"Book": "book",
"Boots": "boot",
"ButterKnife": "butter_knife",
"Bowl": "bowl",
"Box": "box",
"Bread": "bread",
"Cabinet": "cabinet",
"Candle": "candle",
"Cart": "cart",
"CellPhone": "cellular_telephone",
"CoffeeMachine": "coffee_maker",
"CounterTop": "counter_top",
"Chair": "chair",
"CD": "cd",
"Cup": "cup",
"Curtains": "curtains",
"Cloth": "cloth",
"CreditCard": "credit_card",
"Desk": "desk",
"DeskLamp": "table_lamp",
"DishSponge": "sponge",
"Drawer": "drawer",
"Dresser": "dresser",
"Egg": "egg",
"Footstool": "footstool",
"Fork": "fork",
"FloorLamp": "lamp",
"Fridge": "refrigerator",
"GarbageCan": "trash_can",
"Glassbottle": "bottle",
"HandTowel": "hand_towel",
"HandTowelHolder": "towel_rack",
"HousePlant": "house_plant",
"Kettle": "kettle",
"Knife": "knife",
"KeyChain": "key_chain",
"Ladle": "ladle",
"Lettuce": "lettuce",
"Laptop": "laptop_computer",
"LaundryHamper": "clothes_hamper",
"LaundryHamperLid": "clothes_hamper_lid",
"LightSwitch": "light_switch",
"Mirror": "mirror",
"Mug": "mug",
"Microwave": "microwave_oven",
"Newspaper": "newspaper",
"Ottoman": "ottoman",
"Painting": "painting",
"PaperTowel": "paper_towel",
"Pen": "pen",
"Pencil": "pencil",
"Pillow": "pillow",
"Plate": "plate",
"Poster": "poster",
"Pot": "pot",
"Pan": "frying_pan",
"Potato": "potato",
"PaperTowelRoll": "paper_towel",
"PepperShaker": "pepper_mill",
"Plunger": "plunger",
"RemoteControl": "remote_control",
"Sink": "sink",
"SinkBasin": "sink_basin",
"Sofa": "sofa",
"Spatula": "spatula",
"Spoon": "spoon",
"Safe": "safe",
"SoapBar": "soap",
"SoapBottle": "soap",
"SaltShaker": "saltshaker",
"ScrubBrush": "scrubbing_brush",
"Shelf": "shelf",
"ShowerDoor": "shower_door",
"ShowerGlass": "shower_glass",
"SprayBottle": "spray_bottle",
"Statue": "statue_(sculpture)",
"StoveBurner": "stove_burner",
"StoveKnob": "stove_burner",
"SideTable": "side_table",
"DiningTable": "dining_table",
"CoffeeTable": "coffee_table",
"TeddyBear": "teddy_bear",
"TennisRacket": "tennis_racket",
"Toaster": "toaster",
"Toilet": "toilet",
"Tomato": "tomato",
"Towel": "towel",
"Television": "television_set",
"TissueBox": "tissue_box",
"ToiletPaper": "toilet_tissue",
"ToiletPaperRoll": "toilet_tissue",
"ToiletPaperHanger": "toilet_paper_hanger",
"TowelHolder": "towel_rack",
"TVStand": "television_set_stand",
"Vase": "vase",
"Watch": "watch",
"WateringCan": "watering_can",
"WineBottle": "wine_bottle",
"Window": "window",
"ShowerCurtain": "shower_curtain",
"Lamp": "lamp",
"ShowerHead": "shower_head",
"Faucet": "faucet",
"AppleSliced": "apple_sliced",
"TomatoSliced": "tomato_sliced",
"LettuceSliced": "lettuce_sliced",
"EggCracked": "egg_cracked",
"BreadSliced": "bread_sliced",
"PotatoSliced": "potato_sliced"
}
# OMNI_TO_ITHOR is the inverse mapping of ITHOR_TO_OMNI (duplicate omni values collapse to a single iTHOR key)
OMNI_TO_ITHOR = {v: k for k, v in ITHOR_TO_OMNI.items()}
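# Illustrative sketch (not part of the original constants): a quick demonstration of
# how the two dictionaries above translate between the iTHOR and omni vocabularies.
# The helper name is hypothetical; the keys used ("Fridge", "refrigerator") come from
# the mappings above.
def _example_translate_category():
    # iTHOR object type -> omni category
    assert ITHOR_TO_OMNI["Fridge"] == "refrigerator"
    # omni category -> iTHOR object type. Note that duplicate omni values (e.g.
    # "stove_burner" for both "StoveBurner" and "StoveKnob") collapse to a single
    # iTHOR key in the inverted dictionary.
    assert OMNI_TO_ITHOR["refrigerator"] == "Fridge"
    return OMNI_TO_ITHOR["refrigerator"]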
| CSR-main | src/simulation/constants.py |
CSR-main | src/simulation/__init__.py |
|
import json
import os
import random
from itertools import combinations
from src.simulation.constants import CONTROLLER_COMMIT_ID
from string import ascii_letters
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
import torch
from ai2thor.controller import Controller
from PIL import Image
from src.lightning.modules.receptacle_module import ReceptacleModule
from src.shared.constants import (CLASSES_TO_IGNORE, IMAGE_SIZE, NORMALIZE_RGB_MEAN,
NORMALIZE_RGB_STD)
from src.shared.utils import check_none_or_empty
from src.simulation.shortest_path_navigator import (AgentLocKeyType,
ShortestPathNavigatorTHOR)
from torchvision.transforms.transforms import Compose, Normalize, ToTensor
class AgentReceptacle(object):
def __init__(
self,
scene_name='FloorPlan1',
image_size=IMAGE_SIZE,
random_start=True,
trajectory=None,
model_path=None
) -> None:
super().__init__()
self.controller = Controller(
commit_id=CONTROLLER_COMMIT_ID,
renderDepthImage=True,
renderInstanceSegmentation=True,
width=image_size,
height=image_size
)
if random_start and trajectory is not None:
raise ValueError(
'cannot set `random_start=True` and also pass a predefined `trajectory`')
self.model = None
self.set_scene(scene_name, random_start=random_start,
trajectory=trajectory, model_path=model_path)
self.inference_transform = Compose([
ToTensor(),
Normalize(NORMALIZE_RGB_MEAN, NORMALIZE_RGB_STD)
])
def set_scene(self, scene_name, random_start=True, trajectory=None, model_path=None):
self.controller.reset(scene=scene_name)
self.spn = ShortestPathNavigatorTHOR(self.controller)
self.reachable_spots = self.spn.reachable_points_with_rotations_and_horizons()
self.steps = 0
if random_start:
start_index = random.randint(0, len(self.reachable_spots)-1)
start_key = self.spn.get_key(self.reachable_spots[start_index])
self.spn.controller.step(
action='Teleport',
position=dict(x=start_key[0], y=0.9, z=start_key[1]),
rotation=dict(x=0, y=start_key[2], z=0),
horizon=start_key[3],
standing=True
)
self.target_key = None
self.relations_last_step = None
self.relationship_memory = {}
self.trajectory_memory = []
self.object_position_memory = {}
self.predefined_trajectory = None
if trajectory is not None:
start_key = trajectory[0][0]
self.spn.controller.step(
action='Teleport',
position=dict(x=start_key[0], y=0.9, z=start_key[1]),
rotation=dict(x=0, y=start_key[2], z=0),
horizon=start_key[3],
standing=True
)
self.target_key = tuple(trajectory[-1][-1])
self.predefined_trajectory = trajectory
if not check_none_or_empty(model_path):
self.model = ReceptacleModule.load_from_checkpoint(model_path)
self.model.eval()
self.model.freeze()
if self.model is not None:
self.agg_confidences = {}
self.relations_last_step_pred = None
self.relationship_memory_pred = {}
def set_random_target(self):
target_index = random.randint(0, len(self.reachable_spots)-1)
self.target_key = self.spn.get_key(self.reachable_spots[target_index])
return self.target_key
def set_target(self, target_key: AgentLocKeyType):
self.target_key = target_key
def take_next_action(self):
if self.target_key is None:
raise ValueError(
'self.target_key must be set before we can navigate')
curr_key = self.spn.get_key(self.spn.last_event.metadata["agent"])
action = None
while 1:
action = None
if self.predefined_trajectory is not None:
action = self.predefined_trajectory[self.steps][1]
else:
action = self.spn.shortest_path_next_action(
curr_key, self.target_key)
event = self.spn.controller.step(action=action)
if not event.metadata['lastActionSuccess']:
if self.predefined_trajectory is None:
self.spn.update_graph_with_failed_action(action)
continue
else:
raise ValueError(
'using predefined trajectory, but action is failing')
if self.predefined_trajectory is not None:
assert self.spn.get_key(self.spn.last_event.metadata["agent"]) == tuple(
self.predefined_trajectory[self.steps][2])
self.steps += 1
self.__update_relations_last_step()
self.trajectory_memory.append(
(curr_key, action, self.spn.get_key(self.spn.last_event.metadata["agent"])))
return event
def dump_relation_digraph_accumulated(self, save_path):
with open(save_path, 'w') as f:
json.dump(self.relationship_memory, f, indent=4)
def dump_mask_instances_last_step(self, save_dir, image_prefix):
for mask_instance in self.relations_last_step:
mask = self.spn.last_event.instance_masks[mask_instance]
Image.fromarray(mask).save(os.path.join(
save_dir, f'{image_prefix}_{mask_instance}.png'), 'PNG')
def dump_adjacency_gt(self, save_dir, image_prefix):
if len(self.relationship_memory) == 0:
return
sns.set_theme(style="white")
# Create the pandas DataFrame
df = nx.to_pandas_adjacency(self.get_gt_nx_digraph(), dtype=int)
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(df, cmap='jet', vmax=1.0, center=0.5,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
plt.tight_layout()
plt.savefig(os.path.join(save_dir, f'{image_prefix}.png'))
def at_goal(self):
return self.spn.get_key(self.spn.last_event.metadata["agent"]) == self.target_key
def evaluate_accuracy_vs_confidence(self, threshold):
if self.model is None:
raise ValueError('model is None')
assert len(self.relationship_memory) == len(
self.relationship_memory_pred)
thresholds = [0.0, 0.5, 0.75, 0.85, 0.95,
0.96, 0.97, 0.98, 0.99, 0.995, 1.0]
gt_scores = []
confident_wrong = set()
unrelated_wrong = set()
for t in thresholds:
denom = len(self.relationship_memory)
if denom == 0:
return None, None, None, None
num = 0
for obj in self.relationship_memory:
if obj in self.agg_confidences and self.agg_confidences[obj] > t:
if self.relationship_memory[obj] == self.relationship_memory_pred[obj]:
num += 1
elif self.agg_confidences[obj] > threshold:
confident_wrong.add(
(obj, self.relationship_memory_pred[obj]))
else:
# case where we are implicitly saying there is no relationship for this object
                    if self.relationship_memory[obj] is None:
num += 1
elif obj in self.agg_confidences and self.agg_confidences[obj] < threshold:
unrelated_wrong.add(obj)
gt_scores.append(float(num) / denom)
return thresholds, gt_scores, confident_wrong, unrelated_wrong
def get_gt_nx_digraph(self):
G = nx.DiGraph(directed=True)
for src in self.relationship_memory:
G.add_node(src)
for src in self.relationship_memory:
if self.relationship_memory[src] is not None:
G.add_edge(src, self.relationship_memory[src])
return G
def __create_inference_minibatch(self):
pairs = list(combinations(self.relations_last_step.keys(), 2))
image = self.inference_transform(
Image.fromarray(self.spn.last_event.frame))
# fill the gt for these pairs
mini_batch_gt = []
for p in pairs:
if self.relations_last_step[p[0]] == p[1]:
mini_batch_gt.append(1)
elif self.relations_last_step[p[1]] == p[0]:
mini_batch_gt.append(2)
else:
mini_batch_gt.append(0)
masks = {}
for mask_instance in self.relations_last_step:
mask = self.spn.last_event.instance_masks[mask_instance]
masks[mask_instance] = torch.from_numpy(
mask).unsqueeze(0)
input_tensor = torch.empty(
(len(pairs), 5, image.shape[1], image.shape[2]))
for i, p in enumerate(pairs):
input_tensor[i] = torch.cat((image, masks[p[0]], masks[p[1]]), 0)
return input_tensor, pairs, mini_batch_gt
def __check_object_visible(self, object_metadata, ignore_classes=CLASSES_TO_IGNORE):
for c in ignore_classes:
key = c + '|'
if key in object_metadata['objectId']:
return False
return object_metadata['visible'] and object_metadata['objectId'] in self.spn.last_event.instance_masks
def __update_relations_last_step(self):
relations = {}
for entry in self.spn.last_event.metadata['objects']:
if self.__check_object_visible(entry):
self.object_position_memory[entry['objectId']
] = entry['position']
if entry['parentReceptacles'] is None:
relations[entry['objectId']] = None
else:
for parent_id in entry['parentReceptacles']:
for entry2 in self.spn.last_event.metadata['objects']:
if entry2['objectId'] == parent_id and \
entry['objectId'] in entry2['receptacleObjectIds'] and \
self.__check_object_visible(entry2):
relations[entry['objectId']] = parent_id
break
else:
relations[entry['objectId']] = None
self.relations_last_step = relations
for r in relations:
if r not in self.relationship_memory or self.relationship_memory[r] is None:
self.relationship_memory[r] = relations[r]
if self.model is not None:
input_tensor, pairs, _ = self.__create_inference_minibatch()
self.relations_last_step_pred = {}
if len(pairs) == 0:
for r in relations:
self.relations_last_step_pred[r] = None
if r not in self.relationship_memory_pred:
self.relationship_memory_pred[r] = None
assert len(self.relationship_memory) == len(
self.relationship_memory_pred)
return
probabilities = torch.softmax(self.model(input_tensor), dim=1)
conf, preds = torch.max(probabilities, dim=1)
for obj in self.relations_last_step:
self.relations_last_step_pred[obj] = None
for obj in self.relationship_memory:
if obj not in self.relationship_memory_pred:
self.relationship_memory_pred[obj] = None
if self.relationship_memory[obj] is not None \
and self.relationship_memory[obj] not in self.relationship_memory_pred:
self.relationship_memory_pred[self.relationship_memory[obj]] = None
assert len(self.relationship_memory) == len(
self.relationship_memory_pred)
for i, p in enumerate(pairs):
if preds[i].item() == 0:
continue
child = None
parent = None
if preds[i].item() == 1:
child, parent = p
if preds[i].item() == 2:
parent, child = p
if child in self.agg_confidences:
if conf[i].item() > self.agg_confidences[child]:
self.relations_last_step_pred[child] = parent
self.agg_confidences[child] = conf[i].item()
self.relationship_memory_pred[child] = parent
else:
self.relations_last_step_pred[child] = parent
self.agg_confidences[child] = conf[i].item()
self.relationship_memory_pred[child] = parent
assert len(self.relationship_memory) == len(
self.relationship_memory_pred)
| CSR-main | src/simulation/agent_receptacle.py |
import itertools
import json
import os
from time import time
import numpy as np
import src.dataloaders.augmentations as A
import torch
import torch.nn.functional as F
import torchvision.transforms as T
from PIL import Image, ImageDraw
from pytorch_lightning import seed_everything
from src.dataloaders.roomr_dataset_utils import (find_waypoint_plan,
get_rearrange_task_spec)
from src.shared.constants import CLASSES_TO_IGNORE, IMAGE_SIZE
from src.simulation.constants import ROOMR_CONTROLLER_COMMIT_ID
from src.simulation.environment import RearrangeTHOREnvironment
from src.simulation.metrics import rand_metrics, rearrangement_metrics
from src.simulation.module_box import GtBoxModule, PredBoxModule
from src.simulation.module_exploration import GtExplorationModule, ReplayExplorationModule
from src.simulation.module_planner import PlannerModule
from src.simulation.module_relation_tracking import RelationTrackingModule
from src.simulation.module_state_graph import StateGraphModule
from src.simulation.rearrange_utils import (load_exploration_cache_dir, load_rearrange_data_from_path,
load_rearrange_meta_from_path)
from src.simulation.rearrangement_args import RearrangementArgs
from src.simulation.shortest_path_navigator import ShortestPathNavigatorTHOR
class AgentRoomr(object):
def __init__(
self,
rearrangement_args: RearrangementArgs
) -> None:
super().__init__()
if not os.path.exists(rearrangement_args.dump_dir):
os.mkdir(rearrangement_args.dump_dir)
self.dump_dir = rearrangement_args.dump_dir
self.env = None
self.roomr_metadata = load_rearrange_meta_from_path(
rearrangement_args.data_split, rearrangement_args.roomr_meta_dir)
self.exploration_cache = None
if not rearrangement_args.use_gt_exploration:
self.exploration_cache = load_exploration_cache_dir(
rearrangement_args.exploration_cache_dir)
self.reset(rearrangement_args=rearrangement_args)
def reset(self, rearrangement_args=None):
seed_everything(0)
if rearrangement_args is not None:
self.rearrangement_args = rearrangement_args
# initialize modules based on flags
self.box_module = None
self.exploration_module = None
self.relation_tracking_module = None
self.planner = None
self.state_graph_module = None
self.adjusted_rand_index_over_time = []
self.use_gt_boxes = self.rearrangement_args.use_gt_boxes
self.use_roi_feature_within_traj = self.rearrangement_args.use_roi_feature_within_traj
self.use_roi_feature_between_traj = self.rearrangement_args.use_roi_feature_between_traj
self.use_box_within_traj = self.rearrangement_args.use_box_within_traj
moved_detection_counts = {}
d = self.roomr_metadata[f'FloorPlan{rearrangement_args.room_id}_{rearrangement_args.instance_id}']['objects']
for o in d:
o_data = d[o]
moved_detection_counts[o] = {
'has_opened': o_data['has_opened'], 'count': 0}
# create env with basic controller
if self.env is None:
self.env = RearrangeTHOREnvironment(
force_cache_reset=False,
controller_kwargs={
'commit_id': ROOMR_CONTROLLER_COMMIT_ID,
'height': IMAGE_SIZE,
'width': IMAGE_SIZE,
'renderInstanceSegmentation': self.rearrangement_args.render_instance_segmentation,
'renderDepthImage': self.rearrangement_args.debug,
'visibilityDistance': self.rearrangement_args.visibility_distance,
'quality': "Very Low"})
# BOX MODULE
self.box_module = None
box_conf_threshold = self.rearrangement_args.box_conf_threshold
box_frac_threshold = self.rearrangement_args.box_frac_threshold
model_type = self.rearrangement_args.boxes_model_type
model_path = self.rearrangement_args.boxes_model_path
device_num = self.rearrangement_args.device_relation_tracking
get_roi_features = self.rearrangement_args.use_roi_feature_within_traj or self.rearrangement_args.use_roi_feature_between_traj
debug = self.rearrangement_args.debug
BoxModule = None
if self.rearrangement_args.use_gt_boxes:
BoxModule = GtBoxModule
else:
BoxModule = PredBoxModule
self.box_module = BoxModule(box_conf_threshold,
box_frac_threshold,
model_type,
model_path,
device_num,
moved_detection_counts,
get_roi_features,
debug)
# EXPLORATION MODULE
split = self.rearrangement_args.data_split
data = load_rearrange_data_from_path(
split, self.rearrangement_args.roomr_dir)
room_id = self.rearrangement_args.room_id
dump_dir = self.rearrangement_args.dump_dir
floor_plan = 'FloorPlan' + str(room_id)
instance_id = self.rearrangement_args.instance_id
exploration_strategy = self.rearrangement_args.gt_exploration_strategy
num_steps = self.rearrangement_args.num_steps
rotation_degrees = self.rearrangement_args.rotation_degrees
task_spec = get_rearrange_task_spec(
data, floor_plan, instance_id, split)
if self.rearrangement_args.use_gt_exploration:
metadata = self.roomr_metadata[f'{floor_plan}_{instance_id}']
self.exploration_module = GtExplorationModule(
self.env, task_spec, exploration_strategy, metadata, num_steps, rotation_degrees, room_id, instance_id, dump_dir)
else:
cache = self.exploration_cache[f'{room_id}_{instance_id}']
self.exploration_module = ReplayExplorationModule(
self.env, task_spec, cache, rotation_degrees, room_id, instance_id, dump_dir)
# WITHIN TRAJECTORY CORRESPONDENCE MODULE
use_roi_feature_within_traj = self.rearrangement_args.use_roi_feature_within_traj
use_roi_feature_between_traj = self.rearrangement_args.use_roi_feature_between_traj
self.relation_tracking_module = RelationTrackingModule(
self.rearrangement_args.relation_tracking_model_path,
self.rearrangement_args.object_tracking_model_path,
self.rearrangement_args.averaging_strategy,
self.rearrangement_args.device_relation_tracking,
self.rearrangement_args.use_gt_relation_tracking,
True,
self.rearrangement_args.cos_sim_match_threshold,
room_id,
instance_id,
dump_dir,
use_roi_feature_within_traj,
use_roi_feature_between_traj,
self.rearrangement_args.debug)
# BETWEEN TRAJECTORY CORRESPONDENCE MODULE
room_id = self.rearrangement_args.room_id
dump_dir = self.rearrangement_args.dump_dir
instance_id = self.rearrangement_args.instance_id
use_gt_object_matching = self.rearrangement_args.use_gt_object_matching
self.planner = PlannerModule(
self.env, room_id, instance_id, use_gt_object_matching, dump_dir)
# AGENT POSE CORRESPONDENCE MODULE
self.state_graph_module = StateGraphModule()
def walkthrough_pipeline(self):
self.explore_shared(True)
def rearrange_room(self):
self.exploration_module.reset(shuffle=True)
self.relation_tracking_module.reset()
self.state_graph_module.reset()
self.box_module.reset()
def unshuffle_pipeline(self):
assert self.exploration_module.env.shuffle_called
self.explore_shared(False)
        # at this point we need to compare the scene representations to figure out what moved and form a plan
self.planner.generate_plan(
self.rearrangement_args.cos_sim_moved_threshold,
self.rearrangement_args.cos_sim_object_threshold,
self.rearrangement_args.debug)
self.planner.execute_plan(self.rearrangement_args.debug)
def get_metrics(self, with_error=False):
metrics = rearrangement_metrics(
self.env, self.planner, self.roomr_metadata, with_error)
return metrics
def explore_shared(self, from_walkthrough):
# initial state and initialize the representation
event = self.exploration_module.env.last_event
seen_states = set()
event_key = ShortestPathNavigatorTHOR.get_key(
event.metadata['agent'])
seen_states.add(event_key)
if self.rearrangement_args.debug:
self.exploration_module.dump_observation()
grounded_state = self.relation_tracking_module.update_scene_representation(
event,
self.box_module,
)
_, ari = rand_metrics(self.relation_tracking_module.assignments,
self.relation_tracking_module.gt_assignments)
self.adjusted_rand_index_over_time.append(ari)
self.state_graph_module.add_edge(grounded_state, None)
if self.rearrangement_args.debug:
self.state_graph_module.dump_graph(from_walkthrough)
        event = None  # placeholder; reassigned by take_action() on each loop iteration
while True:
event, update_state_graph = self.exploration_module.take_action()
if event is None:
break
last_action = event.metadata['lastAction']
if self.rearrangement_args.debug:
self.exploration_module.dump_observation()
event_key = ShortestPathNavigatorTHOR.get_key(
event.metadata['agent'])
grounded_state = self.relation_tracking_module.update_scene_representation(
event,
self.box_module
)
seen_states.add(event_key)
if update_state_graph:
_, ari = rand_metrics(
self.relation_tracking_module.assignments, self.relation_tracking_module.gt_assignments)
self.adjusted_rand_index_over_time.append(ari)
                # hack around the fact that turns are split into 3 images
self.state_graph_module.add_edge(
grounded_state, last_action)
if self.rearrangement_args.debug:
self.state_graph_module.dump_graph(from_walkthrough)
self.planner.store_representations(
self.relation_tracking_module, self.state_graph_module, self.box_module, from_walkthrough)
| CSR-main | src/simulation/agent_roomr.py |
import random
from collections import defaultdict
from typing import Any, Dict, List, Optional, Set
import numpy as np
from ai2thor.controller import Controller
import torch
from src.shared.constants import CLASSES_TO_IGNORE
from src.simulation.constants import \
OBJECT_TYPES_THAT_CAN_HAVE_IDENTICAL_MESHES
def valid_box_size(event, object_id, box_frac_threshold):
top = (event.instance_detections2D[object_id][0],
event.instance_detections2D[object_id][1])
bottom = (event.instance_detections2D[object_id][2] - 1,
event.instance_detections2D[object_id][3] - 1)
area = (bottom[0] - top[0]) * (bottom[1] - top[1])
if area / (event.metadata["screenWidth"] * event.metadata["screenHeight"]) < box_frac_threshold:
return False
return True
def compute_iou(pred, target):
with torch.no_grad():
assert pred.shape == target.shape
intersection = target & pred
union = target | pred
iou = torch.sum(intersection).flatten() / torch.sum(union).float()
return iou
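# Illustrative sketch (not part of the original file): `compute_iou` above expects two
# boolean tensors of identical shape (e.g. instance masks). The helper name and the
# toy masks below are hypothetical.
def _example_mask_iou():
    a = torch.zeros(4, 4, dtype=torch.bool)
    b = torch.zeros(4, 4, dtype=torch.bool)
    a[:2, :] = True   # 8 pixels in the first two rows
    b[1:3, :] = True  # 8 pixels in rows 1-2, 4 of which overlap with `a`
    return compute_iou(a, b)  # one-element tensor, approximately 4 / 12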
def are_images_near(image1, image2, max_mean_pixel_diff=0.5):
return np.mean(np.abs(image1 - image2).flatten()) <= max_mean_pixel_diff
def are_images_far(image1, image2, min_mean_pixel_diff=10):
return np.mean(np.abs(image1 - image2).flatten()) >= min_mean_pixel_diff
class ThorPositionTo2DFrameTranslator(object):
def __init__(self, frame_shape, cam_position, orth_size):
self.frame_shape = frame_shape
self.lower_left = np.array(
(cam_position[0], cam_position[2])) - orth_size
self.span = 2 * orth_size
def __call__(self, position):
if len(position) == 3:
x, _, z = position
else:
x, z = position
camera_position = (np.array((x, z)) - self.lower_left) / self.span
return np.array(
(
round(self.frame_shape[0] * (1.0 - camera_position[1])),
round(self.frame_shape[1] * camera_position[0]),
),
dtype=int,
)
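# Illustrative sketch (not part of the original file): projecting a world position into
# top-down frame coordinates with the translator above. The helper name and the camera
# parameters (frame shape, camera position, orthographic half-extent) are made-up values
# for demonstration only.
def _example_position_to_frame():
    translator = ThorPositionTo2DFrameTranslator(
        frame_shape=(224, 224, 3),     # (rows, cols, channels) of the top-down frame
        cam_position=(0.0, 2.5, 0.0),  # (x, y, z) of the orthographic camera
        orth_size=4.0,                 # half-extent of the orthographic view
    )
    # Accepts either an (x, y, z) or an (x, z) position; returns (row, col) pixel indices.
    return translator((1.0, 0.9, -2.0))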
def get_pickupable_objects(event, ignore_classes=CLASSES_TO_IGNORE, distance_thresh=1.5):
objects_metadata = event.metadata['objects']
names = []
for object_metadata in objects_metadata:
        if object_metadata['objectType'] in ignore_classes:
continue
if not object_metadata['visible']:
continue
if object_metadata['distance'] > distance_thresh:
continue
if object_metadata['pickupable']:
names.append(object_metadata['name'])
return names
def get_openable_objects(event, ignore_classes=CLASSES_TO_IGNORE, distance_thresh=1.5):
objects_metadata = event.metadata['objects']
names = []
for object_metadata in objects_metadata:
        if object_metadata['objectType'] in ignore_classes:
continue
if not object_metadata['visible']:
continue
if object_metadata['distance'] > distance_thresh:
continue
if object_metadata['openable']:
names.append(object_metadata['name'])
return names
def get_interactable_objects(event, ignore_classes=CLASSES_TO_IGNORE, distance_thresh=1.5):
objects_metadata = event.metadata['objects']
ret = []
for object_metadata in objects_metadata:
        if object_metadata['objectType'] in ignore_classes:
continue
if not object_metadata['visible']:
continue
if object_metadata['distance'] > distance_thresh:
continue
if object_metadata['pickupable'] or object_metadata['openable']:
ret.append(object_metadata['name'])
return ret
def position_to_tuple(position):
return (position["x"], position["y"], position["z"])
def get_agent_map_data(c: Controller):
c.step({"action": "ToggleMapView"})
cam_position = c.last_event.metadata["cameraPosition"]
cam_orth_size = c.last_event.metadata["cameraOrthSize"]
pos_translator = ThorPositionTo2DFrameTranslator(
c.last_event.frame.shape, position_to_tuple(
cam_position), cam_orth_size
)
to_return = {
"frame": c.last_event.frame,
"cam_position": cam_position,
"cam_orth_size": cam_orth_size,
"pos_translator": pos_translator,
}
c.step({"action": "ToggleMapView"})
return to_return
def open_objs(
objects_to_open: List[Dict[str, Any]], controller: Controller
) -> Dict[str, Optional[float]]:
"""Opens up the chosen pickupable objects if they're openable."""
out: Dict[str, Optional[float]] = defaultdict(lambda: None)
for obj in objects_to_open:
last_openness = obj["openness"]
new_openness = last_openness
while abs(last_openness - new_openness) <= 0.2:
new_openness = random.random()
controller.step(
"OpenObject",
objectId=obj["objectId"],
openness=new_openness,
forceAction=True,
)
out[obj["name"]] = new_openness
return out
def get_object_ids_to_not_move_from_object_types(
controller: Controller, object_types: Set[str]
) -> List[str]:
object_types = set(object_types)
return [
o["objectId"]
for o in controller.last_event.metadata["objects"]
if o["objectType"] in object_types
]
def remove_objects_until_all_have_identical_meshes(controller: Controller):
obj_type_to_obj_list = defaultdict(lambda: [])
for obj in controller.last_event.metadata["objects"]:
obj_type_to_obj_list[obj["objectType"]].append(obj)
for obj_type in OBJECT_TYPES_THAT_CAN_HAVE_IDENTICAL_MESHES:
objs_of_type = list(
sorted(obj_type_to_obj_list[obj_type], key=lambda x: x["name"])
)
random.shuffle(objs_of_type)
objs_to_remove = objs_of_type[:-1]
for obj_to_remove in objs_to_remove:
obj_to_remove_name = obj_to_remove["name"]
obj_id_to_remove = next(
obj["objectId"]
for obj in controller.last_event.metadata["objects"]
if obj["name"] == obj_to_remove_name
)
controller.step("RemoveFromScene", objectId=obj_id_to_remove)
if not controller.last_event.metadata["lastActionSuccess"]:
return False
return True
| CSR-main | src/simulation/utils.py |
from dataclasses import dataclass
import hashlib
from typing import Dict, List
from src.simulation.shortest_path_navigator import AgentLocKeyType
@dataclass
class DataEntry(object):
first_name: str = ''
second_name: str = ''
receptacle: int = 0
receptacle_sibling: int = 0
room_id: int = 0
trajectory_id: int = 0
timestep: int = 0
position: Dict = None
rotation: Dict = None
horizon: Dict = None
objects_relative_distance: float = -1.
in_frame_negatives: List = None
has_shuffle_negatives: bool = False
@property
def get_instance_key(self) -> str:
key_str = f'{self.first_name},{self.second_name}'
return hashlib.md5(key_str.encode()).hexdigest()
@property
def get_category_key(self) -> str:
first = self.first_name.split('_')[0]
second = self.second_name.split('_')[0]
key_str = f'{first},{second}'
return hashlib.md5(key_str.encode()).hexdigest()
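# Illustrative sketch (not part of the original file): `get_instance_key` hashes the full
# pair of instance names, while `get_category_key` hashes only the portion of each name
# before the first underscore. The helper name and instance names below are hypothetical.
def _example_data_entry_keys():
    entry = DataEntry(first_name='Mug_1a2b', second_name='CounterTop_3c4d')
    instance_key = entry.get_instance_key  # depends on the exact instance names
    category_key = entry.get_category_key  # depends only on 'Mug' and 'CounterTop'
    return instance_key, category_key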
| CSR-main | src/simulation/data_entry.py |
import json
import logging
import os
import random
from contextlib import contextmanager
from typing import Dict, Callable, Tuple, Union, List, Any, Optional, Sequence
import ai2thor.controller
import compress_pickle
import lru
import numpy as np
from scipy.spatial.qhull import ConvexHull, Delaunay
from allenact_plugins.ithor_plugin.ithor_environment import IThorEnvironment
from allenact_plugins.ithor_plugin.ithor_util import include_object_data
_UNIFORM_BOX_CACHE = {}
def save_frames_to_mp4(frames: Sequence[np.ndarray], file_name: str, fps=3):
import matplotlib.pyplot as plt
from matplotlib import animation
import pylab
h, w, _ = frames[0].shape
aspect_ratio = w / h
fig = plt.figure(figsize=(5 * aspect_ratio, 5))
ax = fig.add_subplot(111)
ax.set_frame_on(False)
fig.subplots_adjust(left=0, bottom=0, right=1,
top=1, wspace=None, hspace=None)
ax.set_aspect("equal")
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
im = ax.imshow(frames[0], cmap="gray", interpolation="nearest")
im.set_clim([0, 255])
pylab.tight_layout()
def update_img(n):
if n >= len(frames):
im.set_data(frames[-1])
else:
im.set_data(frames[n])
return im
ani = animation.FuncAnimation(
fig, update_img, len(frames) - 1, interval=200)
writer = animation.writers["ffmpeg"](fps=fps)
ani.save(file_name, writer=writer, dpi=300)
def hand_in_initial_position(controller: ai2thor.controller.Controller):
metadata = controller.last_event.metadata
return (
IThorEnvironment.position_dist(
metadata["hand"]["localPosition"], {"x": 0, "y": -0.16, "z": 0.38},
)
< 1e-4
and IThorEnvironment.angle_between_rotations(
metadata["hand"]["localRotation"], {"x": 0, "y": 0, "z": 0}
)
< 1e-2
)
class BoundedFloat(object):
"""Declare a bounded float placeholder variable."""
def __init__(self, low: float, high: float):
"""High is the max float value, low is the min (both inclusive)."""
self.types = {float, int, np.float64}
if type(low) not in self.types or type(high) not in self.types:
raise ValueError("Bounds must both be floats.")
if low > high:
raise ValueError("low must be less than high.")
self.low = low
self.high = high
def sample(self) -> float:
"""Return a random float within the initialized range."""
return random.random() * (self.high - self.low) + self.low
def __contains__(self, n: float):
"""Assert n is within this classes bounded range."""
if type(n) not in self.types:
raise ValueError("n must be a float (or an int).")
return n >= self.low and n <= self.high
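# Illustrative sketch (not part of the original file): BoundedFloat declares the legal
# range of a continuous action argument; membership is checked with `in` and values are
# drawn uniformly with `sample()`. The helper name is hypothetical.
def _example_bounded_float():
    openness = BoundedFloat(low=0.0, high=1.0)
    assert 0.5 in openness       # inclusive bounds check
    assert 1.5 not in openness
    return openness.sample()     # uniform sample in [0.0, 1.0]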
class RearrangeActionSpace(object):
"""Control which actions with bounded variables can be executed."""
def __init__(self, actions: Dict[Callable, Dict[str, BoundedFloat]]):
"""Build a new AI2-THOR action space.
Attributes
:actions (Dict[Callable, Dict[str, BoundedFloat]]) must be in the form
{
<Callable: e.g., controller.move_ahead>: {
'<x>': <BoundedFloat(low=0.5, high=2.5)>,
'<y>': <BoundedFloat(low=0.5, high=2.5)>,
'<z>': <BoundedFloat(low=0.5, high=2.5)>,
'<degrees>': <BoundedFloat(low=-90, high=90)>,
...
},
...
},
where the action variables are in the value and the callable function
is the key.
"""
self.keys = list(actions.keys())
self.actions = actions
def execute_random_action(self, log_choice: bool = True) -> None:
"""Execute a random action within the specified action space."""
action = random.choice(self.keys)
kwargs = {
name: bounds.sample() for name, bounds in self.actions[action].items()
}
# logging
if log_choice:
kwargs_str = str(
"".join(f" {k}: {v},\n" for k, v in kwargs.items()))
kwargs_str = "\n" + kwargs_str[:-2] if kwargs_str else ""
logging.info(f"Executing {action.__name__}(" + kwargs_str + ")")
action(**kwargs)
def __contains__(
self, action_fn_and_kwargs: Tuple[Callable, Dict[str, float]]
) -> bool:
"""Return if action_fn with variables is valid in this ActionSpace."""
action_fn, variables = action_fn_and_kwargs
# asserts the action is valid
if action_fn not in self.actions:
return False
# asserts the variables are valid
for name, x in variables.items():
if x not in self.actions[action_fn][name]:
return False
return True
def __str__(self) -> str:
"""Return a string representation of the action space."""
return self.__repr__()
def __repr__(self) -> str:
"""Return a string representation of the action space."""
s = ""
tab = " " * 2 # default tabs have like 8 spaces on shells
for action_fn, vars in self.actions.items():
fn_name = action_fn.__name__
vstr = ""
for i, (var_name, bound) in enumerate(vars.items()):
low = bound.low
high = bound.high
vstr += f"{tab * 2}{var_name}: float(low={low}, high={high})"
vstr += "\n" if i + 1 == len(vars) else ",\n"
vstr = "\n" + vstr[:-1] if vstr else ""
s += f"{tab}{fn_name}({vstr}),\n"
s = s[:-2] if s else ""
return "ActionSpace(\n" + s + "\n)"
def extract_obj_data(obj):
"""Return object evaluation metrics based on the env state."""
if "type" in obj:
return {
"type": obj["type"],
"position": obj["position"],
"rotation": obj["rotation"],
"openness": obj["openness"],
"pickupable": obj["pickupable"],
"broken": obj["broken"],
"bounding_box": obj["bounding_box"],
"objectId": obj["objectId"],
"name": obj["name"],
"parentReceptacles": obj.get("parentReceptacles", []),
}
return {
"type": obj["objectType"],
"position": obj["position"],
"rotation": obj["rotation"],
"openness": obj["openness"] if obj["openable"] else None,
"pickupable": obj["pickupable"],
"broken": obj["isBroken"],
"objectId": obj["objectId"],
"name": obj["name"],
"parentReceptacles": obj.get("parentReceptacles", []),
"bounding_box": obj["objectOrientedBoundingBox"]["cornerPoints"]
if obj["objectOrientedBoundingBox"]
else None,
}
def get_pose_info(
objs: Union[Sequence[Dict[str, Any]], Dict[str, Any]]
) -> Union[List[Dict[str, Any]], Dict[str, Any]]:
"""Return data about each specified object.
For each object, the return consists of its type, position,
rotation, openness, and bounding box.
"""
# list of objects
if isinstance(objs, Sequence):
return [extract_obj_data(obj) for obj in objs]
# single object
return extract_obj_data(objs)
def execute_action(
controller: ai2thor.controller.Controller,
action_space: RearrangeActionSpace,
action_fn: Callable,
thor_action: str,
error_message: str = "",
updated_kwarg_names: Optional[Dict[str, str]] = None,
default_thor_kwargs: Optional[Dict[str, Any]] = None,
preprocess_kwargs_inplace: Optional[Callable] = None,
**kwargs: float,
) -> bool:
"""Execute a bounded action within the AI2-THOR controller."""
if updated_kwarg_names is None:
updated_kwarg_names = {}
if default_thor_kwargs is None:
default_thor_kwargs = {}
if (action_fn, kwargs) not in action_space: # Checks that values are in bounds
raise ValueError(
error_message
+ f" action_fn=={action_fn}, kwargs=={kwargs}, action_space=={action_space}."
)
if preprocess_kwargs_inplace is not None:
if len(updated_kwarg_names) != 0:
raise NotImplementedError(
"Cannot have non-empty `updated_kwarg_names` and a non-None `preprocess_kwargs_inplace` argument."
)
preprocess_kwargs_inplace(kwargs)
# get rid of bad variable names
for better_kwarg, thor_kwarg in updated_kwarg_names.items():
kwargs[thor_kwarg] = kwargs[better_kwarg]
del kwargs[better_kwarg]
for name, value in default_thor_kwargs.items():
kwargs[name] = value
event = controller.step(thor_action, **kwargs)
return event.metadata["lastActionSuccess"]
def _iou_slow(
b1: Sequence[Sequence[float]],
b2: Sequence[Sequence[float]],
num_points: int = 2197,
) -> float:
"""Calculate the IoU between 3d bounding boxes b1 and b2."""
b1 = np.array(b1) if not isinstance(b1, np.ndarray) else b1
b2 = np.array(b2) if not isinstance(b2, np.ndarray) else b2
def _outer_bounds(
points_1: np.ndarray, points_2: np.ndarray
) -> Dict[str, Dict[str, float]]:
"""Sample points from the outer bounds formed by points_1/2."""
assert points_1.shape == points_2.shape
bounds = dict()
for i in range(points_1.shape[0]):
x1, y1, z1 = points_1[i]
x2, y2, z2 = points_2[i]
points = [
(x1, "x"),
(x2, "x"),
(y1, "y"),
(y2, "y"),
(z1, "z"),
(z2, "z"),
]
for val, d_key in points:
if d_key not in bounds:
bounds[d_key] = {"min": val, "max": val}
else:
if val > bounds[d_key]["max"]:
bounds[d_key]["max"] = val
elif val < bounds[d_key]["min"]:
bounds[d_key]["min"] = val
return bounds
def _in_box(box: np.ndarray, points: np.ndarray) -> np.ndarray:
"""For each point, return if its in the hull."""
hull = ConvexHull(box)
deln = Delaunay(box[hull.vertices])
return deln.find_simplex(points) >= 0
bounds = _outer_bounds(b1, b2)
dim_points = int(num_points ** (1 / 3))
xs = np.linspace(bounds["x"]["min"], bounds["x"]["max"], dim_points)
ys = np.linspace(bounds["y"]["min"], bounds["y"]["max"], dim_points)
zs = np.linspace(bounds["z"]["min"], bounds["z"]["max"], dim_points)
points = np.array([[x, y, z]
for x in xs for y in ys for z in zs], copy=False)
in_b1 = _in_box(b1, points)
in_b2 = _in_box(b2, points)
intersection = np.count_nonzero(in_b1 * in_b2)
union = np.count_nonzero(in_b1 + in_b2)
iou = intersection / union if union else 0
return iou
def get_basis_for_3d_box(corners: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
assert corners[0].sum() == 0.0
without_first = corners[1:]
magnitudes1 = np.sqrt((without_first * without_first).sum(1))
v0_ind = np.argmin(magnitudes1)
v0_mag = magnitudes1[v0_ind]
v0 = without_first[np.argmin(magnitudes1)] / v0_mag
orth_to_v0 = (v0.reshape(1, -1) * without_first).sum(-1) < v0_mag / 2.0
inds_orth_to_v0 = np.where(orth_to_v0)[0]
v1_ind = inds_orth_to_v0[np.argmin(magnitudes1[inds_orth_to_v0])]
v1_mag = magnitudes1[v1_ind]
v1 = without_first[v1_ind, :] / magnitudes1[v1_ind]
orth_to_v1 = (v1.reshape(1, -1) * without_first).sum(-1) < v1_mag / 2.0
inds_orth_to_v0_and_v1 = np.where(orth_to_v0 & orth_to_v1)[0]
if len(inds_orth_to_v0_and_v1) != 1:
raise RuntimeError(f"Could not find basis for {corners}")
v2_ind = inds_orth_to_v0_and_v1[0]
v2 = without_first[v2_ind, :] / magnitudes1[v2_ind]
orth_mat = np.stack((v0, v1, v2), axis=1) # Orthonormal matrix
return orth_mat, magnitudes1[[v0_ind, v1_ind, v2_ind]]
def uniform_box_points(n):
if n not in _UNIFORM_BOX_CACHE:
start = 1.0 / (2 * n)
lin_space = np.linspace(start, 1 - start, num=n).reshape(n, 1)
mat = lin_space
for i in range(2):
mat = np.concatenate(
(np.repeat(lin_space, mat.shape[0], 0), np.tile(mat, (n, 1))), axis=1,
)
_UNIFORM_BOX_CACHE[n] = mat
return _UNIFORM_BOX_CACHE[n]
def iou_box_3d(b1: Sequence[Sequence[float]], b2: Sequence[Sequence[float]]) -> float:
"""Calculate the IoU between 3d bounding boxes b1 and b2."""
import numpy as np
b1 = np.array(b1)
b2 = np.array(b2)
assert b1.shape == b2.shape == (8, 3)
b1_center = b1[:1, :]
b1 = b1 - b1_center
b1_orth_basis, b1_mags = get_basis_for_3d_box(corners=b1)
b2 = (b2 - b1_center) @ b1_orth_basis
b2_center = b2[:1, :]
b2 = b2 - b2_center
b2_orth_basis, b2_mags = get_basis_for_3d_box(corners=b2)
sampled_points = b2_center.reshape(1, 3) + (
uniform_box_points(13) @ (b2_mags.reshape(-1, 1)
* np.transpose(b2_orth_basis))
)
prop_intersection = (
np.logical_and(
sampled_points > -1e-3, sampled_points <= 1e-3 +
b1_mags.reshape(1, 3)
)
.all(-1)
.mean()
)
b1_vol = np.prod(b1_mags)
b2_vol = np.prod(b2_mags)
intersect_vol = b2_vol * prop_intersection
return intersect_vol / (b1_vol + b2_vol - intersect_vol)
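# Illustrative sketch (not part of the original file): exercising `iou_box_3d` with two
# axis-aligned unit cubes, the second shifted by 0.5m along x. The helper name and the
# corner ordering (a vertex followed by its neighbours) are assumptions chosen so that
# `get_basis_for_3d_box` can recover the box edges.
def _example_unit_cube_iou():
    cube = np.array([
        [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1],
        [1, 1, 0], [1, 0, 1], [0, 1, 1], [1, 1, 1],
    ], dtype=float)
    shifted = cube + np.array([0.5, 0.0, 0.0])
    # Exact IoU is 0.5 / (1 + 1 - 0.5) = 1/3; the fixed 13^3 sampling grid in
    # `iou_box_3d` returns a coarser estimate (roughly 0.37 for this configuration).
    return iou_box_3d(cube, shifted)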
class PoseMismatchError(Exception):
pass
class ObjectInteractablePostionsCache:
def __init__(self, max_size: int = 20000, ndigits=2):
self._key_to_positions = lru.LRU(size=max_size)
self.ndigits = ndigits
self.max_size = max_size
def _get_key(self, scene_name: str, obj: Dict[str, Any]):
p = obj["position"]
return (
scene_name,
obj["type"] if "type" in obj else obj["objectType"],
round(p["x"], self.ndigits),
round(p["y"], self.ndigits),
round(p["z"], self.ndigits),
)
def get(
self,
scene_name: str,
obj: Dict[str, Any],
controller: ai2thor.controller.Controller,
reachable_positions: Optional[Sequence[Dict[str, float]]] = None,
force_cache_refresh: bool = False,
) -> List[Dict[str, Union[float, int, bool]]]:
scene_name = scene_name.replace("_physics", "")
obj_key = self._get_key(scene_name=scene_name, obj=obj)
if force_cache_refresh or obj_key not in self._key_to_positions:
with include_object_data(controller):
metadata = controller.last_event.metadata
cur_scene_name = metadata["sceneName"].replace("_physics", "")
assert (
scene_name == cur_scene_name
), f"Scene names must match when filling a cache miss ({scene_name} != {cur_scene_name})."
obj_in_scene = next(
(o for o in metadata["objects"]
if o["name"] == obj["name"]), None,
)
if obj_in_scene is None:
raise RuntimeError(
f"Object with name {obj['name']} must be in the scene when filling a cache miss"
)
desired_pos = obj["position"]
desired_rot = obj["rotation"]
cur_pos = obj_in_scene["position"]
cur_rot = obj_in_scene["rotation"]
should_teleport = (
IThorEnvironment.position_dist(desired_pos, cur_pos) >= 1e-3
or IThorEnvironment.rotation_dist(desired_rot, cur_rot) >= 1
)
object_held = obj_in_scene["isPickedUp"]
physics_was_unpaused = controller.last_event.metadata.get(
"physicsAutoSimulation", True
)
if should_teleport:
if object_held:
if not hand_in_initial_position(controller=controller):
raise NotImplementedError
if physics_was_unpaused:
controller.step("PausePhysicsAutoSim")
assert controller.last_event.metadata["lastActionSuccess"]
event = controller.step(
"TeleportObject",
objectId=obj_in_scene["objectId"],
rotation=desired_rot,
**desired_pos,
forceAction=True,
allowTeleportOutOfHand=True,
forceKinematic=True,
)
assert event.metadata["lastActionSuccess"]
metadata = controller.step(
action="GetInteractablePoses",
objectId=obj["objectId"],
positions=reachable_positions,
).metadata
assert metadata["lastActionSuccess"]
self._key_to_positions[obj_key] = metadata["actionReturn"]
if should_teleport:
if object_held:
if hand_in_initial_position(controller=controller):
controller.step(
"PickupObject",
objectId=obj_in_scene["objectId"],
forceAction=True,
)
assert controller.last_event.metadata["lastActionSuccess"]
if physics_was_unpaused:
controller.step("UnpausePhysicsAutoSim")
assert controller.last_event.metadata["lastActionSuccess"]
else:
raise NotImplementedError
else:
event = controller.step(
"TeleportObject",
objectId=obj_in_scene["objectId"],
rotation=cur_rot,
**cur_pos,
forceAction=True,
)
assert event.metadata["lastActionSuccess"]
return self._key_to_positions[obj_key]
def load_rearrange_data_from_path(
stage: str, base_dir: Optional[str] = None,
) -> Dict[str, List[Dict[str, Any]]]:
stage = stage.lower()
if stage == "valid":
stage = "val"
data_path = os.path.abspath(os.path.join(base_dir, f"{stage}.pkl.gz"))
if not os.path.exists(data_path):
raise RuntimeError(f"No data at path {data_path}")
data = compress_pickle.load(path=data_path)
for scene in data:
for ind, task_spec_dict in enumerate(data[scene]):
task_spec_dict["scene"] = scene
if "index" not in task_spec_dict:
task_spec_dict["index"] = ind
if "stage" not in task_spec_dict:
task_spec_dict["stage"] = stage
return data
def load_exploration_cache_dir(exploration_cache_dir: str):
trajectories = {}
for e in os.listdir(exploration_cache_dir):
trajectory = None
with open(os.path.join(exploration_cache_dir, e), 'r') as f:
trajectory = json.load(f)
_, room_id, instance_id = e.split('.')[0].split('_')
trajectories[f'{room_id}_{instance_id}'] = trajectory
return trajectories
def load_rearrange_meta_from_path(stage: str, base_dir: Optional[str] = None):
data = None
with open(os.path.join(base_dir, f'{stage}.json'), 'r') as f:
data = json.load(f)
return data | CSR-main | src/simulation/rearrange_utils.py |
import enum
import math
import pprint
import random
import traceback
from collections import OrderedDict
from typing import Dict, Any, Tuple, Optional, Callable, List, Union, Sequence
import ai2thor
import ai2thor.controller
import ai2thor.fifo_server
import ai2thor.server
import ai2thor.wsgi_server
import numpy as np
from packaging import version
from torch.distributions.utils import lazy_property
from allenact.utils.system import get_logger
from allenact_plugins.ithor_plugin.ithor_environment import IThorEnvironment
from allenact_plugins.ithor_plugin.ithor_util import (
round_to_factor,
include_object_data,
)
from src.simulation.utils import (
open_objs,
get_object_ids_to_not_move_from_object_types,
remove_objects_until_all_have_identical_meshes,
)
from src.simulation.constants import (
ROOMR_CONTROLLER_COMMIT_ID,
MAX_HAND_METERS,
OBJECT_TYPES_TO_NOT_MOVE
)
from src.simulation.rearrange_utils import (
BoundedFloat,
RearrangeActionSpace,
PoseMismatchError,
ObjectInteractablePostionsCache,
execute_action,
get_pose_info,
iou_box_3d,
)
class RearrangeMode(enum.Enum):
"""Different modes allowed in RearrangeTHOREnvironment."""
MANIPULATE = "MANIPULATE"
SNAP = "SNAP"
class RearrangeTaskSpec:
"""Data container encapsulating how a single rearrangement instance should
be initialized.
The rearrangement datasets are structured as large dictionaries of the form
```python
{
SCENE_NAME: [
{
DATA_DEFINING_A_SINGLE_REARRANGE_TASK
},
...
],
...
}
```
This `RearrangeTaskSpec` is used to encapsulate the `DATA_DEFINING_A_SINGLE_REARRANGE_TASK`
which allows us to use autocomplete and type checking rather than passing around raw dictionaries.
# Attributes
scene : A string specifying the AI2-THOR scene (e.g "FloorPlan18") in which to run the rearrange task.
    stage : A string specifying the type of instance this data corresponds to (e.g. "train", "val", "test", etc.)
agent_position : A Dict[str, float] specifying the "x", "y", and "z" coordinates of the agent's starting position.
    agent_rotation: A float specifying the agent's starting rotation (in degrees).
openable_data : A sequence of dictionaries specifying the degree to which certain objects in the scene should be open
        in the walkthrough and unshuffle phases. E.g. the openness of a particular cabinet might be specified by the
dictionary:
```python
{
"name": "Cabinet_a8b4237f",
"objectName": "Cabinet_a8b4237f",
"objectId": "Cabinet|+01.31|+02.46|+04.36",
"start_openness": 0.6170539671128578,
"target_openness": 0.8788923191809455
}
```
        where `start_openness` is the degree to which the cabinet is open at the start of the unshuffle phase.
starting_poses : A sequence of dictionaries specifying the poses of all pickupable objects at the start
of the unshuffle phase. E.g. one such dictionary might look like:
```python
{
"name": "Bowl_803d17c0",
"objectName": "Bowl_803d17c0",
"position": {
"x": -0.5572903156280518,
"y": 0.8256161212921143,
"z": 6.25293493270874,
},
"rotation": {
"x": 359.9241943359375,
"y": -0.00041645264718681574,
"z": 0.004868899006396532,
},
}
```
target_poses : Similar to `starting_poses` but specifying the poses of objects during the walkthrough phase.
    runtime_sample : If `True`, then this task is meant to be randomly specified at runtime. That is, the above fields
(except for the `scene`) are to be left as `None` and the RearrangeTHOREnvironment will randomly generate
them instead (this may be slow).
runtime_data : A Dict[str, Any] into which the `RearrangeTHOREnvironment` may cache data for efficiency.
metrics : Any additional metrics that might be associated with a task specification. For instance, the
rearrangement dataset dictionaries include metrics such as `open_diff_count` which records the number
        of objects that differ in openness at the start of the walkthrough/unshuffle phases.
"""
def __init__(
self,
scene: str,
stage: Optional[str] = None,
agent_position: Optional[Dict[str, float]] = None,
agent_rotation: Optional[float] = None,
openable_data: Optional[Sequence[Dict[str, Any]]] = None,
starting_poses: Optional[Sequence[Dict[str, Any]]] = None,
target_poses: Optional[Sequence[Dict[str, Any]]] = None,
runtime_sample: bool = False,
runtime_data: Optional[Dict[str, Any]] = None,
**metrics,
):
"""Instantiate a `RearrangeTaskSpec` object."""
self.scene = scene
self.stage = stage
self.agent_position = agent_position
self.agent_rotation = agent_rotation
self.openable_data = openable_data
self.starting_poses = starting_poses
self.target_poses = target_poses
self.runtime_sample = runtime_sample
self.runtime_data: Dict[str, Any] = (
runtime_data if runtime_data is not None else {}
)
self.metrics = metrics
def __str__(self):
"""String representation of a `RearrangeTaskSpec` object."""
return pprint.pformat(self.__dict__)
@property
def unique_id(self):
if self.runtime_sample:
raise NotImplementedError("Cannot create a unique id for a runtime sample.")
return f"{self.scene}__{self.stage}__{self.metrics['index']}"
class RearrangeTHOREnvironment:
"""Custom AI2-THOR Controller for the task of object rearrangement.
# Attributes
mode : The current mode of rearrangement. Takes one of the values of RearrangeMode
(RearrangeMode.SNAP or RearrangeMode.MANIPULATE).
force_cache_reset : Whether or not we should force cache resets when using the `drop_held_object_with_snap` action.
Setting this value to `False` results in higher FPS at the expense of possibly having `drop_held_object_with_snap`
work/fail when it shouldn't. Setting `force_cache_reset` to `True` is recommended during validation/testing.
obj_name_to_walkthrough_start_pose : Dictionary mapping AI2-THOR object names to their poses (positions & rotations)
before they were shuffled (i.e. what the agent sees at the start of the walkthrough phase).
This will be changed after every call to `reset`.
obj_name_to_unshuffle_start_pose : Same as `obj_name_to_walkthrough_start_pose` but mapping object names to their poses (positions &
        rotations) just after they were shuffled (i.e. what the agent sees at the start of the unshuffle phase).
current_task_spec : A `RearrangeTaskSpec` object specifying the current rearrangement task details.
controller : A ai2thor controller used to execute all the actions.
shuffle_called : `True` if the objects have been shuffled so that we're in the `unshuffle` phase. Otherwise `False`.
"""
def __init__(
self,
mode: RearrangeMode = RearrangeMode.SNAP,
force_cache_reset: Optional[bool] = None,
controller_kwargs: Optional[Dict[str, Any]] = None,
enhanced_physics_determinism: bool = True,
):
"""Initialize a new rearrangement controller.
# Parameters
mode : See description of this class' attributes.
controller_kwargs : Dictionary specifying any keyword arguments to be passed
when initializing the `ai2thor.controller.Controller` (e.g. width/height).
"""
if ai2thor.__version__ is not None: # Allows for custom THOR installs
if ai2thor.__version__ not in ["0.0.1", None] and version.parse(
ai2thor.__version__
) < version.parse(ROOMR_CONTROLLER_COMMIT_ID):
raise ImportError(
f"To run the rearrangment baseline experiments you must use"
f" ai2thor version {ROOMR_CONTROLLER_COMMIT_ID} or higher."
)
# Saving attributes
if mode == RearrangeMode.SNAP:
assert (
force_cache_reset is not None
), "When in RearrangeMode.SNAP mode you must specify a value for `force_cache_reset`"
else:
force_cache_reset = force_cache_reset
self.force_cache_reset = force_cache_reset
self.mode = mode
self._controller_kwargs = {} if controller_kwargs is None else controller_kwargs
self._enhanced_physics_determinism = enhanced_physics_determinism
self.physics_step_kwargs = {}
if self._enhanced_physics_determinism:
self.physics_step_kwargs = {
"actionSimulationSeconds": 0.26,
"fixedDeltaTime": 0.02,
}
# Cache of where objects can be interacted with
self._interactable_positions_cache = ObjectInteractablePostionsCache()
# Object poses at start of walkthrough and unshuffle phases.
# Reset after every call to reset and shuffle respectively.
self.obj_name_to_walkthrough_start_pose: Optional[Dict[str, Dict]] = None
self.obj_name_to_unshuffle_start_pose: Optional[Dict[str, Dict]] = None
self._cached_poses: Optional[Tuple[List, List, List]] = None
# Current task specification
self.current_task_spec: Optional[RearrangeTaskSpec] = None
# Caches of starting unshuffle/walkthrough object poses and other information. Reset on every call to reset
self._sorted_and_extracted_walkthrough_start_poses: Optional[List] = None
self._sorted_and_extracted_unshuffle_start_poses: Optional[List] = None
self._have_warned_about_mismatch = False
self._agent_signals_done = False # Also reset on `shuffle()`
        # Instance masks are not currently supported. An Exception is thrown if
        # `mode == RearrangeMode.MANIPULATE` and render_instance_masks is True, since masks are
        # only available in RearrangeMode.SNAP mode.
self._render_instance_masks: bool = False
if self.mode == RearrangeMode.MANIPULATE and self._render_instance_masks:
raise Exception(
"render_instance_masks is only available on RearrangeMode.SNAP mode."
)
# local thor controller to execute all the actions
self.controller = self.create_controller()
# always begin in walkthrough phase
self.shuffle_called = False
def create_controller(self):
"""Create the ai2thor controller."""
assert ("width" in self._controller_kwargs) == (
"height" in self._controller_kwargs
), "Either controller_kwargs must contain either both of width/height or neither."
self._controller_kwargs["width"] = self._controller_kwargs.get("width", 224)
self._controller_kwargs["height"] = self._controller_kwargs.get("height", 224)
controller = ai2thor.controller.Controller(
**{
"scene": "FloorPlan17_physics",
"server_class": ai2thor.fifo_server.FifoServer,
# "server_class": ai2thor.wsgi_server.WsgiServer, # Possibly useful in debugging
**self._controller_kwargs,
},
)
return controller
@property
def held_object(self) -> Optional[Dict[str, Any]]:
"""Return the data corresponding to the object held by the agent (if
any)."""
with include_object_data(self.controller):
metadata = self.controller.last_event.metadata
if len(metadata["inventoryObjects"]) == 0:
return None
assert len(metadata["inventoryObjects"]) <= 1
held_obj_id = metadata["inventoryObjects"][0]["objectId"]
return next(o for o in metadata["objects"] if o["objectId"] == held_obj_id)
def get_agent_location(self) -> Dict[str, Union[float, int, bool]]:
"""Returns the agent's current location.
# Returns
A dictionary of the form
```python
{
"x": X_POSITION_IN_SPACE, # float
"y": Y_POSITION_IN_SPACE, # float
"z": Z_POSITION_IN_SPACE, # float
"rotation": AGENTS_ROTATION_ABOUT_THE_Y_AXIS_IN_DEGREES, # float or int
"horizon": AGENTS_CAMERA_ANGLE_IN_DEGREES, # float (0 degrees is horizontal)
"standing": WHETHER_OR_NOT_THE_AGENT_IS_STANDING, # boolean
}
```
"""
metadata = self.controller.last_event.metadata
return {
"x": metadata["agent"]["position"]["x"],
"y": metadata["agent"]["position"]["y"],
"z": metadata["agent"]["position"]["z"],
"rotation": metadata["agent"]["rotation"]["y"],
"horizon": metadata["agent"]["cameraHorizon"],
"standing": metadata.get("isStanding", metadata["agent"].get("isStanding")),
}
@property
def observation(self) -> Tuple[np.array, Optional[np.array]]:
"""Return the current (RGB, depth, Optional[instance masks]) frames.
# Returns
        A tuple containing:
        * an RGB frame of shape (height)x(width)x3 with integer entries in [0:255], and
        * a depth frame of shape (height)x(width) with unscaled entries representing the
meter distance from the agent to the pixel. This will be `None` if the controller_kwargs
passed to the initializer did not specify that depth images should be returned by AI2-THOR.
"""
rgb = self.last_event.frame
depth = (
self.last_event.depth_frame
if hasattr(self.last_event, "depth_frame")
else None
)
return rgb, depth
@lazy_property
def walkthrough_action_space(self) -> RearrangeActionSpace:
"""Return the RearrangeActionSpace for the walkthrough phase based on
the RearrangeMode."""
# Walkthrough actions
actions: Dict[Callable, Dict[str, BoundedFloat]] = {
self.move_ahead: {},
self.move_right: {},
self.move_left: {},
self.move_back: {},
self.rotate_right: {},
self.rotate_left: {},
self.stand: {},
self.crouch: {},
self.look_up: {},
self.look_down: {},
self.done: {},
}
return RearrangeActionSpace(actions)
@lazy_property
def unshuffle_action_space(self) -> RearrangeActionSpace:
"""Return the RearrangeActionSpace for the unshuffle phase based on the
RearrangeMode."""
actions = {**self.walkthrough_action_space.actions}
# additional shuffle allowed actions
actions.update(
{
self.open_object: {
"x": BoundedFloat(low=0, high=1),
"y": BoundedFloat(low=0, high=1),
"openness": BoundedFloat(low=0, high=1),
},
self.pickup_object: {
"x": BoundedFloat(low=0, high=1),
"y": BoundedFloat(low=0, high=1),
},
self.push_object: {
"x": BoundedFloat(low=0, high=1),
"y": BoundedFloat(low=0, high=1),
"rel_x_force": BoundedFloat(low=-0.5, high=0.5),
"rel_y_force": BoundedFloat(low=-0.5, high=0.5),
"rel_z_force": BoundedFloat(low=-0.5, high=0.5),
"force_magnitude": BoundedFloat(low=0, high=1),
},
self.move_held_object: {
"x_meters": BoundedFloat(low=-0.5, high=0.5),
"y_meters": BoundedFloat(low=-0.5, high=0.5),
"z_meters": BoundedFloat(low=-0.5, high=0.5),
},
self.rotate_held_object: {
"x": BoundedFloat(low=-0.5, high=0.5),
"y": BoundedFloat(low=-0.5, high=0.5),
"z": BoundedFloat(low=-0.5, high=0.5),
},
self.drop_held_object: {},
}
)
if self.mode == RearrangeMode.SNAP:
actions.update({self.drop_held_object_with_snap: {}})
return RearrangeActionSpace(actions)
@property
def action_space(self) -> RearrangeActionSpace:
"""Return the RearrangeActionSpace based on the RearrangeMode and
whether we are in the unshuffle phase."""
if self.shuffle_called:
return self.unshuffle_action_space
else:
return self.walkthrough_action_space
def open_object(self, x: float, y: float, openness: float) -> bool:
"""Open the object corresponding to x/y to openness.
The action will not be successful if the specified openness would
cause a collision or if the object at x/y is not openable.
# Parameters
x : (float, min=0.0, max=1.0) horizontal percentage from the last frame
that the target object is located.
        y : (float, min=0.0, max=1.0) vertical percentage from the last frame
            that the target object is located.
        openness : (float, min=0.0, max=1.0) the target openness of the object.
# Returns
`True` if the action was successful, otherwise `False`.
"""
# If an object is already open, THOR doesn't support changing
        # its openness without first closing it. So we simply try to first
# close the object before reopening it.
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.open_object,
thor_action="OpenObject",
error_message=(
"x/y/openness must be in [0:1] and we must be in the unshuffle phase."
),
x=x,
y=y,
openness=openness,
default_thor_kwargs=self.physics_step_kwargs,
)
def pickup_object(self, x: float, y: float) -> bool:
"""Pick up the object corresponding to x/y.
The action will not be successful if the object at x/y is not
pickupable.
# Parameters
x : (float, min=0.0, max=1.0) horizontal percentage from the last frame
that the target object is located.
y : (float, min=0.0, max=1.0) vertical percentage from the last frame
that the target object is located.
# Returns
`True` if the action was successful, otherwise `False`.
"""
if len(self.last_event.metadata["inventoryObjects"]) != 0:
return False
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.pickup_object,
thor_action="PickupObject",
error_message="x/y must be in [0:1] and we must be in the unshuffle phase.",
x=x,
y=y,
default_thor_kwargs=self.physics_step_kwargs,
)
def push_object(
self,
x: float,
y: float,
rel_x_force: float,
rel_y_force: float,
rel_z_force: float,
force_magnitude: float,
) -> bool:
"""Push an object along a surface.
The action will not be successful if the object at x/y is not moveable.
# Parameters
x : (float, min=0.0, max=1.0) horizontal percentage from the last frame
that the target object is located.
y : (float, min=0.0, max=1.0) vertical percentage from the last frame
that the target object is located.
rel_x_force : (float, min=-0.5, max=0.5) amount of relative force
applied along the x axis.
rel_y_force : (float, min=-0.5, max=0.5) amount of relative force
applied along the y axis.
rel_z_force : (float, min=-0.5, max=0.5) amount of relative force
applied along the z axis.
force_magnitude : (float, min=0, max=1) relative amount of force
applied during this push action. Within AI2-THOR, the force is
rescaled to be between 0 and 50 newtons, which is estimated to
sufficiently move all pickupable objects.
# Returns
`True` if the action was successful, otherwise `False`.
"""
def preprocess_kwargs(kwargs: Dict[str, Any]):
direction = {}
for k in ["x", "y", "z"]:
force_key = f"rel_{k}_force"
direction[k] = kwargs[force_key]
del kwargs[force_key]
kwargs["direction"] = direction
kwargs["force_magnitude"] = 50 * kwargs["force_magnitude"]
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.pickup_object,
thor_action="TouchThenApplyForce",
error_message="Error in call to pickup object."
" Must be in unshuffle phase (i.e., call shuffle()),"
" x,y,force_magnitude must be in [0:1],"
" and rel_(x/y/z)_force must be in [-0.5:0.5]",
default_thor_kwargs=dict(handDistance=1.5, **self.physics_step_kwargs),
preprocess_kwargs_inplace=preprocess_kwargs,
x=x,
y=y,
rel_x_force=rel_x_force,
rel_y_force=rel_y_force,
rel_z_force=rel_z_force,
moveMagnitude=force_magnitude,
)
def move_ahead(self) -> bool:
"""Move the agent ahead from its facing direction by 0.25 meters."""
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.move_ahead,
thor_action="MoveAhead",
default_thor_kwargs=self.physics_step_kwargs,
)
def move_back(self) -> bool:
"""Move the agent back from its facing direction by 0.25 meters."""
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.move_back,
thor_action="MoveBack",
default_thor_kwargs=self.physics_step_kwargs,
)
def move_right(self) -> bool:
"""Move the agent right from its facing direction by 0.25 meters."""
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.move_right,
thor_action="MoveRight",
default_thor_kwargs=self.physics_step_kwargs,
)
def move_left(self) -> bool:
"""Move the agent left from its facing direction by 0.25 meters."""
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.move_left,
thor_action="MoveLeft",
default_thor_kwargs=self.physics_step_kwargs,
)
def rotate_left(self) -> bool:
"""Rotate the agent left from its facing direction."""
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.rotate_left,
thor_action="RotateLeft",
default_thor_kwargs=self.physics_step_kwargs,
)
def rotate_right(self) -> bool:
"""Rotate the agent left from its facing direction."""
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.rotate_right,
thor_action="RotateRight",
default_thor_kwargs=self.physics_step_kwargs,
)
def stand(self) -> bool:
"""Stand the agent from the crouching position."""
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.stand,
thor_action="Stand",
default_thor_kwargs=self.physics_step_kwargs,
)
def crouch(self) -> bool:
"""Crouch the agent from the standing position."""
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.crouch,
thor_action="Crouch",
default_thor_kwargs=self.physics_step_kwargs,
)
def look_up(self) -> bool:
"""Turn the agent's head and camera up by 30 degrees."""
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.look_up,
thor_action="LookUp",
default_thor_kwargs=self.physics_step_kwargs,
)
def look_down(self) -> bool:
"""Turn the agent's head and camera down by 30 degrees."""
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.look_down,
thor_action="LookDown",
default_thor_kwargs=self.physics_step_kwargs,
)
def done(self) -> bool:
"""Agent's signal that it's completed its current rearrangement phase.
        Note that we do not automatically switch from the walkthrough
        phase to the unshuffling phase (or vice-versa); that is up to
        the user. This allows users to call .poses after the agent calls
        done, and have it correspond to the current episode.
"""
self._agent_signals_done = True
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.done,
thor_action="Done",
)
def move_held_object(
self, x_meters: float, y_meters: float, z_meters: float
) -> bool:
"""Move the object in the agent's hand by the specified amount.
The maximum magnitude that the object
can move in one time step is 0.5 meters. If the calculated magnitude is
        above 0.5, its magnitude will be clipped to 0.5.
The action is successful in the case that the agent is holding an
object and moving the object by the specified amount does not bump
into an object.
# Parameters
x_meters : (float, min=-0.5, max=0.5) movement meters along the x-axis.
y_meters : (float, min=-0.5, max=0.5) movement meters along the y-axis.
z_meters : (float, min=-0.5, max=0.5) movement meters along the z-axis.
# Exceptions
        Raised when called during the walkthrough phase. This method can only be
        called within the unshuffle phase. The unshuffle phase starts with
        controller.shuffle() and ends with controller.reset().
"""
mag = math.sqrt(x_meters ** 2 + y_meters ** 2 + z_meters ** 2)
        # clip the magnitude at MAX_HAND_METERS.
        if mag > MAX_HAND_METERS:
            scale = MAX_HAND_METERS / mag
x_meters *= scale
y_meters *= scale
z_meters *= scale
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.move_held_object,
thor_action="MoveHandDelta",
updated_kwarg_names={"x_meters": "x", "y_meters": "y", "z_meters": "z"},
x_meters=x_meters,
y_meters=y_meters,
z_meters=z_meters,
default_thor_kwargs=self.physics_step_kwargs,
)
def rotate_held_object(self, x: float, y: float, z: float) -> bool:
"""Rotate the object in the agent's hand by the specified degrees.
The rotation parameters are scaled linearly to put rotations
        between [-90:90] degrees. The action is only successful if the agent is holding an object.
# Parameters
x : (float, min=-0.5, max=0.5) rotation along the x-axis.
y : (float, min=-0.5, max=0.5) rotation along the y-axis.
z : (float, min=-0.5, max=0.5) rotation along the z-axis.
"""
def rescale_xyz(kwargs: Dict[str, Any]):
for k in ["x", "y", "z"]:
kwargs[k] = 180 * kwargs[k]
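        # Rough worked example: x=0.25 is rescaled by `rescale_xyz` above to a 45 degree
        # rotation about the x-axis (0.25 * 180), so inputs in [-0.5, 0.5] cover
        # rotations in [-90, 90] degrees.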
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.rotate_held_object,
thor_action="RotateHand",
preprocess_kwargs_inplace=rescale_xyz,
x=x,
y=y,
z=z,
default_thor_kwargs=self.physics_step_kwargs,
)
def drop_held_object(self) -> bool:
"""Drop the object in the agent's hand.
        The action is only successful if the agent is holding an object.
"""
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.drop_held_object,
thor_action="DropHandObject",
default_thor_kwargs={
"autoSimulation": False,
"randomMagnitude": 0.0,
**self.physics_step_kwargs,
},
)
def drop_held_object_with_snap(self) -> bool:
"""Drop the object in the agent's hand to the target position.
        An exception is raised if shuffle has not yet been called on the current
        episode or if the environment is not in RearrangeMode.SNAP mode.
For this action to work:
1. The agent must be within 1.5 meters from the goal object's
position, observed during the walkthrough phase.
        2. The agent must be looking in the direction of where the object was
           located in the walkthrough phase.
Otherwise, the object will be placed in a visible receptacle or
if this also fails, it will be simply dropped.
# Returns
`True` if the drop was successful, otherwise `False`.
"""
if not self.shuffle_called:
raise Exception("Must be in unshuffle stage.")
if not self.mode == RearrangeMode.SNAP:
raise Exception("Must be in RearrangeMode.SNAP mode.")
# round positions to 2 decimals
DEC = 2
with include_object_data(self.controller):
event = self.controller.last_event
held_obj = self.held_object
if held_obj is None:
return False
            # When dropping an object, make it breakable.
self.controller.step(
"MakeObjectBreakable", objectId=self.held_object["objectId"]
)
agent = event.metadata["agent"]
goal_pose = self.obj_name_to_walkthrough_start_pose[held_obj["name"]]
goal_pos = goal_pose["position"]
goal_rot = goal_pose["rotation"]
good_positions_to_drop_from = self._interactable_positions_cache.get(
scene_name=self.last_event.metadata["sceneName"],
obj={**held_obj, **{"position": goal_pos, "rotation": goal_rot},},
controller=self.controller,
force_cache_refresh=self.force_cache_reset, # Forcing cache resets when not training.
)
def position_to_tuple(position: Dict[str, float]):
return tuple(round(position[k], DEC) for k in ["x", "y", "z"])
agent_xyz = position_to_tuple(agent["position"])
agent_rot = (round(agent["rotation"]["y"] / 90) * 90) % 360
agent_standing = int(agent["isStanding"])
agent_horizon = round(agent["cameraHorizon"])
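            # The agent pose is discretized here (rotation snapped to the nearest
            # 90 degrees, position rounded to 2 decimals, horizon rounded) so it can
            # be compared against the cached interactable poses below.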
for valid_agent_pos in good_positions_to_drop_from:
# Checks if the agent is close enough to the target
# for the object to be snapped to the target location.
valid_xyz = position_to_tuple(valid_agent_pos)
valid_rot = (round(valid_agent_pos["rotation"] / 90) * 90) % 360
valid_standing = int(valid_agent_pos["standing"])
valid_horizon = round(valid_agent_pos["horizon"])
if (
valid_xyz == agent_xyz # Position
and valid_rot == agent_rot # Rotation
and valid_standing == agent_standing # Standing
and round(valid_horizon) == agent_horizon # Horizon
):
# Try a few locations near the target for robustness' sake
positions = [
{
"x": goal_pos["x"] + 0.001 * xoff,
"y": goal_pos["y"] + 0.001 * yoff,
"z": goal_pos["z"] + 0.001 * zoff,
}
for xoff in [0, -1, 1]
for zoff in [0, -1, 1]
for yoff in [0, 1, 2]
]
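                    # The offsets above give 27 candidate positions, each within
                    # roughly 1-2 millimeters of the goal position.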
self.controller.step(
action="TeleportObject",
objectId=held_obj["objectId"],
rotation=goal_rot,
positions=positions,
forceKinematic=True,
allowTeleportOutOfHand=True,
makeUnbreakable=True,
)
break
if self.held_object is None:
# If we aren't holding the object anymore, then let's check if it
# was placed into the right location.
if self.are_poses_equal(
goal_pose=get_pose_info(goal_pose),
cur_pose=next(
get_pose_info(o)
for o in self.last_event.metadata["objects"]
if o["name"] == goal_pose["name"]
),
treat_broken_as_unequal=True,
):
return True
else:
return False
# We couldn't teleport the object to the target location, let's try placing it
# in a visible receptacle.
possible_receptacles = [
o for o in event.metadata["objects"] if o["visible"] and o["receptacle"]
]
possible_receptacles = sorted(
possible_receptacles, key=lambda o: (o["distance"], o["objectId"])
)
for possible_receptacle in possible_receptacles:
self.controller.step(
action="PlaceHeldObject",
objectId=possible_receptacle["objectId"],
**self.physics_step_kwargs,
)
if self.controller.last_event.metadata["lastActionSuccess"]:
break
# We failed to place the object into a receptacle, let's just drop it.
if not self.controller.last_event.metadata["lastActionSuccess"]:
self.controller.step(
"DropHandObjectAhead",
forceAction=True,
autoSimulation=False,
randomMagnitude=0.0,
**{**self.physics_step_kwargs, "actionSimulationSeconds": 1.5},
)
return False
@property
def last_event(self) -> ai2thor.server.Event:
"""Return the AI2-THOR Event from the most recent controller action."""
return self.controller.last_event
@property
def scene(self) -> str:
"""Return the current AI2-THOR scene name."""
return self.controller.last_event.metadata["sceneName"].replace("_physics", "")
@staticmethod
def compare_poses(
goal_pose: Union[Dict[str, Any], Sequence[Dict[str, Any]]],
cur_pose: Union[Dict[str, Any], Sequence[Dict[str, Any]]],
) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
"""Compare two object poses and return where they differ.
The `goal_pose` must not have the object as broken.
# Parameters
goal_pose : The goal pose of the object.
cur_pose : The current pose of the object.
# Returns
        A dictionary with the following keys and values
* "broken" - `True` if the `cur_pose` is broken in which case all below values are `None`, otherwise `False`.
* "iou" - The IOU overlap between the two object poses (min==0, max==1) using their 3d bounding boxes. Computed
using an approximate sampling procedure. If the `position_dist` (see below) is <0.01 and the `rotation_dist`
is <10.0 then the IOU computation is short circuited and a value of 1 is returned.
* "openness_diff" - `None` if the object types are not openable. Otherwise this equals the absolute difference
between the `openness` values of the two poses.
* "position_dist" - The euclidean distance between the positions of the center points of the two poses.
* "rotation_dist" - The angle (in degrees) between the two poses. See the
`IThorEnvironment.angle_between_rotations` function for more information.
"""
if isinstance(goal_pose, Sequence):
assert isinstance(cur_pose, Sequence)
return [
RearrangeTHOREnvironment.compare_poses(goal_pose=gp, cur_pose=cp)
for gp, cp in zip(goal_pose, cur_pose)
]
assert goal_pose["type"] == cur_pose["type"]
assert not goal_pose["broken"]
if cur_pose["broken"]:
return {
"broken": True,
"iou": None,
"openness_diff": None,
"position_dist": None,
"rotation_dist": None,
}
if goal_pose["bounding_box"] is None and cur_pose["bounding_box"] is None:
iou = None
position_dist = None
rotation_dist = None
else:
position_dist = IThorEnvironment.position_dist(
goal_pose["position"], cur_pose["position"]
)
rotation_dist = IThorEnvironment.angle_between_rotations(
goal_pose["rotation"], cur_pose["rotation"]
)
if position_dist < 1e-2 and rotation_dist < 10.0:
iou = 1.0
else:
try:
iou = iou_box_3d(
goal_pose["bounding_box"], cur_pose["bounding_box"]
)
except Exception as _:
get_logger().warning(
"Could not compute IOU, will assume it was 0. Error during IOU computation:"
f"\n{traceback.format_exc()}"
)
iou = 0
if goal_pose["openness"] is None and cur_pose["openness"] is None:
openness_diff = None
else:
openness_diff = abs(goal_pose["openness"] - cur_pose["openness"])
return {
"broken": False,
"iou": iou,
"openness_diff": openness_diff,
"position_dist": position_dist,
"rotation_dist": rotation_dist,
}
@classmethod
def pose_difference_energy(
cls,
goal_pose: Union[Dict[str, Any], Sequence[Dict[str, Any]]],
cur_pose: Union[Dict[str, Any], Sequence[Dict[str, Any]]],
min_iou: float = 0.5,
open_tol: float = 0.2,
pos_barrier: float = 2.0,
) -> Union[float, np.ndarray]:
"""Computes the energy between two poses.
The energy (taking values in [0:1]) between two poses provides a soft and holistic measure of how
far apart two poses are. If the energy is near 1 then the two poses are very dissimilar, if the energy
        is near 0 then the two poses are nearly equal.
# Parameters
goal_pose : The goal pose of the object.
cur_pose : The current pose of the object.
min_iou : As the IOU between the two poses increases between [0:min_iou] the contribution to the energy
            corresponding solely to the IOU decreases from 0.5 to 0 in a linear fashion.
open_tol: If the object is openable, then if the absolute openness difference is less than `open_tol`
the energy is 0. Otherwise the pose energy is 1.
pos_barrier: If two poses are separated by a large distance, we would like to decrease the energy as
the two poses are brought closer together. The `pos_barrier` controls when this energy decrease begins,
namely at its default value of 2.0, the contribution of the distance to
the energy decreases linearly from 0.5 to 0 as the distance between the two poses decreases from
2 meters to 0 meters.
"""
if isinstance(goal_pose, Sequence):
assert isinstance(cur_pose, Sequence)
return np.array(
[
cls.pose_difference_energy(
goal_pose=p0,
cur_pose=p1,
min_iou=min_iou,
open_tol=open_tol,
pos_barrier=pos_barrier,
)
for p0, p1 in zip(goal_pose, cur_pose)
]
)
assert not goal_pose["broken"]
pose_diff = cls.compare_poses(goal_pose=goal_pose, cur_pose=cur_pose)
if pose_diff["broken"]:
return 1.0
if pose_diff["openness_diff"] is None or goal_pose["pickupable"]:
gbb = np.array(goal_pose["bounding_box"])
cbb = np.array(cur_pose["bounding_box"])
iou = pose_diff["iou"]
iou_energy = max(1 - iou / min_iou, 0)
if iou > 0:
position_dist_energy = 0.0
else:
min_pairwise_dist_between_corners = np.sqrt(
(
(
np.tile(gbb, (1, 8)).reshape(-1, 3)
- np.tile(cbb, (8, 1)).reshape(-1, 3)
)
** 2
).sum(1)
).min()
position_dist_energy = min(
min_pairwise_dist_between_corners / pos_barrier, 1.0
)
return 0.5 * iou_energy + 0.5 * position_dist_energy
else:
return 1.0 * (pose_diff["openness_diff"] > open_tol)
@classmethod
def are_poses_equal(
cls,
goal_pose: Union[Dict[str, Any], Sequence[Dict[str, Any]]],
cur_pose: Union[Dict[str, Any], Sequence[Dict[str, Any]]],
min_iou: float = 0.5,
open_tol: float = 0.2,
treat_broken_as_unequal: bool = False,
) -> Union[bool, np.ndarray]:
"""Determine if two object poses are equal (up to allowed error).
The `goal_pose` must not have the object as broken.
# Parameters
goal_pose : The goal pose of the object.
cur_pose : The current pose of the object.
min_iou : If the two objects are pickupable objects, they are considered equal if their IOU is `>=min_iou`.
open_tol: If the object is openable and not pickupable, then the poses are considered equal if the absolute
openness difference is less than `open_tol`.
treat_broken_as_unequal : If `False` an exception will be thrown if the `cur_pose` is broken. If `True`, then
if `cur_pose` is broken this function will always return `False`.
"""
if isinstance(goal_pose, Sequence):
assert isinstance(cur_pose, Sequence)
return np.array(
[
cls.are_poses_equal(
goal_pose=p0,
cur_pose=p1,
min_iou=min_iou,
open_tol=open_tol,
treat_broken_as_unequal=treat_broken_as_unequal,
)
for p0, p1 in zip(goal_pose, cur_pose)
]
)
assert not goal_pose["broken"]
if cur_pose["broken"]:
if treat_broken_as_unequal:
return False
else:
raise RuntimeError(
f"Cannot determine if poses of two objects are"
f" equal if one is broken object ({goal_pose} v.s. {cur_pose})."
)
pose_diff = cls.compare_poses(goal_pose=goal_pose, cur_pose=cur_pose)
return (pose_diff["iou"] is None or pose_diff["iou"] > min_iou) and (
pose_diff["openness_diff"] is None or pose_diff["openness_diff"] <= open_tol
)
@property
def all_rearranged_or_broken(self):
"""Return if every object is simultaneously broken or in its correct
pose.
The unshuffle agent can make no more progress on its task in the
        case that every object is either (1) in its correct
position or (2) broken so that it can never be placed in its
correct position. This function simply returns whether this is
the case.
"""
return all(
cp["broken"] or self.are_poses_equal(goal_pose=gp, cur_pose=cp)
for _, gp, cp in zip(*self.poses)
)
@property
def poses(
self,
) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]], List[Dict[str, Any]]]:
"""Return (unshuffle start, walkthrough start, current) pose for every
object in the scene.
Can only be called during the unshuffle phase.
# Returns
        A tuple containing three ordered lists of object poses `(unshuffle_start_poses, walkthrough_start_poses, current_poses)`
such that, for `0 <= i < len(current_poses)`,
* `unshuffle_start_poses[i]` - corresponds to the pose of the ith object at the start of the unshuffle phase.
* `walkthrough_start_poses[i]` - corresponds to the pose of the ith object at the start of the walkthrough phase.
* `current_poses[i]` - corresponds to the pose of the ith object in the current environment.
        During the unshuffle phase it is commonly useful to compare `current_poses[i]` against `walkthrough_start_poses[i]`
to get a sense of the agent's progress towards placing the objects into their correct locations.
"""
# Ensure we are in the unshuffle phase.
if not self.shuffle_called:
raise Exception("shuffle() must be called before accessing poses")
# Get current object information
with include_object_data(self.controller):
obj_name_to_current_obj = self._obj_list_to_obj_name_to_pose_dict(
self.controller.last_event.metadata["objects"]
)
ordered_obj_names = list(self.obj_name_to_walkthrough_start_pose.keys())
current_objs_list = []
for obj_name in ordered_obj_names:
if obj_name not in obj_name_to_current_obj:
                    # obj_name_to_current_obj can have more objects than goal objects
# (breaking objects can generate new ones)
# The other way (more goal poses than predicted objs) is a problem, we will
# assume that the disappeared objects are broken
if not self._have_warned_about_mismatch:
# Don't want to warn many many times during single episode
self._have_warned_about_mismatch = True
usos = set(self.obj_name_to_unshuffle_start_pose.keys())
wsos = set(self.obj_name_to_walkthrough_start_pose.keys())
cos = set(obj_name_to_current_obj.keys())
get_logger().warning(
f"Mismatch between walkthrough start, unshuffle start, and current pose objects."
f"\nscene = {self.scene}, index {self.current_task_spec.metrics.get('index')}"
f"\nusos-wsos, wsos-usos = {usos - wsos}, {wsos - usos}"
f"\ncos-usos, usos-cos = {cos - usos}, {usos - cos}"
f"\ncos-wsos, wsos-cos = {cos - wsos}, {wsos - cos}"
)
obj_name_to_current_obj[obj_name] = {
**self.obj_name_to_walkthrough_start_pose[obj_name],
"isBroken": True,
"broken": True,
"position": None,
"rotation": None,
"openness": None,
}
current_objs_list.append(obj_name_to_current_obj[obj_name])
# We build a cache of object poses corresponding to the start of the walkthrough/unshuffle phases
# as these remain the same until the `reset` function is called.
if self._sorted_and_extracted_walkthrough_start_poses is None:
broken_obj_names = [
obj_name
for obj_name in ordered_obj_names
if self.obj_name_to_walkthrough_start_pose[obj_name]["isBroken"]
]
if len(broken_obj_names) != 0:
if not self.current_task_spec.runtime_sample:
# Don't worry about reporting broken objects when using
# a "runtime_sample" task spec as these types of things are
# more common.
get_logger().warning(
f"BROKEN GOAL OBJECTS!"
f"\nIn scene {self.scene}"
f"\ntask spec {self.current_task_spec}"
f"\nbroken objects {broken_obj_names}"
)
# If we find a broken goal object, we will simply pretend as though it was not
# broken. This means the agent can never succeed in unshuffling, this means it is
# possible that even a perfect agent will not succeed for some tasks.
for broken_obj_name in broken_obj_names:
self.obj_name_to_walkthrough_start_pose[broken_obj_name][
"isBroken"
] = False
self.obj_name_to_unshuffle_start_pose[broken_obj_name][
"isBroken"
] = False
ordered_obj_names = list(self.obj_name_to_walkthrough_start_pose.keys())
walkthrough_start_poses = tuple(
self.obj_name_to_walkthrough_start_pose[k] for k in ordered_obj_names
)
unshuffle_start_poses = tuple(
self.obj_name_to_unshuffle_start_pose[k] for k in ordered_obj_names
)
self._sorted_and_extracted_unshuffle_start_poses = get_pose_info(
unshuffle_start_poses
)
self._sorted_and_extracted_walkthrough_start_poses = get_pose_info(
walkthrough_start_poses
)
return (
self._sorted_and_extracted_unshuffle_start_poses,
self._sorted_and_extracted_walkthrough_start_poses,
get_pose_info(current_objs_list),
)
def _runtime_reset(
self, task_spec: RearrangeTaskSpec, force_axis_aligned_start: bool
):
"""Randomly initialize a scene at runtime.
Rather than using a predefined collection of object states,
randomly generate these positions at runtime. This may be useful for obtaining more
diverse training examples.
# Parameters
task_spec : The RearrangeTaskSpec for this runtime sample. `task_spec.runtime_sample` should be `True`.
force_axis_aligned_start : If `True`, this will force the agent's start rotation to be 'axis aligned', i.e.
to equal to 0, 90, 180, or 270 degrees.
"""
assert (
task_spec.runtime_sample
), "Attempted to use a runtime reset with a task spec which has a `False` `runtime_sample` property."
# For efficiency reasons, we do not completely reset the ai2thor scene (which
# will reset all object states to a default configuration and restore broken
# objects to their unbroken state) on every call to `_runtime_reset` if the scene name hasn't changed. Instead
# we reset the ai2thor scene only every 25 calls.
if (
task_spec.scene != self.scene
or self.current_task_spec.runtime_data["count"] >= 25
):
count = 1
self.controller.reset(task_spec.scene)
if self._enhanced_physics_determinism:
self.controller.step("PausePhysicsAutoSim")
# self.controller.step("MakeAllObjectsStationary")
remove_objects_until_all_have_identical_meshes(self.controller)
self.controller.step(
"InitialRandomSpawn", forceVisible=True, placeStationary=True,
)
md = self.controller.step("GetReachablePositions").metadata
assert md["lastActionSuccess"]
reachable_positions = md["actionReturn"]
else:
count = 1 + self.current_task_spec.runtime_data["count"]
reachable_positions = self.current_task_spec.runtime_data[
"reachable_positions"
]
self.current_task_spec = task_spec
self.current_task_spec.stage = "Unknown"
self.current_task_spec.runtime_data = {
"count": count,
"reachable_positions": reachable_positions,
}
with include_object_data(self.controller):
random.shuffle(reachable_positions)
# set agent position
max_teleports = min(10, len(reachable_positions))
for teleport_count, pos in enumerate(reachable_positions):
rot = 30 * random.randint(0, 11)
if force_axis_aligned_start:
rot = round_to_factor(30 * random.randint(0, 11), 90)
md = self.controller.step(
"TeleportFull",
**pos,
rotation={"x": 0, "y": rot, "z": 0},
horizon=0.0,
standing=True,
forceAction=teleport_count == max_teleports - 1,
).metadata
if md["lastActionSuccess"]:
break
else:
raise RuntimeError("No reachable positions?")
assert md["lastActionSuccess"]
self.current_task_spec.agent_position = pos
self.current_task_spec.agent_rotation = rot
self.current_task_spec.runtime_data["starting_objects"] = md["objects"]
def _task_spec_reset(
self, task_spec: RearrangeTaskSpec, force_axis_aligned_start: bool
):
"""Initialize a ai2thor environment from a (non-runtime sample) task
specification (i.e. an exhaustive collection of object poses for the
walkthrough and unshuffle phase).
After this call, the environment will be ready for use in the walkthrough phase.
# Parameters
task_spec : The RearrangeTaskSpec for this task. `task_spec.runtime_sample` should be `False`.
force_axis_aligned_start : If `True`, this will force the agent's start rotation to be 'axis aligned', i.e.
to equal to 0, 90, 180, or 270 degrees.
"""
assert (
not task_spec.runtime_sample
), "`_task_spec_reset` requires that `task_spec.runtime_sample` is `False`."
self.current_task_spec = task_spec
self.controller.reset(self.current_task_spec.scene)
if self._enhanced_physics_determinism:
self.controller.step("PausePhysicsAutoSim")
if force_axis_aligned_start:
self.current_task_spec.agent_rotation = round_to_factor(
self.current_task_spec.agent_rotation, 90
)
# set agent position
pos = self.current_task_spec.agent_position
rot = {"x": 0, "y": self.current_task_spec.agent_rotation, "z": 0}
self.controller.step(
"TeleportFull",
**pos,
rotation=rot,
horizon=0.0,
standing=True,
forceAction=True,
)
# show object metadata
with include_object_data(self.controller):
# open objects
for obj in self.current_task_spec.openable_data:
# id is re-found due to possible floating point errors
current_obj_info = next(
l_obj
for l_obj in self.last_event.metadata["objects"]
if l_obj["name"] == obj["name"]
)
self.controller.step(
action="OpenObject",
objectId=current_obj_info["objectId"],
openness=obj["target_openness"],
forceAction=True,
**self.physics_step_kwargs,
)
# arrange walkthrough poses for pickupable objects
self.controller.step(
"SetObjectPoses",
objectPoses=self.current_task_spec.target_poses,
forceKinematic=False,
enablePhysicsJitter=True,
forceRigidbodySleep=True,
)
assert self.controller.last_event.metadata["lastActionSuccess"]
def reset(
self, task_spec: RearrangeTaskSpec, force_axis_aligned_start: bool = False,
) -> None:
"""Reset the environment with respect to the new task specification.
The environment will start in the walkthrough phase.
# Parameters
task_spec : The `RearrangeTaskSpec` defining environment state.
force_axis_aligned_start : If `True`, this will force the agent's start rotation to be 'axis aligned', i.e.
to equal to 0, 90, 180, or 270 degrees.
"""
if task_spec.runtime_sample:
self._runtime_reset(
task_spec=task_spec, force_axis_aligned_start=force_axis_aligned_start
)
else:
self._task_spec_reset(
task_spec=task_spec, force_axis_aligned_start=force_axis_aligned_start,
)
self.shuffle_called = False
self.obj_name_to_walkthrough_start_pose = self._obj_list_to_obj_name_to_pose_dict(
self.last_event.metadata["objects"]
)
self._have_warned_about_mismatch = False
self._sorted_and_extracted_walkthrough_start_poses = None
self._sorted_and_extracted_unshuffle_start_poses = None
self._agent_signals_done = False
def _runtime_shuffle(self):
"""Randomly shuffle objects in the environment to start the unshuffle
phase.
Also resets the agent's position to its start position.
"""
assert (not self.shuffle_called) and self.current_task_spec.runtime_sample
task_spec = self.current_task_spec
# set agent position
pos = task_spec.agent_position
rot = {"x": 0, "y": task_spec.agent_rotation, "z": 0}
self.controller.step(
"TeleportFull",
**pos,
rotation=rot,
horizon=0.0,
standing=True,
forceAction=True,
)
# Randomly shuffle a subset of objects.
nobjects_to_move = random.randint(1, 5)
pickupable = [
o for o in task_spec.runtime_data["starting_objects"] if o["pickupable"]
]
random.shuffle(pickupable)
pickupable.sort(
key=lambda x: 1 * (x["objectType"] in OBJECT_TYPES_TO_NOT_MOVE),
reverse=True,
)
objects_to_not_move = pickupable[:-nobjects_to_move]
object_ids_not_to_move = [o["objectId"] for o in objects_to_not_move]
object_ids_not_to_move.extend(
get_object_ids_to_not_move_from_object_types(
controller=self.controller, object_types=OBJECT_TYPES_TO_NOT_MOVE,
)
)
self.controller.step(
"InitialRandomSpawn",
excludedObjectIds=object_ids_not_to_move,
forceVisible=True,
placeStationary=True,
)
# Randomly open some subset of objects.
num_objects_to_open = random.randint(0, 1)
openable_objects = [
o
for o in self.last_event.metadata["objects"]
if o["openable"] and not o["pickupable"]
]
random.shuffle(openable_objects)
open_objs(
objects_to_open=openable_objects[:num_objects_to_open],
controller=self.controller,
)
self.current_task_spec.runtime_data[
"target_objects"
] = self.last_event.metadata["objects"]
def _task_spec_shuffle(self, reset: bool = False):
"""Shuffle objects in the environment to start the unshuffle phase
using the current task specification.
Also resets the agent's position to its start position.
"""
assert not (self.current_task_spec.runtime_sample or self.shuffle_called)
task_spec = self.current_task_spec
if reset:
self.controller.reset(self.scene)
if self._enhanced_physics_determinism:
self.controller.step("PausePhysicsAutoSim")
# set agent position
pos = task_spec.agent_position
rot = {"x": 0, "y": task_spec.agent_rotation, "z": 0}
self.controller.step(
"TeleportFull",
**pos,
rotation=rot,
horizon=0.0,
standing=True,
forceAction=True,
)
# open objects
with include_object_data(self.controller):
for obj in task_spec.openable_data:
# id is re-found due to possible floating point errors
current_obj_info = next(
l_obj
for l_obj in self.last_event.metadata["objects"]
if l_obj["name"] == obj["name"]
)
self.controller.step(
action="OpenObject",
objectId=current_obj_info["objectId"],
openness=obj["start_openness"],
forceAction=True,
**(
self.physics_step_kwargs
if obj is task_spec.openable_data[-1]
else {}
),
)
# arrange unshuffle start poses for pickupable objects
self.controller.step(
"SetObjectPoses",
objectPoses=task_spec.starting_poses,
forceKinematic=False,
enablePhysicsJitter=True,
forceRigidbodySleep=True,
)
assert self.controller.last_event.metadata["lastActionSuccess"]
def shuffle(self, require_reset: bool = False):
"""Shuffle objects in the environment to start the unshuffle phase."""
assert not self.shuffle_called
runtime_sample = self.current_task_spec.runtime_sample
if runtime_sample:
self._runtime_shuffle()
else:
self._task_spec_shuffle(reset=require_reset)
# Save object metadata
with include_object_data(self.controller):
self.obj_name_to_unshuffle_start_pose = self._obj_list_to_obj_name_to_pose_dict(
self.last_event.metadata["objects"]
)
if len(self.obj_name_to_unshuffle_start_pose) != len(
self.obj_name_to_walkthrough_start_pose
):
if runtime_sample or require_reset:
walkthrough_start_obj_names = set(
self.obj_name_to_walkthrough_start_pose.keys()
)
unshuffle_start_obj_names = set(
self.obj_name_to_unshuffle_start_pose.keys()
)
raise PoseMismatchError(
"Irrecoverable difference between walkthrough and unshuffle phase objects."
f"\ng-i, i-g = {walkthrough_start_obj_names - unshuffle_start_obj_names},"
f" {unshuffle_start_obj_names - walkthrough_start_obj_names}"
)
else:
self.shuffle(require_reset=True)
self.shuffle_called = True
self._agent_signals_done = False
_, gps, cps = self.poses
self.start_energies = self.pose_difference_energy(gps, cps)
@staticmethod
def _obj_list_to_obj_name_to_pose_dict(
objects: List[Dict[str, Any]]
) -> OrderedDict:
"""Helper function to transform a list of object data dicts into a
dictionary."""
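        # Only objects that are openable or that expose an objectOrientedBoundingBox
        # (roughly, pickupable/moveable objects) are kept when building the pose
        # dictionary; everything else is ignored for pose tracking.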
objects = [
o
for o in objects
if o["openable"] or o.get("objectOrientedBoundingBox") is not None
]
d = OrderedDict(
(o["name"], o) for o in sorted(objects, key=lambda x: x["name"])
)
assert len(d) == len(objects)
return d
def stop(self):
"""Terminate the current AI2-THOR session."""
try:
self.controller.stop()
except Exception as _:
pass
def __del__(self):
self.stop()
| CSR-main | src/simulation/environment.py |
import itertools
import json
import time
import numpy as np
import src.dataloaders.augmentations as A
import torch
import torch.nn.functional as F
from PIL import Image
from scipy.optimize import linear_sum_assignment
from src.lightning.modules import moco2_module_old
from src.lightning.modules import moco2_module
from src.shared.constants import IMAGE_SIZE
from src.shared.utils import (check_none_or_empty, get_device,
load_lightning_inference, render_adj_matrix)
from src.simulation.module_box import GtBoxModule
from src.simulation.state import State
from src.simulation.utils import get_openable_objects, get_pickupable_objects
from torchvision.transforms.transforms import ToTensor
class RelationTrackingModule(object):
def __init__(
self,
relation_tracking_model_path,
object_tracking_model_path,
averaging_strategy,
device_relation_tracking,
use_gt_matches,
use_gt_are_close,
cos_sim_match_threshold,
room_id,
instance_id,
dump_dir,
use_roi_feature_within_traj,
use_roi_feature_between_traj,
debug) -> None:
super().__init__()
self.relation_tracking_model = None
self.object_tracking_model = None
self.device = get_device(device_relation_tracking)
self.debug = debug
self.room_id = room_id
self.instance_id = instance_id
self.dump_dir = dump_dir
self.use_roi_feature_within_traj = use_roi_feature_within_traj
self.use_roi_feature_between_traj = use_roi_feature_between_traj
if not check_none_or_empty(relation_tracking_model_path):
self.relation_tracking_model = load_lightning_inference(
relation_tracking_model_path, moco2_module_old.MocoV2).encoder_q.to(self.device)
else:
raise ValueError(
'relation_tracking_model_path should never be None or empty')
if not check_none_or_empty(object_tracking_model_path):
self.object_tracking_model = load_lightning_inference(
object_tracking_model_path, moco2_module.MocoV2).encoder_q.to(self.device)
else:
raise ValueError(
'object_tracking_model_path should never be None or empty')
self.averaging_strategy = averaging_strategy
self.use_gt_matches = use_gt_matches
self.use_gt_are_close = use_gt_are_close
if self.use_gt_matches:
self.cos_sim_match_threshold = None
else:
self.cos_sim_match_threshold = cos_sim_match_threshold
self.reset()
def reset(self):
self.update_count = 0
self.instance_map = {}
self.gt_adjacency_matrix = np.zeros((0, 0))
self.gt_assignments = []
self.object_bank = None
self.object_match_counts = None
self.feature_bank = None
self.feature_match_counts = None
self.relationship_bank = None
self.relationship_match_counts = {}
self.assignments = []
self.cluster_meta = {}
self.state_graph = None
self.correct_assignments = 0
self.total_assignments = 0
self.box_timer = []
self.csr_timer = []
self.obj_timer = []
self.matching_timer = []
def update_scene_representation(
self,
event,
box_module,
):
im = Image.fromarray(event.frame)
tic = time.perf_counter()
step_instances, boxes, interaction_points, areas, roi_features = box_module.get_boxes(event)
toc = time.perf_counter()
self.box_timer.append(toc-tic)
# cluster book keeping
new_count = 0
for name in step_instances:
if name not in self.instance_map:
self.instance_map[name] = len(self.instance_map)
self.gt_assignments.append([])
new_count += 1
self.gt_assignments[self.instance_map[name]].append(name)
pickupable_objects = set(get_pickupable_objects(event))
openable_objects = set(get_openable_objects(event))
agent_position = event.metadata['agent']['position']
agent_rotation = event.metadata['agent']['rotation']
agent_horizon = event.metadata['agent']['cameraHorizon']
if new_count > 0:
# update gt adjacency matrix
dim_old = self.gt_adjacency_matrix.shape[0]
dim_new = dim_old + new_count
new_gt_adjacency_matrx = np.zeros((dim_new, dim_new))
new_gt_adjacency_matrx[:dim_old,
:dim_old] = self.gt_adjacency_matrix
self.gt_adjacency_matrix = new_gt_adjacency_matrx
# fill in the gt adjacency matrix
step_pairs = list(itertools.product(step_instances, repeat=2))
for p in step_pairs:
i = self.instance_map[p[0]]
j = self.instance_map[p[1]]
self.gt_adjacency_matrix[i, j] = 1
self.gt_adjacency_matrix[j, i] = 1
if len(step_instances) == 0:
# case where there are no detections, just want to return
# Have to save in the state graph
return State([], {}, {}, [], [], [], {}, im, agent_position, agent_rotation, agent_horizon)
# run inference on the self-features
query_features = []
step_instace_to_index = {}
for step_index, step_instance in enumerate(step_instances):
step_instace_to_index[step_instance] = step_index
edge_features = {}
edge_pairings = list(itertools.permutations(boxes.keys(), 2))
num_self = len(step_instances)
self_pairings = [(i, i) for i in boxes]
keys = self_pairings + edge_pairings
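        # The batch below is ordered so that the first `num_self` rows are (box, box)
        # self-pairings (used for per-object features) and the remaining rows are the
        # ordered (box_i, box_j) edge pairings (used for relationship features).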
x = self.create_batch(keys, boxes, im)
A.TestTransform(x)
x_instance = torch.cat((x['image'], x['mask_1'], x['mask_2']),
1).to(self.device)
x_object = torch.cat((x['image'][:num_self], x['mask_1'][:num_self], x['mask_2'][:num_self]),
1).to(self.device)
if self.use_roi_feature_within_traj:
query_features = roi_features.cpu()
else:
feat_instance = None
i = 0
tic = time.perf_counter()
while i < x_instance.shape[0]:
if feat_instance is None:
feat_instance = self.relation_tracking_model(x_instance[i:i+100])
else:
feat_instance = torch.cat((feat_instance, self.relation_tracking_model(x_instance[i:i+100])), 0)
i += 100
toc = time.perf_counter()
self.csr_timer.append(toc-tic)
feat_instance = F.normalize(feat_instance, dim=1).cpu()
query_features = feat_instance[:num_self]
for i, pairing in enumerate(edge_pairings):
edge_features[pairing] = feat_instance[i + num_self]
object_features = None
if self.use_roi_feature_between_traj:
object_features = roi_features.cpu()
else:
tic = time.perf_counter()
feat_object = self.object_tracking_model(x_object)
toc = time.perf_counter()
self.obj_timer.append(toc-tic)
object_features = F.normalize(feat_object, dim=1).cpu()
assert object_features.shape[0] == query_features.shape[0]
state = None
tic = time.perf_counter()
if self.feature_bank is None:
state = self.initialize_scene_representation(
query_features,
edge_features,
object_features,
step_instances,
im,
agent_position,
agent_rotation,
agent_horizon,
boxes,
pickupable_objects,
openable_objects,
interaction_points,
areas)
else:
if self.use_gt_matches:
state = self.match_scene_representation_gt(
query_features,
edge_features,
object_features,
step_instances,
im,
agent_position,
agent_rotation,
agent_horizon,
boxes,
pickupable_objects,
openable_objects,
interaction_points,
areas)
else:
state = self.match_scene_representation_pred(
query_features,
edge_features,
object_features,
step_instances,
im,
agent_position,
agent_rotation,
agent_horizon,
boxes,
pickupable_objects,
openable_objects,
interaction_points,
areas)
toc = time.perf_counter()
self.matching_timer.append(toc-tic)
assert self.relationship_bank.shape[0] == self.relationship_bank.shape[1]
assert self.relationship_bank.shape[2] == self.feature_bank.shape[0]
assert self.relationship_bank.shape[0] == self.feature_bank.shape[1]
# update the relationship with the main diagonal self features
for i in range(self.feature_bank.shape[1]):
self.relationship_bank[i, i] = self.feature_bank[:, i]
return state
def create_batch(self, keys, boxes, im):
mask_1 = torch.zeros((len(keys), 1, IMAGE_SIZE, IMAGE_SIZE))
mask_2 = torch.zeros((len(keys), 1, IMAGE_SIZE, IMAGE_SIZE))
image = torch.zeros((len(keys), 3, IMAGE_SIZE, IMAGE_SIZE))
t = ToTensor()
tensor_image = t(im)
for i, k in enumerate(keys):
mask_1[i] = boxes[k[0]]
mask_2[i] = boxes[k[1]]
image[i] = torch.clone(tensor_image)
return {'mask_1': mask_1, 'mask_2': mask_2, 'image': image}
def initialize_scene_representation(
self,
query_features,
edge_features,
object_features,
step_instances,
im,
agent_position,
agent_rotation,
agent_horizon,
boxes,
pickupable_objects,
openable_objects,
interaction_points,
areas):
if self.debug:
self.dump_features_and_labels(query_features, edge_features, step_instances)
self.update_count += 1
        # if there is no feature bank, then we create the bank from the current features
self.feature_bank = torch.transpose(query_features, 0, 1)
self.object_bank = torch.transpose(object_features, 0, 1)
# also initialize a separate data structure for the edges
self.relationship_bank = torch.zeros(
query_features.shape[0], query_features.shape[0], query_features.shape[1])
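        # relationship_bank[i, j] holds the edge embedding for the ordered cluster
        # pair (i, j); the main diagonal is overwritten with each cluster's own
        # feature after every update (see update_scene_representation).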
for pair in edge_features:
self.relationship_bank[pair[0], pair[1]] = edge_features[pair]
# keep track of the number of matches per feature in the bank for weighted averages
self.feature_match_counts = torch.ones(
self.feature_bank.shape[1])
self.object_match_counts = torch.ones(
self.object_bank.shape[1])
# initialize the pred assignments
self.assignments = [[s] for s in step_instances]
# create data structure to keep track of cluster to instance name matching (for metrics)
self.cluster_meta = {i: {s: 1, 'representative': s}
for i, s in enumerate(step_instances)}
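        # cluster_meta[i] counts how often each ground-truth instance name has been
        # assigned to cluster i and tracks the current majority label under
        # 'representative'; it is used for assignment metrics (and, in the
        # ground-truth matching mode, to look up clusters by instance name).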
cluster_idx_to_name = {i: s for i, s in enumerate(step_instances)}
# for first step all assignments are correct assignments (assuming GT boxes)
self.total_assignments += len(step_instances)
self.correct_assignments += len(step_instances)
cluster_idxs = [i for i in self.cluster_meta]
pickupable_bools = []
openable_bools = []
pickupable_points = {}
openable_points = {}
for i in self.cluster_meta:
if cluster_idx_to_name[i] in pickupable_objects:
pickupable_bools.append(i)
pickupable_points[i] = interaction_points[i]
if cluster_idx_to_name[i] in openable_objects:
openable_bools.append(i)
openable_points[i] = interaction_points[i]
# add state to graph
state = State(cluster_idxs, pickupable_points, openable_points, pickupable_bools,
openable_bools, boxes, areas, im, agent_position, agent_rotation, agent_horizon)
return state
def match_scene_representation_gt(
self,
query_features,
edge_features,
object_features,
step_instances,
im,
agent_position,
agent_rotation,
agent_horizon,
boxes,
pickupable_objects,
openable_objects,
interaction_points,
areas) -> State:
if self.debug:
self.dump_features_and_labels(query_features, edge_features, step_instances)
self.update_count += 1
# add the number of queries (denom for a metric)
self.total_assignments += query_features.shape[0]
name_to_cluster_idx = {
self.cluster_meta[i]['representative']: i for i in self.cluster_meta}
det_idx_to_cluster_idx = {
i: None for i in range(query_features.shape[0])}
num_new_clusters = 0
for det_idx, name in enumerate(step_instances):
if name in name_to_cluster_idx:
cluster_idx = name_to_cluster_idx[name]
det_idx_to_cluster_idx[det_idx] = cluster_idx
self.cluster_meta[cluster_idx][name] += 1
if self.averaging_strategy == 'weighted':
# weighted average to integrate the query feature into the history
self.weighted_average_self_feature(
cluster_idx, query_features, object_features, det_idx)
else:
                    # unweighted average, which has the effect of weighting newer observations more
self.unweighted_average_self_feature(
cluster_idx, query_features, object_features, det_idx)
# renormalize
self.feature_bank[:, cluster_idx] = F.normalize(
self.feature_bank[:, cluster_idx], dim=0)
self.object_bank[:, cluster_idx] = F.normalize(
self.object_bank[:, cluster_idx], dim=0)
# add the gt label of the assignment to this cluster for metrics
self.assignments[cluster_idx].append(name)
else:
# add a cluster for each new unmatched query
num_new_clusters += 1
self.assignments.append([name])
det_idx_to_cluster_idx[det_idx] = len(
self.cluster_meta)
self.cluster_meta[len(self.cluster_meta)] = {
name: 1, 'representative': name}
# append features to the feature bank
new_features = query_features[det_idx].unsqueeze(-1)
self.feature_bank = torch.cat(
(self.feature_bank, new_features), 1)
self.feature_match_counts = torch.cat(
(self.feature_match_counts, torch.ones(1)), 0)
new_features = object_features[det_idx].unsqueeze(-1)
self.object_bank = torch.cat(
(self.object_bank, new_features), 1)
self.object_match_counts = torch.cat(
(self.object_match_counts, torch.ones(1)), 0)
cluster_idx_to_name = {
i: self.cluster_meta[i]['representative'] for i in self.cluster_meta}
# expand the relationship bank as necessary
if num_new_clusters != 0:
n_old = self.relationship_bank.shape[0]
n_new = n_old + num_new_clusters
tmp = torch.zeros(n_new, n_new, query_features.shape[1])
tmp[:n_old, :n_old, :] = self.relationship_bank
self.relationship_bank = tmp
# create the state representation with references to the scene object representation
cluster_idxs = list(det_idx_to_cluster_idx.values())
pickupable_bools = []
openable_bools = []
pickupable_points = {}
openable_points = {}
for det_idx in det_idx_to_cluster_idx:
cluster_idx = det_idx_to_cluster_idx[det_idx]
if cluster_idx_to_name[cluster_idx] in pickupable_objects:
pickupable_bools.append(cluster_idx)
pickupable_points[cluster_idx] = interaction_points[det_idx]
if cluster_idx_to_name[cluster_idx] in openable_objects:
openable_bools.append(cluster_idx)
openable_points[cluster_idx] = interaction_points[det_idx]
state = State(cluster_idxs, pickupable_points, openable_points, pickupable_bools,
openable_bools, boxes, areas, im, agent_position, agent_rotation, agent_horizon)
# print(f'pickupable: {pickupable_bools}')
# # print(f'openable: {openable_bools}')
# print(cluster_idx_to_name)
# print('-' * 30)
for pair in edge_features:
# fill in the edge feature representations
ith, jth = det_idx_to_cluster_idx[pair[0]
], det_idx_to_cluster_idx[pair[1]]
            # NOTE: could be the case that two detections correspond
            # to the same cluster if predicted boxes are being used
if ith == jth:
continue
if (ith, jth) not in self.relationship_match_counts:
# norm should be 1, so if this is the case we have a new relation and need to just fill it with the edge feature
self.relationship_bank[ith, jth] = edge_features[pair]
self.relationship_match_counts[(ith, jth)] = 1
elif self.averaging_strategy == 'weighted':
raise NotImplementedError('gotta write this still')
else:
self.relationship_match_counts[(ith, jth)] += 1
self.relationship_bank[ith, jth] = (
self.relationship_bank[ith, jth] + edge_features[pair]) / 2
self.relationship_bank[ith, jth] = F.normalize(
self.relationship_bank[ith, jth], dim=0)
self.correct_assignments += len(step_instances)
return state
def match_scene_representation_pred(
self,
query_features,
edge_features,
object_features,
step_instances,
im,
agent_position,
agent_rotation,
agent_horizon,
boxes,
pickupable_objects,
openable_objects,
interaction_points,
areas):
if self.debug:
self.dump_features_and_labels(query_features, edge_features, step_instances)
self.update_count += 1
# start with all features being unmatched with the history
unmatched_queries = set([i for i in range(query_features.shape[0])])
# create a reward matrix between current observation and the feature bank
sim = torch.matmul(query_features, self.feature_bank)
# hungarian matching to get the maximal assignment
query_idx, history_idx = linear_sum_assignment(
sim.numpy(), maximize=True)
assert len(query_idx) == len(history_idx)
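        # Informal example: with two queries and sim = [[0.9, 0.2], [0.1, 0.3]],
        # the assignment pairs query 0 with bank column 0 and query 1 with bank
        # column 1; only pairs whose similarity exceeds cos_sim_match_threshold are
        # accepted as matches below, the rest become new clusters.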
# add the number of queries (denom for a metric)
self.total_assignments += query_features.shape[0]
# get the identities of the clusters before updating (for metrics)
prev_representatives = set(
[self.cluster_meta[i]['representative'] for i in self.cluster_meta])
det_idx_to_cluster_idx = {
i: None for i in range(query_features.shape[0])}
for i in range(len(query_idx)):
cluster_number = history_idx[i]
if sim[query_idx[i], history_idx[i]] > self.cos_sim_match_threshold:
# considered a match if the sim is greater than the threshold
det_idx_to_cluster_idx[query_idx[i]] = cluster_number
# remove from the unmatched queries set
unmatched_queries.remove(query_idx[i])
if self.averaging_strategy == 'weighted':
# weighted average to integrate the query feature into the history
self.weighted_average_self_feature(
cluster_number, query_features, object_features, query_idx[i])
else:
                    # unweighted average, which has the effect of weighting newer observations more
self.unweighted_average_self_feature(
cluster_number, query_features, object_features, query_idx[i])
# re-normalize
self.feature_bank[:, cluster_number] = F.normalize(
self.feature_bank[:, cluster_number], dim=0)
self.object_bank[:, cluster_number] = F.normalize(
self.object_bank[:, cluster_number], dim=0)
# add the gt label of the assignment to this cluster for metrics
assigned_label = step_instances[query_idx[i]]
self.assignments[cluster_number].append(assigned_label)
# find the current representative of the cluster
representative_label = self.cluster_meta[cluster_number]['representative']
if assigned_label in self.cluster_meta[cluster_number]:
self.cluster_meta[cluster_number][assigned_label] += 1
if assigned_label == representative_label:
                        # we are assigning the feature to a cluster with the same gt label, this is good
self.correct_assignments += 1
else:
# here we are adding to a cluster that has never before seen this instance, not good
self.cluster_meta[cluster_number][assigned_label] = 1
if self.cluster_meta[cluster_number][representative_label] <= self.cluster_meta[cluster_number][assigned_label]:
# update the gt label identity of the cluster for purposes of metrics
# NOTE: this is fine to do in the loop as the linear assignment ensures each cluster_number is unique for the update
self.cluster_meta[cluster_number]['representative'] = assigned_label
# get the queries that have not matched
unmatched_queries = list(unmatched_queries)
for u in unmatched_queries:
if step_instances[u] not in prev_representatives:
# case where we correctly assign a new cluster for this instance
self.correct_assignments += 1
for u in unmatched_queries:
# add a cluster for each new unmatched query
self.assignments.append([step_instances[u]])
det_idx_to_cluster_idx[u] = len(self.cluster_meta)
self.cluster_meta[len(self.cluster_meta)] = {
step_instances[u]: 1, 'representative': step_instances[u]}
# expand the relationship bank as necessary
num_new_clusters = len(unmatched_queries)
if num_new_clusters != 0:
n_old = self.relationship_bank.shape[0]
n_new = n_old + num_new_clusters
tmp = torch.zeros(n_new, n_new, query_features.shape[1])
tmp[:n_old, :n_old, :] = self.relationship_bank
self.relationship_bank = tmp
# append features to the feature bank
new_features = torch.transpose(
query_features[unmatched_queries], 0, 1)
self.feature_bank = torch.cat(
(self.feature_bank, new_features), 1)
self.feature_match_counts = torch.cat((self.feature_match_counts, torch.ones(
len(unmatched_queries))), 0)
new_features = torch.transpose(
object_features[unmatched_queries], 0, 1)
self.object_bank = torch.cat(
(self.object_bank, new_features), 1)
self.object_match_counts = torch.cat((self.object_match_counts, torch.ones(
len(unmatched_queries))), 0)
for pair in edge_features:
# fill in the edge feature representations
            ith, jth = det_idx_to_cluster_idx[pair[0]], det_idx_to_cluster_idx[pair[1]]
assert ith != jth
if (ith, jth) not in self.relationship_match_counts:
# norm should be 1, so if this is the case we have a new relation and need to just fill it with the edge feature
self.relationship_bank[ith, jth] = edge_features[pair]
self.relationship_match_counts[(ith, jth)] = 1
elif self.averaging_strategy == 'weighted':
                raise NotImplementedError('weighted averaging of relationship features is not implemented yet')
else:
self.relationship_match_counts[(ith, jth)] += 1
self.relationship_bank[ith, jth] = (
self.relationship_bank[ith, jth] + edge_features[pair]) / 2
self.relationship_bank[ith, jth] = F.normalize(
self.relationship_bank[ith, jth], dim=0)
cluster_idxs = list(det_idx_to_cluster_idx.values())
pickupable_bools = []
openable_bools = []
pickupable_points = {}
openable_points = {}
cluster_idx_to_name = {
i: self.cluster_meta[i]['representative'] for i in self.cluster_meta}
for det_idx in det_idx_to_cluster_idx:
cluster_idx = det_idx_to_cluster_idx[det_idx]
if cluster_idx_to_name[cluster_idx] in pickupable_objects and det_idx in interaction_points:
pickupable_bools.append(cluster_idx)
pickupable_points[cluster_idx] = interaction_points[det_idx]
if cluster_idx_to_name[cluster_idx] in openable_objects and det_idx in interaction_points:
openable_bools.append(cluster_idx)
openable_points[cluster_idx] = interaction_points[det_idx]
# print(f'pickupable: {pickupable_bools}')
# # print(f'openable: {openable_bools}')
# print(cluster_idx_to_name)
# print('-' * 30)
state = State(cluster_idxs, pickupable_points, openable_points, pickupable_bools,
openable_bools, boxes, areas, im, agent_position, agent_rotation, agent_horizon)
return state
    def weighted_average_self_feature(self, cluster_number, query_features, object_features, instance_number):
        # weighted (running) average to integrate the query feature into the history
        self.feature_bank[:, cluster_number] = self.feature_bank[:, cluster_number] * \
            self.feature_match_counts[cluster_number] + \
            query_features[instance_number]
        self.feature_match_counts[cluster_number] += 1
        self.feature_bank[:, cluster_number] /= self.feature_match_counts[cluster_number]
        self.object_bank[:, cluster_number] = self.object_bank[:, cluster_number] * \
            self.object_match_counts[cluster_number] + \
            object_features[instance_number]
        self.object_match_counts[cluster_number] += 1
        self.object_bank[:, cluster_number] /= self.object_match_counts[cluster_number]
    def unweighted_average_self_feature(self, cluster_number, query_features, object_features, instance_number):
        # unweighted average: mix the stored feature and the new observation 50/50, which
        # weights newer observations more heavily than a true running mean
        self.feature_bank[:, cluster_number] = self.feature_bank[:, cluster_number] + \
            query_features[instance_number]
        self.feature_match_counts[cluster_number] += 1
        self.feature_bank[:, cluster_number] /= 2
        self.object_bank[:, cluster_number] = self.object_bank[:, cluster_number] + \
            object_features[instance_number]
        self.object_match_counts[cluster_number] += 1
        self.object_bank[:, cluster_number] /= 2
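    # Informal illustration of the two update rules above (comments only, nothing executed):
    # starting from a stored unit-norm feature f after k matches, the weighted rule keeps a
    # true running mean (k * f + q) / (k + 1), while the unweighted rule keeps (f + q) / 2,
    # so with the unweighted rule an observation from k steps ago only contributes roughly
    # 1 / 2**k of the mix before re-normalization.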
def dump_gt_adjacency_matrix(self):
row_labels = [k for k, _ in sorted(
self.instance_map.items(), key=lambda item: item[1])]
mat = render_adj_matrix(self.gt_adjacency_matrix, row_labels)
sim_mat = Image.fromarray(mat, 'RGB')
sim_mat.save(
f'{self.dump_dir}/{self.room_id}_1_{self.steps-1}_adj.png')
sim_mat.close()
def dump_features_and_labels(self, query_features, edge_features, labels):
torch.save(query_features, f'{self.dump_dir}/{self.room_id}_{self.instance_id}_{self.update_count}.pt')
with open(f'{self.dump_dir}/{self.room_id}_{self.instance_id}_{self.update_count}_label.json', 'w') as f:
json.dump(labels, f) | CSR-main | src/simulation/module_relation_tracking.py |
import json
import os
from copy import deepcopy
from itertools import permutations
import torch
from networkx.algorithms.shortest_paths.weighted import dijkstra_path
from PIL import Image
from scipy.optimize import linear_sum_assignment
from src.shared.utils import render_adj_diff_matrix, render_sim_matrix
from src.simulation.environment import RearrangeTHOREnvironment
from src.simulation.module_box import GtBoxModule
from src.simulation.module_relation_tracking import RelationTrackingModule
from src.simulation.module_state_graph import StateGraphModule
from src.simulation.shortest_path_navigator import ShortestPathNavigatorTHOR
from src.simulation.utils import get_agent_map_data
class PlannerModule(object):
def __init__(self,
env: RearrangeTHOREnvironment,
room_id: int,
instance_id: int,
use_gt_object_matching: bool,
dump_dir: str) -> None:
super().__init__()
#
self.use_gt_object_matching = use_gt_object_matching
#
self.scene_module_walkthrough = None
self.state_module_walkthrough = None
self.scene_module_unshuffle = None
self.state_module_unshuffle = None
self.box_stats_walkthrough = None
self.box_stats_unshuffle = None
#
self.env = env
self.room_id = room_id
self.instance_id = instance_id
self.dump_dir = dump_dir
self.steps = 0
#
self.fused_state_module = None
self.walkthrough_to_fused_map = {}
self.legs = None
self.pickup_ids = None
def store_representations(self, rtm: RelationTrackingModule, sgm: StateGraphModule, bm: GtBoxModule, from_walkthrough: bool):
if from_walkthrough:
self.scene_module_walkthrough = deepcopy(rtm)
self.state_module_walkthrough = deepcopy(sgm)
self.box_stats_walkthrough = deepcopy(bm.moved_detection_counts)
else:
self.scene_module_unshuffle = deepcopy(rtm)
self.state_module_unshuffle = deepcopy(sgm)
self.box_stats_unshuffle = deepcopy(bm.moved_detection_counts)
def generate_plan(self, cos_sim_moved_threshold, cos_sim_object_threshold, debug):
shared_cluster_id_walkthrough, shared_cluster_id_unshuffle = None, None
names = None
if self.use_gt_object_matching:
shared_cluster_id_walkthrough, shared_cluster_id_unshuffle, names = self._object_match_gt()
else:
shared_cluster_id_walkthrough, shared_cluster_id_unshuffle = self._object_match_pred(
cos_sim_object_threshold)
shared_cluster_id_walkthrough = torch.tensor(
shared_cluster_id_walkthrough)
shared_cluster_id_unshuffle = torch.tensor(shared_cluster_id_unshuffle)
features_matched_walkthrough = self.scene_module_walkthrough.relationship_bank[
shared_cluster_id_walkthrough][:, shared_cluster_id_walkthrough, :]
features_matched_unshuffle = self.scene_module_unshuffle.relationship_bank[
shared_cluster_id_unshuffle][:, shared_cluster_id_unshuffle, :]
dotted = torch.einsum(
"hwc,hwc->hw", features_matched_walkthrough, features_matched_unshuffle)
# if debug:
# adj_walkthrough = torch.norm(features_matched_walkthrough, dim=2)
# adj_unshuffle = torch.norm(features_matched_unshuffle, dim=2)
# mat = render_sim_matrix(dotted.numpy(), names, names)
# img = Image.fromarray(mat, 'RGB')
# img.save(f'sim.png')
# mat = render_adj_diff_matrix(
# adj_walkthrough.numpy(), adj_unshuffle.numpy(), names, names)
# img = Image.fromarray(mat, 'RGB')
# img.save(f'adj_diff.png')
# find what moved
candidate_moved_dotted_id = self._infer_moved(
dotted, cos_sim_moved_threshold)
# get the index for the walkthrough state
_, target_nodes_walkthrough = self._find_cluster_ids_nodes(
shared_cluster_id_walkthrough, candidate_moved_dotted_id, self.state_module_walkthrough)
cluster_ids_unshuffle, source_nodes_unshuffle = self._find_cluster_ids_nodes(
shared_cluster_id_unshuffle, candidate_moved_dotted_id, self.state_module_unshuffle)
assert len(target_nodes_walkthrough) == len(source_nodes_unshuffle)
        # NOTE: different from before as we now take the unshuffle state as the source (1st), not the target (2nd)
finals_src_target = []
finals_cluster_id_unshuffle = []
for i in range(len(target_nodes_walkthrough)):
if target_nodes_walkthrough[i] is None and source_nodes_unshuffle[i] is None:
# good case where something is moveable but we do not detect it has moved
pass
elif target_nodes_walkthrough[i] is None or source_nodes_unshuffle[i] is None:
                # bad case where something is movable but we do not have a pickupable node for it in both trajectories
pass
elif target_nodes_walkthrough[i] is not None and source_nodes_unshuffle[i] is not None:
# good case where we think something moved and have location for it before and after
finals_src_target.append(
(source_nodes_unshuffle[i], target_nodes_walkthrough[i]))
finals_cluster_id_unshuffle.append(cluster_ids_unshuffle[i])
self._fuse_graphs()
self.legs, self.pickup_ids = self._get_plan(
finals_src_target, finals_cluster_id_unshuffle)
def _object_match_gt(self):
name_to_cluster_id_walkthrough = self._name_to_cluster_id(
self.scene_module_walkthrough.cluster_meta)
name_to_cluster_id_unshuffle = self._name_to_cluster_id(
self.scene_module_unshuffle.cluster_meta)
shared_objects = name_to_cluster_id_walkthrough.keys(
) & name_to_cluster_id_unshuffle.keys()
shared_cluster_id_walkthrough = []
shared_cluster_id_unshuffle = []
names = []
for k in shared_objects:
names.append(k)
shared_cluster_id_walkthrough.append(
name_to_cluster_id_walkthrough[k])
shared_cluster_id_unshuffle.append(name_to_cluster_id_unshuffle[k])
return shared_cluster_id_walkthrough, shared_cluster_id_unshuffle, names
def _object_match_pred(self, cos_sim_object_threshold):
sim = torch.transpose(self.scene_module_walkthrough.object_bank, 0, 1)
sim = torch.matmul(sim, self.scene_module_unshuffle.object_bank)
# hungarian matching to get the maximal assignment
w_idx, un_idx = linear_sum_assignment(
sim.numpy(), maximize=True)
assert len(w_idx) == len(un_idx)
w_idx_final = []
un_idx_final = []
for i in range(len(w_idx)):
if (sim[w_idx[i], un_idx[i]] > cos_sim_object_threshold).item():
w_idx_final.append(w_idx[i])
un_idx_final.append(un_idx[i])
return w_idx_final, un_idx_final
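    # Toy illustration of the matching above (comments only): for a 2x3 cosine-similarity
    # matrix [[0.9, 0.1, 0.2], [0.2, 0.8, 0.1]] between walkthrough and unshuffle object
    # features, linear_sum_assignment(..., maximize=True) pairs walkthrough 0 with unshuffle 0
    # and walkthrough 1 with unshuffle 1; any pair whose similarity is at or below
    # cos_sim_object_threshold is then discarded.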
def execute_plan(self, debug):
assert self.legs is not None
leg_num = 0
while len(self.legs) != 0:
leg = self.legs.pop(0)
event = self.env.controller.step(
action='Done', **self.env.physics_step_kwargs)
if debug:
self.dump_observation(event)
if leg_num % 2 == 1:
pid = self.pickup_ids.pop(0)
                if pid in self.fused_state_module.graph.nodes[leg[0]]['attr']['state'].pickupable_points:
                    point = self.fused_state_module.graph.nodes[leg[0]]['attr']['state'].pickupable_points[pid]
                    self.env.pickup_object(point['x'], point['y'])
else:
pass
self.steps += 1
if debug:
self.dump_observation(self.env.controller.last_event)
while len(leg) > 1:
curr_node = leg.pop(0)
next_node = leg[0]
if (curr_node, next_node) not in self.fused_state_module.graph.edges:
print('replanning downstream')
leg = dijkstra_path(
self.fused_state_module.graph, curr_node, leg[-1])
tmp = leg.pop(0)
assert tmp == curr_node
next_node = leg[0]
# make sure we are not drifting for some reason
curr_node_key = {
'x': self.fused_state_module.graph.nodes[curr_node]['attr']['state'].agent_position['x'],
'z': self.fused_state_module.graph.nodes[curr_node]['attr']['state'].agent_position['z'],
'rotation': self.fused_state_module.graph.nodes[curr_node]['attr']['state'].agent_rotation["y"],
'horizon': self.fused_state_module.graph.nodes[curr_node]['attr']['state'].agent_horizon
}
curr_node_key = ShortestPathNavigatorTHOR.get_key(
curr_node_key)
event_key = ShortestPathNavigatorTHOR.get_key(
self.env.controller.last_event.metadata['agent'])
assert curr_node_key == event_key
action = self.fused_state_module.graph.edges[(
curr_node, next_node)]['attr']['action']
event = None
if 'Rotate' in action:
event = self.env.controller.step(
action=action, degrees=90, **self.env.physics_step_kwargs)
elif 'Look' in action:
event = self.env.controller.step(
action=action, degrees=30, **self.env.physics_step_kwargs)
else:
event = self.env.controller.step(
action=action, **self.env.physics_step_kwargs)
if not self.env.controller.last_event.metadata["lastActionSuccess"]:
# delete edge between two nodes as not traversable
# ShortestPathNavigatorTHOR.get_key
self.fused_state_module.graph.remove_edge(
curr_node, next_node)
self.fused_state_module.graph.remove_edge(
next_node, curr_node)
print(
self.env.controller.last_event.metadata["errorMessage"])
print('replanning')
# NOTE: replan
leg = dijkstra_path(
self.fused_state_module.graph, curr_node, leg[-1])
continue
self.steps += 1
if debug:
self.dump_observation(event)
if leg_num % 2 == 1:
# assert self.env.drop_held_object_with_snap()
self.env.drop_held_object_with_snap()
self.steps += 1
if debug:
self.dump_observation(self.env.controller.last_event)
leg_num += 1
def dump_observation(self, event):
im = Image.fromarray(event.frame)
im.save(
f'{self.dump_dir}/rearrange_{self.room_id}_{self.instance_id}_{self.steps}.png', 'PNG')
with open(os.path.join(self.dump_dir, f'rearrange_{self.room_id}_{self.instance_id}_{self.steps}.json'), 'w') as f:
json.dump(event.metadata, f, indent=4)
top_down_data = get_agent_map_data(self.env.controller)
top_img = Image.fromarray(top_down_data['frame'])
top_img.save(os.path.join(
self.dump_dir, f'rearrange_{self.room_id}_{self.instance_id}_{self.steps}_top.png'), 'PNG')
return
def _infer_moved(self, dotted, cos_sim_moved_threshold):
candidate_moved_dotted_id = []
for i in range(dotted.shape[0]):
if dotted[i, i] < cos_sim_moved_threshold:
candidate_moved_dotted_id.append(i)
return candidate_moved_dotted_id
def _name_to_cluster_id(self, meta):
name_to_cluster_id = {}
for entry in meta:
name_to_cluster_id[meta[entry]['representative']] = int(
entry)
return name_to_cluster_id
def _find_cluster_ids_nodes(self, shared_cluster_id, candidate_moved_dotted_id, state_module):
possible_move = shared_cluster_id[candidate_moved_dotted_id].tolist()
target_nodes = []
for cid in possible_move:
if cid in state_module.pickupable_cluster_to_node:
target_nodes.append(
state_module.pickupable_cluster_to_node[cid][0])
else:
target_nodes.append(None)
return possible_move, target_nodes
def _fuse_graphs(self):
self.fused_state_module = deepcopy(self.state_module_unshuffle)
# add nodes from walkthrough and keep mapping from old to new node ids
for walkthrough_node_id in self.state_module_walkthrough.graph.nodes:
walkthrough_state = self.state_module_walkthrough.graph.nodes[
walkthrough_node_id]['attr']['state']
existing_node_id = self.fused_state_module.state_to_node_id(
walkthrough_state)
if existing_node_id is None:
                # case where we need to add a new node to the graph
new_node_id = self.fused_state_module.add_adjoint_node(
walkthrough_state)
self.walkthrough_to_fused_map[walkthrough_node_id] = new_node_id
else:
# case where the position has been visited in the unshuffle stage already, keep the ref
self.walkthrough_to_fused_map[walkthrough_node_id] = existing_node_id
# add the edges from the walkthrough graph into the fused graph
for e in self.state_module_walkthrough.graph.edges:
attr = self.state_module_walkthrough.graph.edges[e]['attr']
self.fused_state_module.add_adjoint_edge(
self.walkthrough_to_fused_map[e[0]], self.walkthrough_to_fused_map[e[1]], attr)
def _get_plan(self, finals_src_target, finals_cluster_id):
assert len(finals_src_target) == len(finals_cluster_id)
# init nodes in a graph from unshuffle
if len(finals_src_target) == 0:
return [], []
# greedy plan from current position to src
best_legs = []
best_legs_order = None
best_cost = float('inf')
# perms = permutations(range(len(finals_src_target)))
# for p in perms:
legs = []
cost = 0
curr_node = self.fused_state_module.state_to_node_id(
self.fused_state_module.current_state)
for src, target in finals_src_target:
legs.append(dijkstra_path(
self.fused_state_module.graph, curr_node, src))
            target_adjoint = self.walkthrough_to_fused_map[target]
            legs.append(dijkstra_path(
                self.fused_state_module.graph, src, target_adjoint))
            curr_node = target_adjoint
# for leg in legs:
# cost += len(leg)
# if cost < best_cost:
# best_cost = cost
# best_legs = legs.copy()
# best_legs_order = list(p)
# best_cluster_id = [finals_cluster_id[i] for i in best_legs_order]
best_cluster_id = [finals_cluster_id[i]
for i in range(len(finals_cluster_id))]
# return best_legs, best_cluster_id
return legs, best_cluster_id
| CSR-main | src/simulation/module_planner.py |
from dataclasses import dataclass
from typing import Dict, List
from PIL import Image
@dataclass
class State:
instance_cluster_ids: List = None
# boxes: None
# instance_names: None
pickupable_points: Dict = None
openable_points: Dict = None
pickupable: List = None
openable: List = None
boxes: List = None
areas: Dict = None
    image: Image.Image = None
agent_position: Dict = None
agent_rotation: Dict = None
agent_horizon: float = None | CSR-main | src/simulation/state.py |
NORMALIZE_RGB_MEAN = (0.485, 0.456, 0.406)
NORMALIZE_RGB_STD = (0.229, 0.224, 0.225)
DEFAULT_NUM_WORKERS = 8
COLOR_JITTER_BRIGHTNESS = 0.4
COLOR_JITTER_CONTRAST = 0.4
COLOR_JITTER_SATURATION = 0.4
COLOR_JITTER_HUE = 0.2
GRAYSCALE_PROBABILITY = 0.2
ROTATIONS = (0., 90., 180., 270.)
# splits from ai2 rearrangement: https://github.com/allenai/ai2thor-rearrangement#-datasets
TRAIN_ROOM_IDS = tuple(list(range(1, 21)) + list(range(201, 221)) +
list(range(301, 321)) + list(range(401, 421)))
VAL_ROOM_IDS = tuple(list(range(21, 26)) + list(range(221, 226)) +
list(range(321, 326)) + list(range(421, 426)))
TEST_ROOM_IDS = tuple(list(range(26, 31)) + list(range(226, 231)) +
list(range(326, 331)) + list(range(426, 431)))
ALL_ROOM_IDS = tuple(list(TRAIN_ROOM_IDS) + list(VAL_ROOM_IDS) + list(TEST_ROOM_IDS))
TRAIN_VAL_ROOM_IDS = tuple(list(TRAIN_ROOM_IDS) + list(VAL_ROOM_IDS))
KITCHEN_FLOOR_PLANS = {f"FloorPlan{i}" for i in range(1, 31)}
LIVING_ROOM_FLOOR_PLANS = {f"FloorPlan{200 + i}" for i in range(1, 31)}
BEDROOM_FLOOR_PLANS = {f"FloorPlan{300 + i}" for i in range(1, 31)}
BATHROOM_FLOOR_PLANS = {f"FloorPlan{400 + i}" for i in range(1, 31)}
CLASSES_TO_IGNORE = (
'Floor',
'StoveKnob',
'StoveBurner',
'Window',
'Apple',
'Bread',
'Cloth',
'HandTowel',
'KeyChain',
'Lettuce',
'Pillow',
'Potato',
'Tomato',
'Mirror')
DATALOADER_BOX_FRAC_THRESHOLD = 0.008
RENDERING_BOX_FRAC_THRESHOLD = 0.0016
IMAGE_SIZE = 224
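# Minimal sketch (assumption: a room id i corresponds to the AI2-THOR scene name f"FloorPlan{i}",
# matching the floor plan sets above): convert a split tuple into scene names for a data loader.
def _room_ids_to_floor_plans(room_ids):
    return {f"FloorPlan{i}" for i in room_ids}
# e.g. _room_ids_to_floor_plans(VAL_ROOM_IDS) contains "FloorPlan21" and "FloorPlan425"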
| CSR-main | src/shared/constants.py |
CSR-main | src/shared/__init__.py |
|
from enum import IntEnum
class DataSplit(IntEnum):
TRAIN = 1
VAL = 2
TEST = 3 | CSR-main | src/shared/data_split.py |
import io
import os
import random
from typing import List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
import torch
from PIL import Image
from pytorch_lightning.utilities.seed import seed_everything
from src.shared.constants import (IMAGE_SIZE, NORMALIZE_RGB_MEAN,
NORMALIZE_RGB_STD)
from torch import nn
from torch.nn import functional as F
from torchvision.utils import save_image
def check_none_or_empty(input):
return input is None or input == ''
def count_learnable_parameters(module):
return sum(p.numel() for p in module.parameters())
def next_power_eight(x):
# from: https://www.geeksforgeeks.org/round-to-next-greater-multiple-of-8/
return ((x + 7) & (-8))
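# e.g. next_power_eight(13) == 16 and next_power_eight(16) == 16 (rounds up to a multiple of 8)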
def render_confusion_matrix(conf_mat: np.ndarray, class_names: List[str]) -> np.ndarray:
# based on: https://stackoverflow.com/questions/65498782/how-to-dump-confusion-matrix-using-tensorboard-logger-in-pytorch-lightning
df_cm = pd.DataFrame(
conf_mat.astype(np.int64),
index=np.arange(conf_mat.shape[0]),
columns=class_names)
plt.figure()
sn.set(font_scale=1.2)
sn.heatmap(df_cm, annot=True, annot_kws={"size": 16}, fmt='d')
buf = io.BytesIO()
plt.savefig(buf, format='jpeg')
plt.close()
buf.seek(0)
im = Image.open(buf)
return np.asarray(im, dtype=np.uint8)
def render_sim_matrix(conf_mat: np.ndarray, rows: List[str], cols: List[str], vmin: int = -1, vmax: int = 1) -> np.ndarray:
# based on: https://stackoverflow.com/questions/65498782/how-to-dump-confusion-matrix-using-tensorboard-logger-in-pytorch-lightning
df_cm = pd.DataFrame(
conf_mat.astype(np.float32),
index=rows, # np.arange(conf_mat.shape[0]),
columns=cols)
plt.figure()
plt.subplots(figsize=(30, 30))
sn.set(font_scale=1.2)
sn.heatmap(df_cm, annot=True, annot_kws={
"size": 20}, fmt='.2f', vmin=vmin, vmax=vmax, cmap='jet')
buf = io.BytesIO()
plt.tight_layout()
plt.savefig(buf, format='jpeg')
plt.close()
buf.seek(0)
im = Image.open(buf)
return np.asarray(im, dtype=np.uint8)
def render_adj_diff_matrix(mat1: np.ndarray, mat2: np.ndarray, rows: List[str], cols: List[str]) -> np.ndarray:
# based on: https://stackoverflow.com/questions/65498782/how-to-dump-confusion-matrix-using-tensorboard-logger-in-pytorch-lightning
mat = np.zeros_like(mat1)
for i in range(mat1.shape[0]):
for j in range(mat2.shape[1]):
if mat1[i, j] < 0.5 and mat2[i, j] < 0.5:
mat[i, j] = 0.0
if mat1[i, j] < 0.5 and mat2[i, j] > 0.5:
mat[i, j] = 0.33
if mat1[i, j] > 0.5 and mat2[i, j] < 0.5:
mat[i, j] = 0.66
if mat1[i, j] > 0.5 and mat2[i, j] > 0.5:
mat[i, j] = 1.0
df_cm = pd.DataFrame(
mat.astype(np.float32),
index=rows, # np.arange(conf_mat.shape[0]),
columns=cols)
plt.figure()
plt.subplots(figsize=(30, 30))
sn.set(font_scale=1.2)
sn.heatmap(df_cm, annot=False, vmin=0, vmax=1, cmap='jet')
buf = io.BytesIO()
plt.tight_layout()
plt.savefig(buf, format='jpeg')
plt.close()
buf.seek(0)
im = Image.open(buf)
return np.asarray(im, dtype=np.uint8)
def render_adj_matrix(adj_mat: np.ndarray, rows: List[str]) -> np.ndarray:
# based on: https://stackoverflow.com/questions/65498782/how-to-dump-confusion-matrix-using-tensorboard-logger-in-pytorch-lightning
df_cm = pd.DataFrame(
adj_mat.astype(np.int8),
index=rows, # np.arange(conf_mat.shape[0]),
columns=rows)
plt.figure()
plt.subplots(figsize=(30, 30))
sn.set(font_scale=1.2)
sn.heatmap(df_cm, annot=True, annot_kws={
"size": 20}, vmin=0, vmax=1, cmap='jet')
buf = io.BytesIO()
plt.tight_layout()
plt.savefig(buf, format='jpeg')
plt.close()
buf.seek(0)
im = Image.open(buf)
return np.asarray(im, dtype=np.uint8)
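# Illustrative sketch (the labels below are invented, not from the repo): render a toy 3x3
# adjacency matrix with render_adj_matrix and save it with PIL.
def _example_render_adj_matrix(out_path='adj_example.png'):
    adj = np.array([[0, 1, 0],
                    [1, 0, 1],
                    [0, 1, 0]], dtype=np.int8)
    labels = ['Sofa', 'Table', 'Mug']
    Image.fromarray(render_adj_matrix(adj, labels), 'RGB').save(out_path)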
def render_receptacle_matrix(mat: np.ndarray, rows: List[str]) -> np.ndarray:
# based on: https://stackoverflow.com/questions/65498782/how-to-dump-confusion-matrix-using-tensorboard-logger-in-pytorch-lightning
df_cm = pd.DataFrame(
        mat.astype(np.int8),
index=rows, # np.arange(conf_mat.shape[0]),
columns=rows)
plt.figure()
plt.subplots(figsize=(30, 30))
sn.set(font_scale=1.2)
sn.heatmap(df_cm, annot=True, annot_kws={
"size": 20}, vmin=0, vmax=2, cmap='jet')
buf = io.BytesIO()
plt.tight_layout()
plt.savefig(buf, format='jpeg')
plt.close()
buf.seek(0)
im = Image.open(buf)
return np.asarray(im, dtype=np.uint8)
def reverse_dictonary(d):
d_inv = {}
    for k, v in d.items():
d_inv[v] = d_inv.get(v, [])
d_inv[v].append(k)
return d_inv
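# e.g. reverse_dictonary({'Mug': 2, 'Cup': 2, 'Sofa': 0}) == {2: ['Mug', 'Cup'], 0: ['Sofa']}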
def compute_3d_dist(p1, p2):
p1_np = np.array([p1['x'], p1['y'], p1['z']])
p2_np = np.array([p2['x'], p2['y'], p2['z']])
squared_dist = np.sum((p1_np-p2_np)**2, axis=0)
return np.sqrt(squared_dist)
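# e.g. compute_3d_dist({'x': 0, 'y': 0, 'z': 0}, {'x': 3, 'y': 4, 'z': 0}) == 5.0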
def get_device(device_number):
if device_number >= 0:# and torch.cuda.is_available():
device = torch.device("cuda:{0}".format(device_number))
else:
device = torch.device("cpu")
return device
def load_lightning_inference(checkpoint_path, module_class):
model = module_class.load_from_checkpoint(checkpoint_path)
model.eval()
model.freeze()
return model
def load_lightning_train(checkpoint_path, module_class):
model = module_class.load_from_checkpoint(checkpoint_path)
return model
def worker_init_fn(worker_id):
torch_seed = torch.initial_seed()
    if torch_seed + worker_id >= 2**30:  # keep torch_seed + worker_id well below 2**32 (numpy's seed limit)
torch_seed = torch_seed % 2**30
np.random.seed(torch_seed + worker_id)
random.seed(torch_seed + worker_id)
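# Usage sketch (the `dataset` name is a placeholder, not defined in this file): pass
# worker_init_fn to a DataLoader so each worker gets a distinct but reproducible seed.
# loader = torch.utils.data.DataLoader(dataset, batch_size=32, num_workers=8,
#                                      worker_init_fn=worker_init_fn)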
def get_box(corners, random_box=False):
    # corners are expected in [[x_min, y_min], [x_max, y_max]] pixel format
    if random_box and corners is None:
        t_min, t_max = random.randrange(IMAGE_SIZE), random.randrange(IMAGE_SIZE)
        x_min, x_max = min(t_min, t_max), max(t_min, t_max)
        t_min, t_max = random.randrange(IMAGE_SIZE), random.randrange(IMAGE_SIZE)
        y_min, y_max = min(t_min, t_max), max(t_min, t_max)
        corners = [[x_min, y_min], [x_max, y_max]]
    box = torch.zeros(IMAGE_SIZE, IMAGE_SIZE)
    box[corners[0][1]:corners[1][1], corners[0][0]:corners[1][0]] = 1.
    return box.unsqueeze(0)
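# e.g. get_box([[10, 20], [50, 60]]) is a (1, IMAGE_SIZE, IMAGE_SIZE) mask that is 1 for
# pixels with x in [10, 50) and y in [20, 60) and 0 elsewhere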
def dump_batch(relation_query, relation_key, dump_dir, batch_count):
b = relation_query['image'].shape[0]
h = relation_query['image'].shape[2]
w = relation_query['image'].shape[3]
std = torch.tensor(NORMALIZE_RGB_STD).unsqueeze(
-1).unsqueeze(-1).repeat(1, h, w)
    mean = torch.tensor(NORMALIZE_RGB_MEAN).unsqueeze(
        -1).unsqueeze(-1).repeat(1, h, w)
q_objs = (relation_query['image'].cpu() * std + mean) # * torch.clamp(
# relation_query['mask_1'].cpu() + relation_query['mask_2'].cpu(), 0, 1)
k_objs = (relation_key['image'].cpu() * std + mean) # * torch.clamp(
# relation_key['mask_1'].cpu() + relation_key['mask_2'].cpu(), 0, 1)
# * torch.clamp(
s_objs = (relation_key['shuffle_image'].cpu() * std + mean)
# relation_key['shuffle_mask_1'].cpu() + relation_key['shuffle_mask_2'].cpu(), 0, 1)
for i in range(b):
if relation_key['has_shuffle_negative'][i]:
save_image(q_objs[i], os.path.join(dump_dir, f'{batch_count}_{i}_query.png'))
save_image(k_objs[i], os.path.join(dump_dir, f'{batch_count}_{i}_key.png'))
save_image(s_objs[i], os.path.join(dump_dir, f'{batch_count}_{i}_shuffle.png'))
def my_shuffle_evaluate(encoder_q, relation1, relation2, device, dump_path, self_feature_only, relational_feature_only, batch_count):
if dump_path is not None and os.path.exists(dump_path):
dump_batch(relation1, relation2, dump_path, batch_count)
# exit(0)
query = torch.cat(
(relation1['image'], relation1['mask_1'], relation1['mask_2']), 1).to(device)
shuffle_negative = torch.cat(
(relation2['shuffle_image'], relation2['shuffle_mask_1'], relation2['shuffle_mask_2']), 1).to(device)
positive = torch.cat(
(relation2['image'], relation2['mask_1'], relation2['mask_2']), 1).to(device)
has_negatives = relation2['has_shuffle_negative'] > 0.5
other_mask = torch.ones_like(has_negatives).bool()
if self_feature_only:
other_mask = relation1['self'] > 0.5
elif relational_feature_only:
other_mask = relation1['self'] < 0.5
has_negatives = has_negatives & other_mask
e_q = encoder_q(query)
e_q = nn.functional.normalize(e_q, dim=1)
e_n = encoder_q(shuffle_negative)
e_n = nn.functional.normalize(e_n, dim=1)
e_k = encoder_q(positive)
e_k = nn.functional.normalize(e_k, dim=1)
l_pos = torch.einsum('nc,nc->n', [e_q, e_k]).unsqueeze(-1)
l_neg = torch.einsum('nc,nc->n', [e_q, e_n]).unsqueeze(-1)
if torch.any(has_negatives):
logits = torch.cat((l_pos, l_neg), 1)[has_negatives] / 0.07
decisions = torch.max(logits, dim=1)
misses = torch.sum(decisions.indices)
total = decisions.indices.shape[0]
loss_shuffle = F.cross_entropy(logits.float(), torch.zeros(
logits.shape[0]).long().to(device))
return misses, total, loss_shuffle, logits
return None, None, None, None
| CSR-main | src/shared/utils.py |
# from open lth
# https://github.com/facebookresearch/open_lth/blob/2ce732fe48abd5a80c10a153c45d397b048e980c/models/imagenet_resnet.py
# and torchvision
# https://pytorch.org/vision/0.8/_modules/torchvision/models/resnet.html
import torch
import torchvision
from torchvision.models.resnet import BasicBlock, Bottleneck
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
class ResNet(torchvision.models.ResNet):
def __init__(self, block, layers, num_classes=1000, width=64):
"""To make it possible to vary the width, we need to override the constructor of the torchvision resnet."""
# Skip the parent constructor. This replaces it.
torch.nn.Module.__init__(self)
self._norm_layer = torch.nn.BatchNorm2d
self.inplanes = width
self.dilation = 1
self.groups = 1
self.base_width = 64
# The initial convolutional layer.
self.conv1 = torch.nn.Conv2d(
3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = self._norm_layer(self.inplanes)
self.relu = torch.nn.ReLU(inplace=True)
self.maxpool = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
# The subsequent blocks.
self.layer1 = self._make_layer(block, width, layers[0])
self.layer2 = self._make_layer(
block, width*2, layers[1], stride=2, dilate=False)
self.layer3 = self._make_layer(
block, width*4, layers[2], stride=2, dilate=False)
self.layer4 = self._make_layer(
block, width*8, layers[3], stride=2, dilate=False)
# The last layers.
self.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))
self.fc = torch.nn.Linear(width*8*block.expansion, num_classes)
# Default init.
for m in self.modules():
if isinstance(m, torch.nn.Conv2d):
torch.nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, torch.nn.BatchNorm2d):
torch.nn.init.constant_(m.weight, 1)
torch.nn.init.constant_(m.bias, 0)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
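# Minimal sketch of the extra `width` kwarg these wrappers expose over torchvision: a
# half-width, randomly initialized ResNet-18 with a 10-way head (pretrained weights only
# load for the default width=64).
def _example_half_width_resnet18():
    model = resnet18(pretrained=False, width=32, num_classes=10)
    return model(torch.zeros(1, 3, 224, 224))  # logits with shape (1, 10)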
| CSR-main | src/models/imagenet_resnet.py |
import torch.nn as nn
from src.models.backbones import FeatureLearner
from src.models.layers import *
class ResUNet(nn.Module):
def __init__(self, in_channels=3, out_channels=1):
super().__init__()
self.feature_extractor = FeatureLearner(in_channels)
self.up_head = ResUpHead(out_channels)
def forward(self, x):
        # run the encoder for its side effect of caching intermediate_features
        self.feature_extractor(x)
enc1, enc2, enc3, enc4, b = self.feature_extractor.intermediate_features
return self.up_head(enc1, enc2, enc3, enc4, b) | CSR-main | src/models/unet.py |
CSR-main | src/models/__init__.py |
|
from collections import OrderedDict
import torch
import torch.nn as nn
from src.models.imagenet_resnet import resnet18, resnet34, resnet50
def get_torchvision_model_class(class_str: str):
if class_str == 'resnet18':
return resnet18
elif class_str == 'resnet34':
return resnet34
elif class_str == 'resnet50':
return resnet50
else:
raise NotImplementedError('Only supports resnet 18, 34, 50 for now.')
class FeatureLearner(nn.Module):
def __init__(
self,
in_channels=3,
channel_width=64,
pretrained=False,
num_classes=0,
backbone_str='resnet18'
):
super().__init__()
model_class = get_torchvision_model_class(backbone_str)
self.resnet = None
self.num_classes = num_classes
if num_classes == 0:
# case where we do not want the fc, only the resnet features
self.resnet = model_class(pretrained=pretrained, width=channel_width)
del self.resnet.fc
else:
# want the fc
self.resnet = model_class(pretrained=pretrained, width=channel_width)
# replace the fc if needed, do this in two steps as might want pretrained weights
if num_classes != 1000:
self.resnet.fc = nn.Sequential(OrderedDict([
('fc', nn.Linear(self.resnet.fc.in_features, num_classes))
]))
if in_channels != 3:
# case where we do not have 3 chan input so we might need to replicate channels
if pretrained:
# copy RGB feature channels in that order as necessary until we have reinitialized resnet.conv1
weight = self.resnet.conv1.weight.detach()
self.resnet.conv1 = nn.Conv2d(
in_channels, channel_width, kernel_size=7, stride=2, padding=3, bias=False)
self.resnet.conv1.weight.data[:, :3] = weight.clone()
for i in range(3, self.resnet.conv1.weight.data.shape[1]):
rhs_i = i % 3
self.resnet.conv1.weight.data[:,
i] = weight[:, rhs_i].clone()
else:
self.resnet.conv1 = nn.Conv2d(
in_channels, channel_width, kernel_size=7, stride=2, padding=3, bias=False)
# memory to save the resnet intermediate features
self.intermediate_features = None
def forward(self, x):
result = []
x = self.resnet.conv1(x)
x = self.resnet.bn1(x)
x = self.resnet.relu(x)
result.append(x)
x = self.resnet.maxpool(x)
# layer 1
x = self.resnet.layer1(x)
result.append(x)
# layer 2
x = self.resnet.layer2(x)
result.append(x)
# layer 3
x = self.resnet.layer3(x)
result.append(x)
# layer 4
x = self.resnet.layer4(x)
result.append(x)
self.intermediate_features = result
x = self.resnet.avgpool(x)
x = torch.flatten(x, 1)
if self.num_classes == 0:
# case where fc does not exist
return x
return self.resnet.fc(x)
class FeedForward(nn.Module):
def __init__(self, layer_sizes):
super(FeedForward, self).__init__()
layers = []
        for i in range(1, len(layer_sizes) - 1):
            layers.append(nn.Linear(layer_sizes[i - 1], layer_sizes[i]))
            layers.append(nn.BatchNorm1d(layer_sizes[i]))
            layers.append(nn.ReLU())
        layers.append(nn.Linear(layer_sizes[-2], layer_sizes[-1]))
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
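# Usage sketch: a 5-channel FeatureLearner (e.g. RGB plus two object masks, matching the
# 5-channel inputs built in my_shuffle_evaluate); with num_classes=0 the forward pass
# returns pooled 512-d ResNet-18 features.
def _example_feature_learner():
    encoder = FeatureLearner(in_channels=5, channel_width=64, pretrained=False,
                             num_classes=0, backbone_str='resnet18')
    return encoder(torch.zeros(2, 5, 224, 224)).shape  # torch.Size([2, 512])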
| CSR-main | src/models/backbones.py |
"""Baseline models for use in the object navigation task.
Object navigation is currently available as a Task in AI2-THOR and
Facebook's Habitat.
"""
import platform
from datetime import datetime
from typing import Optional, Tuple, cast
from allenact.algorithms.onpolicy_sync.storage import RolloutStorage
from allenact.base_abstractions.sensor import SensorSuite
from allenact_plugins.ithor_plugin.ithor_sensors import RGBSensorThor
import gym
import torch
from torch.distributions.categorical import Categorical
import torch.nn as nn
from allenact.algorithms.onpolicy_sync.policy import (ActorCriticModel,
DistributionType,
LinearCriticHead, Memory,
ObservationType)
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import ActorCriticOutput
from allenact.embodiedai.models.basic_models import RNNStateEncoder
from allenact.utils.model_utils import compute_cnn_output, make_cnn
from gym.spaces.dict import Dict as SpaceDict
import src.dataloaders.augmentations as A
from src.simulation.constants import ACTION_NEGATIONS, EXPLORATION_ACTION_ORDER
class LinearActorHeadNoCategory(nn.Module):
def __init__(self, num_inputs: int, num_outputs: int):
super().__init__()
self.linear = nn.Linear(num_inputs, num_outputs)
nn.init.orthogonal_(self.linear.weight, gain=0.01)
nn.init.constant_(self.linear.bias, 0)
def forward(self, x: torch.FloatTensor): # type: ignore
x = self.linear(x) # type:ignore
assert len(x.shape) == 3
return x
class ExplorationModel(ActorCriticModel[CategoricalDistr]):
"""Baseline recurrent actor critic model for preddistancenav task.
# Attributes
action_space : The space of actions available to the agent. Currently only discrete
actions are allowed (so this space will always be of type `gym.spaces.Discrete`).
observation_space : The observation space expected by the agent. This observation space
should include (optionally) 'rgb' images and 'depth' images.
hidden_size : The hidden size of the GRU RNN.
object_type_embedding_dim: The dimensionality of the embedding corresponding to the goal
object type.
"""
def __init__(
self,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
hidden_size=512,
obj_state_embedding_size=512,
trainable_masked_hidden_state: bool = False,
num_rnn_layers=1,
rnn_type="GRU",
teacher_forcing=1,
visualize=False,
):
"""Initializer.
See class documentation for parameter definitions.
"""
super().__init__(action_space=action_space, observation_space=observation_space)
self.visualize = visualize
self._hidden_size = hidden_size
self.object_type_embedding_size = obj_state_embedding_size
# sensor_names = self.observation_space.spaces.keys()
        network_args = {
            'input_channels': 3,
            'layer_channels': [32, 64, 32],
            'kernel_sizes': [(8, 8), (4, 4), (3, 3)],
            'strides': [(4, 4), (2, 2), (1, 1)],
            'paddings': [(0, 0), (0, 0), (0, 0)],
            'dilations': [(1, 1), (1, 1), (1, 1)],
            'output_height': 24,
            'output_width': 24,
            'output_channels': 512,
            'flatten': True,
            'output_relu': True,
        }
self.full_visual_encoder = make_cnn(**network_args)
# self.detection_model = ConditionalDetectionModel()
self.state_encoder = RNNStateEncoder(
512,
self._hidden_size,
trainable_masked_hidden_state=trainable_masked_hidden_state,
num_layers=num_rnn_layers,
rnn_type=rnn_type,
)
self.actor_pickup = LinearActorHeadNoCategory(
self._hidden_size, action_space.n)
self.critic_pickup = LinearCriticHead(self._hidden_size)
self.train()
# self.detection_model.eval()
self.starting_time = datetime.now().strftime(
"{}_%m_%d_%Y_%H_%M_%S_%f".format(self.__class__.__name__))
@property
def recurrent_hidden_state_size(self) -> int:
"""The recurrent hidden state size of the model."""
return self._hidden_size
@property
def num_recurrent_layers(self) -> int:
"""Number of recurrent hidden layers."""
return self.state_encoder.num_recurrent_layers
def _recurrent_memory_specification(self):
return dict(
rnn=(
(
("layer", self.num_recurrent_layers),
("sampler", None),
("hidden", self.recurrent_hidden_state_size),
),
torch.float32,
)
)
def forward( # type:ignore
self,
observations: ObservationType,
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
"""Processes input batched observations to produce new actor and critic
values. Processes input batched observations (along with prior hidden
states, previous actions, and masks denoting which recurrent hidden
states should be masked) and returns an `ActorCriticOutput` object
containing the model's policy (distribution over actions) and
evaluation of the current state (value).
# Parameters
observations : Batched input observations.
memory : `Memory` containing the hidden states from initial timepoints.
prev_actions : Tensor of previous actions taken.
masks : Masks applied to hidden states. See `RNNStateEncoder`.
# Returns
Tuple of the `ActorCriticOutput` and recurrent hidden state.
"""
visual_observation = observations['image'].float()
visual_observation_encoding = compute_cnn_output(
self.full_visual_encoder, visual_observation)
x_out, rnn_hidden_states = self.state_encoder(
visual_observation_encoding, memory.tensor("rnn"), masks
)
actor_out_pickup = self.actor_pickup(x_out)
critic_out_pickup = self.critic_pickup(x_out)
actor_out_final = actor_out_pickup
critic_out_final = critic_out_pickup
actor_out = CategoricalDistr(logits=actor_out_final)
actor_critic_output = ActorCriticOutput(
distributions=actor_out, values=critic_out_final, extras={}
)
memory = memory.set_tensor("rnn", rnn_hidden_states)
return (
actor_critic_output,
memory,
)
def init_exploration_model(exploration_model_path):
SENSORS = [
RGBSensorThor(
height=224,
width=224,
use_resnet_normalization=True,
uuid="rgb_lowres",
)
]
observation_space = SensorSuite(SENSORS).observation_spaces
model = ExplorationModel(
action_space=gym.spaces.Discrete(8),
observation_space=observation_space,
hidden_size=512,
visualize=False
)
model.load_state_dict(torch.load(exploration_model_path)['model_state_dict'])
model.eval()
model.cuda()
return model
class StatefulExplorationModel:
def __init__(self, exploration_model_path, max_steps=250) -> None:
self.exploration_model_path = exploration_model_path
self.max_steps = max_steps
        self.reset()
def reset(self):
self.model = init_exploration_model(self.exploration_model_path)
self.rollout_storage = RolloutStorage(
num_steps=1,
num_samplers=1,
actor_critic=self.model,
only_store_first_and_last_in_memory=True,
)
self.memory = self.rollout_storage.pick_memory_step(0)
tmp = self.memory["rnn"][1]
self.memory["rnn"] = (self.memory["rnn"][0].cuda(), tmp)
self.memory.tensor("rnn").cuda()
self.masks = self.rollout_storage.masks[:1]
self.masks = 0 * self.masks
self.masks = self.masks.cuda()
self.action_count = 0
self.trajectory = []
def get_action(self, controller):
# rollout walkthrough traj
last_action = None
while 1:
observation = {'image' : controller.last_event.frame.copy()}
A.TestTransform(observation)
observation['image'] = observation['image'].permute(1, 2, 0).unsqueeze(0).unsqueeze(0).to(0)
            ac_out, self.memory = cast(
                Tuple[ActorCriticOutput, Memory],
                self.model.forward(
                    observations=observation,
                    memory=self.memory,
                    prev_actions=None,
                    masks=self.masks,
                ),
            )
self.masks.fill_(1)
action_success = False
dist = Categorical(ac_out.distributions.probs)
while not action_success:
if self.action_count == (self.max_steps - 1):
return None
if len(self.trajectory) > 2:
if ACTION_NEGATIONS[EXPLORATION_ACTION_ORDER[self.trajectory[-2]]] == EXPLORATION_ACTION_ORDER[self.trajectory[-1]]:
dist.probs[0][0][self.trajectory[-2]] = 0.0
                action_num = dist.sample().item()
action = EXPLORATION_ACTION_ORDER[action_num]
action_dict = {}
action_dict['action'] = action
sr = controller.step(action_dict)
self.action_count += 1
# while action == last_action and not last_action_success:
# action=ac_out.distributions.sample().item()
action_success = sr.metadata['lastActionSuccess']
if action_success:
self.trajectory.append(action_num)
return action_num
else:
# modify the distribution
dist.probs[0][0][action_num] = 0.0
| CSR-main | src/models/exploration_model.py |
from src.simulation import flow
import torch
import torch.nn as nn
import torch.nn.functional as F
def upshuffle(in_planes, out_planes, upscale_factor, kernel_size=3, stride=1, padding=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes * upscale_factor ** 2,
kernel_size=kernel_size, stride=stride, padding=padding),
nn.PixelShuffle(upscale_factor),
nn.ReLU()
)
def upshuffle_norelu(in_planes, out_planes, upscale_factor, kernel_size=3, stride=1, padding=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes * upscale_factor ** 2,
kernel_size=kernel_size, stride=stride, padding=padding),
nn.PixelShuffle(upscale_factor),
)
def combine_1x1(in_planes, out_planes):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, 1, 1),
nn.BatchNorm2d(out_planes),
nn.ReLU(),
)
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class ResBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None, name=None):
super(ResBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if downsample is None:
downsample = nn.Sequential(
conv1x1(inplanes, planes, stride),
norm_layer(planes),
)
if groups != 1 or base_width != 64:
raise ValueError(
'BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError(
"Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class BasicConvLayer(nn.Module):
def __init__(self, inplane, planes):
super(BasicConvLayer, self).__init__()
self.conv = conv3x3(inplane, planes)
self.bn = nn.BatchNorm2d(planes)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return self.relu(x)
class ResUpLayer(nn.Module):
def __init__(self, inplanes, planes):
"""Upsample and then pass through resblock
Args:
inplanes (int): input number of channels
planes (int): output number of channels
"""
super(ResUpLayer, self).__init__()
down_planes, up_planes = inplanes
self.upsample = nn.Upsample(
scale_factor=2, mode='bilinear', align_corners=False)
self.conv = conv3x3(up_planes, planes)
self.block0 = ResBlock(down_planes + planes, planes)
self.block1 = ResBlock(planes, planes)
def forward(self, down_feature, up_feature):
x = self.upsample(up_feature)
x = self.conv(x)
if down_feature is not None:
x = torch.cat((down_feature, x), 1)
x = self.block0(x)
x = self.block1(x)
return x
class MultiscaleHead(nn.Module):
def __init__(self, out_planes):
super(MultiscaleHead, self).__init__()
self.resup4 = ResUpLayer((256, 512), 256)
self.resup3 = ResUpLayer((128, 256 + out_planes), 128)
self.resup2 = ResUpLayer((64, 128 + out_planes), 64)
self.resup1 = ResUpLayer((64, 64 + out_planes), 64)
self.resup0 = ResUpLayer((0, 64 + out_planes), 32)
self.flow4 = nn.Conv2d(
in_channels=256, out_channels=out_planes, kernel_size=3, padding=1)
self.flow3 = nn.Conv2d(
in_channels=128, out_channels=out_planes, kernel_size=3, padding=1)
self.flow2 = nn.Conv2d(
in_channels=64, out_channels=out_planes, kernel_size=3, padding=1)
self.flow1 = nn.Conv2d(
in_channels=64, out_channels=out_planes, kernel_size=3, padding=1)
self.flow0 = nn.Conv2d(
in_channels=32, out_channels=out_planes, kernel_size=1)
def forward(self, enc1, enc2, enc3, enc4, b, multiscale):
x = self.resup4(enc4, b)
flow4 = self.flow4(x)
x = self.resup3(enc3, torch.cat((x, flow4), 1))
flow3 = self.flow3(x)
x = self.resup2(enc2, torch.cat((x, flow3), 1))
flow2 = self.flow2(x)
x = self.resup1(enc1, torch.cat((x, flow2), 1))
flow1 = self.flow1(x)
x = self.resup0(None, torch.cat((x, flow1), 1))
flow0 = self.flow0(x)
if multiscale:
return flow4, flow3, flow2, flow1, flow0
else:
return flow0,
class ResUpHead(nn.Module):
def __init__(self, planes):
super(ResUpHead, self).__init__()
self.resup4 = ResUpLayer((256, 512), 256)
self.resup3 = ResUpLayer((128, 256), 128)
self.resup2 = ResUpLayer((64, 128), 64)
self.resup1 = ResUpLayer((64, 64), 64)
self.resup0 = ResUpLayer((0, 64), 32)
self.conv_out = nn.Conv2d(
in_channels=32, out_channels=planes, kernel_size=1)
def forward(self, enc1, enc2, enc3, enc4, b):
x = self.resup4(enc4, b)
x = self.resup3(enc3, x)
x = self.resup2(enc2, x)
x = self.resup1(enc1, x)
x = self.resup0(None, x)
return self.conv_out(x)
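# Shape sketch with synthetic tensors: ResUpHead consumes the five intermediate feature maps
# a width-64 ResNet-18 FeatureLearner produces for a 224x224 input and decodes them back to
# full resolution.
def _example_res_up_head():
    head = ResUpHead(planes=1)
    enc1 = torch.zeros(1, 64, 112, 112)
    enc2 = torch.zeros(1, 64, 56, 56)
    enc3 = torch.zeros(1, 128, 28, 28)
    enc4 = torch.zeros(1, 256, 14, 14)
    b = torch.zeros(1, 512, 7, 7)
    return head(enc1, enc2, enc3, enc4, b).shape  # torch.Size([1, 1, 224, 224])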
| CSR-main | src/models/layers.py |
import torch
import torch.nn as nn
from functools import partial
import math
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_, DropPath, to_2tuple
import pdb
__all__ = [
'deit_tiny_patch16_224', 'deit_small_patch16_224', 'deit_base_patch16_224',
'deit_tiny_distilled_patch16_224', 'deit_small_distilled_patch16_224',
'deit_base_distilled_patch16_224', 'deit_base_patch16_384',
'deit_base_distilled_patch16_384', 'container_light'
]
class Mlp(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class CMlp(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Attention_pure(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.attn_drop = nn.Dropout(attn_drop)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
C = int(C // 3)
qkv = x.reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj_drop(x)
return x
class MixBlock(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.dim = dim
self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
self.norm1 = nn.BatchNorm2d(dim)
self.conv1 = nn.Conv2d(dim, 3 * dim, 1)
self.conv2 = nn.Conv2d(dim, dim, 1)
self.conv = nn.Conv2d(dim, dim, 5, padding=2, groups=dim)
self.attn = Attention_pure(
dim,
num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = nn.BatchNorm2d(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.sa_weight = nn.Parameter(torch.Tensor([0.0]))
def forward(self, x):
x = x + self.pos_embed(x)
B, _, H, W = x.shape
residual = x
x = self.norm1(x)
qkv = self.conv1(x)
conv = qkv[:, 2 * self.dim:, :, :]
conv = self.conv(conv)
sa = qkv.flatten(2).transpose(1, 2)
sa = self.attn(sa)
sa = sa.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
x = residual + self.drop_path(self.conv2(torch.sigmoid(self.sa_weight) * sa + (1 - torch.sigmoid(self.sa_weight)) * conv))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
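    # Note on the gating above: torch.sigmoid(self.sa_weight) blends the self-attention branch
    # (sa) with the depthwise convolution branch (conv); sa_weight is initialized to 0, so
    # training starts from an equal 0.5 / 0.5 mix and learns the balance per block.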
class CBlock(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
self.norm1 = nn.BatchNorm2d(dim)
self.conv1 = nn.Conv2d(dim, dim, 1)
self.conv2 = nn.Conv2d(dim, dim, 1)
self.attn = nn.Conv2d(dim, dim, 5, padding=2, groups=dim)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = nn.BatchNorm2d(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x):
x = x + self.pos_embed(x)
x = x + self.drop_path(self.conv2(self.attn(self.conv1(self.norm1(x)))))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class Block(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.norm = nn.LayerNorm(embed_dim)
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x)
B, C, H, W = x.shape
x = x.flatten(2).transpose(1, 2)
x = self.norm(x)
x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
return x
class HybridEmbed(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
def __init__(self, backbone, img_size=224, feature_size=None, in_chans=3, embed_dim=768):
super().__init__()
assert isinstance(backbone, nn.Module)
img_size = to_2tuple(img_size)
self.img_size = img_size
self.backbone = backbone
if feature_size is None:
with torch.no_grad():
# FIXME this is hacky, but most reliable way of determining the exact dim of the output feature
# map for all networks, the feature metadata has reliable channel and stride info, but using
# stride to calc feature dim requires info about padding of each stage that isn't captured.
training = backbone.training
if training:
backbone.eval()
o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1]))
if isinstance(o, (list, tuple)):
o = o[-1] # last feature if backbone outputs list/tuple of features
feature_size = o.shape[-2:]
feature_dim = o.shape[1]
backbone.train(training)
else:
feature_size = to_2tuple(feature_size)
if hasattr(self.backbone, 'feature_info'):
feature_dim = self.backbone.feature_info.channels()[-1]
else:
feature_dim = self.backbone.num_features
self.num_patches = feature_size[0] * feature_size[1]
self.proj = nn.Conv2d(feature_dim, embed_dim, 1)
def forward(self, x):
x = self.backbone(x)
if isinstance(x, (list, tuple)):
x = x[-1] # last feature if backbone outputs list/tuple of features
x = self.proj(x).flatten(2).transpose(1, 2)
return x
class VisionTransformer(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
""" Vision Transformer
A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
https://arxiv.org/abs/2010.11929
"""
def __init__(self, img_size=[224, 56, 28, 14], patch_size=[4, 2, 2, 2], in_chans=3, num_classes=1000, embed_dim=[64, 128, 320, 512], depth=[3, 4, 8, 3],
num_heads=12, mlp_ratio=[8, 8, 4, 4], qkv_bias=True, qk_scale=None, representation_size=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=None):
"""
Args:
            img_size (list[int]): input resolution seen by each of the four stages
            patch_size (list[int]): patch size used by the patch embedding of each stage
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (list[int]): embedding dimension of each stage
            depth (list[int]): number of blocks in each stage
            num_heads (int): number of attention heads
            mlp_ratio (list[int]): ratio of mlp hidden dim to embedding dim, per stage
qkv_bias (bool): enable bias for qkv if True
qk_scale (float): override default qk scale of head_dim ** -0.5 if set
representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
drop_rate (float): dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
hybrid_backbone (nn.Module): CNN backbone to use in-place of PatchEmbed module
norm_layer: (nn.Module): normalization layer
"""
super().__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
self.embed_dim = embed_dim
self.depth = depth
if hybrid_backbone is not None:
self.patch_embed = HybridEmbed(
hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim)
else:
self.patch_embed1 = PatchEmbed(
img_size=img_size[0], patch_size=patch_size[0], in_chans=in_chans, embed_dim=embed_dim[0])
self.patch_embed2 = PatchEmbed(
img_size=img_size[1], patch_size=patch_size[1], in_chans=embed_dim[0], embed_dim=embed_dim[1])
self.patch_embed3 = PatchEmbed(
img_size=img_size[2], patch_size=patch_size[2], in_chans=embed_dim[1], embed_dim=embed_dim[2])
self.patch_embed4 = PatchEmbed(
img_size=img_size[3], patch_size=patch_size[3], in_chans=embed_dim[2], embed_dim=embed_dim[3])
num_patches1 = self.patch_embed1.num_patches
num_patches2 = self.patch_embed2.num_patches
num_patches3 = self.patch_embed3.num_patches
num_patches4 = self.patch_embed4.num_patches
self.pos_drop = nn.Dropout(p=drop_rate)
        self.mixture = True
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depth))] # stochastic depth decay rule
self.blocks1 = nn.ModuleList([
CBlock(
dim=embed_dim[0], num_heads=num_heads, mlp_ratio=mlp_ratio[0], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth[0])])
self.blocks2 = nn.ModuleList([
CBlock(
dim=embed_dim[1], num_heads=num_heads, mlp_ratio=mlp_ratio[1], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+depth[0]], norm_layer=norm_layer)
for i in range(depth[1])])
self.blocks3 = nn.ModuleList([
CBlock(
dim=embed_dim[2], num_heads=num_heads, mlp_ratio=mlp_ratio[2], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+depth[0]+depth[1]], norm_layer=norm_layer)
for i in range(depth[2])])
self.blocks4 = nn.ModuleList([
MixBlock(
dim=embed_dim[3], num_heads=num_heads, mlp_ratio=mlp_ratio[3], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+depth[0]+depth[1]+depth[2]], norm_layer=norm_layer)
for i in range(depth[3])])
self.norm = nn.BatchNorm2d(embed_dim[-1])
# Representation layer
if representation_size:
self.num_features = representation_size
self.pre_logits = nn.Sequential(OrderedDict([
                ('fc', nn.Linear(embed_dim[-1], representation_size)),
('act', nn.Tanh())
]))
else:
self.pre_logits = nn.Identity()
# Classifier head
self.head = nn.Linear(embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
B = x.shape[0]
x = self.patch_embed1(x)
x = self.pos_drop(x)
for blk in self.blocks1:
x = blk(x)
x = self.patch_embed2(x)
for blk in self.blocks2:
x = blk(x)
x = self.patch_embed3(x)
for blk in self.blocks3:
x = blk(x)
x = self.patch_embed4(x)
for blk in self.blocks4:
x = blk(x)
x = self.norm(x)
x = self.pre_logits(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = x.flatten(2).mean(-1)
x = self.head(x)
return x
@register_model
def container_v1_light(pretrained=False, **kwargs):
model = VisionTransformer(
img_size=[224, 56, 28, 14], patch_size=[4, 2, 2, 2], embed_dim=[64, 128, 320, 512], depth=[3, 4, 8, 3], num_heads=16, mlp_ratio=[8, 8, 4, 4], qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
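# Illustrative usage sketch (assumes torch and timm are installed and this module has been imported
# so that @register_model has registered the entry point; tensor sizes are only an example):
#
#     from timm import create_model
#     model = create_model('container_v1_light', pretrained=False, num_classes=1000)
#     logits = model(torch.randn(2, 3, 224, 224))   # -> shape (2, 1000)
#
# Note that the pretrained branch above points at DeiT-tiny weights, whose state dict is unlikely to
# match this architecture as-is.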
| container-main | models.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
A script to run multinode training with submitit.
"""
import argparse
import os
import uuid
from pathlib import Path
import main as classification
import submitit
def parse_args():
classification_parser = classification.get_args_parser()
parser = argparse.ArgumentParser("Submitit for DeiT", parents=[classification_parser])
parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node")
parser.add_argument("--nodes", default=2, type=int, help="Number of nodes to request")
parser.add_argument("--timeout", default=2800, type=int, help="Duration of the job")
parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.")
parser.add_argument("--partition", default="learnfair", type=str, help="Partition where to submit")
parser.add_argument("--use_volta32", action='store_true', help="Big models? Use this")
parser.add_argument('--comment', default="", type=str,
help='Comment to pass to scheduler, e.g. priority message')
return parser.parse_args()
def get_shared_folder() -> Path:
user = os.getenv("USER")
if Path("/checkpoint/").is_dir():
p = Path(f"/checkpoint/{user}/experiments")
p.mkdir(exist_ok=True)
return p
raise RuntimeError("No shared folder available")
def get_init_file():
    # Init file must not exist, but its parent dir must exist.
os.makedirs(str(get_shared_folder()), exist_ok=True)
init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init"
if init_file.exists():
os.remove(str(init_file))
return init_file
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
import main as classification
self._setup_gpu_args()
classification.main(self.args)
def checkpoint(self):
import os
import submitit
self.args.dist_url = get_init_file().as_uri()
checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
if os.path.exists(checkpoint_file):
self.args.resume = checkpoint_file
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
import submitit
from pathlib import Path
job_env = submitit.JobEnvironment()
self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main():
args = parse_args()
if args.job_dir == "":
args.job_dir = get_shared_folder() / "%j"
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
num_gpus_per_node = args.ngpus
nodes = args.nodes
timeout_min = args.timeout
partition = args.partition
kwargs = {}
if args.use_volta32:
kwargs['slurm_constraint'] = 'volta32gb'
if args.comment:
kwargs['slurm_comment'] = args.comment
executor.update_parameters(
mem_gb=40 * num_gpus_per_node,
gpus_per_node=num_gpus_per_node,
tasks_per_node=num_gpus_per_node, # one task per GPU
cpus_per_task=10,
nodes=nodes,
timeout_min=timeout_min, # max is 60 * 72
# Below are cluster dependent parameters
slurm_partition=partition,
slurm_signal_delay_s=120,
**kwargs
)
executor.update_parameters(name="deit")
args.dist_url = get_init_file().as_uri()
args.output_dir = args.job_dir
trainer = Trainer(args)
job = executor.submit(trainer)
print("Submitted job_id:", job.job_id)
if __name__ == "__main__":
main()
| container-main | run_with_submitit.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import os
import json
from torchvision import datasets, transforms
from torchvision.datasets.folder import ImageFolder, default_loader
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.data import create_transform
class INatDataset(ImageFolder):
def __init__(self, root, train=True, year=2018, transform=None, target_transform=None,
category='name', loader=default_loader):
self.transform = transform
self.loader = loader
self.target_transform = target_transform
self.year = year
# assert category in ['kingdom','phylum','class','order','supercategory','family','genus','name']
path_json = os.path.join(root, f'{"train" if train else "val"}{year}.json')
with open(path_json) as json_file:
data = json.load(json_file)
with open(os.path.join(root, 'categories.json')) as json_file:
data_catg = json.load(json_file)
path_json_for_targeter = os.path.join(root, f"train{year}.json")
with open(path_json_for_targeter) as json_file:
data_for_targeter = json.load(json_file)
targeter = {}
indexer = 0
for elem in data_for_targeter['annotations']:
king = []
king.append(data_catg[int(elem['category_id'])][category])
if king[0] not in targeter.keys():
targeter[king[0]] = indexer
indexer += 1
self.nb_classes = len(targeter)
self.samples = []
for elem in data['images']:
cut = elem['file_name'].split('/')
target_current = int(cut[2])
path_current = os.path.join(root, cut[0], cut[2], cut[3])
categors = data_catg[target_current]
target_current_true = targeter[categors[category]]
self.samples.append((path_current, target_current_true))
# __getitem__ and __len__ inherited from ImageFolder
def build_dataset(is_train, args):
transform = build_transform(is_train, args)
if args.data_set == 'CIFAR':
dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform)
nb_classes = 100
elif args.data_set == 'IMNET':
root = os.path.join(args.data_path, 'train' if is_train else 'val')
dataset = datasets.ImageFolder(root, transform=transform)
nb_classes = 1000
elif args.data_set == 'INAT':
dataset = INatDataset(args.data_path, train=is_train, year=2018,
category=args.inat_category, transform=transform)
nb_classes = dataset.nb_classes
elif args.data_set == 'INAT19':
dataset = INatDataset(args.data_path, train=is_train, year=2019,
category=args.inat_category, transform=transform)
nb_classes = dataset.nb_classes
return dataset, nb_classes
def build_transform(is_train, args):
resize_im = args.input_size > 32
if is_train:
# this should always dispatch to transforms_imagenet_train
transform = create_transform(
input_size=args.input_size,
is_training=True,
color_jitter=args.color_jitter,
auto_augment=args.aa,
interpolation=args.train_interpolation,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
)
if not resize_im:
# replace RandomResizedCropAndInterpolation with
# RandomCrop
transform.transforms[0] = transforms.RandomCrop(
args.input_size, padding=4)
return transform
t = []
if resize_im:
size = int((256 / 224) * args.input_size)
t.append(
transforms.Resize(size, interpolation=3), # to maintain same ratio w.r.t. 224 images
)
t.append(transforms.CenterCrop(args.input_size))
t.append(transforms.ToTensor())
t.append(transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD))
return transforms.Compose(t)
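# Behaviour sketch (assuming a hypothetical argparse.Namespace carrying the fields used above): for
# evaluation with input_size=224 the returned pipeline is
#     Resize(256) -> CenterCrop(224) -> ToTensor -> Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
# because size = int((256 / 224) * 224) == 256. For training, timm's create_transform builds the
# RandAugment / random-erasing pipeline configured by --aa, --reprob, --remode and --recount.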
| container-main | datasets.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Train and eval functions used in main.py
"""
import math
import sys
from typing import Iterable, Optional
import torch
from timm.data import Mixup
from timm.utils import accuracy, ModelEma
from losses import DistillationLoss
import utils
def train_one_epoch(model: torch.nn.Module, criterion: DistillationLoss,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None,
set_training_mode=True):
model.train(set_training_mode)
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
samples = samples.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets)
with torch.cuda.amp.autocast():
outputs = model(samples)
loss = criterion(samples, outputs, targets)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
optimizer.zero_grad()
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(), create_graph=is_second_order)
torch.cuda.synchronize()
if model_ema is not None:
model_ema.update(model)
metric_logger.update(loss=loss_value)
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(data_loader, model, device):
criterion = torch.nn.CrossEntropyLoss()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
# switch to evaluation mode
model.eval()
for images, target in metric_logger.log_every(data_loader, 10, header):
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# compute output
with torch.cuda.amp.autocast():
output = model(images)
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = images.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
| container-main | engine.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references.
"""
import io
import os
import time
from collections import defaultdict, deque
import datetime
import torch
import torch.distributed as dist
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
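# Minimal usage sketch for SmoothedValue:
#
#     v = SmoothedValue(window_size=2)
#     v.update(1.0); v.update(3.0)
#     v.avg          # mean over the window deque -> 2.0
#     v.global_avg   # total / count over all updates -> 2.0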
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
log_msg = [
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
]
if torch.cuda.is_available():
log_msg.append('max mem: {memory:.0f}')
log_msg = self.delimiter.join(log_msg)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
def _load_checkpoint_for_ema(model_ema, checkpoint):
"""
Workaround for ModelEma._load_checkpoint to accept an already-loaded object
"""
mem_file = io.BytesIO()
torch.save(checkpoint, mem_file)
mem_file.seek(0)
model_ema._load_checkpoint(mem_file)
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
| container-main | utils.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Implements the knowledge distillation loss
"""
import torch
from torch.nn import functional as F
class DistillationLoss(torch.nn.Module):
"""
This module wraps a standard criterion and adds an extra knowledge distillation loss by
taking a teacher model prediction and using it as additional supervision.
"""
def __init__(self, base_criterion: torch.nn.Module, teacher_model: torch.nn.Module,
distillation_type: str, alpha: float, tau: float):
super().__init__()
self.base_criterion = base_criterion
self.teacher_model = teacher_model
assert distillation_type in ['none', 'soft', 'hard']
self.distillation_type = distillation_type
self.alpha = alpha
self.tau = tau
def forward(self, inputs, outputs, labels):
"""
Args:
            inputs: The original inputs that are fed to the teacher model
outputs: the outputs of the model to be trained. It is expected to be
either a Tensor, or a Tuple[Tensor, Tensor], with the original output
in the first position and the distillation predictions as the second output
labels: the labels for the base criterion
"""
outputs_kd = None
if not isinstance(outputs, torch.Tensor):
# assume that the model outputs a tuple of [outputs, outputs_kd]
outputs, outputs_kd = outputs
base_loss = self.base_criterion(outputs, labels)
if self.distillation_type == 'none':
return base_loss
if outputs_kd is None:
raise ValueError("When knowledge distillation is enabled, the model is "
"expected to return a Tuple[Tensor, Tensor] with the output of the "
"class_token and the dist_token")
        # don't backprop through the teacher
with torch.no_grad():
teacher_outputs = self.teacher_model(inputs)
if self.distillation_type == 'soft':
T = self.tau
# taken from https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py#L100
# with slight modifications
distillation_loss = F.kl_div(
F.log_softmax(outputs_kd / T, dim=1),
F.log_softmax(teacher_outputs / T, dim=1),
reduction='sum',
log_target=True
) * (T * T) / outputs_kd.numel()
elif self.distillation_type == 'hard':
distillation_loss = F.cross_entropy(outputs_kd, teacher_outputs.argmax(dim=1))
loss = base_loss * (1 - self.alpha) + distillation_loss * self.alpha
return loss
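# Minimal usage sketch (teacher_model=None is only safe here because distillation_type='none'
# returns before the teacher is ever called):
#
#     base = torch.nn.CrossEntropyLoss()
#     criterion = DistillationLoss(base, teacher_model=None, distillation_type='none', alpha=0.5, tau=1.0)
#     loss = criterion(inputs, outputs, labels)   # equivalent to base(outputs, labels)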
| container-main | losses.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
from models import *
dependencies = ["torch", "torchvision", "timm"]
| container-main | hubconf.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
from pathlib import Path
from timm.data import Mixup
from timm.models import create_model
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.scheduler import create_scheduler
from timm.optim import create_optimizer
from timm.utils import NativeScaler, get_state_dict, ModelEma
from datasets import build_dataset
from engine import train_one_epoch, evaluate
from losses import DistillationLoss
from samplers import RASampler
import models
import utils
def get_args_parser():
parser = argparse.ArgumentParser('DeiT training and evaluation script', add_help=False)
parser.add_argument('--batch-size', default=64, type=int)
parser.add_argument('--epochs', default=300, type=int)
# Model parameters
parser.add_argument('--model', default='deit_base_patch16_224', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--input-size', default=224, type=int, help='images input size')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument('--model-ema', action='store_true')
parser.add_argument('--no-model-ema', action='store_false', dest='model_ema')
parser.set_defaults(model_ema=True)
parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw")')
parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                        help='LR scheduler (default: "cosine")')
parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
help='learning rate (default: 5e-4)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                        help='patience epochs for Plateau LR scheduler (default: 10)')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Augmentation parameters
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
    parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original" (default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='bicubic',
help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
parser.add_argument('--repeated-aug', action='store_true')
parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
parser.set_defaults(repeated_aug=True)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# Distillation parameters
parser.add_argument('--teacher-model', default='regnety_160', type=str, metavar='MODEL',
                        help='Name of teacher model to train (default: "regnety_160")')
parser.add_argument('--teacher-path', type=str, default='')
parser.add_argument('--distillation-type', default='none', choices=['none', 'soft', 'hard'], type=str, help="")
parser.add_argument('--distillation-alpha', default=0.5, type=float, help="")
parser.add_argument('--distillation-tau', default=1.0, type=float, help="")
# * Finetuning params
parser.add_argument('--finetune', default='', help='finetune from checkpoint')
# Dataset parameters
parser.add_argument('--data-path', default='/datasets01/imagenet_full_size/061417/', type=str,
help='dataset path')
parser.add_argument('--data-set', default='IMNET', choices=['CIFAR', 'IMNET', 'INAT', 'INAT19'],
type=str, help='Image Net dataset path')
parser.add_argument('--inat-category', default='name',
choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],
type=str, help='semantic granularity')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
parser.add_argument('--dist-eval', action='store_true', default=False, help='Enabling distributed evaluation')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin-mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem',
help='')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
return parser
def main(args):
utils.init_distributed_mode(args)
print(args)
if args.distillation_type != 'none' and args.finetune and not args.eval:
raise NotImplementedError("Finetuning with distillation not yet supported")
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
# random.seed(seed)
cudnn.benchmark = True
dataset_train, args.nb_classes = build_dataset(is_train=True, args=args)
dataset_val, _ = build_dataset(is_train=False, args=args)
if True: # args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
if args.repeated_aug:
sampler_train = RASampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
else:
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=int(1.5 * args.batch_size),
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False
)
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.nb_classes)
print(f"Creating model: {args.model}")
model = create_model(
args.model,
pretrained=False,
num_classes=args.nb_classes,
drop_rate=args.drop,
drop_path_rate=args.drop_path,
drop_block_rate=None,
)
if args.finetune:
if args.finetune.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.finetune, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.finetune, map_location='cpu')
checkpoint_model = checkpoint['model']
state_dict = model.state_dict()
for k in ['head.weight', 'head.bias', 'head_dist.weight', 'head_dist.bias']:
if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
# interpolate position embedding
pos_embed_checkpoint = checkpoint_model['pos_embed']
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.patch_embed.num_patches
num_extra_tokens = model.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches ** 0.5)
# class_token and dist_token are kept unchanged
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
checkpoint_model['pos_embed'] = new_pos_embed
model.load_state_dict(checkpoint_model, strict=False)
model.to(device)
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume='')
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
linear_scaled_lr = args.lr * args.batch_size * utils.get_world_size() / 512.0
args.lr = linear_scaled_lr
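    # Worked example of the linear scaling rule above: with the defaults --lr 5e-4 and --batch-size 64
    # on a single node with 8 GPUs, the global batch is 64 * 8 = 512, so
    # linear_scaled_lr = 5e-4 * 512 / 512 = 5e-4; doubling the global batch doubles the learning rate.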
optimizer = create_optimizer(args, model_without_ddp)
loss_scaler = NativeScaler()
lr_scheduler, _ = create_scheduler(args, optimizer)
criterion = LabelSmoothingCrossEntropy()
if args.mixup > 0.:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif args.smoothing:
criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
criterion = torch.nn.CrossEntropyLoss()
teacher_model = None
if args.distillation_type != 'none':
assert args.teacher_path, 'need to specify teacher-path when using distillation'
print(f"Creating teacher model: {args.teacher_model}")
teacher_model = create_model(
args.teacher_model,
pretrained=False,
num_classes=args.nb_classes,
global_pool='avg',
)
if args.teacher_path.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.teacher_path, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.teacher_path, map_location='cpu')
teacher_model.load_state_dict(checkpoint['model'])
teacher_model.to(device)
teacher_model.eval()
# wrap the criterion in our custom DistillationLoss, which
# just dispatches to the original criterion if args.distillation_type is 'none'
criterion = DistillationLoss(
criterion, teacher_model, args.distillation_type, args.distillation_alpha, args.distillation_tau
)
output_dir = Path(args.output_dir)
if args.resume:
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
args.start_epoch = checkpoint['epoch'] + 1
if args.model_ema:
utils._load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])
if 'scaler' in checkpoint:
loss_scaler.load_state_dict(checkpoint['scaler'])
if args.eval:
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
return
print(f"Start training for {args.epochs} epochs")
start_time = time.time()
max_accuracy = 0.0
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
train_stats = train_one_epoch(
model, criterion, data_loader_train,
optimizer, device, epoch, loss_scaler,
args.clip_grad, model_ema, mixup_fn,
set_training_mode=args.finetune == '' # keep in eval mode during finetuning
)
lr_scheduler.step(epoch)
if args.output_dir:
checkpoint_paths = [output_dir / 'checkpoint.pth']
for checkpoint_path in checkpoint_paths:
utils.save_on_master({
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'epoch': epoch,
'model_ema': get_state_dict(model_ema),
'scaler': loss_scaler.state_dict(),
'args': args,
}, checkpoint_path)
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
max_accuracy = max(max_accuracy, test_stats["acc1"])
print(f'Max accuracy: {max_accuracy:.2f}%')
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
if args.output_dir and utils.is_main_process():
with (output_dir / "log.txt").open("a") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
parser = argparse.ArgumentParser('DeiT training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
| container-main | main.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import torch
import torch.distributed as dist
import math
class RASampler(torch.utils.data.Sampler):
"""Sampler that restricts data loading to a subset of the dataset for distributed,
with repeated augmentation.
It ensures that different each augmented version of a sample will be visible to a
different process (GPU)
Heavily based on torch.utils.data.DistributedSampler
"""
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 3.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
# self.num_selected_samples = int(math.ceil(len(self.dataset) / self.num_replicas))
self.num_selected_samples = int(math.floor(len(self.dataset) // 256 * 256 / self.num_replicas))
self.shuffle = shuffle
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
if self.shuffle:
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = list(range(len(self.dataset)))
        # repeat each index 3 times for repeated augmentation
        indices = [ele for ele in indices for i in range(3)]
        # add extra samples to make the total evenly divisible across replicas
        indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices[:self.num_selected_samples])
def __len__(self):
return self.num_selected_samples
def set_epoch(self, epoch):
self.epoch = epoch
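# Worked example (hypothetical sizes): for a dataset of 1024 samples sharded over num_replicas=4
# processes, each replica holds num_samples = ceil(1024 * 3 / 4) = 768 indices (every index is
# repeated 3 times before sharding), but only iterates over
# num_selected_samples = floor(1024 // 256 * 256 / 4) = 256 of them per epoch, and the repeated
# copies of a given sample end up on different processes.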
| container-main | samplers.py |
"""
Unit tests for multilabel_average_precision_metric.py
"""
import unittest
import numpy as np
from torch import Tensor
from relex.multilabel_average_precision_metric import MultilabelAveragePrecision
class TestUtil(unittest.TestCase):
@classmethod
def test_get_metrics(cls):
np.seterr(divide='ignore', invalid='ignore')
bins = 1000
diff = 0.01
metric = MultilabelAveragePrecision(bins=bins)
size = [1000, 100]
pred = Tensor(np.random.uniform(0, 1, size))
gold = Tensor(np.random.randint(0, 2, size))
metric.__call__(pred, gold)
fast_ap = metric.get_metric() # calls the fast get_metric
ap = metric.get_metric(reset=True) # calls the accurate get_metric
assert (abs(ap - fast_ap)) < diff
metric.__call__(pred, gold)
metric.__call__(pred, gold)
metric.__call__(pred, gold)
fast_ap = metric.get_metric()
ap = metric.get_metric(reset=True)
assert (abs(ap - fast_ap)) < diff
| comb_dist_direct_relex-master | tests/test_multilabel_average_precision_metric.py |
import json
from sklearn.metrics import precision_recall_curve
import matplotlib.pyplot as plt
with open('scripts/PR_curves.json') as f:
x = json.load(f)
plt.step(x['belagy_et_al_best'][0], x['belagy_et_al_best'][1], where='post')
plt.step(x['belagy_et_al_baseline'][0], x['belagy_et_al_baseline'][1], where='post')
plt.step(x['reside'][0], x['reside'][1], where='post')
plt.step(x['lin_et_al'][0], x['lin_et_al'][1], where='post')
plt.grid( linestyle='dashed', linewidth=0.5)
plt.legend(['This work',
'Baseline',
'RESIDE (Vashishth et al., 2018)',
'PCNN+ATT (Lin et al., 2016)',
])
plt.xlabel('recall')
plt.ylabel('precision')
plt.ylim([0.4, 1])
plt.xlim([0, 0.4])
| comb_dist_direct_relex-master | scripts/plot_pr_curves.py |
"""
Relation Extraction.
"""
__version__ = 0.3
| comb_dist_direct_relex-master | relex/__init__.py |
from typing import Tuple
import logging
from overrides import overrides
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
log = logging.getLogger(__name__) # pylint: disable=invalid-name
@Predictor.register('relex')
class RelationExtractionPredictor(Predictor):
""""Predictor wrapper for the RelationExtractionPredictor"""
@overrides
def _json_to_instance(self, json_dict: JsonDict) -> Tuple[Instance, JsonDict]:
e1 = json_dict['e1']
e2 = json_dict['e2']
mentions = json_dict['mentions']
instance = self._dataset_reader.text_to_instance(
            e1=e1, e2=e2, rels=[], mentions=mentions, is_predict=True, supervision_type='distant')
if not instance:
log.error('parsing instance failed: %s', mentions)
instance = self._dataset_reader.text_to_instance(
e1="e1", e2="e2", rels=[],
mentions=["Some relation between <e1>entity 1</e1> and <e2>entity 2</e2>"],
                is_predict=True, supervision_type='distant')
return instance, {}
| comb_dist_direct_relex-master | relex/relation_extraction_predictor.py |
import logging
from overrides import overrides
import numpy as np
import torch
from torch import nn
from sklearn.metrics import precision_recall_curve
from allennlp.common.checks import ConfigurationError
from allennlp.training.metrics.metric import Metric
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@Metric.register("multilabel_average_precision")
class MultilabelAveragePrecision(Metric):
"""
Average precision for multiclass multilabel classification. Average precision
approximately equals area under the precision-recall curve.
- Average precision: scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html
- Precision/recall: this is multilabel metric, so all labels are considered predictions (with their
corresponding confidences) not just the one with the highest confidence.
Two modes of calculating AP are implemented,
- a fast but less accurate implementation that bins threshold. Supports the frequent use of get_metrics
- a more accurate implemenatition when get_metrics(reset=True)
The fast one tends to underestimate AP.
AP - Fast_AP < 10/number_of_bins
"""
    def __init__(self, bins=1000, recall_thr=0.40) -> None:
"""Args:
bins: number of threshold bins for the fast computation of AP
recall_thr: compute AP (or AUC) for recall values [0:recall_thr]
"""
self.recall_thr = recall_thr
self.sigmoid = nn.Sigmoid()
# stores data for the accurate calculation of AP
self.predictions = np.array([])
self.gold_labels = np.array([])
# stored data for the fast calculation of AP
self.bins = bins
self.bin_size = 1.0/self.bins
self.correct_counts = np.array([0] * self.bins)
self.total_counts = np.array([0] * self.bins)
def __call__(self,
predictions: torch.Tensor,
gold_labels: torch.Tensor):
"""
Parameters
----------
predictions : ``torch.Tensor``, required.
A tensor of predictions of shape (batch_size, ..., num_classes).
gold_labels : ``torch.Tensor``, required.
A tensor of zeros and ones of shape (batch_size, ..., num_classes). It must be the same
shape as the ``predictions`` tensor.
"""
predictions = self.sigmoid(predictions) # sigmoid to make sure all values are [0:1]
# Get the data from the Variables to avoid GPU memory leak
predictions, gold_labels = self.unwrap_to_tensors(predictions, gold_labels)
# Sanity check
if gold_labels.shape != predictions.shape:
raise ConfigurationError("gold_labels must have the same shape of predictions. "
"Found shapes {} and {}".format(gold_labels.shape, predictions.shape))
pred = predictions.numpy().ravel()
gold = gold_labels.numpy().ravel()
        # update data of accurate computation of AP
self.predictions = np.append(self.predictions, pred)
self.gold_labels = np.append(self.gold_labels, gold)
# update data of fast computation of AP
idx = (self.bins - 1) - np.minimum((pred/self.bin_size).astype(int), self.bins - 1)
gold_uniq_idx, gold_idx_count = np.unique(idx[np.nonzero(gold)], return_counts=True)
self.correct_counts[gold_uniq_idx] = np.add(self.correct_counts[gold_uniq_idx], gold_idx_count)
uniq_idx, idx_count = np.unique(idx, return_counts=True)
self.total_counts[uniq_idx] = np.add(self.total_counts[uniq_idx], idx_count)
def _thresholded_average_precision_score(self, precision, recall):
if len(precision) == 0:
return 0, -1
index = np.argmin(abs(recall - self.recall_thr))
filtered_precision = precision[:index + 1]
filtered_recall = recall[:index + 1]
ap = np.sum(np.diff(np.insert(filtered_recall, 0, 0)) * filtered_precision)
return ap, index # index of the value with recall = self.recall_thr (useful for logging)
def get_metric(self, reset: bool = False):
"""
Returns average precision.
If reset=False, returns the fast AP.
        If reset=True, returns accurate AP, logs the difference between accurate and fast AP, and
logs a list of points on the precision-recall curve.
"""
correct_cumsum = np.cumsum(self.correct_counts)
precision = correct_cumsum / np.cumsum(self.total_counts)
recall = correct_cumsum / correct_cumsum[-1]
isfinite = np.isfinite(precision)
precision = precision[isfinite]
recall = recall[isfinite]
ap, index = self._thresholded_average_precision_score(precision, recall) # fast AP because of binned values
if reset:
fast_ap = ap
precision, recall, thresholds = precision_recall_curve(self.gold_labels, self.predictions)
# _thresholded_average_precision_score assumes precision is descending and recall is ascending
precision = precision[::-1]
recall = recall[::-1]
thresholds = thresholds[::-1]
ap, index = self._thresholded_average_precision_score(precision, recall) # accurate AP because of using all values
logger.info("Fast AP: %0.4f -- Accurate AP: %0.4f", fast_ap, ap)
if index >= len(thresholds):
logger.info("Index = %d but len(thresholds) = %d. Change index to point to the end of the list.",
index, len(thresholds))
index = len(thresholds) - 1
logger.info("at index %d/%d (top %%%0.4f) -- threshold: %0.4f",
index, len(self.gold_labels), 100.0 * index / len(self.gold_labels), thresholds[index])
# only keep the top predictions then reverse again for printing (to draw the AUC curve)
precision = precision[:index + 1][::-1]
recall = recall[:index + 1][::-1]
thresholds = thresholds[:index + 1][::-1]
next_step = thresholds[0]
step_size = 0.005
max_skip = int(len(thresholds) / 500)
skipped = 0
logger.info("Precision-Recall curve ... ")
logger.info("precision, recall, threshold")
for p, r, t in [x for x in zip(precision, recall, thresholds)]:
if t < next_step and skipped < max_skip:
skipped += 1
continue
skipped = 0
next_step += step_size
# logger.info("%0.4f, %0.4f, %0.4f", p, r, t)
self.reset()
return ap
@overrides
def reset(self):
self.predictions = np.array([])
self.gold_labels = np.array([])
self.correct_counts = np.array([0] * self.bins)
        self.total_counts = np.array([0] * self.bins)
| comb_dist_direct_relex-master | relex/multilabel_average_precision_metric.py |
from typing import Set, Tuple, List, Dict
import logging
import random
from collections import defaultdict
from overrides import overrides
import tqdm
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import TextField, ListField, MultiLabelField, SequenceLabelField, LabelField
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import WordTokenizer
from allennlp.data.tokenizers.word_splitter import JustSpacesWordSplitter
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data import Token
log = logging.getLogger(__name__) # pylint: disable=invalid-name
NEGATIVE_RELATION_NAME = 'NA'
@DatasetReader.register("relation_instances")
class RelationInstancesReader(DatasetReader):
r"""DatasetReader to read a relation extraction dataset.
Each example is a pair of entities, bag (list) of sentences and a relation type. The sentences of each
bag should be listed consecutively in the dataset file.
File format: tab separated text file of 7 columns. They are:
entity1_id
entity2_id
entity1_text: can be NA because it is not used by the model
entity2_text: can be NA because it is not used by the model
relation_type: use NA to indicate No Relation
        sentence: entity mentions are highlighted with <e1>entity1</e1> and <e2>entity2</e2>
supervision_type: "direct" or "distant"
The reader assumes that the sentences relevant to a pair of entities are all listed consecutively.
If the entity pair changes, the reader starts a new bag.
"""
max_distance = 30 # for position embeddings
max_sentence_length = 130 # words
def __init__(self, lazy: bool = False,
max_bag_size: int = 25,
negative_exampels_percentage: int = 100,
with_direct_supervision: bool = True) -> None:
"""
args:
lazy: lazy reading of the dataset
max_bag_size: maximum number of sentences per a bag
negative_exampels_percentage: percentage of negative examples to keep
with_direct_supervision: keep or ignore direct supervision examples
"""
super().__init__(lazy=lazy)
self.max_bag_size = max_bag_size
self.negative_exampels_percentage = negative_exampels_percentage
self.with_direct_supervision = with_direct_supervision
self._tokenizer = WordTokenizer(word_splitter=JustSpacesWordSplitter())
self._token_indexers = {"tokens": SingleIdTokenIndexer()}
# for logging and input validation
self._inst_counts: Dict = defaultdict(int) # count instances per relation type
self._pairs: Set = set() # keep track of pairs of entities
        self._bag_sizes: Dict = defaultdict(int)  # count bags per number of relation types
        self._relation_coocur: Dict = defaultdict(int)  # count co-occurring relation type combinations
        self._failed_mentions_count: int = 0  # count mentions with wrong formatting
self._count_direct_supervised_inst: int = 0
self._count_bag_labels: Dict = defaultdict(int)
@overrides
def _read(self, file_path):
with open(cached_path(file_path), "r") as data_file:
log.info("Reading instances from lines in file at: %s", file_path)
self._inst_counts = defaultdict(int) # count instances per relation type
self._pairs = set() # keep track of pairs of entities
            self._bag_sizes = defaultdict(int)  # count bags per number of relation types
            self._relation_coocur = defaultdict(int)  # count co-occurring relation type combinations
self._failed_mentions_count = 0
self._count_direct_supervised_inst: int = 0
self._count_bag_labels: Dict = defaultdict(int)
e1 = None
e2 = None
rels = None
mentions = None
# Lines are assumed to be sorted by entity1/entity2/relation_type
for _, line in enumerate(tqdm.tqdm(data_file.readlines())):
line = line.strip()
new_e1, new_e2, _, _, rel, m, supervision_type = line.strip().split("\t")
assert supervision_type in ['direct', 'distant']
if new_e1 != e1 or new_e2 != e2 or supervision_type == 'direct':
# new entity pair
if rels:
# subsample negative examples and sentence-level supervised examples
if random.randint(1, 100) <= self.negative_exampels_percentage or \
NEGATIVE_RELATION_NAME not in rels or \
supervision_type == 'direct': # pylint: disable=unsupported-membership-test
if not self.with_direct_supervision and supervision_type == 'direct':
pass
else:
inst = self.text_to_instance(e1, e2, rels, mentions, is_predict=False,
supervision_type=supervision_type)
if inst:
yield inst
e1 = new_e1
e2 = new_e2
rels = set([rel])
mentions = set([m])
else:
# same pair of entities, just add the relation and the mention
rels.add(rel)
mentions.add(m)
if rels:
if not self.with_direct_supervision and supervision_type == 'direct':
pass
else:
inst = self.text_to_instance(e1, e2, rels, mentions, is_predict=False, supervision_type=supervision_type)
if inst is not None:
yield inst
# log relation types and number of instances
for rel, cnt in sorted(self._inst_counts.items(), key=lambda x: -x[1]):
log.info("%s - %d", rel, cnt)
# log number of relations per bag
log.info("number of relations per bag size (bagsize -> relation count)")
for k, v in sorted(self._bag_sizes.items(), key=lambda x: -x[1]):
log.info("%s - %d", k, v)
for k, v in sorted(self._relation_coocur.items(), key=lambda x: -x[1]):
log.info("%s - %d", k, v)
@overrides
def text_to_instance(self, e1: str, e2: str, # pylint: disable=arguments-differ
rels: Set[str],
mentions: Set[str],
is_predict: bool,
supervision_type: str) -> Instance:
"""Construct an instance given text input.
is_predict: True if this is being called for prediction not training
supervision_type: direct or distant
"""
assert supervision_type in ['direct', 'distant']
if (e1, e2) in self._pairs and supervision_type == 'distant' and not is_predict:
assert False, "input file is not sorted, check entities %s, %s" % (e1, e2)
self._pairs.add((e1, e2))
for rel in rels:
self._inst_counts[rel] += 1 # keep track of number of instances in each relation type for logging
if NEGATIVE_RELATION_NAME in rels:
if len(rels) > 1:
log.error("Positive relations between entities can\'t include %s. "
"Found relation types: %s between entities %s and %s",
NEGATIVE_RELATION_NAME, rels, e1, e2)
rels.remove(NEGATIVE_RELATION_NAME)
self._bag_sizes[len(rels)] += 1
if len(rels) > 1:
rels_str = ", ".join(sorted(list(rels)))
self._relation_coocur[rels_str] += 1
filtered_mentions = list(mentions)[:self.max_bag_size] # limit number of mentions per bag
fields_list = []
for m in filtered_mentions:
try:
mention_fields = self._tokens_distances_fields(
self._tokenizer.tokenize(m)[:self.max_sentence_length]
)
fields_list.append(mention_fields)
except ValueError:
# ignore mentions with wrong entity tags
self._failed_mentions_count += 1
if self._failed_mentions_count % 1000 == 0:
log.error('Number of failed mentions: %d', self._failed_mentions_count)
if len(fields_list) == 0:
return None # instance with zero mentions (because all mentions failed)
mention_f, position1_f, position2_f = zip(*fields_list)
if len(rels) == 0:
bag_label = 0 # negative bag
elif supervision_type == 'direct':
bag_label = 1 # positive bag with sentence-level supervision
else:
bag_label = 2 # positive bag distantly supervised
self._count_bag_labels[bag_label] += 1
sent_labels = [LabelField(bag_label, skip_indexing=True)] * len(fields_list)
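        # Direct supervision is flagged through the *length* of the dummy text field below:
        # two tokens (". .") for directly supervised bags vs. a single token (".") otherwise,
        # so the model can recover the flag from the padded tensor shape alone
        # (see CombDistDirectRelex.forward).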
if supervision_type == 'direct':
is_direct_supervision_bag_field = TextField(self._tokenizer.tokenize(". ."), self._token_indexers)
self._count_direct_supervised_inst += 1
else:
is_direct_supervision_bag_field = TextField(self._tokenizer.tokenize("."), self._token_indexers)
fields = {"mentions": ListField(list(mention_f)),
"positions1": ListField(list(position1_f)),
"positions2": ListField(list(position2_f)),
"is_direct_supervision_bag": is_direct_supervision_bag_field,
"sent_labels": ListField(sent_labels), # 0: -ve, 1: directly supervised +ve, 2: distantly-supervised +ve
"labels": MultiLabelField(rels), # bag-level labels
}
return Instance(fields)
def _tokens_distances_fields(self, tokens):
"""Returns the updated list of tokens and entity distances for the first and second entity as fields."""
tokens, positions1, positions2 = self._tokens_distances(tokens)
t_f = TextField(tokens, self._token_indexers)
p1_f = SequenceLabelField(positions1, t_f)
p2_f = SequenceLabelField(positions2, t_f)
return t_f, p1_f, p2_f
def _tokens_distances(self, tokens):
e1_loc = []
e2_loc = []
while len(tokens) < 5: # a hack to make sure all sentences are at least 5 tokens. CNN breaks otherwise.
tokens.append(Token(text='.'))
for i, token in enumerate(tokens):
if token.text.startswith('<e1>'):
e1_loc.append((i, 'start'))
token.text = token.text[4:]
if token.text.endswith('</e1>'):
e1_loc.append((i, 'end'))
token.text = token.text[:-5]
if token.text.startswith('<e2>'):
e2_loc.append((i, 'start'))
token.text = token.text[4:]
if token.text.endswith('</e2>'):
e2_loc.append((i, 'end'))
token.text = token.text[:-5]
positions1 = self._positions(len(tokens), e1_loc)
positions2 = self._positions(len(tokens), e2_loc)
return tokens, positions1, positions2
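    # Worked example (hypothetical mention, hand-traced): for the whitespace-tokenized input
    #   "<e1>Obama</e1> was born in <e2>Hawaii</e2> ."
    # the tags are stripped, leaving ["Obama", "was", "born", "in", "Hawaii", "."], and the
    # per-token distances returned by _positions come out as
    #   positions1 = [0, 1, 2, 3, 4, 5]   (distance to e1; 0 inside the entity)
    #   positions2 = [4, 3, 2, 1, 0, 1]   (distance to e2)
    # with every distance capped at max_distance.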
def _positions(self, tokens_count: int, e_loc: List[Tuple[int, str]]):
        # if the entity tags are missing, raise a ValueError (the caller skips such mentions)
if not e_loc:
raise ValueError('entity tags are missing.')
prev_loc = (-10000000000, 'end') # large negative number
next_loc_index = 0
next_loc = e_loc[next_loc_index]
distance_list = []
for i in range(tokens_count):
if prev_loc[1] == 'end' and next_loc[1] == 'start':
# between two entities
to_min = [abs(i - prev_loc[0]), abs(i - next_loc[0])]
to_min.append(self.max_distance)
distance = min(to_min)
elif prev_loc[1] == 'start' and next_loc[1] == 'end':
# inside the same entity
distance = 0
else:
# malformed e_loc
distance = self.max_distance
distance_list.append(distance)
while i == next_loc[0]:
prev_loc = next_loc
next_loc_index += 1
if next_loc_index >= len(e_loc):
next_loc = (10000000000, 'start') # large positive number
else:
next_loc = e_loc[next_loc_index]
return distance_list | comb_dist_direct_relex-master | relex/relation_instances_reader.py |
from typing import Dict
import logging
from overrides import overrides
import torch
from torch import nn
import numpy as np
from allennlp.data import Vocabulary
from allennlp.modules.seq2vec_encoders import CnnEncoder
from allennlp.models.model import Model
from allennlp.nn import util
from allennlp.training.metrics.average import Average
from allennlp.modules import TextFieldEmbedder
from relex.multilabel_average_precision_metric import MultilabelAveragePrecision
from relex.relation_instances_reader import RelationInstancesReader
log = logging.getLogger(__name__) # pylint: disable=invalid-name
@Model.register("comb_dist_direct_relex")
class CombDistDirectRelex(Model):
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
cnn_size: int = 100,
dropout_weight: float = 0.1,
                 with_entity_embeddings: bool = True,
sent_loss_weight: float = 1,
attention_weight_fn: str = 'sigmoid',
attention_aggregation_fn: str = 'max') -> None:
regularizer = None
super().__init__(vocab, regularizer)
self.num_classes = self.vocab.get_vocab_size("labels")
self.text_field_embedder = text_field_embedder
self.dropout_weight = dropout_weight
self.with_entity_embeddings = with_entity_embeddings
self.sent_loss_weight = sent_loss_weight
self.attention_weight_fn = attention_weight_fn
self.attention_aggregation_fn = attention_aggregation_fn
# instantiate position embedder
pos_embed_output_size = 5
pos_embed_input_size = 2 * RelationInstancesReader.max_distance + 1
self.pos_embed = nn.Embedding(pos_embed_input_size, pos_embed_output_size)
pos_embed_weights = np.array([range(pos_embed_input_size)] * pos_embed_output_size).T
self.pos_embed.weight = nn.Parameter(torch.Tensor(pos_embed_weights))
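        # Row i of this init matrix is the constant vector [i, i, ..., i], i.e. each position index
        # starts out embedded as its own distance value; the embedding stays trainable.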
d = cnn_size
sent_encoder = CnnEncoder # TODO: should be moved to the config file
cnn_output_size = d
embedding_size = 300 # TODO: should be moved to the config file
# instantiate sentence encoder
self.cnn = sent_encoder(embedding_dim=(embedding_size + 2 * pos_embed_output_size), num_filters=cnn_size,
ngram_filter_sizes=(2, 3, 4, 5),
conv_layer_activation=torch.nn.ReLU(), output_dim=cnn_output_size)
# dropout after word embedding
self.dropout = nn.Dropout(p=self.dropout_weight)
# given a sentence, returns its unnormalized attention weight
self.attention_ff = nn.Sequential(
nn.Linear(cnn_output_size, d),
nn.ReLU(),
nn.Linear(d, 1)
)
self.ff_before_alpha = nn.Sequential(
nn.Linear(1, 50),
nn.ReLU(),
nn.Linear(50, 1),
)
ff_input_size = cnn_output_size
if self.with_entity_embeddings:
ff_input_size += embedding_size
# output layer
self.ff = nn.Sequential(
nn.Linear(ff_input_size, d),
nn.ReLU(),
nn.Linear(d, self.num_classes)
)
self.loss = torch.nn.BCEWithLogitsLoss() # sigmoid + binary cross entropy
self.metrics = {}
        self.metrics['ap'] = MultilabelAveragePrecision()  # average precision (area under the precision-recall curve)
self.metrics['bag_loss'] = Average() # to display bag-level loss
if self.sent_loss_weight > 0:
self.metrics['sent_loss'] = Average() # to display sentence-level loss
@overrides
def forward(self, # pylint: disable=arguments-differ
mentions: Dict[str, torch.LongTensor],
positions1: torch.LongTensor,
positions2: torch.LongTensor,
is_direct_supervision_bag: torch.LongTensor,
sent_labels: torch.LongTensor, # sentence-level labels
labels: torch.LongTensor # bag-level labels
) -> Dict[str, torch.Tensor]:
        # whether the bags in this batch are directly or distantly supervised (batches are assumed to be homogeneous; see the check below)
is_direct_supervision_batch = bool(is_direct_supervision_bag['tokens'].shape[1] - 1)
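        # The reader encodes the flag as a dummy text field of length 2 (". .") for directly
        # supervised bags and length 1 (".") otherwise, so `shape[1] - 1` is nonzero iff the
        # padded batch contains at least one directly supervised bag.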
if is_direct_supervision_bag['tokens'].shape[1] != 1:
direct_supervision_bags_count = sum(is_direct_supervision_bag['tokens'][:, -1] != 0).item()
# is it a mix of both ? this affects a single batch because of the sorting_keys in the bucket iterator
if direct_supervision_bags_count != len(is_direct_supervision_bag['tokens'][:, -1] != 0):
log.error("Mixed batch with %d supervised bags. Treated as dist. supervised", direct_supervision_bags_count)
tokens = mentions['tokens']
assert tokens.dim() == 3
batch_size = tokens.size(0)
padded_bag_size = tokens.size(1)
padded_sent_size = tokens.size(2)
mask = util.get_text_field_mask(mentions, num_wrapping_dims=1)
# embed text
t_embd = self.text_field_embedder(mentions)
# embed position information
p1_embd = self.pos_embed(positions1)
p2_embd = self.pos_embed(positions2)
        # concatenate position embeddings to the word embeddings
# x.shape: batch_size x padded_bag_size x padded_sent_size x (text_embedding_size + 2 * position_embedding_size)
x = torch.cat([t_embd, p1_embd, p2_embd], dim=3)
if self.dropout_weight > 0:
x = self.dropout(x)
        # merge the first two dimensions because the sentence encoder doesn't support 4d input
x = x.view(batch_size * padded_bag_size, padded_sent_size, -1)
mask = mask.view(batch_size * padded_bag_size, -1)
# call sequence encoder
x = self.cnn(x, mask) # (batch_size * padded_bag_size) x cnn_output_size
# separate the first two dimensions back
x = x.view(batch_size, padded_bag_size, -1)
mask = mask.view(batch_size, padded_bag_size, -1)
        # compute unnormalized attention weights, one scalar per sentence
alphas = self.attention_ff(x)
if self.sent_loss_weight > 0:
# compute sentence-level loss on the directly supervised data (if any)
sent_labels = sent_labels.unsqueeze(-1)
# `sent_labels != 2`: directly supervised examples and distantly supervised negative examples
sent_labels_mask = ((sent_labels != 2).long() * mask[:, :, [0]]).float()
sent_labels_masked_pred = sent_labels_mask * torch.sigmoid(alphas)
sent_labels_masked_goal = sent_labels_mask * sent_labels.float()
sent_loss = torch.nn.functional.binary_cross_entropy(sent_labels_masked_pred, sent_labels_masked_goal)
# apply a small FF to the attention weights
alphas = self.ff_before_alpha(alphas)
# normalize attention weights based on the selected weighting function
if self.attention_weight_fn == 'uniform':
alphas = mask[:, :, 0].float()
elif self.attention_weight_fn == 'softmax':
alphas = util.masked_softmax(alphas.squeeze(-1), mask[:, :, 0].float())
elif self.attention_weight_fn == 'sigmoid':
alphas = torch.sigmoid(alphas.squeeze(-1)) * mask[:, :, 0].float()
elif self.attention_weight_fn == 'norm_sigmoid': # equation 7 in https://arxiv.org/pdf/1805.02214.pdf
alphas = torch.sigmoid(alphas.squeeze(-1)) * mask[:, :, 0].float()
alphas = alphas / alphas.sum(dim=-1, keepdim=True)
else:
assert False
# Input:
# `x`: sentence encodings
# `alphas`: attention weights
# `attention_aggregation_fn`: aggregation function
# Output: bag encoding
if self.attention_aggregation_fn == 'max':
x = alphas.unsqueeze(-1) * x # weight sentences
x = x.max(dim=1)[0] # max pooling
elif self.attention_aggregation_fn == 'avg':
x = torch.bmm(alphas.unsqueeze(1), x).squeeze(1) # average pooling
else:
assert False
if self.with_entity_embeddings:
# actual bag_size (not padded_bag_size) for each instance in the batch
bag_size = mask[:, :, 0].sum(dim=1, keepdim=True).float()
e1_mask = (positions1 == 0).long() * mask
e1_embd = torch.matmul(e1_mask.unsqueeze(2).float(), t_embd)
e1_embd_sent_sum = e1_embd.squeeze(dim=2).sum(dim=1)
e1_embd_sent_avg = e1_embd_sent_sum / bag_size
e2_mask = (positions2 == 0).long() * mask
e2_embd = torch.matmul(e2_mask.unsqueeze(2).float(), t_embd)
e2_embd_sent_sum = e2_embd.squeeze(dim=2).sum(dim=1)
e2_embd_sent_avg = e2_embd_sent_sum / bag_size
e1_e2_mult = e1_embd_sent_avg * e2_embd_sent_avg
x = torch.cat([x, e1_e2_mult], dim=1)
logits = self.ff(x) # batch_size x self.num_classes
output_dict = {'logits': logits} # sigmoid is applied in the loss function and the metric class, not here
if labels is not None: # Training and evaluation
w = self.sent_loss_weight / (self.sent_loss_weight + 1)
one_minus_w = 1 - w # weight of the bag-level loss
if is_direct_supervision_batch and self.sent_loss_weight > 0:
one_minus_w = 0
loss = self.loss(logits, labels.squeeze(-1).type_as(logits)) * self.num_classes # scale the loss to be more readable
loss *= one_minus_w
self.metrics['bag_loss'](loss.item())
self.metrics['ap'](logits, labels.squeeze(-1))
if self.sent_loss_weight > 0:
sent_loss *= w
self.metrics['sent_loss'](sent_loss.item())
loss += sent_loss
output_dict['loss'] = loss
return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
prob_thr = 0.6 # to ignore predicted labels with low prob.
probs = torch.sigmoid(output_dict['logits'])
output_dict['labels'] = []
for row in probs.cpu().data.numpy():
labels = []
for i, p in enumerate(row):
if p > prob_thr: # ignore predictions with low prob.
labels.append((self.vocab.get_token_from_index(i, namespace="labels"), float(p)))
# output_dict[self.vocab.get_token_from_index(i, namespace="labels")] = torch.Tensor([float(p)])
output_dict['labels'].append(labels)
del output_dict['loss']
return output_dict
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {metric_name: metric.get_metric(reset) for metric_name, metric in self.metrics.items()}
| comb_dist_direct_relex-master | relex/comb_dist_direct_relex.py |
import os
from pathlib import Path
ABS_PATH_OF_TOP_LEVEL_DIR = os.path.abspath(os.path.dirname(Path(__file__)))
ABS_PATH_OF_DOCS_DIR = os.path.join(ABS_PATH_OF_TOP_LEVEL_DIR, "docs")
| allenact-main | constants.py |
#!/usr/bin/env python3
"""Entry point to training/validating/testing for a user given experiment
name."""
import allenact.main
if __name__ == "__main__":
allenact.main.main()
| allenact-main | main.py |
allenact-main | projects/__init__.py |
|
allenact-main | projects/gym_baselines/__init__.py |
|
from abc import ABC
from typing import Dict, Sequence, Optional, List, Any
from allenact.base_abstractions.experiment_config import ExperimentConfig
from allenact.base_abstractions.sensor import Sensor
class GymBaseConfig(ExperimentConfig, ABC):
SENSORS: Optional[Sequence[Sensor]] = None
def _get_sampler_args(
self, process_ind: int, mode: str, seeds: List[int]
) -> Dict[str, Any]:
raise NotImplementedError
def train_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
return self._get_sampler_args(
process_ind=process_ind, mode="train", seeds=seeds
)
def valid_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
return self._get_sampler_args(
process_ind=process_ind, mode="valid", seeds=seeds
)
def test_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
return self._get_sampler_args(process_ind=process_ind, mode="test", seeds=seeds)
| allenact-main | projects/gym_baselines/experiments/gym_base.py |
from abc import ABC
from typing import Dict, Any
from allenact.utils.viz_utils import VizSuite, AgentViewViz
from projects.gym_baselines.experiments.gym_base import GymBaseConfig
class GymHumanoidBaseConfig(GymBaseConfig, ABC):
@classmethod
def machine_params(cls, mode="train", **kwargs) -> Dict[str, Any]:
visualizer = None
if mode == "test":
visualizer = VizSuite(
mode=mode,
video_viz=AgentViewViz(
label="episode_vid",
max_clip_length=400,
vector_task_source=("render", {"mode": "rgb_array"}),
fps=30,
),
)
return {
"nprocesses": 8 if mode == "train" else 1, # rollout
"devices": [],
"visualizer": visualizer,
}
| allenact-main | projects/gym_baselines/experiments/gym_humanoid_base.py |
from abc import ABC
from typing import Dict, Any
from allenact.utils.viz_utils import VizSuite, AgentViewViz
from projects.gym_baselines.experiments.gym_base import GymBaseConfig
class GymMoJoCoBaseConfig(GymBaseConfig, ABC):
@classmethod
def machine_params(cls, mode="train", **kwargs) -> Dict[str, Any]:
visualizer = None
if mode == "test":
visualizer = VizSuite(
mode=mode,
video_viz=AgentViewViz(
label="episode_vid",
max_clip_length=400,
vector_task_source=("render", {"mode": "rgb_array"}),
fps=30,
),
)
return {
"nprocesses": 8 if mode == "train" else 1, # rollout
"devices": [],
"visualizer": visualizer,
}
| allenact-main | projects/gym_baselines/experiments/gym_mujoco_base.py |
from abc import ABC
from typing import cast
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses.ppo import PPO
from allenact.utils.experiment_utils import (
TrainingPipeline,
Builder,
PipelineStage,
LinearDecay,
)
from projects.gym_baselines.experiments.gym_humanoid_base import GymHumanoidBaseConfig
class GymHumanoidPPOConfig(GymHumanoidBaseConfig, ABC):
@classmethod
def training_pipeline(cls, **kwargs) -> TrainingPipeline:
lr = 1e-4
ppo_steps = int(8e7) # convergence may be after 1e8
clip_param = 0.1
value_loss_coef = 0.5
entropy_coef = 0.0
num_mini_batch = 4 # optimal 64
update_repeats = 10
max_grad_norm = 0.5
num_steps = 2048
gamma = 0.99
use_gae = True
gae_lambda = 0.95
advance_scene_rollout_period = None
save_interval = 200000
metric_accumulate_interval = 50000
return TrainingPipeline(
named_losses=dict(
ppo_loss=PPO(
clip_param=clip_param,
value_loss_coef=value_loss_coef,
entropy_coef=entropy_coef,
),
), # type:ignore
pipeline_stages=[
PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps),
],
optimizer_builder=Builder(cast(optim.Optimizer, optim.Adam), dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=advance_scene_rollout_period,
save_interval=save_interval,
metric_accumulate_interval=metric_accumulate_interval,
lr_scheduler_builder=Builder(
LambdaLR,
{
"lr_lambda": LinearDecay(steps=ppo_steps, startp=1, endp=1)
}, # constant learning rate
),
)
| allenact-main | projects/gym_baselines/experiments/gym_humanoid_ddppo.py |
allenact-main | projects/gym_baselines/experiments/__init__.py |
|
from abc import ABC
from typing import cast
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses.ppo import PPO
from allenact.utils.experiment_utils import (
TrainingPipeline,
Builder,
PipelineStage,
LinearDecay,
)
from projects.gym_baselines.experiments.gym_mujoco_base import GymMoJoCoBaseConfig
class GymMuJoCoPPOConfig(GymMoJoCoBaseConfig, ABC):
@classmethod
def training_pipeline(cls, **kwargs) -> TrainingPipeline:
lr = 3e-4
ppo_steps = int(3e7)
clip_param = 0.2
value_loss_coef = 0.5
entropy_coef = 0.0
num_mini_batch = 4 # optimal 64
update_repeats = 10
max_grad_norm = 0.5
num_steps = 2048
gamma = 0.99
use_gae = True
gae_lambda = 0.95
advance_scene_rollout_period = None
save_interval = 200000
metric_accumulate_interval = 50000
return TrainingPipeline(
named_losses=dict(
ppo_loss=PPO(
clip_param=clip_param,
value_loss_coef=value_loss_coef,
entropy_coef=entropy_coef,
),
), # type:ignore
pipeline_stages=[
PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps),
],
optimizer_builder=Builder(cast(optim.Optimizer, optim.Adam), dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=advance_scene_rollout_period,
save_interval=save_interval,
metric_accumulate_interval=metric_accumulate_interval,
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps, startp=1, endp=0)},
),
)
| allenact-main | projects/gym_baselines/experiments/gym_mujoco_ddppo.py |
from typing import Dict, List, Any
import gym
import torch.nn as nn
from allenact.base_abstractions.experiment_config import TaskSampler
from allenact.base_abstractions.sensor import SensorSuite
from allenact_plugins.gym_plugin.gym_models import MemorylessActorCritic
from allenact_plugins.gym_plugin.gym_sensors import GymMuJoCoSensor
from allenact_plugins.gym_plugin.gym_tasks import GymTaskSampler
from projects.gym_baselines.experiments.gym_mujoco_ddppo import GymMuJoCoPPOConfig
class GymMuJoCoSwimmerConfig(GymMuJoCoPPOConfig):
SENSORS = [
GymMuJoCoSensor(gym_env_name="Swimmer-v2", uuid="gym_mujoco_data"),
]
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
"""We define our `ActorCriticModel` agent using a lightweight
implementation with separate MLPs for actors and critic,
MemorylessActorCritic.
Since this is a model for continuous control, note that the
superclass of our model is `ActorCriticModel[GaussianDistr]`
instead of `ActorCriticModel[CategoricalDistr]`, since we'll use
a Gaussian distribution to sample actions.
"""
action_space = gym.spaces.Box(-1.0, 1.0, (2,), "float32")
return MemorylessActorCritic(
input_uuid="gym_mujoco_data",
action_space=action_space, # specific action_space
observation_space=SensorSuite(cls.SENSORS).observation_spaces,
action_std=0.5,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return GymTaskSampler(gym_env_type="Swimmer-v2", **kwargs)
def _get_sampler_args(
self, process_ind: int, mode: str, seeds: List[int]
) -> Dict[str, Any]:
"""Generate initialization arguments for train, valid, and test
TaskSamplers.
# Parameters
process_ind : index of the current task sampler
mode: one of `train`, `valid`, or `test`
"""
if mode == "train":
max_tasks = None # infinite training tasks
task_seeds_list = None # no predefined random seeds for training
deterministic_sampling = False # randomly sample tasks in training
else:
max_tasks = 4
# one seed for each task to sample:
# - ensures different seeds for each sampler, and
# - ensures a deterministic set of sampled tasks.
task_seeds_list = list(
range(process_ind * max_tasks, (process_ind + 1) * max_tasks)
)
deterministic_sampling = (
True # deterministically sample task in validation/testing
)
return dict(
gym_env_types=["Swimmer-v2"],
sensors=self.SENSORS, # sensors used to return observations to the agent
max_tasks=max_tasks, # see above
task_seeds_list=task_seeds_list, # see above
deterministic_sampling=deterministic_sampling, # see above
seed=seeds[process_ind],
)
@classmethod
def tag(cls) -> str:
return "Gym-MuJoCo-Swimmer-v2-PPO"
| allenact-main | projects/gym_baselines/experiments/mujoco/gym_mujoco_swimmer_ddppo.py |
from typing import Dict, List, Any
import gym
import torch.nn as nn
from allenact.base_abstractions.experiment_config import TaskSampler
from allenact.base_abstractions.sensor import SensorSuite
from allenact_plugins.gym_plugin.gym_models import MemorylessActorCritic
from allenact_plugins.gym_plugin.gym_sensors import GymMuJoCoSensor
from allenact_plugins.gym_plugin.gym_tasks import GymTaskSampler
from projects.gym_baselines.experiments.gym_mujoco_ddppo import GymMuJoCoPPOConfig
class GymMuJoCoReacherConfig(GymMuJoCoPPOConfig):
SENSORS = [
GymMuJoCoSensor(gym_env_name="Reacher-v2", uuid="gym_mujoco_data"),
]
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
"""We define our `ActorCriticModel` agent using a lightweight
implementation with separate MLPs for actors and critic,
MemorylessActorCritic.
Since this is a model for continuous control, note that the
superclass of our model is `ActorCriticModel[GaussianDistr]`
instead of `ActorCriticModel[CategoricalDistr]`, since we'll use
a Gaussian distribution to sample actions.
"""
action_space = gym.spaces.Box(-1.0, 1.0, (2,), "float32")
return MemorylessActorCritic(
input_uuid="gym_mujoco_data",
action_space=action_space, # specific action_space
observation_space=SensorSuite(cls.SENSORS).observation_spaces,
action_std=0.5,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return GymTaskSampler(gym_env_type="Reacher-v2", **kwargs)
def _get_sampler_args(
self, process_ind: int, mode: str, seeds: List[int]
) -> Dict[str, Any]:
"""Generate initialization arguments for train, valid, and test
TaskSamplers.
# Parameters
process_ind : index of the current task sampler
mode: one of `train`, `valid`, or `test`
"""
if mode == "train":
max_tasks = None # infinite training tasks
task_seeds_list = None # no predefined random seeds for training
deterministic_sampling = False # randomly sample tasks in training
else:
max_tasks = 4
# one seed for each task to sample:
# - ensures different seeds for each sampler, and
# - ensures a deterministic set of sampled tasks.
task_seeds_list = list(
range(process_ind * max_tasks, (process_ind + 1) * max_tasks)
)
deterministic_sampling = (
True # deterministically sample task in validation/testing
)
return dict(
gym_env_types=["Reacher-v2"],
sensors=self.SENSORS, # sensors used to return observations to the agent
max_tasks=max_tasks, # see above
task_seeds_list=task_seeds_list, # see above
deterministic_sampling=deterministic_sampling, # see above
seed=seeds[process_ind],
)
@classmethod
def tag(cls) -> str:
return "Gym-MuJoCo-Reacher-v2-PPO"
| allenact-main | projects/gym_baselines/experiments/mujoco/gym_mujoco_reacher_ddppo.py |
from typing import Dict, List, Any
import gym
import torch.nn as nn
from allenact.base_abstractions.experiment_config import TaskSampler
from allenact.base_abstractions.sensor import SensorSuite
from allenact_plugins.gym_plugin.gym_models import MemorylessActorCritic
from allenact_plugins.gym_plugin.gym_sensors import GymMuJoCoSensor
from allenact_plugins.gym_plugin.gym_tasks import GymTaskSampler
from projects.gym_baselines.experiments.gym_mujoco_ddppo import GymMuJoCoPPOConfig
class GymMuJoCoWalkerConfig(GymMuJoCoPPOConfig):
SENSORS = [
GymMuJoCoSensor(gym_env_name="Walker2d-v2", uuid="gym_mujoco_data"),
]
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
"""We define our `ActorCriticModel` agent using a lightweight
implementation with separate MLPs for actors and critic,
MemorylessActorCritic.
Since this is a model for continuous control, note that the
superclass of our model is `ActorCriticModel[GaussianDistr]`
instead of `ActorCriticModel[CategoricalDistr]`, since we'll use
a Gaussian distribution to sample actions.
"""
action_space = gym.spaces.Box(-1.0, 1.0, (6,), "float32")
return MemorylessActorCritic(
input_uuid="gym_mujoco_data",
action_space=action_space, # specific action_space
observation_space=SensorSuite(cls.SENSORS).observation_spaces,
action_std=0.5,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return GymTaskSampler(gym_env_type="Walker2d-v2", **kwargs)
def _get_sampler_args(
self, process_ind: int, mode: str, seeds: List[int]
) -> Dict[str, Any]:
"""Generate initialization arguments for train, valid, and test
TaskSamplers.
# Parameters
process_ind : index of the current task sampler
mode: one of `train`, `valid`, or `test`
"""
if mode == "train":
max_tasks = None # infinite training tasks
task_seeds_list = None # no predefined random seeds for training
deterministic_sampling = False # randomly sample tasks in training
else:
max_tasks = 4
# one seed for each task to sample:
# - ensures different seeds for each sampler, and
# - ensures a deterministic set of sampled tasks.
task_seeds_list = list(
range(process_ind * max_tasks, (process_ind + 1) * max_tasks)
)
deterministic_sampling = (
True # deterministically sample task in validation/testing
)
return dict(
gym_env_types=["Walker2d-v2"],
sensors=self.SENSORS, # sensors used to return observations to the agent
max_tasks=max_tasks, # see above
task_seeds_list=task_seeds_list, # see above
deterministic_sampling=deterministic_sampling, # see above
seed=seeds[process_ind],
)
@classmethod
def tag(cls) -> str:
return "Gym-MuJoCo-Walker2d-v2-PPO"
| allenact-main | projects/gym_baselines/experiments/mujoco/gym_mujoco_walker2d_ddppo.py |
allenact-main | projects/gym_baselines/experiments/mujoco/__init__.py |
|
from typing import Dict, List, Any
import gym
import torch.nn as nn
from allenact.base_abstractions.experiment_config import TaskSampler
from allenact.base_abstractions.sensor import SensorSuite
from allenact_plugins.gym_plugin.gym_models import MemorylessActorCritic
from allenact_plugins.gym_plugin.gym_sensors import GymMuJoCoSensor
from allenact_plugins.gym_plugin.gym_tasks import GymTaskSampler
from projects.gym_baselines.experiments.gym_mujoco_ddppo import GymMuJoCoPPOConfig
class GymMuJoCoHalfCheetahConfig(GymMuJoCoPPOConfig):
SENSORS = [
GymMuJoCoSensor(gym_env_name="HalfCheetah-v2", uuid="gym_mujoco_data"),
]
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
"""We define our `ActorCriticModel` agent using a lightweight
implementation with separate MLPs for actors and critic,
MemorylessActorCritic.
Since this is a model for continuous control, note that the
superclass of our model is `ActorCriticModel[GaussianDistr]`
instead of `ActorCriticModel[CategoricalDistr]`, since we'll use
a Gaussian distribution to sample actions.
"""
action_space = gym.spaces.Box(-1.0, 1.0, (6,), "float32")
return MemorylessActorCritic(
input_uuid="gym_mujoco_data",
action_space=action_space, # specific action_space
observation_space=SensorSuite(cls.SENSORS).observation_spaces,
action_std=0.5,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return GymTaskSampler(gym_env_type="HalfCheetah-v2", **kwargs)
def _get_sampler_args(
self, process_ind: int, mode: str, seeds: List[int]
) -> Dict[str, Any]:
"""Generate initialization arguments for train, valid, and test
TaskSamplers.
# Parameters
process_ind : index of the current task sampler
mode: one of `train`, `valid`, or `test`
"""
if mode == "train":
max_tasks = None # infinite training tasks
task_seeds_list = None # no predefined random seeds for training
deterministic_sampling = False # randomly sample tasks in training
else:
max_tasks = 4
# one seed for each task to sample:
# - ensures different seeds for each sampler, and
# - ensures a deterministic set of sampled tasks.
task_seeds_list = list(
range(process_ind * max_tasks, (process_ind + 1) * max_tasks)
)
deterministic_sampling = (
True # deterministically sample task in validation/testing
)
return dict(
gym_env_types=["HalfCheetah-v2"],
sensors=self.SENSORS, # sensors used to return observations to the agent
max_tasks=max_tasks, # see above
task_seeds_list=task_seeds_list, # see above
deterministic_sampling=deterministic_sampling, # see above
seed=seeds[process_ind],
)
@classmethod
def tag(cls) -> str:
return "Gym-MuJoCo-HalfCheetah-v2-PPO"
| allenact-main | projects/gym_baselines/experiments/mujoco/gym_mujoco_halfcheetah_ddppo.py |
from typing import Dict, List, Any
import gym
import torch.nn as nn
from allenact.base_abstractions.experiment_config import TaskSampler
from allenact.base_abstractions.sensor import SensorSuite
from allenact_plugins.gym_plugin.gym_models import MemorylessActorCritic
from allenact_plugins.gym_plugin.gym_sensors import GymMuJoCoSensor
from allenact_plugins.gym_plugin.gym_tasks import GymTaskSampler
from projects.gym_baselines.experiments.gym_humanoid_ddppo import GymHumanoidPPOConfig
class GymMuJoCoHumanoidConfig(GymHumanoidPPOConfig):
SENSORS = [
GymMuJoCoSensor(gym_env_name="Humanoid-v2", uuid="gym_mujoco_data"),
]
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
"""We define our `ActorCriticModel` agent using a lightweight
implementation with separate MLPs for actors and critic,
MemorylessActorCritic.
Since this is a model for continuous control, note that the
superclass of our model is `ActorCriticModel[GaussianDistr]`
instead of `ActorCriticModel[CategoricalDistr]`, since we'll use
a Gaussian distribution to sample actions.
"""
action_space = gym.spaces.Box(
-0.4000000059604645, 0.4000000059604645, (17,), "float32"
)
return MemorylessActorCritic(
input_uuid="gym_mujoco_data",
action_space=action_space, # specific action_space
observation_space=SensorSuite(cls.SENSORS).observation_spaces,
action_std=0.5,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return GymTaskSampler(gym_env_type="Humanoid-v2", **kwargs)
def _get_sampler_args(
self, process_ind: int, mode: str, seeds: List[int]
) -> Dict[str, Any]:
"""Generate initialization arguments for train, valid, and test
TaskSamplers.
# Parameters
process_ind : index of the current task sampler
mode: one of `train`, `valid`, or `test`
"""
if mode == "train":
max_tasks = None # infinite training tasks
task_seeds_list = None # no predefined random seeds for training
deterministic_sampling = False # randomly sample tasks in training
else:
max_tasks = 4
# one seed for each task to sample:
# - ensures different seeds for each sampler, and
# - ensures a deterministic set of sampled tasks.
task_seeds_list = list(
range(process_ind * max_tasks, (process_ind + 1) * max_tasks)
)
deterministic_sampling = (
True # deterministically sample task in validation/testing
)
return dict(
gym_env_types=["Humanoid-v2"],
sensors=self.SENSORS, # sensors used to return observations to the agent
max_tasks=max_tasks, # see above
task_seeds_list=task_seeds_list, # see above
deterministic_sampling=deterministic_sampling, # see above
seed=seeds[process_ind],
)
@classmethod
def tag(cls) -> str:
return "Gym-MuJoCo-Humanoid-v2-PPO"
| allenact-main | projects/gym_baselines/experiments/mujoco/gym_mujoco_humanoid_ddppo.py |
from typing import Dict, List, Any
import gym
import torch.nn as nn
from allenact.base_abstractions.experiment_config import TaskSampler
from allenact.base_abstractions.sensor import SensorSuite
from allenact_plugins.gym_plugin.gym_models import MemorylessActorCritic
from allenact_plugins.gym_plugin.gym_sensors import GymMuJoCoSensor
from allenact_plugins.gym_plugin.gym_tasks import GymTaskSampler
from projects.gym_baselines.experiments.gym_mujoco_ddppo import GymMuJoCoPPOConfig
class GymMuJoInvertedDoublePendulumConfig(GymMuJoCoPPOConfig):
SENSORS = [
GymMuJoCoSensor(
gym_env_name="InvertedDoublePendulum-v2", uuid="gym_mujoco_data"
),
]
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
"""We define our `ActorCriticModel` agent using a lightweight
implementation with separate MLPs for actors and critic,
MemorylessActorCritic.
Since this is a model for continuous control, note that the
superclass of our model is `ActorCriticModel[GaussianDistr]`
instead of `ActorCriticModel[CategoricalDistr]`, since we'll use
a Gaussian distribution to sample actions.
"""
action_space = gym.spaces.Box(-1.0, 1.0, (1,), "float32")
return MemorylessActorCritic(
input_uuid="gym_mujoco_data",
action_space=action_space, # specific action_space
observation_space=SensorSuite(cls.SENSORS).observation_spaces,
action_std=0.5,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return GymTaskSampler(gym_env_type="InvertedDoublePendulum-v2", **kwargs)
def _get_sampler_args(
self, process_ind: int, mode: str, seeds: List[int]
) -> Dict[str, Any]:
"""Generate initialization arguments for train, valid, and test
TaskSamplers.
# Parameters
process_ind : index of the current task sampler
mode: one of `train`, `valid`, or `test`
"""
if mode == "train":
max_tasks = None # infinite training tasks
task_seeds_list = None # no predefined random seeds for training
deterministic_sampling = False # randomly sample tasks in training
else:
max_tasks = 4
# one seed for each task to sample:
# - ensures different seeds for each sampler, and
# - ensures a deterministic set of sampled tasks.
task_seeds_list = list(
range(process_ind * max_tasks, (process_ind + 1) * max_tasks)
)
deterministic_sampling = (
True # deterministically sample task in validation/testing
)
return dict(
gym_env_types=["InvertedDoublePendulum-v2"],
sensors=self.SENSORS, # sensors used to return observations to the agent
max_tasks=max_tasks, # see above
task_seeds_list=task_seeds_list, # see above
deterministic_sampling=deterministic_sampling, # see above
seed=seeds[process_ind],
)
@classmethod
def tag(cls) -> str:
return "Gym-MuJoCo-InvertedDoublePendulum-v2-PPO"
| allenact-main | projects/gym_baselines/experiments/mujoco/gym_mujoco_inverteddoublependulum_ddppo.py |
from typing import Dict, List, Any
import gym
import torch.nn as nn
from allenact.base_abstractions.experiment_config import TaskSampler
from allenact.base_abstractions.sensor import SensorSuite
from allenact_plugins.gym_plugin.gym_models import MemorylessActorCritic
from allenact_plugins.gym_plugin.gym_sensors import GymMuJoCoSensor
from allenact_plugins.gym_plugin.gym_tasks import GymTaskSampler
from projects.gym_baselines.experiments.gym_mujoco_ddppo import GymMuJoCoPPOConfig
class GymMuJoCoAntConfig(GymMuJoCoPPOConfig):
SENSORS = [
GymMuJoCoSensor(gym_env_name="Ant-v2", uuid="gym_mujoco_data"),
]
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
"""We define our `ActorCriticModel` agent using a lightweight
implementation with separate MLPs for actors and critic,
MemorylessActorCritic.
Since this is a model for continuous control, note that the
superclass of our model is `ActorCriticModel[GaussianDistr]`
instead of `ActorCriticModel[CategoricalDistr]`, since we'll use
a Gaussian distribution to sample actions.
"""
action_space = gym.spaces.Box(-3.0, 3.0, (8,), "float32")
return MemorylessActorCritic(
input_uuid="gym_mujoco_data",
action_space=action_space, # specific action_space
observation_space=SensorSuite(cls.SENSORS).observation_spaces,
action_std=0.5,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return GymTaskSampler(gym_env_type="Ant-v2", **kwargs)
def _get_sampler_args(
self, process_ind: int, mode: str, seeds: List[int]
) -> Dict[str, Any]:
"""Generate initialization arguments for train, valid, and test
TaskSamplers.
# Parameters
process_ind : index of the current task sampler
mode: one of `train`, `valid`, or `test`
"""
if mode == "train":
max_tasks = None # infinite training tasks
task_seeds_list = None # no predefined random seeds for training
deterministic_sampling = False # randomly sample tasks in training
else:
max_tasks = 4
# one seed for each task to sample:
# - ensures different seeds for each sampler, and
# - ensures a deterministic set of sampled tasks.
task_seeds_list = list(
range(process_ind * max_tasks, (process_ind + 1) * max_tasks)
)
deterministic_sampling = (
True # deterministically sample task in validation/testing
)
return dict(
gym_env_types=["Ant-v2"],
sensors=self.SENSORS, # sensors used to return observations to the agent
max_tasks=max_tasks, # see above
task_seeds_list=task_seeds_list, # see above
deterministic_sampling=deterministic_sampling, # see above
seed=seeds[process_ind],
)
@classmethod
def tag(cls) -> str:
return "Gym-MuJoCo-Ant-v2-PPO"
| allenact-main | projects/gym_baselines/experiments/mujoco/gym_mujoco_ant_ddppo.py |
from typing import Dict, List, Any
import gym
import torch.nn as nn
from allenact.base_abstractions.experiment_config import TaskSampler
from allenact.base_abstractions.sensor import SensorSuite
from allenact_plugins.gym_plugin.gym_models import MemorylessActorCritic
from allenact_plugins.gym_plugin.gym_sensors import GymMuJoCoSensor
from allenact_plugins.gym_plugin.gym_tasks import GymTaskSampler
from projects.gym_baselines.experiments.gym_mujoco_ddppo import GymMuJoCoPPOConfig
class GymMuJoCoHopperConfig(GymMuJoCoPPOConfig):
SENSORS = [
GymMuJoCoSensor(gym_env_name="Hopper-v2", uuid="gym_mujoco_data"),
]
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
"""We define our `ActorCriticModel` agent using a lightweight
implementation with separate MLPs for actors and critic,
MemorylessActorCritic.
Since this is a model for continuous control, note that the
superclass of our model is `ActorCriticModel[GaussianDistr]`
instead of `ActorCriticModel[CategoricalDistr]`, since we'll use
a Gaussian distribution to sample actions.
"""
action_space = gym.spaces.Box(-1.0, 1.0, (3,), "float32")
return MemorylessActorCritic(
input_uuid="gym_mujoco_data",
action_space=action_space, # specific action_space
observation_space=SensorSuite(cls.SENSORS).observation_spaces,
action_std=0.5,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return GymTaskSampler(gym_env_type="Hopper-v2", **kwargs)
def _get_sampler_args(
self, process_ind: int, mode: str, seeds: List[int]
) -> Dict[str, Any]:
"""Generate initialization arguments for train, valid, and test
TaskSamplers.
# Parameters
process_ind : index of the current task sampler
mode: one of `train`, `valid`, or `test`
"""
if mode == "train":
max_tasks = None # infinite training tasks
task_seeds_list = None # no predefined random seeds for training
deterministic_sampling = False # randomly sample tasks in training
else:
max_tasks = 4
# one seed for each task to sample:
# - ensures different seeds for each sampler, and
# - ensures a deterministic set of sampled tasks.
task_seeds_list = list(
range(process_ind * max_tasks, (process_ind + 1) * max_tasks)
)
deterministic_sampling = (
True # deterministically sample task in validation/testing
)
return dict(
gym_env_types=["Hopper-v2"],
sensors=self.SENSORS, # sensors used to return observations to the agent
max_tasks=max_tasks, # see above
task_seeds_list=task_seeds_list, # see above
deterministic_sampling=deterministic_sampling, # see above
seed=seeds[process_ind],
)
@classmethod
def tag(cls) -> str:
return "Gym-MuJoCo-Hopper-v2-PPO"
| allenact-main | projects/gym_baselines/experiments/mujoco/gym_mujoco_hopper_ddppo.py |
from typing import Dict, List, Any
import gym
import torch.nn as nn
from allenact.base_abstractions.experiment_config import TaskSampler
from allenact.base_abstractions.sensor import SensorSuite
from allenact_plugins.gym_plugin.gym_models import MemorylessActorCritic
from allenact_plugins.gym_plugin.gym_sensors import GymMuJoCoSensor
from allenact_plugins.gym_plugin.gym_tasks import GymTaskSampler
from projects.gym_baselines.experiments.gym_mujoco_ddppo import GymMuJoCoPPOConfig
class GymMuJoCoInvertedPendulumConfig(GymMuJoCoPPOConfig):
SENSORS = [
GymMuJoCoSensor(gym_env_name="InvertedPendulum-v2", uuid="gym_mujoco_data"),
]
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
"""We define our `ActorCriticModel` agent using a lightweight
implementation with separate MLPs for actors and critic,
MemorylessActorCritic.
Since this is a model for continuous control, note that the
superclass of our model is `ActorCriticModel[GaussianDistr]`
instead of `ActorCriticModel[CategoricalDistr]`, since we'll use
a Gaussian distribution to sample actions.
"""
action_space = gym.spaces.Box(-3.0, 3.0, (1,), "float32")
return MemorylessActorCritic(
input_uuid="gym_mujoco_data",
action_space=action_space, # specific action_space
observation_space=SensorSuite(cls.SENSORS).observation_spaces,
action_std=0.5,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return GymTaskSampler(gym_env_type="InvertedPendulum-v2", **kwargs)
def _get_sampler_args(
self, process_ind: int, mode: str, seeds: List[int]
) -> Dict[str, Any]:
"""Generate initialization arguments for train, valid, and test
TaskSamplers.
# Parameters
process_ind : index of the current task sampler
mode: one of `train`, `valid`, or `test`
"""
if mode == "train":
max_tasks = None # infinite training tasks
task_seeds_list = None # no predefined random seeds for training
deterministic_sampling = False # randomly sample tasks in training
else:
max_tasks = 4
# one seed for each task to sample:
# - ensures different seeds for each sampler, and
# - ensures a deterministic set of sampled tasks.
task_seeds_list = list(
range(process_ind * max_tasks, (process_ind + 1) * max_tasks)
)
deterministic_sampling = (
True # deterministically sample task in validation/testing
)
return dict(
gym_env_types=["InvertedPendulum-v2"],
sensors=self.SENSORS, # sensors used to return observations to the agent
max_tasks=max_tasks, # see above
task_seeds_list=task_seeds_list, # see above
deterministic_sampling=deterministic_sampling, # see above
seed=seeds[process_ind],
)
@classmethod
def tag(cls) -> str:
return "Gym-MuJoCo-InvertedPendulum-v2-PPO"
| allenact-main | projects/gym_baselines/experiments/mujoco/gym_mujoco_invertedpendulum_ddppo.py |
allenact-main | projects/gym_baselines/models/__init__.py |
|
"""
Note: this file exists only for format consistency with the other baselines in the project, so for now it is identical to
`allenact_plugins.gym_models.py`. However, for Gym Robotics environments, some modification is needed.
For example, for `state_dim`:
if input_uuid == 'gym_robotics_data':
# consider that the observation space is Dict for robotics env
state_dim = observation_space[self.input_uuid]['observation'].shape[0]
else:
assert len(observation_space[self.input_uuid].shape) == 1
state_dim = observation_space[self.input_uuid].shape[0]
"""
| allenact-main | projects/gym_baselines/models/gym_models.py |
from typing import Sequence, Union, Optional, Dict, Tuple, Type
import attr
import gym
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from torchvision import models
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.abstract_loss import (
AbstractActorCriticLoss,
)
from allenact.algorithms.onpolicy_sync.losses.imitation import Imitation
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.base_abstractions.sensor import Sensor
from allenact.embodiedai.aux_losses.losses import (
InverseDynamicsLoss,
TemporalDistanceLoss,
CPCA1Loss,
CPCA2Loss,
CPCA4Loss,
CPCA8Loss,
CPCA16Loss,
MultiAuxTaskNegEntropyLoss,
CPCA1SoftMaxLoss,
CPCA2SoftMaxLoss,
CPCA4SoftMaxLoss,
CPCA8SoftMaxLoss,
CPCA16SoftMaxLoss,
)
from allenact.embodiedai.preprocessors.resnet import ResNetPreprocessor
from allenact.embodiedai.sensors.vision_sensors import RGBSensor, DepthSensor
from allenact.utils.experiment_utils import (
Builder,
TrainingPipeline,
PipelineStage,
LinearDecay,
)
from allenact_plugins.ithor_plugin.ithor_sensors import GoalObjectTypeThorSensor
from allenact_plugins.navigation_plugin.objectnav.models import (
ResnetTensorNavActorCritic,
ObjectNavActorCritic,
)
from allenact_plugins.robothor_plugin.robothor_tasks import ObjectNavTask
@attr.s(kw_only=True)
class ResNetPreprocessGRUActorCriticMixin:
sensors: Sequence[Sensor] = attr.ib()
resnet_type: str = attr.ib()
screen_size: int = attr.ib()
goal_sensor_type: Type[Sensor] = attr.ib()
def preprocessors(self) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
preprocessors = []
if self.resnet_type in ["RN18", "RN34"]:
output_shape = (512, 7, 7)
elif self.resnet_type in ["RN50", "RN101", "RN152"]:
output_shape = (2048, 7, 7)
else:
raise NotImplementedError(
f"`RESNET_TYPE` must be one 'RNx' with x equaling one of"
f" 18, 34, 50, 101, or 152."
)
rgb_sensor = next((s for s in self.sensors if isinstance(s, RGBSensor)), None)
if rgb_sensor is not None:
preprocessors.append(
ResNetPreprocessor(
input_height=self.screen_size,
input_width=self.screen_size,
output_width=output_shape[2],
output_height=output_shape[1],
output_dims=output_shape[0],
pool=False,
torchvision_resnet_model=getattr(
models, f"resnet{self.resnet_type.replace('RN', '')}"
),
input_uuids=[rgb_sensor.uuid],
output_uuid="rgb_resnet_imagenet",
)
)
depth_sensor = next(
(s for s in self.sensors if isinstance(s, DepthSensor)), None
)
if depth_sensor is not None:
preprocessors.append(
ResNetPreprocessor(
input_height=self.screen_size,
input_width=self.screen_size,
output_width=output_shape[2],
output_height=output_shape[1],
output_dims=output_shape[0],
pool=False,
torchvision_resnet_model=getattr(
models, f"resnet{self.resnet_type.replace('RN', '')}"
),
input_uuids=[depth_sensor.uuid],
output_uuid="depth_resnet_imagenet",
)
)
return preprocessors
def create_model(self, **kwargs) -> nn.Module:
has_rgb = any(isinstance(s, RGBSensor) for s in self.sensors)
has_depth = any(isinstance(s, DepthSensor) for s in self.sensors)
goal_sensor_uuid = next(
(s.uuid for s in self.sensors if isinstance(s, self.goal_sensor_type)),
None,
)
return ResnetTensorNavActorCritic(
action_space=gym.spaces.Discrete(len(ObjectNavTask.class_action_names())),
observation_space=kwargs["sensor_preprocessor_graph"].observation_spaces,
goal_sensor_uuid=goal_sensor_uuid,
rgb_resnet_preprocessor_uuid="rgb_resnet_imagenet" if has_rgb else None,
depth_resnet_preprocessor_uuid="depth_resnet_imagenet"
if has_depth
else None,
hidden_size=512,
goal_dims=32,
)
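# Hypothetical usage sketch (illustrative only; the variable names below are assumptions, and the
# real experiment configs in this project wire the mixin up with their own sensors and preprocessor graph):
#
#   mixin = ResNetPreprocessGRUActorCriticMixin(
#       sensors=SENSORS,
#       resnet_type="RN18",
#       screen_size=224,
#       goal_sensor_type=GoalObjectTypeThorSensor,
#   )
#   preprocessors = mixin.preprocessors()
#   model = mixin.create_model(sensor_preprocessor_graph=sensor_preprocessor_graph)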
@attr.s(kw_only=True)
class ObjectNavUnfrozenResNetWithGRUActorCriticMixin:
backbone: str = attr.ib()
sensors: Sequence[Sensor] = attr.ib()
auxiliary_uuids: Sequence[str] = attr.ib()
add_prev_actions: bool = attr.ib()
multiple_beliefs: bool = attr.ib()
belief_fusion: Optional[str] = attr.ib()
def create_model(self, **kwargs) -> nn.Module:
rgb_uuid = next(
(s.uuid for s in self.sensors if isinstance(s, RGBSensor)), None
)
depth_uuid = next(
(s.uuid for s in self.sensors if isinstance(s, DepthSensor)), None
)
goal_sensor_uuid = next(
(s.uuid for s in self.sensors if isinstance(s, GoalObjectTypeThorSensor))
)
return ObjectNavActorCritic(
action_space=gym.spaces.Discrete(len(ObjectNavTask.class_action_names())),
observation_space=kwargs["sensor_preprocessor_graph"].observation_spaces,
rgb_uuid=rgb_uuid,
depth_uuid=depth_uuid,
goal_sensor_uuid=goal_sensor_uuid,
hidden_size=192
if self.multiple_beliefs and len(self.auxiliary_uuids) > 1
else 512,
backbone=self.backbone,
resnet_baseplanes=32,
object_type_embedding_dim=32,
num_rnn_layers=1,
rnn_type="GRU",
add_prev_actions=self.add_prev_actions,
action_embed_size=6,
auxiliary_uuids=self.auxiliary_uuids,
multiple_beliefs=self.multiple_beliefs,
beliefs_fusion=self.belief_fusion,
)
class ObjectNavDAggerMixin:
@staticmethod
def training_pipeline(
advance_scene_rollout_period: Optional[int] = None,
) -> TrainingPipeline:
training_steps = int(300000000)
tf_steps = int(5e6)
anneal_steps = int(5e6)
il_no_tf_steps = training_steps - tf_steps - anneal_steps
assert il_no_tf_steps > 0
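        # Resulting schedule: full teacher forcing for tf_steps, a linear anneal from 1.0 to 0.0
        # over anneal_steps, then plain imitation learning (no teacher forcing) for the remaining
        # il_no_tf_steps.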
lr = 3e-4
num_mini_batch = 1
update_repeats = 4
num_steps = 128
save_interval = 5000000
log_interval = 10000 if torch.cuda.is_available() else 1
gamma = 0.99
use_gae = True
gae_lambda = 0.95
max_grad_norm = 0.5
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=log_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses={"imitation_loss": Imitation(),},
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=advance_scene_rollout_period,
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss"],
max_stage_steps=tf_steps,
teacher_forcing=LinearDecay(startp=1.0, endp=1.0, steps=tf_steps,),
),
PipelineStage(
loss_names=["imitation_loss"],
max_stage_steps=anneal_steps + il_no_tf_steps,
teacher_forcing=LinearDecay(
startp=1.0, endp=0.0, steps=anneal_steps,
),
),
],
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=training_steps)},
),
)
def update_with_auxiliary_losses(
named_losses: Dict[str, Tuple[AbstractActorCriticLoss, float]],
auxiliary_uuids: Sequence[str],
multiple_beliefs: bool,
) -> Dict[str, Tuple[AbstractActorCriticLoss, float]]:
    # auxiliary losses
aux_loss_total_weight = 2.0
# Total losses
total_aux_losses: Dict[str, Tuple[AbstractActorCriticLoss, float]] = {
InverseDynamicsLoss.UUID: (
InverseDynamicsLoss(
subsample_rate=0.2, subsample_min_num=10, # TODO: test its effects
),
            0.05 * aux_loss_total_weight,  # the base coefficient is doubled by aux_loss_total_weight
),
TemporalDistanceLoss.UUID: (
TemporalDistanceLoss(
num_pairs=8, epsiode_len_min=5, # TODO: test its effects
),
            0.2 * aux_loss_total_weight,  # the base coefficient is doubled by aux_loss_total_weight
),
CPCA1Loss.UUID: (
CPCA1Loss(subsample_rate=0.2,), # TODO: test its effects
            0.05 * aux_loss_total_weight,  # the base coefficient is doubled by aux_loss_total_weight
),
CPCA2Loss.UUID: (
CPCA2Loss(subsample_rate=0.2,), # TODO: test its effects
            0.05 * aux_loss_total_weight,  # the base coefficient is doubled by aux_loss_total_weight
),
CPCA4Loss.UUID: (
CPCA4Loss(subsample_rate=0.2,), # TODO: test its effects
            0.05 * aux_loss_total_weight,  # the base coefficient is doubled by aux_loss_total_weight
),
CPCA8Loss.UUID: (
CPCA8Loss(subsample_rate=0.2,), # TODO: test its effects
            0.05 * aux_loss_total_weight,  # the base coefficient is doubled by aux_loss_total_weight
),
CPCA16Loss.UUID: (
CPCA16Loss(subsample_rate=0.2,), # TODO: test its effects
            0.05 * aux_loss_total_weight,  # the base coefficient is doubled by aux_loss_total_weight
),
CPCA1SoftMaxLoss.UUID: (
CPCA1SoftMaxLoss(subsample_rate=1.0,),
            0.05 * aux_loss_total_weight,  # the base coefficient is doubled by aux_loss_total_weight
),
CPCA2SoftMaxLoss.UUID: (
CPCA2SoftMaxLoss(subsample_rate=1.0,),
            0.05 * aux_loss_total_weight,  # the base coefficient is doubled by aux_loss_total_weight
),
CPCA4SoftMaxLoss.UUID: (
CPCA4SoftMaxLoss(subsample_rate=1.0,),
            0.05 * aux_loss_total_weight,  # the base coefficient is doubled by aux_loss_total_weight
),
CPCA8SoftMaxLoss.UUID: (
CPCA8SoftMaxLoss(subsample_rate=1.0,),
            0.05 * aux_loss_total_weight,  # the base coefficient is doubled by aux_loss_total_weight
),
CPCA16SoftMaxLoss.UUID: (
CPCA16SoftMaxLoss(subsample_rate=1.0,),
            0.05 * aux_loss_total_weight,  # the base coefficient is doubled by aux_loss_total_weight
),
}
named_losses.update({uuid: total_aux_losses[uuid] for uuid in auxiliary_uuids})
if multiple_beliefs: # add weight entropy loss automatically
named_losses[MultiAuxTaskNegEntropyLoss.UUID] = (
MultiAuxTaskNegEntropyLoss(auxiliary_uuids),
0.01,
)
return named_losses
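# Hypothetical usage sketch (illustrative only; cf. ObjectNavPPOMixin.training_pipeline below,
# which performs the same update on its own named_losses dict):
#
#   named_losses = {"ppo_loss": (PPO(**PPOConfig), 1.0)}
#   named_losses = update_with_auxiliary_losses(
#       named_losses=named_losses,
#       auxiliary_uuids=[InverseDynamicsLoss.UUID, CPCA4Loss.UUID],
#       multiple_beliefs=False,
#   )
#   # -> adds a (loss, weight) entry for each requested auxiliary task.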
class ObjectNavPPOMixin:
@staticmethod
def training_pipeline(
auxiliary_uuids: Sequence[str],
multiple_beliefs: bool,
normalize_advantage: bool = True,
advance_scene_rollout_period: Optional[int] = None,
lr=3e-4,
num_mini_batch=1,
update_repeats=4,
num_steps=128,
save_interval=5000000,
log_interval=10000 if torch.cuda.is_available() else 1,
gamma=0.99,
use_gae=True,
gae_lambda=0.95,
max_grad_norm=0.5,
anneal_lr: bool = True,
extra_losses: Optional[Dict[str, Tuple[AbstractActorCriticLoss, float]]] = None,
) -> TrainingPipeline:
ppo_steps = int(300000000)
named_losses = {
"ppo_loss": (
PPO(**PPOConfig, normalize_advantage=normalize_advantage),
1.0,
),
**({} if extra_losses is None else extra_losses),
}
named_losses = update_with_auxiliary_losses(
named_losses=named_losses,
auxiliary_uuids=auxiliary_uuids,
multiple_beliefs=multiple_beliefs,
)
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=log_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses={key: val[0] for key, val in named_losses.items()},
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=advance_scene_rollout_period,
pipeline_stages=[
PipelineStage(
loss_names=list(named_losses.keys()),
max_stage_steps=ppo_steps,
loss_weights=[val[1] for val in named_losses.values()],
)
],
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
)
if anneal_lr
else None,
)
| allenact-main | projects/objectnav_baselines/mixins.py |
allenact-main | projects/objectnav_baselines/__init__.py |
|
allenact-main | projects/objectnav_baselines/experiments/__init__.py |
|
import glob
import os
import platform
from abc import ABC
from math import ceil
from typing import Dict, Any, List, Optional, Sequence, Tuple, cast
import ai2thor
import ai2thor.build
import gym
import numpy as np
import torch
from packaging import version
from allenact.base_abstractions.experiment_config import MachineParams
from allenact.base_abstractions.preprocessor import SensorPreprocessorGraph
from allenact.base_abstractions.sensor import SensorSuite, ExpertActionSensor
from allenact.base_abstractions.task import TaskSampler
from allenact.utils.experiment_utils import evenly_distribute_count_into_bins
from allenact.utils.system import get_logger
from allenact_plugins.ithor_plugin.ithor_util import (
horizontal_to_vertical_fov,
get_open_x_displays,
)
from allenact_plugins.robothor_plugin.robothor_sensors import DepthSensorThor
from allenact_plugins.robothor_plugin.robothor_task_samplers import (
ObjectNavDatasetTaskSampler,
)
from allenact_plugins.robothor_plugin.robothor_tasks import ObjectNavTask
from projects.objectnav_baselines.experiments.objectnav_base import ObjectNavBaseConfig
if (
ai2thor.__version__ not in ["0.0.1", None]
and not ai2thor.__version__.startswith("0+")
and version.parse(ai2thor.__version__) < version.parse("3.2.0")
):
raise ImportError(
"To run the AI2-THOR ObjectNav baseline experiments you must use"
" ai2thor version 3.2.0 or higher."
)
import ai2thor.platform
class ObjectNavThorBaseConfig(ObjectNavBaseConfig, ABC):
"""The base config for all AI2-THOR ObjectNav experiments."""
DEFAULT_NUM_TRAIN_PROCESSES: Optional[int] = None
DEFAULT_TRAIN_GPU_IDS = tuple(range(torch.cuda.device_count()))
DEFAULT_VALID_GPU_IDS = (torch.cuda.device_count() - 1,)
DEFAULT_TEST_GPU_IDS = (torch.cuda.device_count() - 1,)
TRAIN_DATASET_DIR: Optional[str] = None
VAL_DATASET_DIR: Optional[str] = None
TEST_DATASET_DIR: Optional[str] = None
AGENT_MODE = "default"
TARGET_TYPES: Optional[Sequence[str]] = None
THOR_COMMIT_ID: Optional[str] = None
DEFAULT_THOR_IS_HEADLESS: bool = False
ACTION_SPACE = gym.spaces.Discrete(len(ObjectNavTask.class_action_names()))
def __init__(
self,
num_train_processes: Optional[int] = None,
num_test_processes: Optional[int] = None,
test_on_validation: bool = False,
train_gpu_ids: Optional[Sequence[int]] = None,
val_gpu_ids: Optional[Sequence[int]] = None,
test_gpu_ids: Optional[Sequence[int]] = None,
randomize_train_materials: bool = False,
headless: bool = False,
):
super().__init__()
def v_or_default(v, default):
return v if v is not None else default
self.num_train_processes = v_or_default(
num_train_processes, self.DEFAULT_NUM_TRAIN_PROCESSES
)
self.num_test_processes = v_or_default(
num_test_processes, (10 if torch.cuda.is_available() else 1)
)
self.test_on_validation = test_on_validation
self.train_gpu_ids = v_or_default(train_gpu_ids, self.DEFAULT_TRAIN_GPU_IDS)
self.val_gpu_ids = v_or_default(val_gpu_ids, self.DEFAULT_VALID_GPU_IDS)
self.test_gpu_ids = v_or_default(test_gpu_ids, self.DEFAULT_TEST_GPU_IDS)
self.headless = v_or_default(headless, self.DEFAULT_THOR_IS_HEADLESS)
self.sampler_devices = self.train_gpu_ids
self.randomize_train_materials = randomize_train_materials
def env_args(self):
assert self.THOR_COMMIT_ID is not None
return dict(
width=self.CAMERA_WIDTH,
height=self.CAMERA_HEIGHT,
commit_id=self.THOR_COMMIT_ID
if not self.headless
else ai2thor.build.COMMIT_ID,
stochastic=True,
continuousMode=True,
applyActionNoise=self.STOCHASTIC,
rotateStepDegrees=self.ROTATION_DEGREES,
visibilityDistance=self.VISIBILITY_DISTANCE,
gridSize=self.STEP_SIZE,
snapToGrid=False,
agentMode=self.AGENT_MODE,
fieldOfView=horizontal_to_vertical_fov(
horizontal_fov_in_degrees=self.HORIZONTAL_FIELD_OF_VIEW,
width=self.CAMERA_WIDTH,
height=self.CAMERA_HEIGHT,
),
include_private_scenes=False,
renderDepthImage=any(isinstance(s, DepthSensorThor) for s in self.SENSORS),
)
def machine_params(self, mode="train", **kwargs):
sampler_devices: Sequence[torch.device] = []
devices: Sequence[torch.device]
if mode == "train":
workers_per_device = 1
devices = (
[torch.device("cpu")]
if not torch.cuda.is_available()
else cast(Tuple, self.train_gpu_ids) * workers_per_device
)
nprocesses = evenly_distribute_count_into_bins(
self.num_train_processes, max(len(devices), 1)
)
sampler_devices = self.sampler_devices
elif mode == "valid":
nprocesses = 1
devices = (
[torch.device("cpu")]
if not torch.cuda.is_available()
else self.val_gpu_ids
)
elif mode == "test":
devices = (
[torch.device("cpu")]
if not torch.cuda.is_available()
else self.test_gpu_ids
)
nprocesses = evenly_distribute_count_into_bins(
self.num_test_processes, max(len(devices), 1)
)
else:
raise NotImplementedError("mode must be 'train', 'valid', or 'test'.")
sensors = [*self.SENSORS]
if mode != "train":
sensors = [s for s in sensors if not isinstance(s, ExpertActionSensor)]
sensor_preprocessor_graph = (
SensorPreprocessorGraph(
source_observation_spaces=SensorSuite(sensors).observation_spaces,
preprocessors=self.preprocessors(),
)
if mode == "train"
or (
(isinstance(nprocesses, int) and nprocesses > 0)
or (isinstance(nprocesses, Sequence) and sum(nprocesses) > 0)
)
else None
)
return MachineParams(
nprocesses=nprocesses,
devices=devices,
sampler_devices=sampler_devices
if mode == "train"
else devices, # ignored with > 1 gpu_ids
sensor_preprocessor_graph=sensor_preprocessor_graph,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return ObjectNavDatasetTaskSampler(**kwargs)
@staticmethod
def _partition_inds(n: int, num_parts: int):
return np.round(np.linspace(0, n, num_parts + 1, endpoint=True)).astype(
np.int32
)
def _get_sampler_args_for_scene_split(
self,
scenes_dir: str,
process_ind: int,
total_processes: int,
devices: Optional[List[int]],
seeds: Optional[List[int]],
deterministic_cudnn: bool,
include_expert_sensor: bool = True,
allow_oversample: bool = False,
) -> Dict[str, Any]:
path = os.path.join(scenes_dir, "*.json.gz")
scenes = [scene.split("/")[-1].split(".")[0] for scene in glob.glob(path)]
if len(scenes) == 0:
raise RuntimeError(
(
"Could find no scene dataset information in directory {}."
" Are you sure you've downloaded them? "
" If not, see https://allenact.org/installation/download-datasets/ information"
" on how this can be done."
).format(scenes_dir)
)
oversample_warning = (
f"Warning: oversampling some of the scenes ({scenes}) to feed all processes ({total_processes})."
" You can avoid this by setting a number of workers divisible by the number of scenes"
)
if total_processes > len(scenes): # oversample some scenes -> bias
if not allow_oversample:
raise RuntimeError(
f"Cannot have `total_processes > len(scenes)`"
f" ({total_processes} > {len(scenes)}) when `allow_oversample` is `False`."
)
if total_processes % len(scenes) != 0:
get_logger().warning(oversample_warning)
scenes = scenes * int(ceil(total_processes / len(scenes)))
scenes = scenes[: total_processes * (len(scenes) // total_processes)]
elif len(scenes) % total_processes != 0:
get_logger().warning(oversample_warning)
inds = self._partition_inds(len(scenes), total_processes)
if not self.headless:
x_display: Optional[str] = None
if platform.system() == "Linux":
x_displays = get_open_x_displays(throw_error_if_empty=True)
if len([d for d in devices if d != torch.device("cpu")]) > len(
x_displays
):
get_logger().warning(
f"More GPU devices found than X-displays (devices: `{x_displays}`, x_displays: `{x_displays}`)."
f" This is not necessarily a bad thing but may mean that you're not using GPU memory as"
f" efficiently as possible. Consider following the instructions here:"
f" https://allenact.org/installation/installation-framework/#installation-of-ithor-ithor-plugin"
f" describing how to start an X-display on every GPU."
)
x_display = x_displays[process_ind % len(x_displays)]
device_dict = dict(x_display=x_display)
else:
device_dict = dict(
gpu_device=devices[process_ind % len(devices)],
platform=ai2thor.platform.CloudRendering,
)
return {
"scenes": scenes[inds[process_ind] : inds[process_ind + 1]],
"object_types": self.TARGET_TYPES,
"max_steps": self.MAX_STEPS,
"sensors": [
s
for s in self.SENSORS
if (include_expert_sensor or not isinstance(s, ExpertActionSensor))
],
"action_space": self.ACTION_SPACE,
"seed": seeds[process_ind] if seeds is not None else None,
"deterministic_cudnn": deterministic_cudnn,
"rewards_config": self.REWARD_CONFIG,
"env_args": {**self.env_args(), **device_dict},
}
def train_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
res = self._get_sampler_args_for_scene_split(
scenes_dir=os.path.join(self.TRAIN_DATASET_DIR, "episodes"),
process_ind=process_ind,
total_processes=total_processes,
devices=devices,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
allow_oversample=True,
)
res["scene_directory"] = self.TRAIN_DATASET_DIR
res["loop_dataset"] = True
res["allow_flipping"] = True
res["randomize_materials_in_training"] = self.randomize_train_materials
return res
def valid_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
res = self._get_sampler_args_for_scene_split(
scenes_dir=os.path.join(self.VAL_DATASET_DIR, "episodes"),
process_ind=process_ind,
total_processes=total_processes,
devices=devices,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
include_expert_sensor=False,
allow_oversample=False,
)
res["scene_directory"] = self.VAL_DATASET_DIR
res["loop_dataset"] = False
return res
def test_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
if self.test_on_validation or self.TEST_DATASET_DIR is None:
if not self.test_on_validation:
get_logger().warning(
"`test_on_validation` is set to `True` and thus we will run evaluation on the validation set instead."
" Be careful as the saved metrics json and tensorboard files **will still be labeled as"
" 'test' rather than 'valid'**."
)
else:
get_logger().warning(
"No test dataset dir detected, running test on validation set instead."
" Be careful as the saved metrics json and tensorboard files *will still be labeled as"
" 'test' rather than 'valid'**."
)
return self.valid_task_sampler_args(
process_ind=process_ind,
total_processes=total_processes,
devices=devices,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
)
else:
res = self._get_sampler_args_for_scene_split(
scenes_dir=os.path.join(self.TEST_DATASET_DIR, "episodes"),
process_ind=process_ind,
total_processes=total_processes,
devices=devices,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
include_expert_sensor=False,
allow_oversample=False,
)
res["env_args"]["all_metadata_available"] = False
res["rewards_config"] = {**res["rewards_config"], "shaping_weight": 0}
res["scene_directory"] = self.TEST_DATASET_DIR
res["loop_dataset"] = False
return res
| allenact-main | projects/objectnav_baselines/experiments/objectnav_thor_base.py |
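The scene-partitioning helper in the config above is easy to sanity-check in isolation; the standalone sketch below copies the `_partition_inds` logic and uses illustrative numbers.

import numpy as np

def partition_inds(n: int, num_parts: int):
    # Same computation as ObjectNavThorBaseConfig._partition_inds: num_parts + 1
    # evenly spaced boundaries over [0, n], rounded to integers.
    return np.round(np.linspace(0, n, num_parts + 1, endpoint=True)).astype(np.int32)

inds = partition_inds(10, 4)
print(inds)  # [ 0  2  5  8 10]
# Process i is then assigned scenes[inds[i] : inds[i + 1]], e.g. process 0 gets scenes[0:2].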
from abc import ABC
from typing import Optional, Sequence, Union
from allenact.base_abstractions.experiment_config import ExperimentConfig
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.base_abstractions.sensor import Sensor
from allenact.utils.experiment_utils import Builder
class ObjectNavBaseConfig(ExperimentConfig, ABC):
"""The base object navigation configuration file."""
STEP_SIZE = 0.25
ROTATION_DEGREES = 30.0
VISIBILITY_DISTANCE = 1.0
STOCHASTIC = True
HORIZONTAL_FIELD_OF_VIEW = 79
CAMERA_WIDTH = 400
CAMERA_HEIGHT = 300
SCREEN_SIZE = 224
MAX_STEPS = 500
ADVANCE_SCENE_ROLLOUT_PERIOD: Optional[int] = None
SENSORS: Sequence[Sensor] = []
def __init__(self):
self.REWARD_CONFIG = {
"step_penalty": -0.01,
"goal_success_reward": 10.0,
"failed_stop_reward": 0.0,
"shaping_weight": 1.0,
}
def preprocessors(self) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
return tuple()
| allenact-main | projects/objectnav_baselines/experiments/objectnav_base.py |
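A hypothetical sketch (the subclass name is made up) of how configs adjust the reward structure: they mutate REWARD_CONFIG after the parent __init__, as the DAgger config later in this collection does. A runnable subclass would still need to implement the remaining abstract ExperimentConfig methods.

from projects.objectnav_baselines.experiments.objectnav_base import ObjectNavBaseConfig

class MyObjectNavConfig(ObjectNavBaseConfig):
    def __init__(self):
        super().__init__()
        self.REWARD_CONFIG["shaping_weight"] = 0.0       # turn off distance-based shaping
        self.REWARD_CONFIG["failed_stop_reward"] = -1.0  # illustrative penalty for a bad stop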
from typing import Sequence, Union
import torch.nn as nn
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.utils.experiment_utils import Builder, TrainingPipeline
from allenact_plugins.ithor_plugin.ithor_sensors import (
RGBSensorThor,
GoalObjectTypeThorSensor,
)
from allenact_plugins.robothor_plugin.robothor_sensors import DepthSensorThor
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
from projects.objectnav_baselines.mixins import (
ResNetPreprocessGRUActorCriticMixin,
ObjectNavPPOMixin,
)
class ObjectNavRoboThorRGBPPOExperimentConfig(ObjectNavRoboThorBaseConfig):
"""An Object Navigation experiment configuration in RoboThor with RGBD
input."""
SENSORS = [
RGBSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
),
DepthSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_normalization=True,
uuid="depth_lowres",
),
GoalObjectTypeThorSensor(
object_types=ObjectNavRoboThorBaseConfig.TARGET_TYPES,
),
]
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.preprocessing_and_model = ResNetPreprocessGRUActorCriticMixin(
sensors=self.SENSORS,
resnet_type="RN18",
screen_size=self.SCREEN_SIZE,
goal_sensor_type=GoalObjectTypeThorSensor,
)
def training_pipeline(self, **kwargs) -> TrainingPipeline:
return ObjectNavPPOMixin.training_pipeline(
auxiliary_uuids=[],
multiple_beliefs=False,
advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
)
def preprocessors(self) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
return self.preprocessing_and_model.preprocessors()
def create_model(self, **kwargs) -> nn.Module:
return self.preprocessing_and_model.create_model(
num_actions=self.ACTION_SPACE.n, **kwargs
)
def tag(self):
return "ObjectNav-RoboTHOR-RGBD-ResNet18GRU-DDPPO"
| allenact-main | projects/objectnav_baselines/experiments/robothor/objectnav_robothor_rgbd_resnet18gru_ddppo.py |
from typing import Sequence, Union
import torch.nn as nn
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.utils.experiment_utils import Builder, TrainingPipeline
from allenact_plugins.ithor_plugin.ithor_sensors import (
GoalObjectTypeThorSensor,
RGBSensorThor,
)
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
from projects.objectnav_baselines.mixins import (
ResNetPreprocessGRUActorCriticMixin,
ObjectNavPPOMixin,
)
class ObjectNavRoboThorRGBPPOExperimentConfig(ObjectNavRoboThorBaseConfig):
"""An Object Navigation experiment configuration in RoboThor with RGB
input."""
SENSORS = [
RGBSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
),
GoalObjectTypeThorSensor(
object_types=ObjectNavRoboThorBaseConfig.TARGET_TYPES,
),
]
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.preprocessing_and_model = ResNetPreprocessGRUActorCriticMixin(
sensors=self.SENSORS,
resnet_type="RN50",
screen_size=self.SCREEN_SIZE,
goal_sensor_type=GoalObjectTypeThorSensor,
)
def training_pipeline(self, **kwargs) -> TrainingPipeline:
return ObjectNavPPOMixin.training_pipeline(
auxiliary_uuids=[],
multiple_beliefs=False,
advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
)
def preprocessors(self) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
return self.preprocessing_and_model.preprocessors()
def create_model(self, **kwargs) -> nn.Module:
return self.preprocessing_and_model.create_model(
num_actions=self.ACTION_SPACE.n, **kwargs
)
def tag(self):
return "ObjectNav-RoboTHOR-RGB-ResNet50GRU-DDPPO"
| allenact-main | projects/objectnav_baselines/experiments/robothor/objectnav_robothor_rgb_resnet50gru_ddppo.py |
import os
from abc import ABC
from typing import Optional, List, Any, Dict
import torch
from allenact.utils.misc_utils import prepare_locals_for_super
from projects.objectnav_baselines.experiments.objectnav_thor_base import (
ObjectNavThorBaseConfig,
)
class ObjectNavRoboThorBaseConfig(ObjectNavThorBaseConfig, ABC):
"""The base config for all RoboTHOR ObjectNav experiments."""
THOR_COMMIT_ID = "bad5bc2b250615cb766ffb45d455c211329af17e"
THOR_COMMIT_ID_FOR_RAND_MATERIALS = "9549791ce2e7f472063a10abb1fb7664159fec23"
AGENT_MODE = "locobot"
DEFAULT_NUM_TRAIN_PROCESSES = 60 if torch.cuda.is_available() else 1
TRAIN_DATASET_DIR = os.path.join(os.getcwd(), "datasets/robothor-objectnav/train")
VAL_DATASET_DIR = os.path.join(os.getcwd(), "datasets/robothor-objectnav/val")
TEST_DATASET_DIR = os.path.join(os.getcwd(), "datasets/robothor-objectnav/test")
TARGET_TYPES = tuple(
sorted(
[
"AlarmClock",
"Apple",
"BaseballBat",
"BasketBall",
"Bowl",
"GarbageCan",
"HousePlant",
"Laptop",
"Mug",
"SprayBottle",
"Television",
"Vase",
]
)
)
def train_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
kwargs = super(ObjectNavRoboThorBaseConfig, self).train_task_sampler_args(
**prepare_locals_for_super(locals())
)
if self.randomize_train_materials:
kwargs["env_args"]["commit_id"] = self.THOR_COMMIT_ID_FOR_RAND_MATERIALS
return kwargs
| allenact-main | projects/objectnav_baselines/experiments/robothor/objectnav_robothor_base.py |
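As an illustration of why TARGET_TYPES is stored as a sorted tuple: sorting makes the type-to-index mapping used by object-type sensors deterministic across runs. The snippet below is self-contained and copies the target list from the config above.

TARGET_TYPES = tuple(sorted([
    "AlarmClock", "Apple", "BaseballBat", "BasketBall", "Bowl", "GarbageCan",
    "HousePlant", "Laptop", "Mug", "SprayBottle", "Television", "Vase",
]))
type_to_index = {tt: i for i, tt in enumerate(TARGET_TYPES)}
print(type_to_index["Apple"])  # 1 -- stable because the tuple is sorted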
from typing import Sequence, Union
import torch.nn as nn
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.utils.experiment_utils import Builder, TrainingPipeline
from allenact_plugins.ithor_plugin.ithor_sensors import (
GoalObjectTypeThorSensor,
RGBSensorThor,
)
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
from projects.objectnav_baselines.mixins import (
ResNetPreprocessGRUActorCriticMixin,
ObjectNavPPOMixin,
)
class ObjectNavRoboThorRGBPPOExperimentConfig(ObjectNavRoboThorBaseConfig):
"""An Object Navigation experiment configuration in RoboThor with RGB
input."""
SENSORS = [
RGBSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
),
GoalObjectTypeThorSensor(
object_types=ObjectNavRoboThorBaseConfig.TARGET_TYPES,
),
]
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.preprocessing_and_model = ResNetPreprocessGRUActorCriticMixin(
sensors=self.SENSORS,
resnet_type="RN18",
screen_size=self.SCREEN_SIZE,
goal_sensor_type=GoalObjectTypeThorSensor,
)
def training_pipeline(self, **kwargs) -> TrainingPipeline:
return ObjectNavPPOMixin.training_pipeline(
auxiliary_uuids=[],
multiple_beliefs=False,
advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
)
def preprocessors(self) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
return self.preprocessing_and_model.preprocessors()
def create_model(self, **kwargs) -> nn.Module:
return self.preprocessing_and_model.create_model(
num_actions=self.ACTION_SPACE.n, **kwargs
)
@classmethod
def tag(cls):
return "ObjectNav-RoboTHOR-RGB-ResNet18GRU-DDPPO"
| allenact-main | projects/objectnav_baselines/experiments/robothor/objectnav_robothor_rgb_resnet18gru_ddppo.py |
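A rough sketch of inspecting the experiment config above from Python, assuming allenact, allenact_plugins, and ai2thor are installed and the repo root is on PYTHONPATH; constructing the config should only build sensors and the preprocessing mixin, not launch AI2-THOR.

from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_rgb_resnet18gru_ddppo import (
    ObjectNavRoboThorRGBPPOExperimentConfig,
)

cfg = ObjectNavRoboThorRGBPPOExperimentConfig()
print(cfg.tag())           # ObjectNav-RoboTHOR-RGB-ResNet18GRU-DDPPO
print(len(cfg.SENSORS))    # 2: RGB sensor + goal object type sensor
print(cfg.ACTION_SPACE.n)  # number of discrete ObjectNav actions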
allenact-main | projects/objectnav_baselines/experiments/robothor/__init__.py |
|
from typing import Sequence, Union
import torch.nn as nn
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.base_abstractions.sensor import ExpertActionSensor
from allenact.utils.experiment_utils import Builder, TrainingPipeline
from allenact_plugins.ithor_plugin.ithor_sensors import (
GoalObjectTypeThorSensor,
RGBSensorThor,
)
from allenact_plugins.robothor_plugin.robothor_tasks import ObjectNavTask
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
from projects.objectnav_baselines.mixins import (
ResNetPreprocessGRUActorCriticMixin,
ObjectNavDAggerMixin,
)
class ObjectNavRoboThorRGBDAggerExperimentConfig(ObjectNavRoboThorBaseConfig):
"""An Object Navigation experiment configuration in RoboThor with RGB
input."""
SENSORS = [
RGBSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
),
GoalObjectTypeThorSensor(
object_types=ObjectNavRoboThorBaseConfig.TARGET_TYPES,
),
ExpertActionSensor(nactions=len(ObjectNavTask.class_action_names()),),
]
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.REWARD_CONFIG["shaping_weight"] = 0
self.preprocessing_and_model = ResNetPreprocessGRUActorCriticMixin(
sensors=self.SENSORS,
resnet_type="RN18",
screen_size=self.SCREEN_SIZE,
goal_sensor_type=GoalObjectTypeThorSensor,
)
def training_pipeline(self, **kwargs) -> TrainingPipeline:
return ObjectNavDAggerMixin.training_pipeline(
advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
)
def preprocessors(self) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
return self.preprocessing_and_model.preprocessors()
def create_model(self, **kwargs) -> nn.Module:
return self.preprocessing_and_model.create_model(
num_actions=self.ACTION_SPACE.n, **kwargs
)
@classmethod
def tag(cls):
return "ObjectNav-RoboTHOR-RGB-ResNet18GRU-DAgger"
| allenact-main | projects/objectnav_baselines/experiments/robothor/objectnav_robothor_rgb_resnet18gru_dagger.py |
import torch.nn as nn
from allenact.utils.experiment_utils import TrainingPipeline
from allenact_plugins.ithor_plugin.ithor_sensors import (
RGBSensorThor,
GoalObjectTypeThorSensor,
)
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
from projects.objectnav_baselines.mixins import (
ObjectNavUnfrozenResNetWithGRUActorCriticMixin,
ObjectNavPPOMixin,
)
class ObjectNavRoboThorRGBPPOExperimentConfig(ObjectNavRoboThorBaseConfig):
"""An Object Navigation experiment configuration in RoboThor with RGB input
without preprocessing by frozen ResNet (instead, a trainable ResNet)."""
SENSORS = [
RGBSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
),
GoalObjectTypeThorSensor(
object_types=ObjectNavRoboThorBaseConfig.TARGET_TYPES,
),
]
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.model_creation_handler = ObjectNavUnfrozenResNetWithGRUActorCriticMixin(
backbone="gnresnet18",
sensors=self.SENSORS,
auxiliary_uuids=[],
add_prev_actions=True,
multiple_beliefs=False,
belief_fusion=None,
)
def training_pipeline(self, **kwargs) -> TrainingPipeline:
return ObjectNavPPOMixin.training_pipeline(
auxiliary_uuids=[],
multiple_beliefs=False,
advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
)
def create_model(self, **kwargs) -> nn.Module:
return self.model_creation_handler.create_model(**kwargs)
def tag(self):
return "ObjectNav-RoboTHOR-RGB-UnfrozenResNet18GRU-DDPPO"
| allenact-main | projects/objectnav_baselines/experiments/robothor/objectnav_robothor_rgb_unfrozenresnet18gru_ddppo.py |
from typing import Sequence, Union
import torch.nn as nn
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.utils.experiment_utils import Builder, TrainingPipeline
from allenact_plugins.ithor_plugin.ithor_sensors import GoalObjectTypeThorSensor
from allenact_plugins.robothor_plugin.robothor_sensors import DepthSensorThor
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
from projects.objectnav_baselines.mixins import (
ResNetPreprocessGRUActorCriticMixin,
ObjectNavPPOMixin,
)
class ObjectNavRoboThorRGBPPOExperimentConfig(ObjectNavRoboThorBaseConfig):
"""An Object Navigation experiment configuration in RoboThor with Depth
input."""
SENSORS = (
DepthSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_normalization=True,
uuid="depth_lowres",
),
GoalObjectTypeThorSensor(
object_types=ObjectNavRoboThorBaseConfig.TARGET_TYPES,
),
)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.preprocessing_and_model = ResNetPreprocessGRUActorCriticMixin(
sensors=self.SENSORS,
resnet_type="RN18",
screen_size=self.SCREEN_SIZE,
goal_sensor_type=GoalObjectTypeThorSensor,
)
def training_pipeline(self, **kwargs) -> TrainingPipeline:
return ObjectNavPPOMixin.training_pipeline(
auxiliary_uuids=[],
multiple_beliefs=False,
advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
)
def preprocessors(self) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
return self.preprocessing_and_model.preprocessors()
def create_model(self, **kwargs) -> nn.Module:
return self.preprocessing_and_model.create_model(
num_actions=self.ACTION_SPACE.n, **kwargs
)
def tag(self):
return "ObjectNav-RoboTHOR-Depth-ResNet18GRU-DDPPO"
| allenact-main | projects/objectnav_baselines/experiments/robothor/objectnav_robothor_depth_resnet18gru_ddppo.py |
allenact-main | projects/objectnav_baselines/experiments/robothor/beta/__init__.py |
|
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.grouped_action_imitation import (
GroupedActionImitation,
)
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.utils.experiment_utils import (
Builder,
PipelineStage,
TrainingPipeline,
LinearDecay,
)
from allenact_plugins.ithor_plugin.ithor_sensors import (
RGBSensorThor,
GoalObjectTypeThorSensor,
)
from allenact_plugins.ithor_plugin.ithor_sensors import TakeEndActionThorNavSensor
from allenact_plugins.robothor_plugin import robothor_constants
from allenact_plugins.robothor_plugin.robothor_tasks import ObjectNavTask
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
from projects.objectnav_baselines.mixins import ResNetPreprocessGRUActorCriticMixin
class ObjectNavRoboThorResNet18GRURGBPPOExperimentConfig(ObjectNavRoboThorBaseConfig):
"""An Object Navigation experiment configuration in RoboThor with RGB
input."""
SENSORS = ( # type:ignore
RGBSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
),
GoalObjectTypeThorSensor(
object_types=ObjectNavRoboThorBaseConfig.TARGET_TYPES,
),
TakeEndActionThorNavSensor(
nactions=len(ObjectNavTask.class_action_names()), uuid="expert_group_action"
),
)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.preprocessing_and_model = ResNetPreprocessGRUActorCriticMixin(
sensors=self.SENSORS,
resnet_type="RN18",
screen_size=self.SCREEN_SIZE,
goal_sensor_type=GoalObjectTypeThorSensor,
)
def preprocessors(self):
return self.preprocessing_and_model.preprocessors()
def create_model(self, **kwargs):
return self.preprocessing_and_model.create_model(
num_actions=self.ACTION_SPACE.n, **kwargs
)
def training_pipeline(self, **kwargs):
ppo_steps = int(300000000)
lr = 3e-4
num_mini_batch = 1
update_repeats = 4
num_steps = 128
save_interval = 5000000
log_interval = 10000
gamma = 0.99
use_gae = True
gae_lambda = 0.95
max_grad_norm = 0.5
action_strs = ObjectNavTask.class_action_names()
non_end_action_inds_set = {
i for i, a in enumerate(action_strs) if a != robothor_constants.END
}
end_action_ind_set = {action_strs.index(robothor_constants.END)}
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=log_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses={
"ppo_loss": PPO(**PPOConfig),
"grouped_action_imitation": GroupedActionImitation(
nactions=len(ObjectNavTask.class_action_names()),
action_groups=[non_end_action_inds_set, end_action_ind_set],
),
},
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
pipeline_stages=[
PipelineStage(
loss_names=["ppo_loss", "grouped_action_imitation"],
max_stage_steps=ppo_steps,
)
],
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
),
)
def tag(self):
return "ObjectNav-RoboTHOR-RGB-ResNet18GRU-DDPPOAndGBC"
| allenact-main | projects/objectnav_baselines/experiments/robothor/beta/objectnav_robothor_rgb_resnetgru_ddppo_and_gbc.py |
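A standalone illustration of the action grouping built in training_pipeline above; the action names here are assumptions for the sketch (the real list comes from ObjectNavTask.class_action_names() and robothor_constants.END).

END = "End"  # assumed value of robothor_constants.END
action_strs = ("MoveAhead", "RotateLeft", "RotateRight", "LookUp", "LookDown", END)  # assumed order

non_end_action_inds_set = {i for i, a in enumerate(action_strs) if a != END}
end_action_ind_set = {action_strs.index(END)}
print(non_end_action_inds_set, end_action_ind_set)  # {0, 1, 2, 3, 4} {5}
# GroupedActionImitation then supervises the expert signal at the level of these two
# groups (move vs. end), rather than over individual actions.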
from typing import Union, Optional, Any
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.algorithms.onpolicy_sync.storage import RolloutBlockStorage
# noinspection PyUnresolvedReferences
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import Task
from allenact.embodiedai.storage.vdr_storage import (
DiscreteVisualDynamicsReplayStorage,
InverseDynamicsVDRLoss,
)
from allenact.utils.experiment_utils import Builder, TrainingSettings
from allenact.utils.experiment_utils import (
PipelineStage,
LinearDecay,
StageComponent,
)
from allenact.utils.experiment_utils import TrainingPipeline
from allenact_plugins.ithor_plugin.ithor_environment import IThorEnvironment
from allenact_plugins.ithor_plugin.ithor_sensors import (
RGBSensorThor,
GoalObjectTypeThorSensor,
)
from allenact_plugins.robothor_plugin.robothor_environment import RoboThorEnvironment
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
from projects.objectnav_baselines.mixins import (
ObjectNavUnfrozenResNetWithGRUActorCriticMixin,
update_with_auxiliary_losses,
)
def compute_inv_dyn_action_logits(
model, img0, img1,
):
rgb_uuid = model.visual_encoder.rgb_uuid
img0_enc = model.visual_encoder({rgb_uuid: img0.unsqueeze(0)}).squeeze(0)
img1_enc = model.visual_encoder({rgb_uuid: img1.unsqueeze(0)}).squeeze(0)
return model.inv_dyn_mlp(torch.cat((img0_enc, img1_enc), dim=1))
class LastActionSuccessSensor(
Sensor[
Union[IThorEnvironment, RoboThorEnvironment],
Union[Task[IThorEnvironment], Task[RoboThorEnvironment]],
]
):
def __init__(self, uuid: str = "last_action_success", **kwargs: Any):
super().__init__(
uuid=uuid, observation_space=gym.spaces.MultiBinary(1), **kwargs
)
def get_observation(
self,
env: Union[IThorEnvironment, RoboThorEnvironment],
task: Optional[Task],
*args: Any,
**kwargs: Any
) -> Any:
return 1 * task.last_action_success
class VisibleObjectTypesSensor(
Sensor[
Union[IThorEnvironment, RoboThorEnvironment],
Union[Task[IThorEnvironment], Task[RoboThorEnvironment]],
]
):
def __init__(self, uuid: str = "visible_objects", **kwargs: Any):
super().__init__(
uuid=uuid,
observation_space=gym.spaces.Box(
low=0, high=1, shape=(len(ObjectNavRoboThorBaseConfig.TARGET_TYPES),)
),
**kwargs
)
self.type_to_index = {
tt: i for i, tt in enumerate(ObjectNavRoboThorBaseConfig.TARGET_TYPES)
}
def get_observation(
self,
env: Union[IThorEnvironment, RoboThorEnvironment],
task: Optional[Task],
*args: Any,
**kwargs: Any
) -> Any:
out = np.zeros((len(self.type_to_index),))
for o in env.controller.last_event.metadata["objects"]:
if o["visible"] and o["objectType"] in self.type_to_index:
out[self.type_to_index[o["objectType"]]] = 1.0
return out
class ObjectNavRoboThorVdrTmpRGBExperimentConfig(ObjectNavRoboThorBaseConfig):
SENSORS = [
RGBSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
),
GoalObjectTypeThorSensor(
object_types=ObjectNavRoboThorBaseConfig.TARGET_TYPES,
),
LastActionSuccessSensor(),
VisibleObjectTypesSensor(),
]
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.model_creation_handler = ObjectNavUnfrozenResNetWithGRUActorCriticMixin(
backbone="gnresnet18",
sensors=self.SENSORS,
auxiliary_uuids=[],
add_prev_actions=True,
multiple_beliefs=False,
belief_fusion=None,
)
def training_pipeline(self, **kwargs):
# PPO
ppo_steps = int(300000000)
lr = 3e-4
num_mini_batch = 1
update_repeats = 4
num_steps = 128
save_interval = 5000000
log_interval = 10000 if torch.cuda.is_available() else 1
gamma = 0.99
use_gae = True
gae_lambda = 0.95
max_grad_norm = 0.5
auxiliary_uuids = tuple()
multiple_beliefs = False
named_losses = {"ppo_loss": (PPO(**PPOConfig), 1.0)}
named_losses = update_with_auxiliary_losses(
named_losses=named_losses,
auxiliary_uuids=auxiliary_uuids,
multiple_beliefs=multiple_beliefs,
)
default_ts = TrainingSettings(
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
save_interval=save_interval,
metric_accumulate_interval=log_interval,
)
named_losses = {
**named_losses,
"inv_dyn_vdr": (
InverseDynamicsVDRLoss(
compute_action_logits_fn=compute_inv_dyn_action_logits,
img0_key="img0",
img1_key="img1",
action_key="action",
),
1.0,
),
}
sorted_loss_names = list(sorted(named_losses.keys()))
return TrainingPipeline(
training_settings=default_ts,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
named_losses={k: v[0] for k, v in named_losses.items()},
named_storages={
"onpolicy": RolloutBlockStorage(init_size=num_steps),
"discrete_vdr": DiscreteVisualDynamicsReplayStorage(
image_uuid="rgb_lowres",
action_success_uuid="last_action_success",
extra_targets=["visible_objects"],
nactions=6,
num_to_store_per_action=200 if torch.cuda.is_available() else 10,
max_to_save_per_episode=6,
target_batch_size=256 if torch.cuda.is_available() else 128,
),
},
pipeline_stages=[
PipelineStage(
loss_names=sorted_loss_names,
max_stage_steps=ppo_steps,
loss_weights=[
named_losses[loss_name][1] for loss_name in sorted_loss_names
],
stage_components=[
StageComponent(
uuid="onpolicy",
storage_uuid="onpolicy",
loss_names=[
ln for ln in sorted_loss_names if ln != "inv_dyn_vdr"
],
),
StageComponent(
uuid="vdr",
storage_uuid="discrete_vdr",
loss_names=["inv_dyn_vdr"],
training_settings=TrainingSettings(
num_mini_batch=1, update_repeats=1,
),
),
],
)
],
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
),
)
def create_model(self, **kwargs) -> nn.Module:
model = self.model_creation_handler.create_model(**kwargs)
model.inv_dyn_mlp = nn.Sequential(
nn.Linear(1024, 256), nn.ReLU(inplace=True), nn.Linear(256, 6),
)
return model
def tag(self):
return "Objectnav-RoboTHOR-RGB-UnfrozenResNet18GRU-VDR"
| allenact-main | projects/objectnav_baselines/experiments/robothor/beta/objectnav_robothor_rgb_unfrozenresnet18gru_vdr_ddppo.py |
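A small shape check for the inverse-dynamics head attached in create_model above. The 512-dim per-frame embedding is an assumption made for this sketch (chosen so that two concatenated embeddings match the 1024-dim input of inv_dyn_mlp); the 6 outputs match nactions=6 in the VDR storage.

import torch
import torch.nn as nn

inv_dyn_mlp = nn.Sequential(nn.Linear(1024, 256), nn.ReLU(inplace=True), nn.Linear(256, 6))
img0_enc = torch.randn(4, 512)  # batch of 4 "before" frame embeddings (assumed size)
img1_enc = torch.randn(4, 512)  # batch of 4 "after" frame embeddings (assumed size)
logits = inv_dyn_mlp(torch.cat((img0_enc, img1_enc), dim=1))
print(logits.shape)  # torch.Size([4, 6]) -- one logit per discrete action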