python_code | repo_name | file_path |
---|---|---|
import abc
from typing import Any, Union, Callable, TypeVar, Dict, Optional, cast
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.distributions.utils import lazy_property
import gym
from allenact.base_abstractions.sensor import AbstractExpertActionSensor as Expert
from allenact.utils import spaces_utils as su
from allenact.utils.misc_utils import all_unique
TeacherForcingAnnealingType = TypeVar("TeacherForcingAnnealingType")
"""
Modify standard PyTorch distributions so they are compatible with this code.
"""
class Distr(abc.ABC):
@abc.abstractmethod
def log_prob(self, actions: Any):
"""Return the log probability/ies of the provided action/s."""
raise NotImplementedError()
@abc.abstractmethod
def entropy(self):
"""Return the entropy or entropies."""
raise NotImplementedError()
@abc.abstractmethod
def sample(self, sample_shape=torch.Size()):
"""Sample actions."""
raise NotImplementedError()
def mode(self):
"""If available, return the action(s) with highest probability.
It will only be called if using deterministic agents.
"""
raise NotImplementedError()
class CategoricalDistr(torch.distributions.Categorical, Distr):
"""A categorical distribution extending PyTorch's Categorical.
probs or logits are assumed to be passed with step and sampler
dimensions as in: [step, samplers, ...]
"""
def mode(self):
return self._param.argmax(dim=-1, keepdim=False) # match sample()'s shape
def log_prob(self, value: torch.Tensor):
if value.shape == self.logits.shape[:-1]:
return super(CategoricalDistr, self).log_prob(value=value)
elif value.shape == self.logits.shape[:-1] + (1,):
return (
super(CategoricalDistr, self)
.log_prob(value=value.squeeze(-1))
.unsqueeze(-1)
)
else:
raise NotImplementedError(
"Broadcasting in categorical distribution is disabled as it often leads"
f" to unexpected results. We have that `value.shape == {value.shape}` but"
f" expected a shape of "
f" `self.logits.shape[:-1] == {self.logits.shape[:-1]}` or"
f" `self.logits.shape[:-1] + (1,) == {self.logits.shape[:-1] + (1,)}`"
)
@lazy_property
def log_probs_tensor(self):
return torch.log_softmax(self.logits, dim=-1)
@lazy_property
def probs_tensor(self):
return torch.softmax(self.logits, dim=-1)
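# Hypothetical usage sketch (not part of the original AllenAct file): illustrates the
# [step, samplers, ...] convention assumed by `CategoricalDistr`; all sizes are made up.
def _example_categorical_distr_usage() -> None:
    logits = torch.randn(1, 4, 6)  # [step=1, samplers=4, num_actions=6]
    distr = CategoricalDistr(logits=logits)
    actions = distr.sample()  # shape [1, 4] == logits.shape[:-1]
    log_probs = distr.log_prob(actions)  # shape [1, 4]
    log_probs_keepdim = distr.log_prob(actions.unsqueeze(-1))  # shape [1, 4, 1]
    assert log_probs.shape == (1, 4) and log_probs_keepdim.shape == (1, 4, 1)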
class ConditionalDistr(Distr):
"""Action distribution conditional which is conditioned on other
information (i.e. part of a hierarchical distribution)
# Attributes
action_group_name : the identifier of the group of actions (`OrderedDict`) produced by this `ConditionalDistr`
"""
action_group_name: str
def __init__(
self,
distr_conditioned_on_input_fn_or_instance: Union[Callable, Distr],
action_group_name: str,
*distr_conditioned_on_input_args,
**distr_conditioned_on_input_kwargs,
):
"""Initialize an ConditionalDistr.
# Parameters
distr_conditioned_on_input_fn_or_instance : Callable to generate `ConditionalDistr` given sampled actions,
or given `Distr`.
action_group_name : the identifier of the group of actions (`OrderedDict`) produced by this `ConditionalDistr`
distr_conditioned_on_input_args : positional arguments for Callable `distr_conditioned_on_input_fn_or_instance`
distr_conditioned_on_input_kwargs : keyword arguments for Callable `distr_conditioned_on_input_fn_or_instance`
"""
self.distr: Optional[Distr] = None
self.distr_conditioned_on_input_fn: Optional[Callable] = None
self.distr_conditioned_on_input_args = distr_conditioned_on_input_args
self.distr_conditioned_on_input_kwargs = distr_conditioned_on_input_kwargs
if isinstance(distr_conditioned_on_input_fn_or_instance, Distr):
self.distr = distr_conditioned_on_input_fn_or_instance
else:
self.distr_conditioned_on_input_fn = (
distr_conditioned_on_input_fn_or_instance
)
self.action_group_name = action_group_name
def log_prob(self, actions):
return self.distr.log_prob(actions)
def entropy(self):
return self.distr.entropy()
def condition_on_input(self, **ready_actions):
if self.distr is None:
assert all(
key not in self.distr_conditioned_on_input_kwargs
for key in ready_actions
)
self.distr = self.distr_conditioned_on_input_fn(
*self.distr_conditioned_on_input_args,
**self.distr_conditioned_on_input_kwargs,
**ready_actions,
)
def reset(self):
if (self.distr is not None) and (
self.distr_conditioned_on_input_fn is not None
):
self.distr = None
def sample(self, sample_shape=torch.Size()) -> OrderedDict:
return OrderedDict([(self.action_group_name, self.distr.sample(sample_shape))])
def mode(self) -> OrderedDict:
return OrderedDict([(self.action_group_name, self.distr.mode())])
class SequentialDistr(Distr):
def __init__(self, *conditional_distrs: ConditionalDistr):
action_group_names = [cd.action_group_name for cd in conditional_distrs]
assert all_unique(
action_group_names
), f"All conditional distribution `action_group_name`, must be unique, given names {action_group_names}"
self.conditional_distrs = conditional_distrs
def sample(self, sample_shape=torch.Size()):
actions = OrderedDict()
for cd in self.conditional_distrs:
cd.condition_on_input(**actions)
actions.update(cd.sample(sample_shape=sample_shape))
return actions
def mode(self):
actions = OrderedDict()
for cd in self.conditional_distrs:
cd.condition_on_input(**actions)
actions.update(cd.mode())
return actions
def conditional_entropy(self):
sum = 0
for cd in self.conditional_distrs:
sum = sum + cd.entropy()
return sum
def entropy(self):
raise NotImplementedError(
"Please use 'conditional_entropy' instead of 'entropy' as the `entropy_method_name` "
"parameter in your loss when using `SequentialDistr`."
)
def log_prob(
self, actions: Dict[str, Any], return_dict: bool = False
) -> Union[torch.Tensor, Dict[str, torch.Tensor]]:
assert len(actions) == len(
self.conditional_distrs
), f"{len(self.conditional_distrs)} conditional distributions for {len(actions)} action groups"
res: Union[int, torch.Tensor, Dict[str, torch.Tensor]] = (
0 if not return_dict else OrderedDict()
)
for cd in self.conditional_distrs:
cd.condition_on_input(**actions)
current_log_prob = cd.log_prob(actions[cd.action_group_name])
if not return_dict:
res = res + current_log_prob
else:
res[cd.action_group_name] = current_log_prob
return res
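# Hypothetical composition sketch (not part of the original AllenAct file): a two-level
# hierarchical distribution in which a "mode" action group is sampled first and a "param"
# group is then built conditioned on it. Group names and logits shapes are assumptions.
def _example_sequential_distr(
    mode_logits: torch.Tensor, param_logits: torch.Tensor
) -> OrderedDict:
    mode_cd = ConditionalDistr(
        distr_conditioned_on_input_fn_or_instance=CategoricalDistr(logits=mode_logits),
        action_group_name="mode",
    )

    def make_param_distr(mode: torch.Tensor) -> CategoricalDistr:
        # A real model would compute these logits from `mode`; here they are fixed.
        return CategoricalDistr(logits=param_logits)

    param_cd = ConditionalDistr(
        distr_conditioned_on_input_fn_or_instance=make_param_distr,
        action_group_name="param",
    )
    distr = SequentialDistr(mode_cd, param_cd)
    actions = distr.sample()  # OrderedDict with "mode" and "param" entries
    _ = distr.log_prob(actions)  # sum of per-group log-probabilities
    return actions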
class TeacherForcingDistr(Distr):
def __init__(
self,
distr: Distr,
obs: Dict[str, Any],
action_space: gym.spaces.Space,
num_active_samplers: Optional[int],
approx_steps: Optional[int],
teacher_forcing: Optional[TeacherForcingAnnealingType],
tracking_info: Optional[Dict[str, Any]],
always_enforce: bool = False,
):
self.distr = distr
self.is_sequential = isinstance(self.distr, SequentialDistr)
# action_space is a gym.spaces.Dict for SequentialDistr, or any gym.Space for other Distr
self.action_space = action_space
self.num_active_samplers = num_active_samplers
self.approx_steps = approx_steps
self.teacher_forcing = teacher_forcing
self.tracking_info = tracking_info
self.always_enforce = always_enforce
assert (
"expert_action" in obs
), "When using teacher forcing, obs must contain an `expert_action` uuid"
obs_space = Expert.flagged_space(
self.action_space, use_dict_as_groups=self.is_sequential
)
self.expert = su.unflatten(obs_space, obs["expert_action"])
def enforce(
self,
sample: Any,
action_space: gym.spaces.Space,
teacher: OrderedDict,
teacher_force_info: Optional[Dict[str, Any]],
action_name: Optional[str] = None,
):
actions = su.flatten(action_space, sample)
assert (
len(actions.shape) == 3
), f"Got flattened actions with shape {actions.shape} (it should be [1 x `samplers` x `flatdims`])"
if self.num_active_samplers is not None:
assert actions.shape[1] == self.num_active_samplers
expert_actions = su.flatten(action_space, teacher[Expert.ACTION_POLICY_LABEL])
## avoiding since partially multidimensional
assert (
expert_actions.shape == actions.shape
), f"expert actions shape {expert_actions.shape} doesn't match the model's {actions.shape}"
# expert_success is 0 if the expert action could not be computed and otherwise equals 1.
expert_action_exists_mask = teacher[Expert.EXPERT_SUCCESS_LABEL]
if not self.always_enforce:
teacher_forcing_mask = (
torch.distributions.bernoulli.Bernoulli(
torch.tensor(self.teacher_forcing(self.approx_steps))
)
.sample(expert_action_exists_mask.shape)
.long()
.to(actions.device)
) * expert_action_exists_mask
else:
teacher_forcing_mask = expert_action_exists_mask
if teacher_force_info is not None:
teacher_force_info[
"teacher_ratio/sampled{}".format(
f"_{action_name}" if action_name is not None else ""
)
] = (teacher_forcing_mask.float().mean().item())
extended_shape = teacher_forcing_mask.shape + (1,) * (
len(actions.shape) - len(teacher_forcing_mask.shape)
)
actions = torch.where(
teacher_forcing_mask.byte().view(extended_shape), expert_actions, actions
)
return su.unflatten(action_space, actions)
def log_prob(self, actions: Any):
return self.distr.log_prob(actions)
def entropy(self):
return self.distr.entropy()
def conditional_entropy(self):
return self.distr.conditional_entropy()
def sample(self, sample_shape=torch.Size()):
teacher_force_info: Optional[Dict[str, Any]] = None
if self.approx_steps is not None:
teacher_force_info = {
"teacher_ratio/enforced": self.teacher_forcing(self.approx_steps),
}
if self.is_sequential:
res = OrderedDict()
for cd in cast(SequentialDistr, self.distr).conditional_distrs:
cd.condition_on_input(**res)
action_group_name = cd.action_group_name
res[action_group_name] = self.enforce(
cd.sample(sample_shape)[action_group_name],
cast(gym.spaces.Dict, self.action_space)[action_group_name],
self.expert[action_group_name],
teacher_force_info,
action_group_name,
)
else:
res = self.enforce(
self.distr.sample(sample_shape),
self.action_space,
self.expert,
teacher_force_info,
)
if self.tracking_info is not None and self.num_active_samplers is not None:
self.tracking_info["teacher"].append(
("teacher_package", teacher_force_info, self.num_active_samplers)
)
return res
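# Hypothetical toy illustration (not part of the original file) of the masking used in
# `TeacherForcingDistr.enforce`: with probability `p`, and only where the expert action
# exists, the sampled action is replaced by the expert action.
def _example_teacher_forcing_mix(p: float = 0.5) -> torch.Tensor:
    actions = torch.tensor([[0, 1, 2, 3]])  # [1 step x 4 samplers]
    expert_actions = torch.tensor([[9, 9, 9, 9]])
    expert_success = torch.tensor([[1, 1, 0, 1]])  # expert failed for sampler 2
    mask = (
        torch.distributions.Bernoulli(torch.tensor(p))
        .sample(expert_success.shape)
        .long()
        * expert_success
    )
    return torch.where(mask.bool(), expert_actions, actions)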
class AddBias(nn.Module):
"""Adding bias parameters to input values."""
def __init__(self, bias: torch.FloatTensor):
"""Initializer.
# Parameters
bias : data to use as the initial values of the bias.
"""
super(AddBias, self).__init__()
self._bias = nn.Parameter(bias.unsqueeze(1), requires_grad=True)
def forward(self, x: torch.FloatTensor) -> torch.FloatTensor: # type: ignore
"""Adds the stored bias parameters to `x`."""
assert x.dim() in [2, 4]
if x.dim() == 2:
bias = self._bias.t().view(1, -1)
else:
bias = self._bias.t().view(1, -1, 1, 1)
return x + bias # type:ignore
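# Hypothetical usage sketch (not part of the original file): `AddBias` holds one learnable
# bias per feature/channel and broadcasts it over 2D ([batch, features]) or 4D
# ([batch, channels, H, W]) inputs.
def _example_add_bias() -> None:
    add_bias = AddBias(torch.zeros(8))
    flat = add_bias(torch.randn(5, 8))
    conv = add_bias(torch.randn(5, 8, 7, 7))
    assert flat.shape == (5, 8) and conv.shape == (5, 8, 7, 7)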
| ask4help-main | allenact/base_abstractions/distributions.py |
ask4help-main | allenact/algorithms/__init__.py |
|
"""Defines the reinforcement learning `OnPolicyRunner`."""
import copy
import glob
import itertools
import json
import math
import os
import pathlib
import queue
import random
import signal
import subprocess
import sys
import time
import traceback
from collections import defaultdict
from multiprocessing.context import BaseContext
from multiprocessing.process import BaseProcess
from typing import Optional, Dict, Union, Tuple, Sequence, List, Any
import enum
import filelock
import numpy as np
import torch
import torch.multiprocessing as mp
from setproctitle import setproctitle as ptitle
from allenact.algorithms.onpolicy_sync.engine import (
OnPolicyTrainer,
OnPolicyInference,
TRAIN_MODE_STR,
VALID_MODE_STR,
TEST_MODE_STR,
OnPolicyRLEngine,
)
from allenact.base_abstractions.experiment_config import ExperimentConfig, MachineParams
from allenact.utils.experiment_utils import (
ScalarMeanTracker,
set_deterministic_cudnn,
set_seed,
LoggingPackage,
)
from allenact.utils.misc_utils import (
all_equal,
get_git_diff_of_project,
NumpyJSONEncoder,
)
from allenact.utils.model_utils import md5_hash_of_state_dict
from allenact.utils.system import get_logger, find_free_port
from allenact.utils.tensor_utils import SummaryWriter
from allenact.utils.viz_utils import VizSuite
_CONFIG_KWARGS_STR = "__CONFIG_KWARGS__"
class SaveDirFormat(enum.Enum):
"""Directory formats that can be used when saving tensorboard logs,
checkpoints, metrics, etc. during training/evaluation.
FLAT: the first-level directories are logs, checkpoints, metrics, etc.; the lower levels identify each experiment and its start time string.
NESTED: the opposite of FLAT.
"""
FLAT = "FLAT"
NESTED = "NESTED"
# Has results queue (aggregated per trainer), checkpoints queue and mp context
# Instantiates train, validate, and test workers
# Logging
# Saves configs, makes folder for trainer models
class OnPolicyRunner(object):
def __init__(
self,
config: ExperimentConfig,
output_dir: str,
loaded_config_src_files: Optional[Dict[str, str]],
seed: Optional[int] = None,
mode: str = "train",
deterministic_cudnn: bool = False,
deterministic_agents: bool = False,
mp_ctx: Optional[BaseContext] = None,
multiprocessing_start_method: str = "default",
extra_tag: str = "",
disable_tensorboard: bool = False,
disable_config_saving: bool = False,
distributed_ip_and_port: str = "127.0.0.1:0",
machine_id: int = 0,
save_dir_fmt: SaveDirFormat = SaveDirFormat.FLAT,
):
self.config = config
self.output_dir = output_dir
self.loaded_config_src_files = loaded_config_src_files
self.seed = seed if seed is not None else random.randint(0, 2 ** 31 - 1)
self.deterministic_cudnn = deterministic_cudnn
if multiprocessing_start_method == "default":
if torch.cuda.is_available():
multiprocessing_start_method = "forkserver"
else:
# Spawn seems to play nicer with cpus and debugging
multiprocessing_start_method = "spawn"
self.mp_ctx = self.init_context(mp_ctx, multiprocessing_start_method)
self.extra_tag = extra_tag
self.mode = mode.lower().strip()
self.visualizer: Optional[VizSuite] = None
self.deterministic_agents = deterministic_agents
self.disable_tensorboard = disable_tensorboard
self.disable_config_saving = disable_config_saving
assert self.mode in [
TRAIN_MODE_STR,
TEST_MODE_STR,
], "Only 'train' and 'test' modes supported in runner"
if self.deterministic_cudnn:
set_deterministic_cudnn()
set_seed(self.seed)
self.queues: Optional[Dict[str, mp.Queue]] = None
self.processes: Dict[str, List[Union[BaseProcess, mp.Process]]] = defaultdict(
list
)
self.current_checkpoint = None
self._local_start_time_str: Optional[str] = None
self._is_closed: bool = False
self._collect_valid_results: bool = False
self.distributed_ip_and_port = distributed_ip_and_port
self.machine_id = machine_id
self.save_dir_fmt = save_dir_fmt
@property
def local_start_time_str(self) -> str:
if self._local_start_time_str is None:
raise RuntimeError(
"Local start time string does not exist as neither `start_train()` or `start_test()`"
" has been called on this runner."
)
return self._local_start_time_str
@property
def running_validation(self):
return (
sum(
MachineParams.instance_from(
self.config.machine_params(VALID_MODE_STR)
).nprocesses
)
> 0
) and self.machine_id == 0
@staticmethod
def init_context(
mp_ctx: Optional[BaseContext] = None,
multiprocessing_start_method: str = "forkserver",
valid_start_methods: Tuple[str, ...] = ("forkserver", "spawn", "fork"),
):
if mp_ctx is None:
assert multiprocessing_start_method in valid_start_methods, (
f"multiprocessing_start_method must be one of {valid_start_methods}."
f" Got '{multiprocessing_start_method}'"
)
mp_ctx = mp.get_context(multiprocessing_start_method)
elif multiprocessing_start_method != mp_ctx.get_start_method():
get_logger().warning(
f"ignoring multiprocessing_start_method '{multiprocessing_start_method}'"
f" and using given context with '{mp_ctx.get_start_method()}'"
)
return mp_ctx
def _acquire_unique_local_start_time_string(self) -> str:
"""Creates a (unique) local start time string for this experiment.
Ensures through file locks that the local start time string
produced is unique. This implies that, if one has many
experiments starting in parallel, at most one will be started
every second (as the local start time string only records the
time up to the current second).
"""
os.makedirs(self.output_dir, exist_ok=True)
start_time_string_lock_path = os.path.abspath(
os.path.join(self.output_dir, ".allenact_start_time_string.lock")
)
try:
with filelock.FileLock(start_time_string_lock_path, timeout=60):
last_start_time_string_path = os.path.join(
self.output_dir, ".allenact_last_start_time_string"
)
pathlib.Path(last_start_time_string_path).touch()
with open(last_start_time_string_path, "r") as f:
last_start_time_string_list = f.readlines()
while True:
candidate_str = time.strftime(
"%Y-%m-%d_%H-%M-%S", time.localtime(time.time())
)
if (
len(last_start_time_string_list) == 0
or last_start_time_string_list[0].strip() != candidate_str
):
break
time.sleep(0.2)
with open(last_start_time_string_path, "w") as f:
f.write(candidate_str)
except filelock.Timeout as e:
get_logger().exception(
f"Could not acquire the lock for {start_time_string_lock_path} for 60 seconds,"
" this suggests an unexpected deadlock. Please close all AllenAct training processes,"
" delete this lockfile, and try again."
)
raise e
assert candidate_str is not None
return candidate_str
def worker_devices(self, mode: str):
machine_params: MachineParams = MachineParams.instance_from(
self.config.machine_params(mode)
)
devices = machine_params.devices
assert all_equal(devices) or all(
d.index >= 0 for d in devices
), f"Cannot have a mix of CPU and GPU devices (`devices == {devices}`)"
get_logger().info(f"Using {len(devices)} {mode} workers on devices {devices}")
return devices
def local_worker_ids(self, mode: str):
machine_params: MachineParams = MachineParams.instance_from(
self.config.machine_params(mode, machine_id=self.machine_id)
)
ids = machine_params.local_worker_ids
get_logger().info(
f"Using local worker ids {ids} (total {len(ids)} workers in machine {self.machine_id})"
)
return ids
def init_visualizer(self, mode: str):
if not self.disable_tensorboard:
# Note: Avoid instantiating anything in machine_params (use Builder if needed)
machine_params = MachineParams.instance_from(
self.config.machine_params(mode)
)
self.visualizer = machine_params.visualizer
@staticmethod
def init_process(mode: str, id: int, to_close_on_termination: OnPolicyRLEngine):
ptitle(f"{mode}-{id}")
def create_handler(termination_type: str):
def handler(_signo, _frame):
prefix = f"{termination_type} signal sent to worker {mode}-{id}."
if to_close_on_termination._is_closed:
get_logger().info(
f"{prefix} Worker {mode}-{id} is already closed, exiting."
)
sys.exit(0)
elif not to_close_on_termination._is_closing:
get_logger().info(
f"{prefix} Forcing worker {mode}-{id} to close and exiting."
)
try:
to_close_on_termination.close(True)
except Exception:
get_logger().error(
f"Error occurred when closing the RL engine used by work {mode}-{id}."
f" We cannot recover from this and will simply exit. The exception:"
)
get_logger().exception(traceback.format_exc())
sys.exit(1)
sys.exit(0)
else:
get_logger().info(
f"{prefix} Worker {mode}-{id} is already closing, ignoring this signal."
)
return handler
signal.signal(signal.SIGTERM, create_handler("Termination"))
signal.signal(signal.SIGINT, create_handler("Interrupt"))
@staticmethod
def init_worker(engine_class, args, kwargs):
mode = kwargs["mode"]
id = kwargs["worker_id"]
worker = None
try:
worker = engine_class(*args, **kwargs)
except Exception:
get_logger().error(f"Encountered Exception. Terminating {mode} worker {id}")
get_logger().exception(traceback.format_exc())
kwargs["results_queue"].put((f"{mode}_stopped", 1 + id))
finally:
return worker
@staticmethod
def train_loop(
id: int = 0,
checkpoint: Optional[str] = None,
restart_pipeline: bool = False,
*engine_args,
**engine_kwargs,
):
engine_kwargs["mode"] = TRAIN_MODE_STR
engine_kwargs["worker_id"] = id
engine_kwargs_for_print = {
k: (v if k != "initial_model_state_dict" else "[SUPPRESSED]")
for k, v in engine_kwargs.items()
}
get_logger().info(f"train {id} args {engine_kwargs_for_print}")
trainer: OnPolicyTrainer = OnPolicyRunner.init_worker(
engine_class=OnPolicyTrainer, args=engine_args, kwargs=engine_kwargs
)
if trainer is not None:
OnPolicyRunner.init_process("Train", id, to_close_on_termination=trainer)
trainer.train(
checkpoint_file_name=checkpoint, restart_pipeline=restart_pipeline
)
@staticmethod
def valid_loop(id: int = 0, *engine_args, **engine_kwargs):
engine_kwargs["mode"] = VALID_MODE_STR
engine_kwargs["worker_id"] = id
get_logger().info(f"valid {id} args {engine_kwargs}")
valid = OnPolicyRunner.init_worker(
engine_class=OnPolicyInference, args=engine_args, kwargs=engine_kwargs
)
if valid is not None:
OnPolicyRunner.init_process("Valid", id, to_close_on_termination=valid)
valid.process_checkpoints() # gets checkpoints via queue
@staticmethod
def test_loop(id: int = 0, *engine_args, **engine_kwargs):
engine_kwargs["mode"] = TEST_MODE_STR
engine_kwargs["worker_id"] = id
get_logger().info(f"test {id} args {engine_kwargs}")
test = OnPolicyRunner.init_worker(OnPolicyInference, engine_args, engine_kwargs)
if test is not None:
OnPolicyRunner.init_process("Test", id, to_close_on_termination=test)
test.process_checkpoints() # gets checkpoints via queue
def _initialize_start_train_or_start_test(self):
self._is_closed = False
if self.queues is not None:
for k, q in self.queues.items():
try:
out = q.get(timeout=1)
raise RuntimeError(
f"{k} queue was not empty before starting new training/testing (contained {out})."
f" This should not happen, please report how you obtained this error"
f" by creating an issue at https://github.com/allenai/allenact/issues."
)
except queue.Empty:
pass
self.queues = {
"results": self.mp_ctx.Queue(),
"checkpoints": self.mp_ctx.Queue(),
}
self._local_start_time_str = self._acquire_unique_local_start_time_string()
def get_port(self):
passed_port = int(self.distributed_ip_and_port.split(":")[1])
if passed_port == 0:
assert (
self.machine_id == 0
), "Only runner with `machine_id` == 0 can search for a free port."
distributed_port = find_free_port(
self.distributed_ip_and_port.split(":")[0]
)
else:
distributed_port = passed_port
get_logger().info(
f"Engines on machine_id == {self.machine_id} using port {distributed_port} and seed {self.seed}"
)
return distributed_port
def start_train(
self,
checkpoint: Optional[str] = None,
restart_pipeline: bool = False,
max_sampler_processes_per_worker: Optional[int] = None,
save_ckpt_after_every_pipeline_stage: bool = True,
collect_valid_results: bool = False,
):
self._initialize_start_train_or_start_test()
self._collect_valid_results = collect_valid_results
if not self.disable_config_saving:
self.save_project_state()
devices = self.worker_devices(TRAIN_MODE_STR)
num_workers = len(devices)
# Be extra careful to ensure that all models start
# with the same initializations.
set_seed(self.seed)
initial_model_state_dict = self.config.create_model(
sensor_preprocessor_graph=MachineParams.instance_from(
self.config.machine_params(self.mode)
).sensor_preprocessor_graph
).state_dict()
distributed_port = 0 if num_workers == 1 else self.get_port()
worker_ids = self.local_worker_ids(TRAIN_MODE_STR)
model_hash = None
for trainer_id in worker_ids:
training_kwargs = dict(
id=trainer_id,
checkpoint=checkpoint,
restart_pipeline=restart_pipeline,
experiment_name=self.experiment_name,
config=self.config,
results_queue=self.queues["results"],
checkpoints_queue=self.queues["checkpoints"]
if self.running_validation
else None,
checkpoints_dir=self.checkpoint_dir(),
seed=self.seed,
deterministic_cudnn=self.deterministic_cudnn,
mp_ctx=self.mp_ctx,
num_workers=num_workers,
device=devices[trainer_id],
distributed_ip=self.distributed_ip_and_port.split(":")[0],
distributed_port=distributed_port,
max_sampler_processes_per_worker=max_sampler_processes_per_worker,
save_ckpt_after_every_pipeline_stage=save_ckpt_after_every_pipeline_stage,
initial_model_state_dict=initial_model_state_dict
if model_hash is None
else model_hash,
first_local_worker_id=worker_ids[0],
)
train: BaseProcess = self.mp_ctx.Process(
target=self.train_loop, kwargs=training_kwargs,
)
try:
train.start()
except ValueError as e:
# If the `initial_model_state_dict` is too large we sometimes
# run into errors passing it with multiprocessing. In such cases
# we instead hash the state_dict and confirm, in each engine worker, that
# this hash equals the model the engine worker instantiates.
if e.args[0] == "too many fds":
model_hash = md5_hash_of_state_dict(initial_model_state_dict)
training_kwargs["initial_model_state_dict"] = model_hash
train = self.mp_ctx.Process(
target=self.train_loop, kwargs=training_kwargs,
)
train.start()
else:
raise e
self.processes[TRAIN_MODE_STR].append(train)
get_logger().info(
f"Started {len(self.processes[TRAIN_MODE_STR])} train processes"
)
# Validation
if self.running_validation:
device = self.worker_devices(VALID_MODE_STR)[0]
self.init_visualizer(VALID_MODE_STR)
valid: BaseProcess = self.mp_ctx.Process(
target=self.valid_loop,
args=(0,),
kwargs=dict(
config=self.config,
results_queue=self.queues["results"],
checkpoints_queue=self.queues["checkpoints"],
seed=12345, # TODO allow same order for randomly sampled tasks? Is this useful anyway?
deterministic_cudnn=self.deterministic_cudnn,
deterministic_agents=self.deterministic_agents,
mp_ctx=self.mp_ctx,
device=device,
max_sampler_processes_per_worker=max_sampler_processes_per_worker,
),
)
valid.start()
self.processes[VALID_MODE_STR].append(valid)
get_logger().info(
f"Started {len(self.processes[VALID_MODE_STR])} valid processes"
)
else:
get_logger().info(
"No processes allocated to validation, no validation will be run."
)
metrics_file_template: Optional[str] = None
if self._collect_valid_results:
metrics_dir = self.metric_path(self.local_start_time_str)
os.makedirs(metrics_dir, exist_ok=True)
suffix = f"__valid_{self.local_start_time_str}"
metrics_file_template = os.path.join(
metrics_dir, "metrics" + suffix + "{:012d}.json"
) # template for training steps
get_logger().info(
f"Saving valid metrics with template {metrics_file_template}"
)
# Check output file can be written
with open(metrics_file_template.format(0), "w") as f:
json.dump([], f, indent=4, sort_keys=True, cls=NumpyJSONEncoder)
valid_results = self.log_and_close(
start_time_str=self.local_start_time_str,
nworkers=len(worker_ids), # TODO num_workers once we forward metrics,
metrics_file=metrics_file_template,
)
if not self._collect_valid_results:
return self.local_start_time_str
else:
return self.local_start_time_str, valid_results
def start_test(
self,
checkpoint_path_dir_or_pattern: str,
infer_output_dir: bool = False,
approx_ckpt_step_interval: Optional[Union[float, int]] = None,
max_sampler_processes_per_worker: Optional[int] = None,
inference_expert: bool = False,
) -> List[Dict]:
# Tester always runs on a single machine
assert (
self.machine_id == 0
), f"Received `machine_id={self.machine_id} for test. Only one machine supported."
self.extra_tag += (
"__" * (len(self.extra_tag) > 0) + "enforced_test_expert"
) * inference_expert
self._initialize_start_train_or_start_test()
devices = self.worker_devices(TEST_MODE_STR)
self.init_visualizer(TEST_MODE_STR)
num_testers = len(devices)
distributed_port = 0
if num_testers > 1:
distributed_port = find_free_port()
# Tester always runs on a single machine
for tester_it in range(num_testers):
test: BaseProcess = self.mp_ctx.Process(
target=self.test_loop,
args=(tester_it,),
kwargs=dict(
config=self.config,
results_queue=self.queues["results"],
checkpoints_queue=self.queues["checkpoints"],
seed=12345, # TODO allow same order for randomly sampled tasks? Is this useful anyway?
deterministic_cudnn=self.deterministic_cudnn,
deterministic_agents=self.deterministic_agents,
mp_ctx=self.mp_ctx,
num_workers=num_testers,
device=devices[tester_it],
max_sampler_processes_per_worker=max_sampler_processes_per_worker,
distributed_port=distributed_port,
enforce_expert=inference_expert,
),
)
test.start()
self.processes[TEST_MODE_STR].append(test)
get_logger().info(
f"Started {len(self.processes[TEST_MODE_STR])} test processes"
)
checkpoint_paths = self.get_checkpoint_files(
checkpoint_path_dir_or_pattern=checkpoint_path_dir_or_pattern,
approx_ckpt_step_interval=approx_ckpt_step_interval,
)
steps = [self.step_from_checkpoint(cp) for cp in checkpoint_paths]
get_logger().info(f"Running test on {len(steps)} steps {steps}")
for checkpoint_path in checkpoint_paths:
# Make all testers work on each checkpoint
for tester_it in range(num_testers):
self.queues["checkpoints"].put(("eval", checkpoint_path))
# Signal all testers to terminate cleanly
for _ in range(num_testers):
self.queues["checkpoints"].put(("quit", None))
if self.save_dir_fmt == SaveDirFormat.NESTED:
if infer_output_dir: # NOTE: we change output_dir here
self.output_dir = self.checkpoint_log_folder_str(checkpoint_paths[0])
suffix = ""
elif self.save_dir_fmt == SaveDirFormat.FLAT:
suffix = f"__test_{self.local_start_time_str}"
else:
raise NotImplementedError
metrics_dir = self.metric_path(self.local_start_time_str)
os.makedirs(metrics_dir, exist_ok=True)
metrics_file_path = os.path.join(metrics_dir, "metrics" + suffix + ".json")
get_logger().info(f"Saving test metrics in {metrics_file_path}")
# Check output file can be written
with open(metrics_file_path, "w") as f:
json.dump([], f, indent=4, sort_keys=True, cls=NumpyJSONEncoder)
return self.log_and_close(
start_time_str=self.checkpoint_start_time_str(checkpoint_paths[0]),
nworkers=num_testers,
test_steps=steps,
metrics_file=metrics_file_path,
)
@staticmethod
def checkpoint_start_time_str(checkpoint_file_name):
parts = checkpoint_file_name.split(os.path.sep)
assert len(parts) > 1, f"{checkpoint_file_name} is not a valid checkpoint path"
start_time_str = parts[-2]
get_logger().info(f"Using checkpoint start time {start_time_str}")
return start_time_str
@staticmethod
def checkpoint_log_folder_str(checkpoint_file_name):
parts = checkpoint_file_name.split(os.path.sep)
assert len(parts) > 1, f"{checkpoint_file_name} is not a valid checkpoint path"
log_folder_str = (os.path.sep).join(parts[:-2]) # remove checkpoints/*.pt
get_logger().info(f"Using log folder {log_folder_str}")
return log_folder_str
@property
def experiment_name(self):
if len(self.extra_tag) > 0:
return f"{self.config.tag()}_{self.extra_tag}"
return self.config.tag()
def checkpoint_dir(
self, start_time_str: Optional[str] = None, create_if_none: bool = True
):
path_parts = [
self.config.tag()
if self.extra_tag == ""
else os.path.join(self.config.tag(), self.extra_tag),
start_time_str or self.local_start_time_str,
]
if self.save_dir_fmt == SaveDirFormat.NESTED:
folder = os.path.join(self.output_dir, *path_parts, "checkpoints",)
elif self.save_dir_fmt == SaveDirFormat.FLAT:
folder = os.path.join(self.output_dir, "checkpoints", *path_parts,)
else:
raise NotImplementedError
if create_if_none:
os.makedirs(folder, exist_ok=True)
return folder
def log_writer_path(self, start_time_str: str) -> str:
if self.save_dir_fmt == SaveDirFormat.NESTED:
if self.mode == TEST_MODE_STR:
return os.path.join(
self.output_dir,
"test",
self.config.tag(),
self.local_start_time_str,
)
path = os.path.join(
self.output_dir,
self.config.tag()
if self.extra_tag == ""
else os.path.join(self.config.tag(), self.extra_tag),
start_time_str,
"train_tb",
)
return path
elif self.save_dir_fmt == SaveDirFormat.FLAT:
path = os.path.join(
self.output_dir,
"tb",
self.config.tag()
if self.extra_tag == ""
else os.path.join(self.config.tag(), self.extra_tag),
start_time_str,
)
if self.mode == TEST_MODE_STR:
path = os.path.join(path, "test", self.local_start_time_str)
return path
else:
raise NotImplementedError
def metric_path(self, start_time_str: str) -> str:
if self.save_dir_fmt == SaveDirFormat.NESTED:
return os.path.join(
self.output_dir, "test", self.config.tag(), start_time_str,
)
elif self.save_dir_fmt == SaveDirFormat.FLAT:
return os.path.join(
self.output_dir,
"metrics",
self.config.tag()
if self.extra_tag == ""
else os.path.join(self.config.tag(), self.extra_tag),
start_time_str,
)
else:
raise NotImplementedError
def save_project_state(self):
path_parts = [
self.config.tag()
if self.extra_tag == ""
else os.path.join(self.config.tag(), self.extra_tag),
self.local_start_time_str,
]
if self.save_dir_fmt == SaveDirFormat.NESTED:
base_dir = os.path.join(self.output_dir, *path_parts, "used_configs",)
elif self.save_dir_fmt == SaveDirFormat.FLAT:
base_dir = os.path.join(self.output_dir, "used_configs", *path_parts,)
else:
raise NotImplementedError
os.makedirs(base_dir, exist_ok=True)
# Saving current git diff
try:
sha, diff_str = get_git_diff_of_project()
with open(os.path.join(base_dir, f"{sha}.patch"), "w") as f:
f.write(diff_str)
get_logger().info(f"Git diff saved to {base_dir}")
except subprocess.CalledProcessError:
get_logger().warning(
"Failed to get a git diff of the current project."
f" Is it possible that {os.getcwd()} is not under version control?"
)
# Saving configs
if self.loaded_config_src_files is not None:
for src_path in self.loaded_config_src_files:
if src_path == _CONFIG_KWARGS_STR:
# We also save keyword arguments passed to the experiment
# initializer.
save_path = os.path.join(base_dir, "config_kwargs.json")
assert not os.path.exists(
save_path
), f"{save_path} should not already exist."
with open(save_path, "w") as f:
json.dump(json.loads(self.loaded_config_src_files[src_path]), f)
continue
assert os.path.isfile(src_path), f"Config file {src_path} not found"
src_path = os.path.abspath(src_path)
# To prevent overwriting files with the same name, we loop
# here until we find a prefix (if necessary) to prevent
# name collisions.
k = -1
while True:
prefix = "" if k == -1 else f"namecollision{k}__"
k += 1
dst_path = os.path.join(
base_dir, f"{prefix}{os.path.basename(src_path)}",
)
if not os.path.exists(dst_path):
os.makedirs(os.path.dirname(dst_path), exist_ok=True)
with open(src_path, "r") as f:
file_contents = f.read()
with open(dst_path, "w") as f:
f.write(
f"### THIS FILE ORIGINALLY LOCATED AT '{src_path}'\n\n{file_contents}"
)
break
get_logger().info(f"Config files saved to {base_dir}")
def process_eval_package(
self,
log_writer: Optional[SummaryWriter],
pkg: LoggingPackage,
all_results: Optional[List[Any]] = None,
):
training_steps = pkg.training_steps
checkpoint_file_name = pkg.checkpoint_file_name
render = pkg.viz_data
task_outputs = pkg.metric_dicts
num_tasks = pkg.num_non_empty_metrics_dicts_added
metric_means = pkg.metrics_tracker.means()
mode = pkg.mode
if log_writer is not None:
log_writer.add_scalar(
f"{mode}-misc/num_tasks_evaled", num_tasks, training_steps
)
message = [f"{mode} {training_steps} steps:"]
for k in sorted(metric_means.keys()):
if log_writer is not None:
log_writer.add_scalar(
f"{mode}-metrics/{k}", metric_means[k], training_steps
)
message.append(f"{k} {metric_means[k]}")
if all_results is not None:
results = copy.deepcopy(metric_means)
results.update({"training_steps": training_steps, "tasks": task_outputs})
all_results.append(results)
message.append(f"tasks {num_tasks} checkpoint {checkpoint_file_name}")
get_logger().info(" ".join(message))
if self.visualizer is not None:
self.visualizer.log(
log_writer=log_writer,
task_outputs=task_outputs,
render=render,
num_steps=training_steps,
)
def process_train_packages(
self,
log_writer: Optional[SummaryWriter],
pkgs: List[LoggingPackage],
last_steps=0,
last_offpolicy_steps=0,
last_time=0.0,
):
assert self.mode == TRAIN_MODE_STR
current_time = time.time()
training_steps = pkgs[0].training_steps
offpolicy_steps = pkgs[0].off_policy_steps
if log_writer is not None:
log_writer.add_scalar(
tag="train-misc/pipeline_stage",
scalar_value=pkgs[0].pipeline_stage,
global_step=training_steps,
)
def add_prefix(d: Dict[str, Any], tag: str) -> Dict[str, Any]:
new_dict = {}
for k, v in d.items():
if "offpolicy" in k:
pass
elif k.startswith("losses/"):
k = f"{self.mode}-{k}"
else:
k = f"{self.mode}-{tag}/{k}"
new_dict[k] = v
return new_dict
metrics_and_train_info_tracker = ScalarMeanTracker()
for pkg in pkgs:
metrics_and_train_info_tracker.add_scalars(
scalars=add_prefix(pkg.metrics_tracker.means(), "metrics"),
n=add_prefix(pkg.metrics_tracker.counts(), "metrics"),
)
metrics_and_train_info_tracker.add_scalars(
scalars=add_prefix(pkg.train_info_tracker.means(), "misc"),
n=add_prefix(pkg.train_info_tracker.counts(), "misc"),
)
message = [f"train {training_steps} steps {offpolicy_steps} offpolicy:"]
means = metrics_and_train_info_tracker.means()
for k in sorted(
means.keys(), key=lambda mean_key: (mean_key.count("/"), mean_key)
):
if log_writer is not None:
log_writer.add_scalar(k, means[k], training_steps)
short_key = (
"/".join(k.split("/")[1:]) if k.startswith("train-") and "/" in k else k
)
message.append(f"{short_key} {means[k]:.3g}")
message += [f"elapsed_time {(current_time - last_time):.3g}s"]
if last_steps > 0:
fps = (training_steps - last_steps) / (current_time - last_time)
message += [f"approx_fps {fps:.3g}"]
if log_writer is not None:
log_writer.add_scalar("train-misc/approx_fps", fps, training_steps)
if last_offpolicy_steps > 0:
fps = (offpolicy_steps - last_offpolicy_steps) / (current_time - last_time)
message += [f"offpolicy/approx_fps {fps:.3g}"]
if log_writer is not None:
log_writer.add_scalar("offpolicy/approx_fps", fps, training_steps)
get_logger().info(" ".join(message))
return training_steps, offpolicy_steps, current_time
def process_test_packages(
self,
log_writer: Optional[SummaryWriter],
pkgs: List[LoggingPackage],
all_results: Optional[List[Any]] = None,
):
mode = pkgs[0].mode
assert mode == TEST_MODE_STR
training_steps = pkgs[0].training_steps
all_metrics_tracker = ScalarMeanTracker()
metric_dicts_list, render, checkpoint_file_name = [], {}, []
for pkg in pkgs:
all_metrics_tracker.add_scalars(
scalars=pkg.metrics_tracker.means(), n=pkg.metrics_tracker.counts()
)
metric_dicts_list.extend(pkg.metric_dicts)
if pkg.viz_data is not None:
render.update(pkg.viz_data)
checkpoint_file_name.append(pkg.checkpoint_file_name)
assert all_equal(checkpoint_file_name)
message = [f"{mode} {training_steps} steps:"]
metric_means = all_metrics_tracker.means()
for k in sorted(metric_means.keys()):
if log_writer is not None:
log_writer.add_scalar(
f"{mode}-metrics/{k}", metric_means[k], training_steps
)
message.append(k + f" {metric_means[k]:.3g}")
if all_results is not None:
results = copy.deepcopy(metric_means)
results.update(
{"training_steps": training_steps, "tasks": metric_dicts_list}
)
all_results.append(results)
num_tasks = sum([pkg.num_non_empty_metrics_dicts_added for pkg in pkgs])
if log_writer is not None:
log_writer.add_scalar(
f"{mode}-misc/num_tasks_evaled", num_tasks, training_steps
)
message.append(f"tasks {num_tasks} checkpoint {checkpoint_file_name[0]}")
get_logger().info(" ".join(message))
if self.visualizer is not None:
self.visualizer.log(
log_writer=log_writer,
task_outputs=metric_dicts_list,
render=render,
num_steps=training_steps,
)
def log_and_close(
self,
start_time_str: str,
nworkers: int,
test_steps: Sequence[int] = (),
metrics_file: Optional[str] = None,
) -> List[Dict]:
finalized = False
log_writer: Optional[SummaryWriter] = None
if not self.disable_tensorboard:
log_writer = SummaryWriter(
log_dir=self.log_writer_path(start_time_str),
filename_suffix=f"__{self.mode}_{self.local_start_time_str}",
)
# To aggregate/buffer metrics from trainers/testers
collected: List[LoggingPackage] = []
last_train_steps = 0
last_offpolicy_steps = 0
last_train_time = time.time()
# test_steps = sorted(test_steps, reverse=True)
eval_results: List[Dict] = []
unfinished_workers = nworkers
try:
while True:
try:
package: Union[
LoggingPackage, Union[Tuple[str, Any], Tuple[str, Any, Any]]
] = self.queues["results"].get(timeout=1)
if isinstance(package, LoggingPackage):
pkg_mode = package.mode
if pkg_mode == TRAIN_MODE_STR:
collected.append(package)
if len(collected) >= nworkers:
collected = sorted(
collected,
key=lambda pkg: (
pkg.training_steps,
pkg.off_policy_steps,
),
)
if (
collected[nworkers - 1].training_steps
== collected[0].training_steps
and collected[nworkers - 1].off_policy_steps
== collected[0].off_policy_steps
): # ensure nworkers have provided the same num_steps
(
last_train_steps,
last_offpolicy_steps,
last_train_time,
) = self.process_train_packages(
log_writer=log_writer,
pkgs=collected[:nworkers],
last_steps=last_train_steps,
last_offpolicy_steps=last_offpolicy_steps,
last_time=last_train_time,
)
collected = collected[nworkers:]
elif len(collected) > 2 * nworkers:
get_logger().warning(
f"Unable to aggregate train packages from all {nworkers} workers"
f"after {len(collected)} packages collected"
)
elif (
pkg_mode == VALID_MODE_STR
): # they all come from a single worker
if (
package.training_steps is not None
): # no validation samplers
self.process_eval_package(
log_writer=log_writer,
pkg=package,
all_results=eval_results
if self._collect_valid_results
else None,
)
if metrics_file is not None:
with open(
metrics_file.format(package.training_steps), "w"
) as f:
json.dump(
eval_results[-1],
f,
indent=4,
sort_keys=True,
cls=NumpyJSONEncoder,
)
get_logger().info(
"Written valid results file {}".format(
metrics_file.format(
package.training_steps
),
)
)
if (
finalized and self.queues["checkpoints"].empty()
): # assume queue is actually empty after trainer finished and no checkpoints in queue
break
elif pkg_mode == TEST_MODE_STR:
collected.append(package)
if len(collected) >= nworkers:
collected = sorted(
collected, key=lambda x: x.training_steps
) # sort by num_steps
if (
collected[nworkers - 1].training_steps
== collected[0].training_steps
): # ensure nworkers have provided the same num_steps
self.process_test_packages(
log_writer=log_writer,
pkgs=collected[:nworkers],
all_results=eval_results,
)
collected = collected[nworkers:]
with open(metrics_file, "w") as f:
json.dump(
eval_results,
f,
indent=4,
sort_keys=True,
cls=NumpyJSONEncoder,
)
get_logger().info(
"Updated {} up to checkpoint {}".format(
metrics_file,
test_steps[len(eval_results) - 1],
)
)
else:
get_logger().error(
f"Runner received unknown package of type {pkg_mode}"
)
else:
pkg_mode = package[0]
if pkg_mode == "train_stopped":
if package[1] == 0:
finalized = True
if not self.running_validation:
get_logger().info(
"Terminating runner after trainer done (no validation)"
)
break
else:
raise Exception(
f"Train worker {package[1] - 1} abnormally terminated"
)
elif pkg_mode == "valid_stopped":
raise Exception(
f"Valid worker {package[1] - 1} abnormally terminated"
)
elif pkg_mode == "test_stopped":
if package[1] == 0:
unfinished_workers -= 1
if unfinished_workers == 0:
get_logger().info(
"Last tester finished. Terminating"
)
finalized = True
break
else:
raise RuntimeError(
f"Test worker {package[1] - 1} abnormally terminated"
)
else:
get_logger().error(
f"Runner received invalid package tuple {package}"
)
except queue.Empty as _:
if all(
p.exitcode is not None
for p in itertools.chain(*self.processes.values())
):
break
except KeyboardInterrupt:
get_logger().info("KeyboardInterrupt. Terminating runner.")
except Exception:
get_logger().error("Encountered Exception. Terminating runner.")
get_logger().exception(traceback.format_exc())
finally:
if finalized:
get_logger().info("Done")
if log_writer is not None:
log_writer.close()
self.close()
return eval_results
def get_checkpoint_files(
self,
checkpoint_path_dir_or_pattern: str,
approx_ckpt_step_interval: Optional[int] = None,
):
if os.path.isdir(checkpoint_path_dir_or_pattern):
# The fragment is a path to a directory, lets use this directory
# as the base dir to search for checkpoints
checkpoint_path_dir_or_pattern = os.path.join(
checkpoint_path_dir_or_pattern, "*.pt"
)
ckpt_paths = glob.glob(checkpoint_path_dir_or_pattern, recursive=True)
if len(ckpt_paths) == 0:
raise FileNotFoundError(
f"Could not find any checkpoints at {os.path.abspath(checkpoint_path_dir_or_pattern)}, is it possible"
f" the path has been mispecified?"
)
step_count_ckpt_pairs = [(self.step_from_checkpoint(p), p) for p in ckpt_paths]
step_count_ckpt_pairs.sort()
ckpts_paths = [p for _, p in step_count_ckpt_pairs]
step_counts = np.array([sc for sc, _ in step_count_ckpt_pairs])
if approx_ckpt_step_interval is not None:
assert (
approx_ckpt_step_interval > 0
), "`approx_ckpt_step_interval` must be >0"
inds_to_eval = set()
for i in range(
math.ceil(step_count_ckpt_pairs[-1][0] / approx_ckpt_step_interval) + 1
):
inds_to_eval.add(
int(np.argmin(np.abs(step_counts - i * approx_ckpt_step_interval)))
)
ckpts_paths = [ckpts_paths[ind] for ind in sorted(list(inds_to_eval))]
return ckpts_paths
@staticmethod
def step_from_checkpoint(ckpt_path: str) -> int:
parts = os.path.basename(ckpt_path).split("__")
for part in parts:
if "steps_" in part:
possible_num = part.split("_")[-1].split(".")[0]
if possible_num.isdigit():
return int(possible_num)
get_logger().warning(
f"The checkpoint {os.path.basename(ckpt_path)} does not follow the checkpoint naming convention"
f" used by AllenAct. As a fall back we must load the checkpoint into memory to find the"
f" training step count, this may increase startup time if the checkpoints are large or many"
f" must be loaded in sequence."
)
ckpt = torch.load(ckpt_path, map_location="cpu")
return ckpt["total_steps"]
def close(self, verbose=True):
if self._is_closed:
return
def logif(s: Union[str, Exception]):
if verbose:
if isinstance(s, str):
get_logger().info(s)
elif isinstance(s, Exception):
get_logger().exception(traceback.format_exc())
else:
raise NotImplementedError()
# First send termination signals
for process_type in self.processes:
for it, process in enumerate(self.processes[process_type]):
if process.is_alive():
logif(f"Terminating {process_type} {it}")
process.terminate()
# Now join processes
for process_type in self.processes:
for it, process in enumerate(self.processes[process_type]):
try:
logif(f"Joining {process_type} {it}")
process.join(1)
logif(f"Closed {process_type} {it}")
except Exception as e:
logif(f"Exception raised when closing {process_type} {it}")
logif(e)
self.processes.clear()
self._is_closed = True
def __del__(self):
self.close(verbose=True)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close(verbose=True)
| ask4help-main | allenact/algorithms/onpolicy_sync/runner.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
from collections import OrderedDict
from typing import TypeVar, Generic, Tuple, Optional, Union, Dict, List, Any
import gym
import torch
from gym.spaces.dict import Dict as SpaceDict
import torch.nn as nn
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import ActorCriticOutput, Memory
DistributionType = TypeVar("DistributionType")
MemoryDimType = Tuple[str, Optional[int]]
MemoryShapeType = Tuple[MemoryDimType, ...]
MemorySpecType = Tuple[MemoryShapeType, torch.dtype]
FullMemorySpecType = Dict[str, MemorySpecType]
ObservationType = Dict[str, Union[torch.Tensor, Dict[str, Any]]]
ActionType = Union[torch.Tensor, OrderedDict, Tuple, int]
class ActorCriticModel(Generic[DistributionType], nn.Module):
"""Abstract class defining a deep (recurrent) actor critic agent.
When defining a new agent, you should subclass this class and implement the abstract methods.
# Attributes
action_space : The space of actions available to the agent. This is of type `gym.spaces.Space`.
observation_space: The observation space expected by the agent. This is of type `gym.spaces.Dict`.
"""
def __init__(self, action_space: gym.Space, observation_space: SpaceDict):
"""Initializer.
# Parameters
action_space : The space of actions available to the agent.
observation_space: The observation space expected by the agent.
"""
super().__init__()
self.action_space = action_space
self.observation_space = observation_space
self.memory_spec: Optional[List[Optional[FullMemorySpecType]]] = None
@property
def recurrent_memory_specification(self) -> Optional[FullMemorySpecType]:
"""The memory specification for the `ActorCriticModel`. See docs for
`_recurrent_memory_shape`
# Returns
The memory specification from `_recurrent_memory_shape`.
"""
if self.memory_spec is None:
self.memory_spec = [self._recurrent_memory_specification()]
spec = self.memory_spec[0]
if spec is None:
return None
for key in spec:
dims, _ = spec[key]
dim_names = [d[0] for d in dims]
assert (
"step" not in dim_names
), "`step` is automatically added and cannot be reused"
assert "sampler" in dim_names, "`sampler` dim must be defined"
return self.memory_spec[0]
@abc.abstractmethod
def _recurrent_memory_specification(self) -> Optional[FullMemorySpecType]:
"""Implementation of memory specification for the `ActorCriticModel`.
# Returns
If None, it indicates the model is memory-less.
Otherwise, it is a one-level dictionary (a map) with string keys (memory type identification) and
tuple values (memory type specification). Each specification tuple contains:
1. Memory type named shape, e.g.
`(("layer", 1), ("sampler", None), ("agent", 2), ("hidden", 32))`
for a two-agent GRU memory, where
the `sampler` dimension placeholder *always* precedes the optional `agent` dimension;
the optional `agent` dimension has the number of agents in the model and is *always* the one after
`sampler` if present;
and `layer` and `hidden` correspond to the standard RNN hidden state parametrization.
2. The data type, e.g. `torch.float32`.
The `sampler` dimension placeholder is mandatory for all memories.
For a single-agent ActorCritic model it is often more convenient to skip the agent dimension, e.g.
`(("layer", 1), ("sampler", None), ("hidden", 32))` for a GRU memory.
"""
raise NotImplementedError()
@abc.abstractmethod
def forward( # type:ignore
self,
observations: ObservationType,
memory: Memory,
prev_actions: ActionType,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
"""Transforms input observations (& previous hidden state) into action
probabilities and the state value.
# Parameters
observations : Multi-level map from key strings to tensors of shape [steps, samplers, (agents,) ...] with the
current observations.
memory : `Memory` object with recurrent memory. The shape of each tensor is determined by the corresponding
entry in `_recurrent_memory_specification`.
prev_actions : ActionType with tensors of shape [steps, samplers, ...] with the previous actions.
masks : tensor of shape [steps, samplers, agents, 1] with zeros indicating steps where a new episode/task
starts.
# Returns
A tuple whose first element is an object of class ActorCriticOutput which stores
the agents' probability distribution over possible actions (shape [steps, samplers, ...]),
the agents' value for the state (shape [steps, samplers, ..., 1]), and any extra information needed for
loss computations. The second element is an optional `Memory`, which is only used in models with recurrent
memory.
"""
raise NotImplementedError()
class LinearActorCriticHead(nn.Module):
def __init__(self, input_size: int, num_actions: int):
super().__init__()
self.input_size = input_size
self.num_actions = num_actions
self.actor_and_critic = nn.Linear(input_size, 1 + num_actions)
nn.init.orthogonal_(self.actor_and_critic.weight)
nn.init.constant_(self.actor_and_critic.bias, 0)
def forward(self, x) -> Tuple[CategoricalDistr, torch.Tensor]:
out = self.actor_and_critic(x)
logits = out[..., :-1]
values = out[..., -1:]
# noinspection PyArgumentList
return (
# logits are [step, sampler, ...]
CategoricalDistr(logits=logits),
# values are [step, sampler, flattened]
values.view(*values.shape[:2], -1),
)
class LinearCriticHead(nn.Module):
def __init__(self, input_size: int):
super().__init__()
self.fc = nn.Linear(input_size, 1)
nn.init.orthogonal_(self.fc.weight)
nn.init.constant_(self.fc.bias, 0)
def forward(self, x):
return self.fc(x).view(*x.shape[:2], -1) # [steps, samplers, flattened]
class LinearActorHead(nn.Module):
def __init__(self, num_inputs: int, num_outputs: int):
super().__init__()
self.linear = nn.Linear(num_inputs, num_outputs)
nn.init.orthogonal_(self.linear.weight, gain=0.01)
nn.init.constant_(self.linear.bias, 0)
def forward(self, x: torch.FloatTensor): # type: ignore
x = self.linear(x) # type:ignore
# noinspection PyArgumentList
return CategoricalDistr(logits=x) # logits are [step, sampler, ...]
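# Hypothetical minimal subclass sketch (an assumption for illustration, not part of the
# original file): a memory-less actor-critic over a single vector observation under key
# "obs", showing the [steps, samplers, ...] conventions and the
# `_recurrent_memory_specification` contract described above.
class _ExampleLinearActorCritic(ActorCriticModel[CategoricalDistr]):
    def __init__(self, action_space: gym.spaces.Discrete, observation_space: SpaceDict):
        super().__init__(action_space=action_space, observation_space=observation_space)
        obs_dim = observation_space["obs"].shape[0]
        self.head = LinearActorCriticHead(input_size=obs_dim, num_actions=action_space.n)

    def _recurrent_memory_specification(self) -> Optional[FullMemorySpecType]:
        # Memory-less model; a single-agent GRU model would instead return e.g.
        #   {"rnn": ((("layer", 1), ("sampler", None), ("hidden", 32)), torch.float32)}
        return None

    def forward(
        self,
        observations: ObservationType,
        memory: Memory,
        prev_actions: ActionType,
        masks: torch.FloatTensor,
    ) -> Tuple[ActorCriticOutput[CategoricalDistr], Optional[Memory]]:
        # observations["obs"] has shape [steps, samplers, obs_dim]
        distr, values = self.head(observations["obs"])
        return ActorCriticOutput(distributions=distr, values=values, extras={}), None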
| ask4help-main | allenact/algorithms/onpolicy_sync/policy.py |
ask4help-main | allenact/algorithms/onpolicy_sync/__init__.py |
|
"""Defines the reinforcement learning `OnPolicyRLEngine`."""
import datetime
import itertools
import logging
import os
import random
import time
import traceback
from collections import defaultdict
from multiprocessing.context import BaseContext
from typing import (
Optional,
Any,
Dict,
Union,
List,
Sequence,
cast,
Iterator,
Callable,
Tuple,
)
from functools import partial
import torch
import torch.distributed as dist # type: ignore
import torch.distributions # type: ignore
import torch.multiprocessing as mp # type: ignore
import torch.nn as nn
import torch.optim as optim
from allenact.utils.model_utils import md5_hash_of_state_dict
try:
# noinspection PyProtectedMember
from torch.optim.lr_scheduler import _LRScheduler
except (ImportError, ModuleNotFoundError):
raise ImportError("`_LRScheduler` was not found in `torch.optim.lr_scheduler`")
from allenact.algorithms.onpolicy_sync.losses.abstract_loss import (
AbstractActorCriticLoss,
)
from allenact.algorithms.onpolicy_sync.policy import ActorCriticModel
from allenact.algorithms.onpolicy_sync.storage import RolloutStorage
from allenact.algorithms.onpolicy_sync.vector_sampled_tasks import (
VectorSampledTasks,
COMPLETE_TASK_METRICS_KEY,
SingleProcessVectorSampledTasks,
)
from allenact.base_abstractions.experiment_config import ExperimentConfig, MachineParams
from allenact.base_abstractions.misc import RLStepResult
from allenact.base_abstractions.distributions import TeacherForcingDistr
from allenact.utils import spaces_utils as su
from allenact.utils.experiment_utils import (
set_seed,
TrainingPipeline,
LoggingPackage,
Builder,
PipelineStage,
set_deterministic_cudnn,
ScalarMeanTracker,
)
from allenact.utils.system import get_logger
from allenact.utils.tensor_utils import (
batch_observations,
to_device_recursively,
detach_recursively,
)
from allenact.utils.viz_utils import VizSuite
TRAIN_MODE_STR = "train"
VALID_MODE_STR = "valid"
TEST_MODE_STR = "test"
class OnPolicyRLEngine(object):
"""The reinforcement learning primary controller.
This `OnPolicyRLEngine` class handles all training, validation, and
testing as well as logging and checkpointing. You are not expected
to instantiate this class yourself, instead you should define an
experiment which will then be used to instantiate an
`OnPolicyRLEngine` and perform any desired tasks.
"""
def __init__(
self,
experiment_name: str,
config: ExperimentConfig,
results_queue: mp.Queue, # to output aggregated results
checkpoints_queue: Optional[
mp.Queue
], # to write/read (trainer/evaluator) ready checkpoints
checkpoints_dir: str,
mode: str = "train",
seed: Optional[int] = None,
deterministic_cudnn: bool = False,
mp_ctx: Optional[BaseContext] = None,
worker_id: int = 0,
num_workers: int = 1,
device: Union[str, torch.device, int] = "cpu",
distributed_ip: str = "127.0.0.1",
distributed_port: int = 0,
deterministic_agents: bool = False,
max_sampler_processes_per_worker: Optional[int] = None,
initial_model_state_dict: Optional[Union[Dict[str, Any], int]] = None,
**kwargs,
):
"""Initializer.
# Parameters
config : The ExperimentConfig defining the experiment to run.
output_dir : Root directory at which checkpoints and logs should be saved.
seed : Seed used to encourage deterministic behavior (it is difficult to ensure
completely deterministic behavior due to CUDA issues and nondeterminism
in environments).
mode : "train", "valid", or "test".
deterministic_cudnn : Whether or not to use deterministic cudnn. If `True` this may lower
training performance; this is necessary (but not sufficient) if you desire
deterministic behavior.
extra_tag : An additional label to add to the experiment when saving tensorboard logs.
"""
self.config = config
self.results_queue = results_queue
self.checkpoints_queue = checkpoints_queue
self.mp_ctx = mp_ctx
self.checkpoints_dir = checkpoints_dir
self.worker_id = worker_id
self.num_workers = num_workers
self.device = torch.device("cpu") if device == -1 else torch.device(device) # type: ignore
self.distributed_ip = distributed_ip
self.distributed_port = distributed_port
self.mode = mode.lower().strip()
assert self.mode in [
TRAIN_MODE_STR,
VALID_MODE_STR,
TEST_MODE_STR,
], 'Only "train", "valid", "test" modes supported'
self.deterministic_cudnn = deterministic_cudnn
if self.deterministic_cudnn:
set_deterministic_cudnn()
self.seed = seed
set_seed(self.seed)
self.experiment_name = experiment_name
assert (
max_sampler_processes_per_worker is None
or max_sampler_processes_per_worker >= 1
), "`max_sampler_processes_per_worker` must be either `None` or a positive integer."
self.max_sampler_processes_per_worker = max_sampler_processes_per_worker
machine_params = config.machine_params(self.mode)
self.machine_params: MachineParams
if isinstance(machine_params, MachineParams):
self.machine_params = machine_params
else:
self.machine_params = MachineParams(**machine_params)
self.num_samplers_per_worker = self.machine_params.nprocesses
self.num_samplers = self.num_samplers_per_worker[self.worker_id]
self._vector_tasks: Optional[
Union[VectorSampledTasks, SingleProcessVectorSampledTasks]
] = None
self.sensor_preprocessor_graph = None
self.actor_critic: Optional[ActorCriticModel] = None
if self.num_samplers > 0:
create_model_kwargs = {}
if self.machine_params.sensor_preprocessor_graph is not None:
self.sensor_preprocessor_graph = self.machine_params.sensor_preprocessor_graph.to(
self.device
)
create_model_kwargs[
"sensor_preprocessor_graph"
] = self.sensor_preprocessor_graph
set_seed(self.seed)
self.actor_critic = cast(
ActorCriticModel, self.config.create_model(**create_model_kwargs),
).to(self.device)
if initial_model_state_dict is not None:
if isinstance(initial_model_state_dict, int):
assert (
md5_hash_of_state_dict(self.actor_critic.state_dict())
== initial_model_state_dict
), (
f"Could not reproduce the correct model state dict on worker {self.worker_id} despite seeding."
f" Please ensure that your model's initialization is reproducable when `set_seed(...)`"
f"] has been called with a fixed seed before initialization."
)
else:
self.actor_critic.load_state_dict(state_dict=initial_model_state_dict)
else:
assert mode != TRAIN_MODE_STR or self.num_workers == 1, (
"When training with multiple workers you must pass a,"
" non-`None` value for the `initial_model_state_dict` argument."
)
if get_logger().level == logging.DEBUG:
model_hash = md5_hash_of_state_dict(self.actor_critic.state_dict())
get_logger().debug(
f"WORKER ({self.mode}): {self.worker_id}, model weights hash: {model_hash}"
)
self.is_distributed = False
self.store: Optional[torch.distributed.TCPStore] = None # type:ignore
if self.num_workers > 1:
self.store = torch.distributed.TCPStore( # type:ignore
self.distributed_ip,
self.distributed_port,
self.num_workers,
self.worker_id == 0,
)
cpu_device = self.device == torch.device("cpu") # type:ignore
dist.init_process_group( # type:ignore
backend="gloo" if cpu_device or self.mode == TEST_MODE_STR else "nccl",
store=self.store,
rank=self.worker_id,
world_size=self.num_workers,
                # During testing we sometimes found that the default timeout was too short,
                # resulting in the run terminating surprisingly; we increase it here.
timeout=datetime.timedelta(minutes=3000)
if self.mode == TEST_MODE_STR
else dist.default_pg_timeout,
)
self.is_distributed = True
self.deterministic_agents = deterministic_agents
self._is_closing: bool = (
False # Useful for letting the RL runner know if this is closing
)
self._is_closed: bool = False
self.training_pipeline: Optional[TrainingPipeline] = None
# Keeping track of metrics during training/inference
self.single_process_metrics: List = []
@property
def vector_tasks(
self,
) -> Union[VectorSampledTasks, SingleProcessVectorSampledTasks]:
if self._vector_tasks is None and self.num_samplers > 0:
if self.is_distributed:
total_processes = sum(
self.num_samplers_per_worker
) # TODO this will break the fixed seed for multi-device test
else:
total_processes = self.num_samplers
seeds = self.worker_seeds(
total_processes,
initial_seed=self.seed, # do not update the RNG state (creation might happen after seed resetting)
)
# TODO: The `self.max_sampler_processes_per_worker == 1` case below would be
# great to have but it does not play nicely with us wanting to kill things
# using SIGTERM/SIGINT signals. Would be nice to figure out a solution to
# this at some point.
# if self.max_sampler_processes_per_worker == 1:
# # No need to instantiate a new task sampler processes if we're
# # restricted to one sampler process for this worker.
# self._vector_tasks = SingleProcessVectorSampledTasks(
# make_sampler_fn=self.config.make_sampler_fn,
# sampler_fn_args_list=self.get_sampler_fn_args(seeds),
# )
# else:
self._vector_tasks = VectorSampledTasks(
make_sampler_fn=self.config.make_sampler_fn,
sampler_fn_args=self.get_sampler_fn_args(seeds),
multiprocessing_start_method="forkserver"
if self.mp_ctx is None
else None,
mp_ctx=self.mp_ctx,
max_processes=self.max_sampler_processes_per_worker,
)
return self._vector_tasks
@staticmethod
def worker_seeds(nprocesses: int, initial_seed: Optional[int]) -> List[int]:
"""Create a collection of seeds for workers without modifying the RNG
state."""
rstate = None # type:ignore
if initial_seed is not None:
rstate = random.getstate()
random.seed(initial_seed)
seeds = [random.randint(0, (2 ** 31) - 1) for _ in range(nprocesses)]
if initial_seed is not None:
random.setstate(rstate)
return seeds
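    # A minimal usage sketch for `worker_seeds` (the numbers are illustrative):
    #
    #   seeds = OnPolicyRLEngine.worker_seeds(nprocesses=4, initial_seed=12345)
    #   # `seeds` is a list of 4 ints in [0, 2**31 - 1]; calling it again with the same
    #   # `initial_seed` returns the same list, and `random.getstate()` is left untouched
    #   # because the previous RNG state is restored before returning.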
def get_sampler_fn_args(self, seeds: Optional[List[int]] = None):
sampler_devices = self.machine_params.sampler_devices
if self.mode == TRAIN_MODE_STR:
fn = self.config.train_task_sampler_args
elif self.mode == VALID_MODE_STR:
fn = self.config.valid_task_sampler_args
elif self.mode == TEST_MODE_STR:
fn = self.config.test_task_sampler_args
else:
raise NotImplementedError(
"self.mode must be one of `train`, `valid` or `test`."
)
if self.is_distributed:
total_processes = sum(self.num_samplers_per_worker)
process_offset = sum(self.num_samplers_per_worker[: self.worker_id])
else:
total_processes = self.num_samplers
process_offset = 0
sampler_devices_as_ints: Optional[List[int]] = None
if (
self.is_distributed or self.mode == TEST_MODE_STR
) and self.device.index is not None:
sampler_devices_as_ints = [self.device.index]
elif sampler_devices is not None:
sampler_devices_as_ints = [
-1 if sd.index is None else sd.index for sd in sampler_devices
]
return [
fn(
process_ind=process_offset + it,
total_processes=total_processes,
devices=sampler_devices_as_ints,
seeds=seeds,
)
for it in range(self.num_samplers)
]
def checkpoint_load(
self, ckpt: Union[str, Dict[str, Any]]
) -> Dict[str, Union[Dict[str, Any], torch.Tensor, float, int, str, List]]:
if isinstance(ckpt, str):
get_logger().info(
"{} worker {} loading checkpoint from {}".format(
self.mode, self.worker_id, ckpt
)
)
# Map location CPU is almost always better than mapping to a CUDA device.
ckpt = torch.load(os.path.abspath(ckpt), map_location="cpu")
ckpt = cast(
Dict[str, Union[Dict[str, Any], torch.Tensor, float, int, str, List]], ckpt,
)
self.actor_critic.load_state_dict(ckpt["model_state_dict"]) # type:ignore
return ckpt
# aggregates task metrics currently in queue
def aggregate_task_metrics(
self, logging_pkg: LoggingPackage, num_tasks: int = -1,
) -> LoggingPackage:
if num_tasks > 0:
if len(self.single_process_metrics) != num_tasks:
error_msg = (
"shorter"
if len(self.single_process_metrics) < num_tasks
else "longer"
)
get_logger().error(
f"Metrics out is {error_msg} than expected number of tasks."
" This should only happen if a positive number of `num_tasks` were"
" set during testing but the queue did not contain this number of entries."
" Please file an issue at https://github.com/allenai/allenact/issues."
)
num_empty_tasks_dequeued = 0
for metrics_dict in self.single_process_metrics:
num_empty_tasks_dequeued += not logging_pkg.add_metrics_dict(
single_task_metrics_dict=metrics_dict
)
self.single_process_metrics = []
if num_empty_tasks_dequeued != 0:
get_logger().warning(
"Discarded {} empty task metrics".format(num_empty_tasks_dequeued)
)
return logging_pkg
def _preprocess_observations(self, batched_observations):
if self.sensor_preprocessor_graph is None:
return batched_observations
return self.sensor_preprocessor_graph.get_observations(batched_observations)
def remove_paused(self, observations):
paused, keep, running = [], [], []
for it, obs in enumerate(observations):
if obs is None:
paused.append(it)
else:
keep.append(it)
running.append(obs)
for p in reversed(paused):
self.vector_tasks.pause_at(p)
# Group samplers along new dim:
batch = batch_observations(running, device=self.device)
return len(paused), keep, batch
def initialize_rollouts(self, rollouts, visualizer: Optional[VizSuite] = None):
observations = self.vector_tasks.get_observations()
npaused, keep, batch = self.remove_paused(observations)
if npaused > 0:
rollouts.sampler_select(keep)
rollouts.to(self.device)
rollouts.insert_observations(
self._preprocess_observations(batch) if len(keep) > 0 else batch
)
if visualizer is not None and len(keep) > 0:
visualizer.collect(vector_task=self.vector_tasks, alive=keep)
return npaused
@property
def num_active_samplers(self):
return self.vector_tasks.num_unpaused_tasks
def act(self, rollouts: RolloutStorage, dist_wrapper_class: Optional[type] = None):
with torch.no_grad():
step_observation = rollouts.pick_observation_step(rollouts.step)
memory = rollouts.pick_memory_step(rollouts.step)
prev_actions = rollouts.pick_prev_actions_step(rollouts.step)
actor_critic_output, memory = self.actor_critic(
step_observation,
memory,
prev_actions,
rollouts.masks[rollouts.step : rollouts.step + 1],
)
distr = actor_critic_output.distributions
if dist_wrapper_class is not None:
distr = dist_wrapper_class(distr=distr, obs=step_observation)
actions = distr.sample() if not self.deterministic_agents else distr.mode()
return actions, actor_critic_output, memory, step_observation
@staticmethod
def _active_memory(memory, keep):
return memory.sampler_select(keep) if memory is not None else memory
def probe(self, dones: List[bool], npaused, period=100000):
"""Debugging util. When called from self.collect_rollout_step(...),
calls render for the 0-th task sampler of the 0-th distributed worker
for the first beginning episode spaced at least period steps from the
beginning of the previous one.
For valid, train, it currently renders all episodes for the 0-th task sampler of the
0-th distributed worker. If this is not wanted, it must be hard-coded for now below.
:param dones: dones list from self.collect_rollout_step(...)
:param npaused: number of newly paused tasks returned by self.removed_paused(...)
:param period: minimal spacing in sampled steps between the beginning of episodes to be shown.
"""
sampler_id = 0
done = dones[sampler_id]
if self.mode != TRAIN_MODE_STR:
setattr(
self, "_probe_npaused", getattr(self, "_probe_npaused", 0) + npaused
)
if self._probe_npaused == self.num_samplers: # type:ignore
del self._probe_npaused # type:ignore
return
period = 0
if self.worker_id == 0:
if done:
if period > 0 and (
getattr(self, "_probe_steps", None) is None
or (
self._probe_steps < 0 # type:ignore
and (
self.training_pipeline.total_steps
+ self._probe_steps # type:ignore
)
>= period
)
):
self._probe_steps = self.training_pipeline.total_steps
if period == 0 or (
getattr(self, "_probe_steps", None) is not None
and self._probe_steps >= 0
and ((self.training_pipeline.total_steps - self._probe_steps) < period)
):
if (
period == 0
or not done
or self._probe_steps == self.training_pipeline.total_steps
):
self.vector_tasks.call_at(sampler_id, "render", ["human"])
else:
self._probe_steps = -self._probe_steps
def collect_rollout_step(
self, rollouts: RolloutStorage, visualizer=None, dist_wrapper_class=None
) -> int:
actions, actor_critic_output, memory, _ = self.act(
rollouts=rollouts, dist_wrapper_class=dist_wrapper_class
)
# Flatten actions
flat_actions = su.flatten(self.actor_critic.action_space, actions)
assert len(flat_actions.shape) == 3, (
"Distribution samples must include step and task sampler dimensions [step, sampler, ...]. The simplest way"
"to accomplish this is to pass param tensors (like `logits` in a `CategoricalDistr`) with these dimensions"
"to the Distribution."
)
# Convert flattened actions into list of actions and send them
outputs: List[RLStepResult] = self.vector_tasks.step(
su.action_list(self.actor_critic.action_space, flat_actions)
)
# Save after task completion metrics
for step_result in outputs:
if (
step_result.info is not None
and COMPLETE_TASK_METRICS_KEY in step_result.info
):
self.single_process_metrics.append(
step_result.info[COMPLETE_TASK_METRICS_KEY]
)
del step_result.info[COMPLETE_TASK_METRICS_KEY]
rewards: Union[List, torch.Tensor]
observations, rewards, dones, infos = [list(x) for x in zip(*outputs)]
rewards = torch.tensor(
rewards, dtype=torch.float, device=self.device, # type:ignore
)
# We want rewards to have dimensions [sampler, reward]
if len(rewards.shape) == 1:
# Rewards are of shape [sampler,]
rewards = rewards.unsqueeze(-1)
elif len(rewards.shape) > 1:
raise NotImplementedError()
# If done then clean the history of observations.
masks = (
1.0
- torch.tensor(
dones, dtype=torch.float32, device=self.device, # type:ignore
)
).view(
-1, 1
) # [sampler, 1]
npaused, keep, batch = self.remove_paused(observations)
# TODO self.probe(...) can be useful for debugging (we might want to control it from main?)
# self.probe(dones, npaused)
if npaused > 0:
rollouts.sampler_select(keep)
rollouts.insert(
observations=self._preprocess_observations(batch)
if len(keep) > 0
else batch,
memory=self._active_memory(memory, keep),
actions=flat_actions[0, keep],
action_log_probs=actor_critic_output.distributions.log_prob(actions)[
0, keep
],
value_preds=actor_critic_output.values[0, keep],
rewards=rewards[keep],
masks=masks[keep],
)
# TODO we always miss tensors for the last action in the last episode of each worker
if visualizer is not None:
if len(keep) > 0:
visualizer.collect(
rollout=rollouts,
vector_task=self.vector_tasks,
alive=keep,
actor_critic=actor_critic_output,
)
else:
visualizer.collect(actor_critic=actor_critic_output)
return npaused
def close(self, verbose=True):
self._is_closing = True
if "_is_closed" in self.__dict__ and self._is_closed:
return
def logif(s: Union[str, Exception]):
if verbose:
if isinstance(s, str):
get_logger().info(s)
elif isinstance(s, Exception):
get_logger().error(traceback.format_exc())
else:
raise NotImplementedError()
if "_vector_tasks" in self.__dict__ and self._vector_tasks is not None:
try:
logif(
"{} worker {} Closing OnPolicyRLEngine.vector_tasks.".format(
self.mode, self.worker_id
)
)
self._vector_tasks.close()
logif("{} worker {} Closed.".format(self.mode, self.worker_id))
except Exception as e:
logif(
"{} worker {} Exception raised when closing OnPolicyRLEngine.vector_tasks:".format(
self.mode, self.worker_id
)
)
logif(e)
self._is_closed = True
self._is_closing = False
def __del__(self):
self.close(verbose=False)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close(verbose=False)
class OnPolicyTrainer(OnPolicyRLEngine):
def __init__(
self,
experiment_name: str,
config: ExperimentConfig,
results_queue: mp.Queue,
checkpoints_queue: Optional[mp.Queue],
checkpoints_dir: str = "",
seed: Optional[int] = None,
deterministic_cudnn: bool = False,
mp_ctx: Optional[BaseContext] = None,
worker_id: int = 0,
num_workers: int = 1,
device: Union[str, torch.device, int] = "cpu",
distributed_ip: str = "127.0.0.1",
distributed_port: int = 0,
deterministic_agents: bool = False,
distributed_preemption_threshold: float = 0.7,
max_sampler_processes_per_worker: Optional[int] = None,
save_ckpt_after_every_pipeline_stage: bool = True,
first_local_worker_id: int = 0,
**kwargs,
):
kwargs["mode"] = TRAIN_MODE_STR
super().__init__(
experiment_name=experiment_name,
config=config,
results_queue=results_queue,
checkpoints_queue=checkpoints_queue,
checkpoints_dir=checkpoints_dir,
seed=seed,
deterministic_cudnn=deterministic_cudnn,
mp_ctx=mp_ctx,
worker_id=worker_id,
num_workers=num_workers,
device=device,
distributed_ip=distributed_ip,
distributed_port=distributed_port,
deterministic_agents=deterministic_agents,
max_sampler_processes_per_worker=max_sampler_processes_per_worker,
**kwargs,
)
self.save_ckpt_after_every_pipeline_stage = save_ckpt_after_every_pipeline_stage
self.actor_critic.train()
self.training_pipeline: TrainingPipeline = config.training_pipeline()
if self.num_workers != 1:
# Ensure that we're only using early stopping criterions in the non-distributed setting.
if any(
stage.early_stopping_criterion is not None
for stage in self.training_pipeline.pipeline_stages
):
raise NotImplementedError(
"Early stopping criterions are currently only allowed when using a single training worker, i.e."
" no distributed (multi-GPU) training. If this is a feature you'd like please create an issue"
" at https://github.com/allenai/allenact/issues or (even better) create a pull request with this "
" feature and we'll be happy to review it."
)
self.optimizer: optim.optimizer.Optimizer = (
self.training_pipeline.optimizer_builder(
params=[p for p in self.actor_critic.parameters() if p.requires_grad]
)
)
# noinspection PyProtectedMember
self.lr_scheduler: Optional[optim.lr_scheduler._LRScheduler] = None
if self.training_pipeline.lr_scheduler_builder is not None:
self.lr_scheduler = self.training_pipeline.lr_scheduler_builder(
optimizer=self.optimizer
)
if self.is_distributed:
# Tracks how many workers have finished their rollout
self.num_workers_done = torch.distributed.PrefixStore( # type:ignore
"num_workers_done", self.store
)
# Tracks the number of steps taken by each worker in current rollout
self.num_workers_steps = torch.distributed.PrefixStore( # type:ignore
"num_workers_steps", self.store
)
self.distributed_preemption_threshold = distributed_preemption_threshold
# Flag for finished worker in current epoch
self.offpolicy_epoch_done = torch.distributed.PrefixStore( # type:ignore
"offpolicy_epoch_done", self.store
)
else:
self.num_workers_done = None
self.num_workers_steps = None
self.distributed_preemption_threshold = 1.0
self.offpolicy_epoch_done = None
# Keeping track of training state
self.tracking_info: Dict[str, List] = defaultdict(lambda: [])
self.former_steps: Optional[int] = None
self.last_log: Optional[int] = None
self.last_save: Optional[int] = None
# The `self._last_aggregated_train_task_metrics` attribute defined
# below is used for early stopping criterion computations
self._last_aggregated_train_task_metrics: ScalarMeanTracker = (
ScalarMeanTracker()
)
self.first_local_worker_id = first_local_worker_id
def advance_seed(
self, seed: Optional[int], return_same_seed_per_worker=False
) -> Optional[int]:
if seed is None:
return seed
seed = (seed ^ (self.training_pipeline.total_steps + 1)) % (
2 ** 31 - 1
) # same seed for all workers
if (not return_same_seed_per_worker) and (
self.mode == TRAIN_MODE_STR or self.mode == TEST_MODE_STR
):
return self.worker_seeds(self.num_workers, seed)[
self.worker_id
] # doesn't modify the current rng state
else:
return self.worker_seeds(1, seed)[0] # doesn't modify the current rng state
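    # Sketch of `advance_seed` (illustrative numbers, assuming seed=7 and total_steps=999):
    # every worker first computes the same intermediate seed (7 ^ 1000) % (2 ** 31 - 1);
    # by default each worker then keeps only its own entry of
    # `worker_seeds(self.num_workers, intermediate_seed)`, so the derived seeds stay
    # synchronized across workers while still differing between them.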
def deterministic_seeds(self) -> None:
if self.seed is not None:
set_seed(self.advance_seed(self.seed)) # known state for all workers
seeds = self.worker_seeds(
self.num_samplers, None
) # use latest seed for workers and update rng state
self.vector_tasks.set_seeds(seeds)
def checkpoint_save(self, pipeline_stage_index: Optional[int] = None) -> str:
model_path = os.path.join(
self.checkpoints_dir,
"exp_{}__stage_{:02d}__steps_{:012d}.pt".format(
self.experiment_name,
self.training_pipeline.current_stage_index
if pipeline_stage_index is None
else pipeline_stage_index,
self.training_pipeline.total_steps,
),
)
save_dict = {
"model_state_dict": self.actor_critic.state_dict(), # type:ignore
"total_steps": self.training_pipeline.total_steps, # Total steps including current stage
"optimizer_state_dict": self.optimizer.state_dict(), # type: ignore
"training_pipeline_state_dict": self.training_pipeline.state_dict(),
"trainer_seed": self.seed,
}
if self.lr_scheduler is not None:
save_dict["scheduler_state"] = cast(
_LRScheduler, self.lr_scheduler
).state_dict()
torch.save(save_dict, model_path)
return model_path
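    # Example of the checkpoint path produced above (illustrative values):
    #   <checkpoints_dir>/exp_myExperiment__stage_00__steps_000000050000.pt
    # where "00" is the (zero-padded) pipeline stage index and the final number is the
    # total number of training steps taken so far.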
def checkpoint_load(
self, ckpt: Union[str, Dict[str, Any]], restart_pipeline: bool = False
) -> Dict[str, Union[Dict[str, Any], torch.Tensor, float, int, str, List]]:
ckpt = super().checkpoint_load(ckpt)
self.training_pipeline.load_state_dict(
cast(Dict[str, Any], ckpt["training_pipeline_state_dict"])
)
if restart_pipeline:
self.training_pipeline.restart_pipeline()
else:
self.seed = cast(int, ckpt["trainer_seed"])
self.optimizer.load_state_dict(ckpt["optimizer_state_dict"]) # type: ignore
if self.lr_scheduler is not None:
self.lr_scheduler.load_state_dict(ckpt["scheduler_state"]) # type: ignore
self.deterministic_seeds()
return ckpt
def _get_loss(self, loss_name) -> AbstractActorCriticLoss:
assert (
loss_name in self.training_pipeline.named_losses
), "undefined referenced loss {}".format(loss_name)
if isinstance(self.training_pipeline.named_losses[loss_name], Builder):
return cast(
Builder[AbstractActorCriticLoss],
self.training_pipeline.named_losses[loss_name],
)()
else:
return cast(
AbstractActorCriticLoss, self.training_pipeline.named_losses[loss_name]
)
def _load_losses(self, stage: PipelineStage):
stage_losses: Dict[str, AbstractActorCriticLoss] = {}
for loss_name in stage.loss_names:
stage_losses[loss_name] = self._get_loss(loss_name)
loss_weights_list = (
stage.loss_weights
if stage.loss_weights is not None
else [1.0] * len(stage.loss_names)
)
stage_loss_weights = {
name: weight for name, weight in zip(stage.loss_names, loss_weights_list)
}
return stage_losses, stage_loss_weights
def _stage_value(self, stage: PipelineStage, field: str, allow_none: bool = False):
if hasattr(stage, field) and getattr(stage, field) is not None:
return getattr(stage, field)
if (
hasattr(self.training_pipeline, field)
and getattr(self.training_pipeline, field) is not None
):
return getattr(self.training_pipeline, field)
if (
hasattr(self.machine_params, field)
and getattr(self.machine_params, field) is not None
):
return getattr(self.machine_params, field)
if allow_none:
return None
else:
raise RuntimeError("missing value for {}".format(field))
@property
def step_count(self):
return self.training_pipeline.current_stage.steps_taken_in_stage
@step_count.setter
def step_count(self, val: int):
self.training_pipeline.current_stage.steps_taken_in_stage = val
@property
def log_interval(self):
return self.training_pipeline.metric_accumulate_interval
@property
def approx_steps(self):
if self.is_distributed:
# the actual number of steps gets synchronized after each rollout
return (
self.step_count - self.former_steps
) * self.num_workers + self.former_steps
else:
return self.step_count # this is actually accurate
def act(self, rollouts: RolloutStorage, dist_wrapper_class: Optional[type] = None):
if self.training_pipeline.current_stage.teacher_forcing is not None:
assert dist_wrapper_class is None
dist_wrapper_class = partial(
TeacherForcingDistr,
action_space=self.actor_critic.action_space,
num_active_samplers=self.num_active_samplers,
approx_steps=self.approx_steps,
teacher_forcing=self.training_pipeline.current_stage.teacher_forcing,
tracking_info=self.tracking_info,
)
actions, actor_critic_output, memory, step_observation = super().act(
rollouts=rollouts, dist_wrapper_class=dist_wrapper_class
)
self.step_count += self.num_active_samplers
return actions, actor_critic_output, memory, step_observation
def advantage_stats(
self, advantages: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Computes the mean and variances of advantages (possibly over multiple workers).
For multiple workers, this method is equivalent to first collecting all versions of
advantages and then computing the mean and variance locally over that.
# Parameters
advantages: Tensors to compute mean and variance over. Assumed to be solely the
worker's local copy of this tensor, the resultant mean and variance will be computed
as though _all_ workers' versions of this tensor were concatenated together in
distributed training.
"""
# Step count has already been updated with the steps from all workers
global_rollout_steps = self.step_count - self.former_steps
if self.is_distributed:
summed_advantages = advantages.sum()
dist.all_reduce(summed_advantages)
mean = summed_advantages / global_rollout_steps
summed_squares = (advantages - mean).pow(2).sum()
dist.all_reduce(summed_squares)
std = (summed_squares / (global_rollout_steps - 1)).sqrt()
else:
mean, std = advantages.mean(), advantages.std()
return mean, std
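    # A worked sketch of the distributed branch above (assuming two workers with local
    # advantage tensors a0 and a1 and N = global_rollout_steps):
    #   mean = (a0.sum() + a1.sum()) / N
    #   std  = sqrt((((a0 - mean) ** 2).sum() + ((a1 - mean) ** 2).sum()) / (N - 1))
    # i.e. the same statistics one would obtain by concatenating a0 and a1 locally and
    # calling `.mean()` / `.std()` (with the usual N - 1 Bessel correction).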
def distributed_weighted_sum(
self,
to_share: Union[torch.Tensor, float, int],
weight: Union[torch.Tensor, float, int],
):
"""Weighted sum of scalar across distributed workers."""
if self.is_distributed:
aggregate = torch.tensor(to_share * weight).to(self.device)
dist.all_reduce(aggregate)
return aggregate.item()
else:
if abs(1 - weight) > 1e-5:
get_logger().warning(
f"Scaling non-distributed value with weight {weight}"
)
return torch.tensor(to_share * weight).item()
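    # Usage sketch for `distributed_weighted_sum` (names below mirror how it is used in
    # `update`): each worker passes its local scalar weighted by its share of the global
    # batch, so the all_reduced result is a batch-size-weighted average across workers:
    #
    #   info["total_loss"] = self.distributed_weighted_sum(
    #       total_loss.item(), bsize / aggregate_bsize
    #   )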
def update(self, rollouts: RolloutStorage):
advantages = rollouts.returns[:-1] - rollouts.value_preds[:-1]
adv_mean, adv_std = self.advantage_stats(advantages)
for e in range(self.training_pipeline.current_stage.update_repeats):
data_generator = rollouts.recurrent_generator(
advantages=advantages,
adv_mean=adv_mean,
adv_std=adv_std,
num_mini_batch=self.training_pipeline.current_stage.num_mini_batch,
)
for bit, batch in enumerate(data_generator):
# masks is always [steps, samplers, 1]:
num_rollout_steps, num_samplers = batch["masks"].shape[:2]
bsize = int(num_rollout_steps * num_samplers)
aggregate_bsize = self.distributed_weighted_sum(bsize, 1)
actor_critic_output, memory = self.actor_critic(
observations=batch["observations"],
memory=batch["memory"],
prev_actions=batch["prev_actions"],
masks=batch["masks"],
)
info: Dict[str, float] = {}
current_pipeline_stage = self.training_pipeline.current_stage
total_loss: Optional[torch.Tensor] = None
for loss_name in self.training_pipeline.current_stage_losses:
loss, loss_weight, loss_update_repeats = (
self.training_pipeline.current_stage_losses[loss_name],
current_pipeline_stage.named_loss_weights[loss_name],
current_pipeline_stage.named_loss_update_repeats[loss_name],
)
if loss_update_repeats is not None and e >= loss_update_repeats:
# Skip losses which should not be repeated more than `loss_update_repeats` times.
continue
loss_return = loss.loss(
step_count=self.step_count,
batch=batch,
actor_critic_output=actor_critic_output,
)
per_epoch_info = {}
if len(loss_return) == 2:
current_loss, current_info = loss_return
elif len(loss_return) == 3:
current_loss, current_info, per_epoch_info = loss_return
else:
raise NotImplementedError
if total_loss is None:
total_loss = loss_weight * current_loss
else:
total_loss = total_loss + loss_weight * current_loss
for key, value in current_info.items():
info[f"{loss_name}/{key}"] = self.distributed_weighted_sum(
value, bsize / aggregate_bsize
)
for key, value in per_epoch_info.items():
value = self.distributed_weighted_sum(
value, bsize / aggregate_bsize
)
if self.training_pipeline.current_stage.update_repeats > 1:
info[f"{loss_name}/{key}_epoch{e:02d}"] = value
info[f"{loss_name}/{key}_combined"] = value
else:
info[f"{loss_name}/{key}"] = value
assert (
total_loss is not None
), "No losses specified for training in stage {}".format(
self.training_pipeline.current_stage_index
)
total_loss_scalar = total_loss.item()
if self.is_distributed:
info["total_loss"] = self.distributed_weighted_sum(
total_loss_scalar, bsize / aggregate_bsize
)
info["total_loss"] = total_loss_scalar
self.tracking_info["losses"].append(("losses", info, bsize))
to_track = {
"lr": self.optimizer.param_groups[0]["lr"],
"rollout_num_mini_batch": self.training_pipeline.current_stage.num_mini_batch,
"rollout_epochs": self.training_pipeline.current_stage.update_repeats,
"global_batch_size": aggregate_bsize,
"worker_batch_size": bsize,
}
for k, v in to_track.items():
self.tracking_info[k].append((k, {k: v}, bsize))
self.backprop_step(
total_loss=total_loss,
local_to_global_batch_size_ratio=bsize / aggregate_bsize,
)
# # TODO Unit test to ensure correctness of distributed infrastructure
# state_dict = self.actor_critic.state_dict()
# keys = sorted(list(state_dict.keys()))
# get_logger().debug(
# "worker {} param 0 {} param -1 {}".format(
# self.worker_id,
# state_dict[keys[0]].flatten()[0],
# state_dict[keys[-1]].flatten()[-1],
# )
# )
def make_offpolicy_iterator(
self, data_iterator_builder: Callable[..., Iterator],
):
stage = self.training_pipeline.current_stage
if self.num_workers == 1:
rollouts_per_worker: Sequence[int] = [self.num_samplers]
else:
rollouts_per_worker = self.num_samplers_per_worker
# common seed for all workers (in case we wish to shuffle the full dataset before iterating on one partition)
seed = self.advance_seed(self.seed, return_same_seed_per_worker=True)
kwargs = stage.offpolicy_component.data_iterator_kwargs_generator(
self.worker_id, rollouts_per_worker, seed
)
offpolicy_iterator = data_iterator_builder(**kwargs)
stage.offpolicy_memory.clear()
if stage.offpolicy_epochs is None:
stage.offpolicy_epochs = 0
else:
stage.offpolicy_epochs += 1
if self.is_distributed:
self.offpolicy_epoch_done.set("offpolicy_epoch_done", str(0))
dist.barrier() # sync
return offpolicy_iterator
def backprop_step(
self, total_loss: torch.Tensor, local_to_global_batch_size_ratio: float = 1.0,
):
self.optimizer.zero_grad() # type: ignore
if isinstance(total_loss, torch.Tensor):
total_loss.backward()
if self.is_distributed:
# From https://github.com/pytorch/pytorch/issues/43135
reductions, all_params = [], []
for p in self.actor_critic.parameters():
# you can also organize grads to larger buckets to make all_reduce more efficient
if p.requires_grad:
if p.grad is None:
p.grad = torch.zeros_like(p.data)
else: # local_global_batch_size_tuple is not None, since we're distributed:
p.grad = p.grad * local_to_global_batch_size_ratio
reductions.append(
dist.all_reduce(p.grad, async_op=True,) # sum
) # synchronize
all_params.append(p)
for reduction, p in zip(reductions, all_params):
reduction.wait()
nn.utils.clip_grad_norm_(
self.actor_critic.parameters(),
self.training_pipeline.current_stage.max_grad_norm, # type: ignore
)
self.optimizer.step() # type: ignore
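    # Sketch of the gradient synchronization above (assuming two workers with local batch
    # sizes b0 and b1, b = b0 + b1): worker i rescales its gradient g_i by b_i / b before
    # the all_reduce sum, so the summed gradient equals (b0 * g0 + b1 * g1) / b, i.e. the
    # gradient of the loss averaged over the combined batch rather than a plain sum.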
def offpolicy_update(
self,
updates: int,
data_iterator: Optional[Iterator],
data_iterator_builder: Callable[..., Iterator],
) -> Iterator:
stage = self.training_pipeline.current_stage
current_steps = 0
if self.is_distributed:
self.num_workers_steps.set("steps", str(0))
dist.barrier()
for e in range(updates):
if data_iterator is None:
data_iterator = self.make_offpolicy_iterator(data_iterator_builder)
try:
batch = next(data_iterator)
except StopIteration:
batch = None
if self.is_distributed:
self.offpolicy_epoch_done.add("offpolicy_epoch_done", 1)
if self.is_distributed:
dist.barrier() # sync after every batch!
if int(self.offpolicy_epoch_done.get("offpolicy_epoch_done")) != 0:
batch = None
if batch is None:
data_iterator = self.make_offpolicy_iterator(data_iterator_builder)
# TODO: (batch, bsize) from iterator instead of waiting for the loss?
batch = next(data_iterator)
batch = to_device_recursively(batch, device=self.device, inplace=True)
info: Dict[str, float] = dict()
info["lr"] = self.optimizer.param_groups[0]["lr"] # type: ignore
bsize: Optional[int] = None
total_loss: Optional[torch.Tensor] = None
for loss_name in stage.offpolicy_named_loss_weights:
loss, loss_weight = (
self.training_pipeline.current_stage_offpolicy_losses[loss_name],
stage.offpolicy_named_loss_weights[loss_name],
)
current_loss, current_info, stage.offpolicy_memory, bsize = loss.loss(
model=self.actor_critic,
batch=batch,
step_count=self.step_count,
memory=stage.offpolicy_memory,
)
if total_loss is None:
total_loss = loss_weight * current_loss
else:
total_loss = total_loss + loss_weight * current_loss
for key in current_info:
info["offpolicy/" + loss_name + "/" + key] = current_info[key]
assert (
total_loss is not None
), "No offline losses specified for training in stage {}".format(
self.training_pipeline.current_stage_index
)
info["offpolicy/total_loss"] = total_loss.item()
info["offpolicy/epoch"] = stage.offpolicy_epochs
self.tracking_info["offpolicy_update"].append(
("offpolicy_update_package", info, bsize)
)
aggregate_bsize = self.distributed_weighted_sum(bsize, 1)
self.backprop_step(
total_loss=total_loss,
local_to_global_batch_size_ratio=bsize / aggregate_bsize,
)
stage.offpolicy_memory = detach_recursively(
input=stage.offpolicy_memory, inplace=True
)
if self.is_distributed:
self.num_workers_steps.add("steps", bsize) # counts samplers x steps
else:
current_steps += bsize
if self.is_distributed:
dist.barrier()
stage.offpolicy_steps_taken_in_stage += int(
self.num_workers_steps.get("steps")
)
dist.barrier()
else:
stage.offpolicy_steps_taken_in_stage += current_steps
return data_iterator
def aggregate_and_send_logging_package(self, tracking_info: Dict[str, List]):
logging_pkg = LoggingPackage(
mode=self.mode,
training_steps=self.training_pipeline.total_steps,
off_policy_steps=self.training_pipeline.total_offpolicy_steps,
pipeline_stage=self.training_pipeline.current_stage_index,
)
self.aggregate_task_metrics(logging_pkg=logging_pkg)
if self.mode == TRAIN_MODE_STR:
            # Technically self.mode should always be "train" here (as this is the training engine);
            # this conditional is defensive.
self._last_aggregated_train_task_metrics.add_scalars(
scalars=logging_pkg.metrics_tracker.means(),
n=logging_pkg.metrics_tracker.counts(),
)
for (info_type, train_info_dict, n) in itertools.chain(*tracking_info.values()):
if n < 0:
get_logger().warning(
f"Obtained a train_info_dict with {n} elements."
f" Full info: ({info_type}, {train_info_dict}, {n})."
)
elif info_type == "losses":
logging_pkg.add_train_info_dict(
train_info_dict={
f"losses/{k}": v for k, v in train_info_dict.items()
},
n=n,
)
else:
logging_pkg.add_train_info_dict(train_info_dict=train_info_dict, n=n)
self.results_queue.put(logging_pkg)
def _save_checkpoint_then_send_checkpoint_for_validation_and_update_last_save_counter(
self, pipeline_stage_index: Optional[int] = None
):
self.deterministic_seeds()
if self.worker_id == self.first_local_worker_id:
model_path = self.checkpoint_save(pipeline_stage_index=pipeline_stage_index)
if self.checkpoints_queue is not None:
self.checkpoints_queue.put(("eval", model_path))
self.last_save = self.training_pipeline.total_steps
def run_pipeline(self, rollouts: RolloutStorage):
self.initialize_rollouts(rollouts)
self.tracking_info.clear()
self.last_log = self.training_pipeline.total_steps
if self.last_save is None:
self.last_save = self.training_pipeline.total_steps
offpolicy_data_iterator: Optional[Iterator] = None
should_save_checkpoints = (
self.checkpoints_dir != ""
and self.training_pipeline.current_stage.save_interval is not None
and self.training_pipeline.current_stage.save_interval > 0
)
already_saved_checkpoint = False
while True:
pipeline_stage_changed = self.training_pipeline.before_rollout(
train_metrics=self._last_aggregated_train_task_metrics
)
self._last_aggregated_train_task_metrics.reset()
            # Here we handle saving a checkpoint after a pipeline stage ends. We do this
            # (1) after every pipeline stage if the `self.save_ckpt_after_every_pipeline_stage`
            # boolean is True, or
            # (2) when we have reached the end of ALL training (i.e. all stages are complete).
            # Saving every `save_interval` steps is handled separately below.
training_is_complete = self.training_pipeline.current_stage is None
if (
should_save_checkpoints
and ( # Might happen if the `save_interval` was hit just previously, see below
not already_saved_checkpoint
)
and pipeline_stage_changed
and ( # Don't save at start
self.training_pipeline.current_stage_index != 0
)
and (self.save_ckpt_after_every_pipeline_stage or training_is_complete)
):
self._save_checkpoint_then_send_checkpoint_for_validation_and_update_last_save_counter(
pipeline_stage_index=self.training_pipeline.current_stage_index - 1
if not training_is_complete
else len(self.training_pipeline.pipeline_stages) - 1
)
already_saved_checkpoint = False
if training_is_complete:
break
if self.is_distributed:
self.num_workers_done.set("done", str(0))
self.num_workers_steps.set("steps", str(0))
# Ensure all workers are done before incrementing num_workers_{steps, done}
dist.barrier()
self.former_steps = self.step_count
for step in range(self.training_pipeline.current_stage.num_steps):
num_paused = self.collect_rollout_step(rollouts=rollouts)
# Make sure we've collected the entire set of tensors (including memory)
if rollouts.num_steps != self.training_pipeline.current_stage.num_steps:
rollouts.unnarrow(unnarrow_to_maximum_size=True)
assert rollouts.num_steps == self.training_pipeline.num_steps
rollouts.narrow(self.training_pipeline.current_stage.num_steps)
if num_paused > 0:
raise NotImplementedError(
"When trying to get a new task from a task sampler (using the `.next_task()` method)"
" the task sampler returned `None`. This is not currently supported during training"
" (and almost certainly a bug in the implementation of the task sampler or in the "
" initialization of the task sampler for training)."
)
if self.is_distributed:
# Preempt stragglers
# Each worker will stop collecting steps for the current rollout whenever a
# 100 * distributed_preemption_threshold percentage of workers are finished collecting their
# rollout steps and we have collected at least 25% but less than 90% of the steps.
num_done = int(self.num_workers_done.get("done"))
if (
num_done
> self.distributed_preemption_threshold * self.num_workers
and 0.25 * self.training_pipeline.current_stage.num_steps
<= step
< 0.9 * self.training_pipeline.current_stage.num_steps
):
get_logger().debug(
"{} worker {} narrowed rollouts after {} steps (out of {}) with {} workers done".format(
self.mode, self.worker_id, rollouts.step, step, num_done
)
)
rollouts.narrow()
break
with torch.no_grad():
actor_critic_output, _ = self.actor_critic(
observations=rollouts.pick_observation_step(-1),
memory=rollouts.pick_memory_step(-1),
prev_actions=su.unflatten(
self.actor_critic.action_space, rollouts.prev_actions[-1:]
),
masks=rollouts.masks[-1:],
)
if self.is_distributed:
# Mark that a worker is done collecting experience
self.num_workers_done.add("done", 1)
self.num_workers_steps.add("steps", self.step_count - self.former_steps)
# Ensure all workers are done before updating step counter
dist.barrier()
ndone = int(self.num_workers_done.get("done"))
assert (
ndone == self.num_workers
), "# workers done {} != # workers {}".format(ndone, self.num_workers)
# get the actual step_count
self.step_count = (
int(self.num_workers_steps.get("steps")) + self.former_steps
)
rollouts.compute_returns(
next_value=actor_critic_output.values.detach(),
use_gae=self.training_pipeline.current_stage.use_gae,
gamma=self.training_pipeline.current_stage.gamma,
tau=self.training_pipeline.current_stage.gae_lambda,
)
self.update(rollouts=rollouts) # here we synchronize
self.training_pipeline.rollout_count += 1
rollouts.after_update()
if self.training_pipeline.current_stage.offpolicy_component is not None:
offpolicy_component = (
self.training_pipeline.current_stage.offpolicy_component
)
offpolicy_data_iterator = self.offpolicy_update(
updates=offpolicy_component.updates,
data_iterator=offpolicy_data_iterator,
data_iterator_builder=offpolicy_component.data_iterator_builder,
)
if self.lr_scheduler is not None:
self.lr_scheduler.step(epoch=self.training_pipeline.total_steps)
if (
self.training_pipeline.total_steps - self.last_log >= self.log_interval
or self.training_pipeline.current_stage.is_complete
):
self.aggregate_and_send_logging_package(
tracking_info=self.tracking_info
)
self.tracking_info.clear()
self.last_log = self.training_pipeline.total_steps
# Here we handle saving a checkpoint every `save_interval` steps, saving after
# a pipeline stage completes is controlled above
if should_save_checkpoints and (
self.training_pipeline.total_steps - self.last_save
>= self.training_pipeline.current_stage.save_interval
):
self._save_checkpoint_then_send_checkpoint_for_validation_and_update_last_save_counter()
already_saved_checkpoint = True
if (
self.training_pipeline.current_stage.advance_scene_rollout_period
is not None
) and (
self.training_pipeline.rollout_count
% self.training_pipeline.current_stage.advance_scene_rollout_period
== 0
):
get_logger().info(
"{} worker {} Force advance tasks with {} rollouts".format(
self.mode, self.worker_id, self.training_pipeline.rollout_count
)
)
self.vector_tasks.next_task(force_advance_scene=True)
self.initialize_rollouts(rollouts)
def train(
self, checkpoint_file_name: Optional[str] = None, restart_pipeline: bool = False
):
assert (
self.mode == TRAIN_MODE_STR
), "train only to be called from a train instance"
training_completed_successfully = False
try:
if checkpoint_file_name is not None:
self.checkpoint_load(checkpoint_file_name, restart_pipeline)
self.run_pipeline(
RolloutStorage(
num_steps=self.training_pipeline.num_steps,
num_samplers=self.num_samplers,
actor_critic=self.actor_critic
if isinstance(self.actor_critic, ActorCriticModel)
else cast(ActorCriticModel, self.actor_critic.module),
)
)
training_completed_successfully = True
except KeyboardInterrupt:
get_logger().info(
"KeyboardInterrupt. Terminating {} worker {}".format(
self.mode, self.worker_id
)
)
except Exception:
get_logger().error(
"Encountered Exception. Terminating {} worker {}".format(
self.mode, self.worker_id
)
)
get_logger().exception(traceback.format_exc())
finally:
if training_completed_successfully:
if self.worker_id == 0:
self.results_queue.put(("train_stopped", 0))
get_logger().info(
"{} worker {} COMPLETE".format(self.mode, self.worker_id)
)
else:
self.results_queue.put(("train_stopped", 1 + self.worker_id))
self.close()
class OnPolicyInference(OnPolicyRLEngine):
def __init__(
self,
config: ExperimentConfig,
results_queue: mp.Queue, # to output aggregated results
checkpoints_queue: mp.Queue, # to write/read (trainer/evaluator) ready checkpoints
checkpoints_dir: str = "",
mode: str = "valid", # or "test"
seed: Optional[int] = None,
deterministic_cudnn: bool = False,
mp_ctx: Optional[BaseContext] = None,
device: Union[str, torch.device, int] = "cpu",
deterministic_agents: bool = False,
worker_id: int = 0,
num_workers: int = 1,
distributed_port: int = 0,
enforce_expert: bool = False,
**kwargs,
):
super().__init__(
experiment_name="",
config=config,
results_queue=results_queue,
checkpoints_queue=checkpoints_queue,
checkpoints_dir=checkpoints_dir,
mode=mode,
seed=seed,
deterministic_cudnn=deterministic_cudnn,
mp_ctx=mp_ctx,
deterministic_agents=deterministic_agents,
device=device,
worker_id=worker_id,
num_workers=num_workers,
distributed_port=distributed_port,
**kwargs,
)
self.enforce_expert = enforce_expert
def run_eval(
self,
checkpoint_file_path: str,
rollout_steps: int = 100,
visualizer: Optional[VizSuite] = None,
update_secs: float = 20.0,
verbose: bool = False,
) -> LoggingPackage:
assert self.actor_critic is not None, "called run_eval with no actor_critic"
ckpt = self.checkpoint_load(checkpoint_file_path)
total_steps = cast(int, ckpt["total_steps"])
rollouts = RolloutStorage(
num_steps=rollout_steps,
num_samplers=self.num_samplers,
actor_critic=cast(ActorCriticModel, self.actor_critic),
)
if visualizer is not None:
assert visualizer.empty()
num_paused = self.initialize_rollouts(rollouts, visualizer=visualizer)
assert num_paused == 0, f"{num_paused} tasks paused when initializing eval"
num_tasks = sum(
self.vector_tasks.command(
"sampler_attr", ["length"] * self.num_active_samplers
)
) + ( # We need to add this as the first tasks have already been sampled
self.num_active_samplers
)
# get_logger().debug(
# "worker {} number of tasks {}".format(self.worker_id, num_tasks)
# )
steps = 0
self.actor_critic.eval()
last_time: float = time.time()
init_time: float = last_time
frames: int = 0
if verbose:
get_logger().info(
f"[{self.mode}] worker {self.worker_id}: running evaluation on {num_tasks} tasks"
f" for ckpt {checkpoint_file_path}"
)
if self.enforce_expert:
dist_wrapper_class = partial(
TeacherForcingDistr,
action_space=self.actor_critic.action_space,
num_active_samplers=None,
approx_steps=None,
teacher_forcing=None,
tracking_info=None,
always_enforce=True,
)
else:
dist_wrapper_class = None
logging_pkg = LoggingPackage(mode=self.mode, training_steps=total_steps)
while self.num_active_samplers > 0:
frames += self.num_active_samplers
self.collect_rollout_step(
rollouts, visualizer=visualizer, dist_wrapper_class=dist_wrapper_class
)
steps += 1
if steps % rollout_steps == 0:
rollouts.after_update()
cur_time = time.time()
if self.num_active_samplers == 0 or cur_time - last_time >= update_secs:
self.aggregate_task_metrics(logging_pkg=logging_pkg)
if verbose:
npending: int
lengths: List[int]
if self.num_active_samplers > 0:
lengths = self.vector_tasks.command(
"sampler_attr", ["length"] * self.num_active_samplers,
)
npending = sum(lengths)
else:
lengths = []
npending = 0
est_time_to_complete = (
"{:.2f}".format(
(
(cur_time - init_time)
* (npending / (num_tasks - npending))
/ 60
)
)
if npending != num_tasks
else "???"
)
get_logger().info(
f"[{self.mode}] worker {self.worker_id}:"
f" for ckpt {checkpoint_file_path}"
f" {frames / (cur_time - init_time):.1f} fps,"
f" {npending}/{num_tasks} tasks pending ({lengths})."
f" ~{est_time_to_complete} min. to complete."
)
if logging_pkg.num_non_empty_metrics_dicts_added != 0:
get_logger().info(
", ".join(
[
f"[{self.mode}] worker {self.worker_id}:"
f" num_{self.mode}_tasks_complete {logging_pkg.num_non_empty_metrics_dicts_added}",
*[
f"{k} {v:.3g}"
for k, v in logging_pkg.metrics_tracker.means().items()
],
]
)
)
last_time = cur_time
get_logger().info(
"worker {}: {} complete, all task samplers paused".format(
self.mode, self.worker_id
)
)
self.vector_tasks.resume_all()
self.vector_tasks.set_seeds(self.worker_seeds(self.num_samplers, self.seed))
self.vector_tasks.reset_all()
self.aggregate_task_metrics(logging_pkg=logging_pkg)
logging_pkg.viz_data = (
visualizer.read_and_reset() if visualizer is not None else None
)
logging_pkg.checkpoint_file_name = checkpoint_file_path
return logging_pkg
@staticmethod
def skip_to_latest(checkpoints_queue: mp.Queue, command: Optional[str], data):
assert (
checkpoints_queue is not None
), "Attempting to process checkpoints queue but this queue is `None`."
cond = True
while cond:
sentinel = ("skip.AUTO.sentinel", time.time())
checkpoints_queue.put(
sentinel
) # valid since a single valid process is the only consumer
forwarded = False
while not forwarded:
new_command: Optional[str]
new_data: Any
(
new_command,
new_data,
) = checkpoints_queue.get() # block until next command arrives
if new_command == command:
data = new_data
elif new_command == sentinel[0]:
assert (
new_data == sentinel[1]
), "wrong sentinel found: {} vs {}".format(new_data, sentinel[1])
forwarded = True
else:
raise ValueError(
"Unexpected command {} with data {}".format(
new_command, new_data
)
)
time.sleep(1)
cond = not checkpoints_queue.empty()
return data
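    # Sketch of the sentinel mechanism above: the (single) consumer pushes a uniquely
    # timestamped sentinel onto the queue and then drains entries until that sentinel
    # reappears, keeping only the newest piece of data matching `command`. E.g. if `data`
    # is ckptA and the queue still holds ("eval", ckptB), the method returns ckptB and
    # leaves the queue empty (checkpoint names are hypothetical).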
def process_checkpoints(self):
assert (
self.mode != TRAIN_MODE_STR
), "process_checkpoints only to be called from a valid or test instance"
assert (
self.checkpoints_queue is not None
), "Attempting to process checkpoints queue but this queue is `None`."
visualizer: Optional[VizSuite] = None
finalized = False
try:
while True:
command: Optional[str]
ckp_file_path: Any
(
command,
ckp_file_path,
) = self.checkpoints_queue.get() # block until first command arrives
# get_logger().debug(
# "{} {} command {} data {}".format(
# self.mode, self.worker_id, command, data
# )
# )
if command == "eval":
if self.num_samplers > 0:
if self.mode == VALID_MODE_STR:
                            # skip to the latest checkpoint, relying on the facts that:
                            # 1. there's only one consumer in valid
                            # 2. there's no quit/exit/close message issued by the runner nor the trainer
ckp_file_path = self.skip_to_latest(
checkpoints_queue=self.checkpoints_queue,
command=command,
data=ckp_file_path,
)
if (
visualizer is None
and self.machine_params.visualizer is not None
):
visualizer = self.machine_params.visualizer
eval_package = self.run_eval(
checkpoint_file_path=ckp_file_path,
visualizer=visualizer,
verbose=True,
update_secs=20 if self.mode == TEST_MODE_STR else 5 * 60,
)
self.results_queue.put(eval_package)
if self.is_distributed:
dist.barrier()
else:
self.results_queue.put(
LoggingPackage(mode=self.mode, training_steps=None,)
)
elif command in ["quit", "exit", "close"]:
finalized = True
break
else:
raise NotImplementedError()
except KeyboardInterrupt:
get_logger().info(
"KeyboardInterrupt. Terminating {} worker {}".format(
self.mode, self.worker_id
)
)
except Exception:
get_logger().error(
"Encountered Exception. Terminating {} worker {}".format(
self.mode, self.worker_id
)
)
get_logger().error(traceback.format_exc())
finally:
if finalized:
if self.mode == TEST_MODE_STR:
self.results_queue.put(("test_stopped", 0))
get_logger().info(
"{} worker {} complete".format(self.mode, self.worker_id)
)
else:
if self.mode == TEST_MODE_STR:
self.results_queue.put(("test_stopped", self.worker_id + 1))
self.close(verbose=self.mode == TEST_MODE_STR)
| ask4help-main | allenact/algorithms/onpolicy_sync/engine.py |
# Original work Copyright (c) Facebook, Inc. and its affiliates.
# Modified work Copyright (c) Allen Institute for AI
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import signal
import time
import traceback
from multiprocessing.connection import Connection
from multiprocessing.context import BaseContext
from multiprocessing.process import BaseProcess
from threading import Thread
from typing import (
Any,
Callable,
List,
Optional,
Sequence,
Set,
Tuple,
Union,
Dict,
Generator,
Iterator,
cast,
)
import numpy as np
from gym.spaces.dict import Dict as SpaceDict
from setproctitle import setproctitle as ptitle
from allenact.base_abstractions.misc import RLStepResult
from allenact.base_abstractions.task import TaskSampler
from allenact.utils.misc_utils import partition_sequence
from allenact.utils.system import get_logger
from allenact.utils.tensor_utils import tile_images
try:
# Use torch.multiprocessing if we can.
# We have yet to find a reason to not use it and
# you are required to use it when sending a torch.Tensor
# between processes
import torch.multiprocessing as mp
except ImportError:
import multiprocessing as mp # type: ignore
DEFAULT_MP_CONTEXT_TYPE = "forkserver"
COMPLETE_TASK_METRICS_KEY = "__AFTER_TASK_METRICS__"
STEP_COMMAND = "step"
NEXT_TASK_COMMAND = "next_task"
RENDER_COMMAND = "render"
CLOSE_COMMAND = "close"
OBSERVATION_SPACE_COMMAND = "observation_space"
ACTION_SPACE_COMMAND = "action_space"
CALL_COMMAND = "call"
SAMPLER_COMMAND = "call_sampler"
ATTR_COMMAND = "attr"
SAMPLER_ATTR_COMMAND = "sampler_attr"
RESET_COMMAND = "reset"
SEED_COMMAND = "seed"
PAUSE_COMMAND = "pause"
RESUME_COMMAND = "resume"
class DelaySignalHandling:
# Modified from https://stackoverflow.com/a/21919644
def __enter__(self):
self.int_signal_received: Optional[Any] = None
self.term_signal_received: Optional[Any] = None
self.old_int_handler = signal.signal(signal.SIGINT, self.int_handler)
self.old_term_handler = signal.signal(signal.SIGTERM, self.term_handler)
def int_handler(self, sig, frame):
self.int_signal_received = (sig, frame)
get_logger().debug("SIGINT received. Delaying KeyboardInterrupt.")
def term_handler(self, sig, frame):
self.term_signal_received = (sig, frame)
get_logger().debug("SIGTERM received. Delaying termination.")
def __exit__(self, type, value, traceback):
signal.signal(signal.SIGINT, self.old_int_handler)
signal.signal(signal.SIGTERM, self.old_term_handler)
if self.term_signal_received:
# For some reason there appear to be cases where the original termination
# handler is not callable. It is unclear to me exactly why this is the case
# but here we add a guard to double check that the handler is callable and,
# if it's not, we re-send the termination signal to the process and let
# the python internals handle it (note that we've already reset the termination
            # handler to what it was originally above in the signal.signal(...) code).
if callable(self.old_term_handler):
self.old_term_handler(*self.term_signal_received)
else:
get_logger().warning(
"Termination handler could not be called after delaying signal handling."
f" Resending the SIGTERM signal. Last (sig, frame) == ({self.term_signal_received})."
)
os.kill(os.getpid(), signal.SIGTERM)
if self.int_signal_received:
if callable(self.old_int_handler):
self.old_int_handler(*self.int_signal_received)
else:
signal.default_int_handler(*self.int_signal_received)
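# Minimal usage sketch for `DelaySignalHandling` (the wrapped function below is hypothetical):
# any SIGINT/SIGTERM arriving inside the `with` block is recorded instead of being handled
# immediately, and the original handlers are invoked only once the block exits, so the
# critical section cannot be interrupted half-way through.
#
#   with DelaySignalHandling():
#       write_to_worker_pipe(payload)  # hypothetical critical section
#   # previous handlers are restored here; a delayed SIGINT/SIGTERM is processed now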
class VectorSampledTasks(object):
"""Vectorized collection of tasks. Creates multiple processes where each
process runs its own TaskSampler. Each process generates one Task from its
TaskSampler at a time and this class allows for interacting with these
tasks in a vectorized manner. When a task on a process completes, the
process samples another task from its task sampler. All the tasks are
synchronized (for step and new_task methods).
# Attributes
make_sampler_fn : function which creates a single TaskSampler.
sampler_fn_args : sequence of dictionaries describing the args
to pass to make_sampler_fn on each individual process.
auto_resample_when_done : automatically sample a new Task from the TaskSampler when
the Task completes. If False, a new Task will not be resampled until all
Tasks on all processes have completed. This functionality is provided for seamless training
of vectorized Tasks.
multiprocessing_start_method : the multiprocessing method used to
spawn worker processes. Valid methods are
        ``{'spawn', 'forkserver', 'fork'}``. ``'forkserver'`` is the
        recommended method as it works well with CUDA. If
        ``'fork'`` is used, the subprocess must be started before
        any other GPU usage.
"""
observation_space: SpaceDict
_workers: List[Union[mp.Process, Thread, BaseProcess]]
_is_waiting: bool
_num_task_samplers: int
_auto_resample_when_done: bool
_mp_ctx: BaseContext
_connection_read_fns: List[Callable[[], Any]]
_connection_write_fns: List[Callable[[Any], None]]
def __init__(
self,
make_sampler_fn: Callable[..., TaskSampler],
sampler_fn_args: Sequence[Dict[str, Any]] = None,
auto_resample_when_done: bool = True,
multiprocessing_start_method: Optional[str] = "forkserver",
mp_ctx: Optional[BaseContext] = None,
should_log: bool = True,
max_processes: Optional[int] = None,
) -> None:
self._is_waiting = False
self._is_closed = True
self.should_log = should_log
self.max_processes = max_processes
assert (
sampler_fn_args is not None and len(sampler_fn_args) > 0
), "number of processes to be created should be greater than 0"
self._num_task_samplers = len(sampler_fn_args)
self._num_processes = (
self._num_task_samplers
if max_processes is None
else min(max_processes, self._num_task_samplers)
)
self._auto_resample_when_done = auto_resample_when_done
assert (multiprocessing_start_method is None) != (
mp_ctx is None
), "Exactly one of `multiprocessing_start_method`, and `mp_ctx` must be not None."
if multiprocessing_start_method is not None:
assert multiprocessing_start_method in self._valid_start_methods, (
"multiprocessing_start_method must be one of {}. Got '{}'"
).format(self._valid_start_methods, multiprocessing_start_method)
self._mp_ctx = mp.get_context(multiprocessing_start_method)
else:
self._mp_ctx = cast(BaseContext, mp_ctx)
self.npaused_per_process = [0] * self._num_processes
self.sampler_index_to_process_ind_and_subprocess_ind: Optional[
List[List[int]]
] = None
self._reset_sampler_index_to_process_ind_and_subprocess_ind()
self._workers: Optional[List] = None
for args in sampler_fn_args:
args["mp_ctx"] = self._mp_ctx
(
self._connection_read_fns,
self._connection_write_fns,
) = self._spawn_workers( # noqa
make_sampler_fn=make_sampler_fn,
sampler_fn_args_list=[
args_list for args_list in self._partition_to_processes(sampler_fn_args)
],
)
self._is_closed = False
for write_fn in self._connection_write_fns:
write_fn((OBSERVATION_SPACE_COMMAND, None))
observation_spaces = [
space for read_fn in self._connection_read_fns for space in read_fn()
]
if any(os is None for os in observation_spaces):
raise NotImplementedError(
"It appears that the `all_observation_spaces_equal`"
" is not True for some task sampler created by"
" VectorSampledTasks. This is not currently supported."
)
if any(observation_spaces[0] != os for os in observation_spaces):
raise NotImplementedError(
"It appears that the observation spaces of the samplers"
" created in VectorSampledTasks are not equal."
" This is not currently supported."
)
self.observation_space = observation_spaces[0]
for write_fn in self._connection_write_fns:
write_fn((ACTION_SPACE_COMMAND, None))
self.action_spaces = [
space for read_fn in self._connection_read_fns for space in read_fn()
]
def _reset_sampler_index_to_process_ind_and_subprocess_ind(self):
self.sampler_index_to_process_ind_and_subprocess_ind = [
[i, j]
for i, part in enumerate(
partition_sequence([1] * self._num_task_samplers, self._num_processes)
)
for j in range(len(part))
]
def _partition_to_processes(self, seq: Union[Iterator, Sequence]):
subparts_list: List[List] = [[] for _ in range(self._num_processes)]
seq = list(seq)
assert len(seq) == len(self.sampler_index_to_process_ind_and_subprocess_ind)
for sampler_index, (process_ind, subprocess_ind) in enumerate(
self.sampler_index_to_process_ind_and_subprocess_ind
):
assert len(subparts_list[process_ind]) == subprocess_ind
subparts_list[process_ind].append(seq[sampler_index])
return subparts_list
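# Illustrative example of the bookkeeping above (numbers are assumptions, not
# taken from any particular experiment): with 5 task samplers and 2 processes,
# `partition_sequence([1] * 5, 2)` splits the samplers into two groups (e.g. of
# sizes 3 and 2), so `sampler_index_to_process_ind_and_subprocess_ind` becomes
# [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1]] and
# `_partition_to_processes([a, b, c, d, e])` returns [[a, b, c], [d, e]].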
@property
def is_closed(self) -> bool:
"""Has the vector task been closed."""
return self._is_closed
@property
def num_unpaused_tasks(self) -> int:
"""Number of unpaused processes.
# Returns
Number of unpaused processes.
"""
return self._num_task_samplers - sum(self.npaused_per_process)
@property
def mp_ctx(self):
"""Get the multiprocessing process used by the vector task.
# Returns
The multiprocessing context.
"""
return self._mp_ctx
@staticmethod
def _task_sampling_loop_worker(
worker_id: Union[int, str],
connection_read_fn: Callable,
connection_write_fn: Callable,
make_sampler_fn: Callable[..., TaskSampler],
sampler_fn_args_list: List[Dict[str, Any]],
auto_resample_when_done: bool,
should_log: bool,
child_pipe: Optional[Connection] = None,
parent_pipe: Optional[Connection] = None,
) -> None:
"""process worker for creating and interacting with the
Tasks/TaskSampler."""
ptitle("VectorSampledTask: {}".format(worker_id))
sp_vector_sampled_tasks = SingleProcessVectorSampledTasks(
make_sampler_fn=make_sampler_fn,
sampler_fn_args_list=sampler_fn_args_list,
auto_resample_when_done=auto_resample_when_done,
should_log=should_log,
)
if parent_pipe is not None:
parent_pipe.close()
try:
while True:
read_input = connection_read_fn()
with DelaySignalHandling():
# Delaying signal handling here is necessary to ensure that we don't
# (when processing a SIGTERM/SIGINT signal) attempt to send data to
# a generator while it is already processing other data.
if len(read_input) == 3:
sampler_index, command, data = read_input
assert (
command != CLOSE_COMMAND
), "Must close all processes at once."
assert (
command != RESUME_COMMAND
), "Must resume all task samplers at once."
if command == PAUSE_COMMAND:
sp_vector_sampled_tasks.pause_at(
sampler_index=sampler_index
)
connection_write_fn("done")
else:
connection_write_fn(
sp_vector_sampled_tasks.command_at(
sampler_index=sampler_index,
command=command,
data=data,
)
)
else:
commands, data_list = read_input
assert (
commands != PAUSE_COMMAND
), "Cannot pause all task samplers at once."
if commands == CLOSE_COMMAND:
sp_vector_sampled_tasks.close()
break
elif commands == RESUME_COMMAND:
sp_vector_sampled_tasks.resume_all()
connection_write_fn("done")
else:
if isinstance(commands, str):
commands = [
commands
] * sp_vector_sampled_tasks.num_unpaused_tasks
connection_write_fn(
sp_vector_sampled_tasks.command(
commands=commands, data_list=data_list
)
)
except KeyboardInterrupt as e:
if should_log:
get_logger().info(f"Worker {worker_id} KeyboardInterrupt")
except Exception as e:
get_logger().error(traceback.format_exc())
raise e
finally:
if child_pipe is not None:
child_pipe.close()
if should_log:
get_logger().info(f"Worker {worker_id} closing.")
def _spawn_workers(
self,
make_sampler_fn: Callable[..., TaskSampler],
sampler_fn_args_list: Sequence[Sequence[Dict[str, Any]]],
) -> Tuple[List[Callable[[], Any]], List[Callable[[Any], None]]]:
parent_connections, worker_connections = zip(
*[self._mp_ctx.Pipe(duplex=True) for _ in range(self._num_processes)]
)
self._workers = []
k = 0
id: Union[int, str]
for id, stuff in enumerate(
zip(worker_connections, parent_connections, sampler_fn_args_list)
):
worker_conn, parent_conn, current_sampler_fn_args_list = stuff # type: ignore
if len(current_sampler_fn_args_list) != 1:
id = "{}({}-{})".format(
id, k, k + len(current_sampler_fn_args_list) - 1
)
k += len(current_sampler_fn_args_list)
if self.should_log:
get_logger().info(
"Starting {}-th VectorSampledTask worker with args {}".format(
id, current_sampler_fn_args_list
)
)
ps = self._mp_ctx.Process( # type: ignore
target=self._task_sampling_loop_worker,
args=(
id,
worker_conn.recv,
worker_conn.send,
make_sampler_fn,
current_sampler_fn_args_list,
self._auto_resample_when_done,
self.should_log,
worker_conn,
parent_conn,
),
)
self._workers.append(ps)
ps.daemon = True
ps.start()
worker_conn.close()
time.sleep(
0.1
) # Useful to ensure things don't lock up when spawning many envs
return (
[p.recv for p in parent_connections],
[p.send for p in parent_connections],
)
def next_task(self, **kwargs):
"""Move to the the next Task for all TaskSamplers.
# Parameters
kwargs : key word arguments passed to the `next_task` function of the samplers.
# Returns
List of initial observations for each of the new tasks.
"""
return self.command(
commands=NEXT_TASK_COMMAND, data_list=[kwargs] * self.num_unpaused_tasks
)
def get_observations(self):
"""Get observations for all unpaused tasks.
# Returns
List of observations for each of the unpaused tasks.
"""
return self.call(["get_observations"] * self.num_unpaused_tasks,)
def command_at(
self, sampler_index: int, command: str, data: Optional[Any] = None
) -> Any:
"""Runs the command on the selected task and returns the result.
# Parameters
sampler_index : Index of the task sampler on which to run the command.
command : The command to run.
data : Optional data associated with the command.
# Returns
Result of the command.
"""
self._is_waiting = True
(
process_ind,
subprocess_ind,
) = self.sampler_index_to_process_ind_and_subprocess_ind[sampler_index]
self._connection_write_fns[process_ind]((subprocess_ind, command, data))
result = self._connection_read_fns[process_ind]()
self._is_waiting = False
return result
def call_at(
self,
sampler_index: int,
function_name: str,
function_args: Optional[List[Any]] = None,
) -> Any:
"""Calls a function (which is passed by name) on the selected task and
returns the result.
# Parameters
sampler_index : Which task to call the function on.
function_name : The name of the function to call on the task.
function_args : Optional function args.
# Returns
Result of calling the function.
"""
return self.command_at(
sampler_index=sampler_index,
command=CALL_COMMAND,
data=(function_name, function_args),
)
def next_task_at(self, sampler_index: int) -> List[RLStepResult]:
"""Move to the the next Task from the TaskSampler in index_process
process in the vector.
# Parameters
index_process : Index of the process to be reset.
# Returns
List of length one containing the observations the newly sampled task.
"""
return [
self.command_at(
sampler_index=sampler_index, command=NEXT_TASK_COMMAND, data=None
)
]
def step_at(self, sampler_index: int, action: Any) -> List[RLStepResult]:
"""Step in the index_process task in the vector.
# Parameters
sampler_index : Index of the sampler to be reset.
action : The action to take.
# Returns
List containing the output of step method on the task in the indexed process.
"""
return [
self.command_at(
sampler_index=sampler_index, command=STEP_COMMAND, data=action
)
]
def async_step(self, actions: Sequence[Any]) -> None:
"""Asynchronously step in the vectorized Tasks.
# Parameters
actions : actions to be performed in the vectorized Tasks.
"""
self._is_waiting = True
for write_fn, action in zip(
self._connection_write_fns, self._partition_to_processes(actions)
):
write_fn((STEP_COMMAND, action))
def wait_step(self) -> List[Dict[str, Any]]:
"""Wait until all the asynchronized processes have synchronized."""
observations = []
for read_fn in self._connection_read_fns:
observations.extend(read_fn())
self._is_waiting = False
return observations
def step(self, actions: Sequence[Any]):
"""Perform actions in the vectorized tasks.
# Parameters
actions: List of size _num_samplers containing action to be taken in each task.
# Returns
List of outputs from the step method of tasks.
"""
self.async_step(actions)
return self.wait_step()
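# Usage sketch (an illustration, not part of the original API docs): `step` is
# simply `async_step` followed by `wait_step`, so a training loop may either
# call `step(actions)` directly or overlap work between the two calls, e.g.
#   vector_tasks.async_step(actions)
#   other_work()  # hypothetical helper run while workers step
#   step_results = vector_tasks.wait_step()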
def reset_all(self):
"""Reset all task samplers to their initial state (except for the RNG
seed)."""
self.command(commands=RESET_COMMAND, data_list=None)
def set_seeds(self, seeds: List[int]):
"""Sets new tasks' RNG seeds.
# Parameters
seeds: List of size _num_samplers containing new RNG seeds.
"""
self.command(commands=SEED_COMMAND, data_list=seeds)
def close(self) -> None:
if self._is_closed:
return
if self._is_waiting:
for read_fn in self._connection_read_fns:
try:
read_fn()
except Exception:
pass
for write_fn in self._connection_write_fns:
try:
write_fn((CLOSE_COMMAND, None))
except Exception:
pass
for process in self._workers:
try:
process.join(timeout=0.1)
except Exception:
pass
self._is_closed = True
def pause_at(self, sampler_index: int) -> None:
"""Pauses computation on the Task in process `index` without destroying
the Task. This is useful for not needing to call steps on all Tasks
when only some are active (for example during the last samples of
running eval).
# Parameters
index : which process to pause. All indexes after this
one will be shifted down by one.
"""
if self._is_waiting:
for read_fn in self._connection_read_fns:
read_fn()
(
process_ind,
subprocess_ind,
) = self.sampler_index_to_process_ind_and_subprocess_ind[sampler_index]
self.command_at(sampler_index=sampler_index, command=PAUSE_COMMAND, data=None)
for i in range(
sampler_index + 1, len(self.sampler_index_to_process_ind_and_subprocess_ind)
):
other_process_and_sub_process_inds = self.sampler_index_to_process_ind_and_subprocess_ind[
i
]
if other_process_and_sub_process_inds[0] == process_ind:
other_process_and_sub_process_inds[1] -= 1
else:
break
self.sampler_index_to_process_ind_and_subprocess_ind.pop(sampler_index)
self.npaused_per_process[process_ind] += 1
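# Illustrative example (assumed numbers): if the samplers map to processes as
# [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1]] and `pause_at(1)` is called, the
# later entries in the same process have their subprocess index decremented,
# the paused entry is removed (giving [[0, 0], [0, 1], [1, 0], [1, 1]]), and
# `npaused_per_process[0]` is incremented to 1.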
def resume_all(self) -> None:
"""Resumes any paused processes."""
self._is_waiting = True
for connection_write_fn in self._connection_write_fns:
connection_write_fn((RESUME_COMMAND, None))
for connection_read_fn in self._connection_read_fns:
connection_read_fn()
self._is_waiting = False
self._reset_sampler_index_to_process_ind_and_subprocess_ind()
for i in range(len(self.npaused_per_process)):
self.npaused_per_process[i] = 0
def command(
self, commands: Union[List[str], str], data_list: Optional[List]
) -> List[Any]:
""""""
self._is_waiting = True
if isinstance(commands, str):
commands = [commands] * self.num_unpaused_tasks
if data_list is None:
data_list = [None] * self.num_unpaused_tasks
for write_fn, subcommands, subdata_list in zip(
self._connection_write_fns,
self._partition_to_processes(commands),
self._partition_to_processes(data_list),
):
write_fn((subcommands, subdata_list))
results = []
for read_fn in self._connection_read_fns:
results.extend(read_fn())
self._is_waiting = False
return results
def call(
self,
function_names: Union[str, List[str]],
function_args_list: Optional[List[Any]] = None,
) -> List[Any]:
"""Calls a list of functions (which are passed by name) on the
corresponding task (by index).
# Parameters
function_names : The names of the functions to call on the tasks.
function_args_list : List of function args for each function.
If provided, `function_args_list` should have the same length as `function_names`.
# Returns
List of results of calling the functions.
"""
self._is_waiting = True
if isinstance(function_names, str):
function_names = [function_names] * self.num_unpaused_tasks
if function_args_list is None:
function_args_list = [None] * len(function_names)
assert len(function_names) == len(function_args_list)
func_names_and_args_list = zip(function_names, function_args_list)
for write_fn, func_names_and_args in zip(
self._connection_write_fns,
self._partition_to_processes(func_names_and_args_list),
):
write_fn((CALL_COMMAND, func_names_and_args))
results = []
for read_fn in self._connection_read_fns:
results.extend(read_fn())
self._is_waiting = False
return results
def attr_at(self, sampler_index: int, attr_name: str) -> Any:
"""Gets the attribute (specified by name) on the selected task and
returns it.
# Parameters
sampler_index : Which task to get the attribute from.
attr_name : The name of the attribute to retrieve.
# Returns
The value of the attribute.
"""
return self.command_at(sampler_index, command=ATTR_COMMAND, data=attr_name)
def attr(self, attr_names: Union[List[str], str]) -> List[Any]:
"""Gets the attributes (specified by name) on the tasks.
# Parameters
attr_names : The names of the attributes to retrieve from the tasks.
# Returns
List of attribute values.
"""
if isinstance(attr_names, str):
attr_names = [attr_names] * self.num_unpaused_tasks
return self.command(commands=ATTR_COMMAND, data_list=attr_names)
def render(
self, mode: str = "human", *args, **kwargs
) -> Union[np.ndarray, None, List[np.ndarray]]:
"""Render observations from all Tasks in a tiled image or list of
images."""
images = self.command(
commands=RENDER_COMMAND,
data_list=[(args, {"mode": "rgb", **kwargs})] * self.num_unpaused_tasks,
)
if mode == "raw_rgb_list":
return images
tile = tile_images(images)
if mode == "human":
import cv2
cv2.imshow("vectask", tile[:, :, ::-1])
cv2.waitKey(1)
return None
elif mode == "rgb_array":
return tile
else:
raise NotImplementedError
@property
def _valid_start_methods(self) -> Set[str]:
return {"forkserver", "spawn", "fork"}
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
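# Example usage (a minimal sketch; `my_make_sampler_fn` and its argument
# dictionaries are assumptions for illustration and are not defined in this
# module):
#
#   vector_tasks = VectorSampledTasks(
#       make_sampler_fn=my_make_sampler_fn,
#       sampler_fn_args=[{"seed": i} for i in range(4)],
#       multiprocessing_start_method="forkserver",
#   )
#   observations = vector_tasks.get_observations()
#   step_results = vector_tasks.step(actions=[0, 1, 2, 3])
#   vector_tasks.close()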
class SingleProcessVectorSampledTasks(object):
"""Vectorized collection of tasks.
Simultaneously handles the state of multiple TaskSamplers and their associated tasks.
Allows for interacting with these tasks in a vectorized manner. When a task completes,
another task is sampled from the appropriate task sampler. All the tasks are
synchronized (for step and new_task methods).
# Attributes
make_sampler_fn : function which creates a single TaskSampler.
sampler_fn_args : sequence of dictionaries describing the args
to pass to make_sampler_fn on each individual process.
auto_resample_when_done : automatically sample a new Task from the TaskSampler when
the Task completes. If False, a new Task will not be resampled until all
Tasks on all processes have completed. This functionality is provided for seamless training
of vectorized Tasks.
"""
observation_space: SpaceDict
_vector_task_generators: List[Generator]
_num_task_samplers: int
_auto_resample_when_done: bool
def __init__(
self,
make_sampler_fn: Callable[..., TaskSampler],
sampler_fn_args_list: Sequence[Dict[str, Any]] = None,
auto_resample_when_done: bool = True,
should_log: bool = True,
) -> None:
self._is_closed = True
assert (
sampler_fn_args_list is not None and len(sampler_fn_args_list) > 0
), "number of processes to be created should be greater than 0"
self._num_task_samplers = len(sampler_fn_args_list)
self._auto_resample_when_done = auto_resample_when_done
self.should_log = should_log
self._vector_task_generators: List[Generator] = self._create_generators(
make_sampler_fn=make_sampler_fn,
sampler_fn_args=[{"mp_ctx": None, **args} for args in sampler_fn_args_list],
)
self._is_closed = False
observation_spaces = [
vsi.send((OBSERVATION_SPACE_COMMAND, None))
for vsi in self._vector_task_generators
]
if any(os is None for os in observation_spaces):
raise NotImplementedError(
"It appears that the `all_observation_spaces_equal`"
" is not True for some task sampler created by"
" VectorSampledTasks. This is not currently supported."
)
if any(observation_spaces[0] != os for os in observation_spaces):
raise NotImplementedError(
"It appears that the observation spaces of the samplers"
" created in VectorSampledTasks are not equal."
" This is not currently supported."
)
self.observation_space = observation_spaces[0]
self.action_spaces = [
vsi.send((ACTION_SPACE_COMMAND, None))
for vsi in self._vector_task_generators
]
self._paused: List[Tuple[int, Generator]] = []
@property
def is_closed(self) -> bool:
"""Has the vector task been closed."""
return self._is_closed
@property
def mp_ctx(self) -> Optional[BaseContext]:
return None
@property
def num_unpaused_tasks(self) -> int:
"""Number of unpaused processes.
# Returns
Number of unpaused processes.
"""
return self._num_task_samplers - len(self._paused)
@staticmethod
def _task_sampling_loop_generator_fn(
worker_id: int,
make_sampler_fn: Callable[..., TaskSampler],
sampler_fn_args: Dict[str, Any],
auto_resample_when_done: bool,
should_log: bool,
) -> Generator:
"""Generator for working with Tasks/TaskSampler."""
task_sampler = make_sampler_fn(**sampler_fn_args)
current_task = task_sampler.next_task()
if current_task is None:
raise RuntimeError(
"Newly created task sampler had `None` as it's first task. This likely means that"
" it was not provided with any tasks to generate. This can happen if, e.g., during testing"
" you have started more processes than you had tasks to test. Currently this is not supported:"
" every task sampler must be able to generate at least one task."
)
try:
command, data = yield "started"
while command != CLOSE_COMMAND:
if command == STEP_COMMAND:
step_result: RLStepResult = current_task.step(data)
if current_task.is_done():
metrics = current_task.metrics()
if metrics is not None and len(metrics) != 0:
if step_result.info is None:
step_result = step_result.clone({"info": {}})
step_result.info[COMPLETE_TASK_METRICS_KEY] = metrics
if auto_resample_when_done:
current_task = task_sampler.next_task()
if current_task is None:
step_result = step_result.clone({"observation": None})
else:
step_result = step_result.clone(
{"observation": current_task.get_observations()}
)
command, data = yield step_result
elif command == NEXT_TASK_COMMAND:
if data is not None:
current_task = task_sampler.next_task(**data)
else:
current_task = task_sampler.next_task()
observations = current_task.get_observations()
command, data = yield observations
elif command == RENDER_COMMAND:
command, data = yield current_task.render(*data[0], **data[1])
elif (
command == OBSERVATION_SPACE_COMMAND
or command == ACTION_SPACE_COMMAND
):
res = getattr(current_task, command)
command, data = yield res
elif command == CALL_COMMAND:
function_name, function_args = data
if function_args is None or len(function_args) == 0:
result = getattr(current_task, function_name)()
else:
result = getattr(current_task, function_name)(*function_args)
command, data = yield result
elif command == SAMPLER_COMMAND:
function_name, function_args = data
if function_args is None or len(function_args) == 0:
result = getattr(task_sampler, function_name)()
else:
result = getattr(task_sampler, function_name)(*function_args)
command, data = yield result
elif command == ATTR_COMMAND:
property_name = data
result = getattr(current_task, property_name)
command, data = yield result
elif command == SAMPLER_ATTR_COMMAND:
property_name = data
result = getattr(task_sampler, property_name)
command, data = yield result
elif command == RESET_COMMAND:
task_sampler.reset()
current_task = task_sampler.next_task()
if current_task is None:
raise RuntimeError(
"After resetting the task sampler it seems to have"
" no new tasks (the `task_sampler.next_task()` call"
" returned `None` after the reset). This suggests that"
" the task sampler's reset method was not implemented"
f" correctly (task sampler type is {type(task_sampler)})."
)
command, data = yield "done"
elif command == SEED_COMMAND:
task_sampler.set_seed(data)
command, data = yield "done"
else:
raise NotImplementedError()
except KeyboardInterrupt:
if should_log:
get_logger().info(
"SingleProcessVectorSampledTask {} KeyboardInterrupt".format(
worker_id
)
)
except Exception as e:
get_logger().error(traceback.format_exc())
raise e
finally:
if should_log:
get_logger().info(
"SingleProcessVectorSampledTask {} closing.".format(worker_id)
)
task_sampler.close()
def _create_generators(
self,
make_sampler_fn: Callable[..., TaskSampler],
sampler_fn_args: Sequence[Dict[str, Any]],
) -> List[Generator]:
generators = []
for id, current_sampler_fn_args in enumerate(sampler_fn_args):
if self.should_log:
get_logger().info(
"Starting {}-th SingleProcessVectorSampledTasks generator with args {}".format(
id, current_sampler_fn_args
)
)
generators.append(
self._task_sampling_loop_generator_fn(
worker_id=id,
make_sampler_fn=make_sampler_fn,
sampler_fn_args=current_sampler_fn_args,
auto_resample_when_done=self._auto_resample_when_done,
should_log=self.should_log,
)
)
if next(generators[-1]) != "started":
raise RuntimeError("Generator failed to start.")
return generators
def next_task(self, **kwargs):
"""Move to the the next Task for all TaskSamplers.
# Parameters
kwargs : key word arguments passed to the `next_task` function of the samplers.
# Returns
List of initial observations for each of the new tasks.
"""
return [
g.send((NEXT_TASK_COMMAND, kwargs)) for g in self._vector_task_generators
]
def get_observations(self):
"""Get observations for all unpaused tasks.
# Returns
List of observations for each of the unpaused tasks.
"""
return self.call(["get_observations"] * self.num_unpaused_tasks,)
def next_task_at(self, index_process: int) -> List[RLStepResult]:
"""Move to the the next Task from the TaskSampler in index_process
process in the vector.
# Parameters
index_process : Index of the generator to be reset.
# Returns
List of length one containing the observations the newly sampled task.
"""
return [
self._vector_task_generators[index_process].send((NEXT_TASK_COMMAND, None))
]
def step_at(self, index_process: int, action: int) -> List[RLStepResult]:
"""Step in the index_process task in the vector.
# Parameters
index_process : Index of the process to be reset.
action : The action to take.
# Returns
List containing the output of step method on the task in the indexed process.
"""
return self._vector_task_generators[index_process].send((STEP_COMMAND, action))
def step(self, actions: List[List[int]]):
"""Perform actions in the vectorized tasks.
# Parameters
actions: List of size _num_samplers containing action to be taken in each task.
# Returns
List of outputs from the step method of tasks.
"""
return [
g.send((STEP_COMMAND, action))
for g, action in zip(self._vector_task_generators, actions)
]
def reset_all(self):
"""Reset all task samplers to their initial state (except for the RNG
seed)."""
return [g.send((RESET_COMMAND, None)) for g in self._vector_task_generators]
def set_seeds(self, seeds: List[int]):
"""Sets new tasks' RNG seeds.
# Parameters
seeds: List of size _num_samplers containing new RNG seeds.
"""
return [
g.send((SEED_COMMAND, seed))
for g, seed in zip(self._vector_task_generators, seeds)
]
def close(self) -> None:
if self._is_closed:
return
for g in self._vector_task_generators:
try:
try:
g.send((CLOSE_COMMAND, None))
except StopIteration:
pass
except KeyboardInterrupt:
pass
self._is_closed = True
def pause_at(self, sampler_index: int) -> None:
"""Pauses computation on the Task in process `index` without destroying
the Task. This is useful for not needing to call steps on all Tasks
when only some are active (for example during the last samples of
running eval).
# Parameters
index : which process to pause. All indexes after this
one will be shifted down by one.
"""
generator = self._vector_task_generators.pop(sampler_index)
self._paused.append((sampler_index, generator))
def resume_all(self) -> None:
"""Resumes any paused processes."""
for index, generator in reversed(self._paused):
self._vector_task_generators.insert(index, generator)
self._paused = []
def command_at(
self, sampler_index: int, command: str, data: Optional[Any] = None
) -> Any:
"""Calls a function (which is passed by name) on the selected task and
returns the result.
# Parameters
index : Which task to call the function on.
function_name : The name of the function to call on the task.
function_args : Optional function args.
# Returns
Result of calling the function.
"""
return self._vector_task_generators[sampler_index].send((command, data))
def command(
self, commands: Union[List[str], str], data_list: Optional[List]
) -> List[Any]:
""""""
if isinstance(commands, str):
commands = [commands] * self.num_unpaused_tasks
if data_list is None:
data_list = [None] * self.num_unpaused_tasks
return [
g.send((command, data))
for g, command, data in zip(
self._vector_task_generators, commands, data_list
)
]
def call_at(
self,
sampler_index: int,
function_name: str,
function_args: Optional[List[Any]] = None,
) -> Any:
"""Calls a function (which is passed by name) on the selected task and
returns the result.
# Parameters
sampler_index : Which task to call the function on.
function_name : The name of the function to call on the task.
function_args : Optional function args.
# Returns
Result of calling the function.
"""
return self._vector_task_generators[sampler_index].send(
(CALL_COMMAND, (function_name, function_args))
)
def call(
self,
function_names: Union[str, List[str]],
function_args_list: Optional[List[Any]] = None,
) -> List[Any]:
"""Calls a list of functions (which are passed by name) on the
corresponding task (by index).
# Parameters
function_names : The names of the functions to call on the tasks.
function_args_list : List of function args for each function.
If provided, `function_args_list` should have the same length as `function_names`.
# Returns
List of results of calling the functions.
"""
if isinstance(function_names, str):
function_names = [function_names] * self.num_unpaused_tasks
if function_args_list is None:
function_args_list = [None] * len(function_names)
assert len(function_names) == len(function_args_list)
return [
g.send((CALL_COMMAND, args))
for g, args in zip(
self._vector_task_generators, zip(function_names, function_args_list)
)
]
def attr_at(self, sampler_index: int, attr_name: str) -> Any:
"""Gets the attribute (specified by name) on the selected task and
returns it.
# Parameters
sampler_index : Which task to get the attribute from.
attr_name : The name of the attribute to retrieve.
# Returns
The value of the attribute.
"""
return self._vector_task_generators[sampler_index].send(
(ATTR_COMMAND, attr_name)
)
def attr(self, attr_names: Union[List[str], str]) -> List[Any]:
"""Gets the attributes (specified by name) on the tasks.
# Parameters
attr_names : The names of the attributes to retrieve from the tasks.
# Returns
List of attribute values.
"""
if isinstance(attr_names, str):
attr_names = [attr_names] * self.num_unpaused_tasks
return [
g.send((ATTR_COMMAND, attr_name))
for g, attr_name in zip(self._vector_task_generators, attr_names)
]
def render(
self, mode: str = "human", *args, **kwargs
) -> Union[np.ndarray, None, List[np.ndarray]]:
"""Render observations from all Tasks in a tiled image or a list of
images."""
images = [
g.send((RENDER_COMMAND, (args, {"mode": "rgb", **kwargs})))
for g in self._vector_task_generators
]
if mode == "raw_rgb_list":
return images
for index, _ in reversed(self._paused):
images.insert(index, np.zeros_like(images[0]))
tile = tile_images(images)
if mode == "human":
import cv2
cv2.imshow("vectask", tile[:, :, ::-1])
cv2.waitKey(1)
return None
elif mode == "rgb_array":
return tile
else:
raise NotImplementedError
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
| ask4help-main | allenact/algorithms/onpolicy_sync/vector_sampled_tasks.py |
# Original work Copyright (c) Facebook, Inc. and its affiliates.
# Modified work Copyright (c) Allen Institute for AI
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
from collections import defaultdict
from typing import Union, List, Dict, Tuple, DefaultDict, Sequence, cast, Optional
import numpy as np
import torch
from allenact.algorithms.onpolicy_sync.policy import (
ActorCriticModel,
FullMemorySpecType,
ObservationType,
ActionType,
)
from allenact.base_abstractions.misc import Memory
from allenact.utils.system import get_logger
import allenact.utils.spaces_utils as su
class RolloutStorage(object):
"""Class for storing rollout information for RL trainers."""
FLATTEN_SEPARATOR: str = "._AUTOFLATTEN_."
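# Illustrative note (an assumption made explicit, matching `insert_tensors`
# below): nested observations are stored under flattened keys joined by
# FLATTEN_SEPARATOR, e.g. {"rgb": {"camera": tensor}} is stored under the key
# "rgb._AUTOFLATTEN_.camera" and mapped back to the path ["rgb", "camera"]
# via `flattened_to_unflattened`.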
def __init__(
self,
num_steps: int,
num_samplers: int,
actor_critic: ActorCriticModel,
only_store_first_and_last_in_memory: bool = True,
):
self.num_steps = num_steps
self.only_store_first_and_last_in_memory = only_store_first_and_last_in_memory
self.flattened_to_unflattened: Dict[str, Dict[str, List[str]]] = {
"memory": dict(),
"observations": dict(),
}
self.unflattened_to_flattened: Dict[str, Dict[Tuple[str, ...], str]] = {
"memory": dict(),
"observations": dict(),
}
self.dim_names = ["step", "sampler", None]
self.memory: Memory = self.create_memory(
actor_critic.recurrent_memory_specification,
num_samplers,
first_and_last_only=only_store_first_and_last_in_memory,
)
self.observations: Memory = Memory()
self.value_preds: Optional[torch.Tensor] = None
self.returns: Optional[torch.Tensor] = None
self.rewards: Optional[torch.Tensor] = None
self.action_log_probs: Optional[torch.Tensor] = None
self.masks = torch.zeros(num_steps + 1, num_samplers, 1)
self.action_space = actor_critic.action_space
action_flat_dim = su.flatdim(self.action_space)
self.actions = torch.zeros(num_steps, num_samplers, action_flat_dim,)
self.prev_actions = torch.zeros(num_steps + 1, num_samplers, action_flat_dim,)
self.step = 0
self.unnarrow_data: DefaultDict[
str, Union[int, torch.Tensor, Dict]
] = defaultdict(dict)
self.permanent_unnarrow_data: DefaultDict[
str, Union[int, torch.Tensor, Dict]
] = defaultdict(dict)
self.device = torch.device("cpu")
def create_memory(
self,
spec: Optional[FullMemorySpecType],
num_samplers: int,
first_and_last_only: bool = False,
) -> Memory:
if spec is None:
return Memory()
memory = Memory()
for key in spec:
dims_template, dtype = spec[key]
dim_names = ["step"] + [d[0] for d in dims_template]
sampler_dim = dim_names.index("sampler")
if not first_and_last_only:
all_dims = [self.num_steps + 1] + [d[1] for d in dims_template]
else:
all_dims = [2] + [d[1] for d in dims_template]
all_dims[sampler_dim] = num_samplers
memory.check_append(
key=key,
tensor=torch.zeros(*all_dims, dtype=dtype),
sampler_dim=sampler_dim,
)
self.flattened_to_unflattened["memory"][key] = [key]
self.unflattened_to_flattened["memory"][(key,)] = key
return memory
def to(self, device: torch.device):
self.observations.to(device)
self.memory.to(device)
self.actions = self.actions.to(device)
self.prev_actions = self.prev_actions.to(device)
self.masks = self.masks.to(device)
if self.rewards is not None:
self.rewards = self.rewards.to(device)
self.value_preds = self.value_preds.to(device)
self.returns = self.returns.to(device)
self.action_log_probs = self.action_log_probs.to(device)
self.device = device
def insert_observations(
self, observations: ObservationType, time_step: int = 0,
):
self.insert_tensors(
storage_name="observations", unflattened=observations, time_step=time_step
)
def insert_memory(
self, memory: Optional[Memory], time_step: int,
):
if memory is None:
assert len(self.memory) == 0
return
if self.only_store_first_and_last_in_memory and time_step > 0:
time_step = 1
self.insert_tensors(
storage_name="memory", unflattened=memory, time_step=time_step
)
def insert_tensors(
self,
storage_name: str,
unflattened: Union[ObservationType, Memory],
prefix: str = "",
path: Sequence[str] = (),
time_step: int = 0,
):
storage = getattr(self, storage_name)
path = list(path)
for name in unflattened:
current_data = unflattened[name]
if isinstance(current_data, Dict):
self.insert_tensors(
storage_name,
cast(ObservationType, current_data),
prefix=prefix + name + self.FLATTEN_SEPARATOR,
path=path + [name],
time_step=time_step,
)
continue
sampler_dim = self.dim_names.index("sampler")
if isinstance(current_data, tuple):
sampler_dim = current_data[1]
current_data = current_data[0]
flatten_name = prefix + name
if flatten_name not in storage:
assert storage_name == "observations"
storage[flatten_name] = (
torch.zeros_like(current_data) # type:ignore
.repeat(
self.num_steps + 1, # required for observations (and memory)
*(1 for _ in range(len(current_data.shape))),
)
.to(self.device),
sampler_dim,
)
assert (
flatten_name not in self.flattened_to_unflattened[storage_name]
), "new flattened name {} already existing in flattened spaces[{}]".format(
flatten_name, storage_name
)
self.flattened_to_unflattened[storage_name][flatten_name] = path + [
name
]
self.unflattened_to_flattened[storage_name][
tuple(path + [name])
] = flatten_name
if storage_name == "observations":
# current_data has a step dimension
assert time_step >= 0
storage[flatten_name][0][time_step : time_step + 1].copy_(current_data)
else:
# current_data does not have a step dimension
storage[flatten_name][0][time_step].copy_(current_data)
def create_tensor_storage(
self, num_steps: int, template: torch.Tensor
) -> torch.Tensor:
return torch.cat([torch.zeros_like(template).to(self.device)] * num_steps)
def insert(
self,
observations: ObservationType,
memory: Optional[Memory],
actions: torch.Tensor,
action_log_probs: torch.Tensor,
value_preds: torch.Tensor,
rewards: torch.Tensor,
masks: torch.Tensor,
):
self.insert_observations(observations, time_step=self.step + 1)
self.insert_memory(memory, time_step=self.step + 1)
assert actions.shape == self.actions[self.step].shape
self.actions[self.step].copy_(actions) # type:ignore
self.prev_actions[self.step + 1].copy_(actions) # type:ignore
self.masks[self.step + 1].copy_(masks) # type:ignore
if self.rewards is None:
# We delay the instantiation of storage for `rewards`, `value_preds`, `action_log_probs` and `returns`
# as we do not, a priori, know what shape these will be. For instance, if we are in a multi-agent setting
# then there may be many rewards (one for each agent).
self.rewards = self.create_tensor_storage(
self.num_steps, rewards.unsqueeze(0)
) # add step
value_returns_template = value_preds.unsqueeze(0) # add step
self.value_preds = self.create_tensor_storage(
self.num_steps + 1, value_returns_template
)
self.returns = self.create_tensor_storage(
self.num_steps + 1, value_returns_template
)
self.action_log_probs = self.create_tensor_storage(
self.num_steps, action_log_probs.unsqueeze(0)
)
self.value_preds[self.step].copy_(value_preds) # type:ignore
self.rewards[self.step].copy_(rewards) # type:ignore
self.action_log_probs[self.step].copy_( # type:ignore
action_log_probs
)
self.step = (self.step + 1) % self.num_steps
def sampler_select(self, keep_list: Sequence[int]):
keep_list = list(keep_list)
if self.actions.shape[1] == len(keep_list): # samplers dim
return # we are keeping everything, no need to copy
self.observations = self.observations.sampler_select(keep_list)
self.memory = self.memory.sampler_select(keep_list)
self.actions = self.actions[:, keep_list]
self.prev_actions = self.prev_actions[:, keep_list]
self.action_log_probs = self.action_log_probs[:, keep_list]
self.masks = self.masks[:, keep_list]
if self.rewards is not None:
self.value_preds = self.value_preds[:, keep_list]
self.rewards = self.rewards[:, keep_list]
self.returns = self.returns[:, keep_list]
def narrow(self, num_steps=None):
"""This function is used by the training engine to temporarily (after
one interrupted rollout in decentralized distributed settings, without
arguments) or permanently (for a training stage with shorter horizon,
with arguments) narrow the step dimension in the storage.
The reverse operation, `unnarrow`, is automatically called by
`after_update` (without arguments) or when the rollout length
varies in the training pipeline (with arguments).
"""
unnarrow_data = (
self.unnarrow_data if num_steps is None else self.permanent_unnarrow_data
)
assert len(unnarrow_data) == 0, "attempting to narrow narrowed rollouts"
# Check if we're done
if self.step == 0 and num_steps is None:
get_logger().warning("Called narrow with self.step == 0")
return
elif num_steps is not None and num_steps == self.num_steps:
return
base_length = self.step if num_steps is None else num_steps
for storage_name in ["observations", "memory"]:
storage: Memory = getattr(self, storage_name)
for key in storage:
unnarrow_data[storage_name][key] = storage.tensor(key)
if (
storage_name == "memory"
and self.only_store_first_and_last_in_memory
and (self.step > 0 or num_steps is not None)
):
length = 2
else:
length = base_length + 1
storage[key] = (
storage.tensor(key).narrow(dim=0, start=0, length=length),
storage.sampler_dim(key),
)
to_narrow_to_step = ["actions", "action_log_probs", "rewards"]
to_narrow_to_step_plus_1 = ["prev_actions", "value_preds", "returns", "masks"]
for name in to_narrow_to_step + to_narrow_to_step_plus_1:
if getattr(self, name) is not None:
unnarrow_data[name] = getattr(self, name)
setattr(
self,
name,
unnarrow_data[name].narrow(
dim=0,
start=0,
length=base_length + (name in to_narrow_to_step_plus_1),
),
)
unnarrow_data["num_steps"] = self.num_steps
self.num_steps = base_length
if num_steps is None:
self.step = 0 # we just finished a rollout, so we reset it for the next one
def unnarrow(self, unnarrow_to_maximum_size=False):
"""See doc string for the `narrow` method."""
unnarrow_data = (
self.permanent_unnarrow_data
if unnarrow_to_maximum_size
else self.unnarrow_data
)
if len(unnarrow_data) == 0:
return
for storage_name in ["observations", "memory"]:
storage: Memory = getattr(self, storage_name)
for key in storage:
storage[key] = (
unnarrow_data[storage_name][key],
storage.sampler_dim(key),
)
unnarrow_data[storage_name].pop(key)
# Note that memory can be empty
assert (
storage_name not in unnarrow_data
or len(unnarrow_data[storage_name]) == 0
), "unnarrow_data contains {} {}".format(
storage_name, unnarrow_data[storage_name]
)
unnarrow_data.pop(storage_name, None)
for name in [
"prev_actions",
"value_preds",
"returns",
"masks",
"actions",
"action_log_probs",
"rewards",
]:
if name in unnarrow_data:
setattr(self, name, unnarrow_data[name])
unnarrow_data.pop(name)
self.num_steps = unnarrow_data["num_steps"]
unnarrow_data.pop("num_steps")
assert len(unnarrow_data) == 0
def after_update(self):
for storage in [self.observations, self.memory]:
for key in storage:
storage[key][0][0].copy_(storage[key][0][-1])
self.masks[0].copy_(self.masks[-1])
self.prev_actions[0].copy_(self.prev_actions[-1])
if len(self.unnarrow_data) > 0:
self.unnarrow()
def _extend_tensor(self, stored_tensor: torch.Tensor):
# Ensure broadcast to all flattened dimensions
extended_shape = stored_tensor.shape + (1,) * (
len(self.value_preds.shape) - len(stored_tensor.shape)
)
return stored_tensor.view(*extended_shape)
def compute_returns(
self, next_value: torch.Tensor, use_gae: bool, gamma: float, tau: float
):
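# Descriptive comment on the recursion implemented below: with
#   delta_t = r_t + gamma * V(s_{t+1}) * mask_{t+1} - V(s_t)
# the GAE advantage is
#   A_t = delta_t + gamma * tau * mask_{t+1} * A_{t+1}
# and returns are stored as R_t = A_t + V(s_t). Without GAE, plain discounted
# returns R_t = r_t + gamma * mask_{t+1} * R_{t+1} are computed instead.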
extended_mask = self._extend_tensor(self.masks)
extended_rewards = self._extend_tensor(self.rewards)
if use_gae:
self.value_preds[-1] = next_value
gae = 0
for step in reversed(range(extended_rewards.shape[0])):
delta = (
extended_rewards[step]
+ gamma * self.value_preds[step + 1] * extended_mask[step + 1]
- self.value_preds[step]
)
gae = delta + gamma * tau * extended_mask[step + 1] * gae # type:ignore
self.returns[step] = gae + self.value_preds[step]
else:
self.returns[-1] = next_value
for step in reversed(range(extended_rewards.shape[0])):
self.returns[step] = (
self.returns[step + 1] * gamma * extended_mask[step + 1]
+ extended_rewards[step]
)
def recurrent_generator(
self,
advantages: torch.Tensor,
adv_mean: torch.Tensor,
adv_std: torch.Tensor,
num_mini_batch: int,
):
normalized_advantages = (advantages - adv_mean) / (adv_std + 1e-5)
num_samplers = self.rewards.shape[1]
assert num_samplers >= num_mini_batch, (
"The number of task samplers ({}) "
"must be greater than or equal to the number of "
"mini batches ({}).".format(num_samplers, num_mini_batch)
)
inds = np.round(
np.linspace(0, num_samplers, num_mini_batch + 1, endpoint=True)
).astype(np.int32)
pairs = list(zip(inds[:-1], inds[1:]))
random.shuffle(pairs)
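# Illustrative example (assumed numbers): with 8 samplers and
# num_mini_batch == 2, `inds` is [0, 4, 8] and `pairs` is a shuffled
# [(0, 4), (4, 8)], so each yielded batch holds the full rollout for 4 samplers.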
for start_ind, end_ind in pairs:
cur_samplers = list(range(start_ind, end_ind))
memory_batch = self.memory.step_squeeze(0).sampler_select(cur_samplers)
observations_batch = self.unflatten_observations(
self.observations.slice(dim=0, stop=-1).sampler_select(cur_samplers)
)
actions_batch = []
prev_actions_batch = []
value_preds_batch = []
return_batch = []
masks_batch = []
old_action_log_probs_batch = []
adv_targ = []
norm_adv_targ = []
for ind in cur_samplers:
actions_batch.append(self.actions[:, ind])
prev_actions_batch.append(self.prev_actions[:-1, ind])
value_preds_batch.append(self.value_preds[:-1, ind])
return_batch.append(self.returns[:-1, ind])
masks_batch.append(self.masks[:-1, ind])
old_action_log_probs_batch.append(self.action_log_probs[:, ind])
adv_targ.append(advantages[:, ind])
norm_adv_targ.append(normalized_advantages[:, ind])
actions_batch = torch.stack(actions_batch, 1) # type:ignore
prev_actions_batch = torch.stack(prev_actions_batch, 1) # type:ignore
value_preds_batch = torch.stack(value_preds_batch, 1) # type:ignore
return_batch = torch.stack(return_batch, 1) # type:ignore
masks_batch = torch.stack(masks_batch, 1) # type:ignore
old_action_log_probs_batch = torch.stack( # type:ignore
old_action_log_probs_batch, 1
)
adv_targ = torch.stack(adv_targ, 1) # type:ignore
norm_adv_targ = torch.stack(norm_adv_targ, 1) # type:ignore
yield {
"observations": observations_batch,
"memory": memory_batch,
"actions": su.unflatten(self.action_space, actions_batch),
"prev_actions": su.unflatten(self.action_space, prev_actions_batch),
"values": value_preds_batch,
"returns": return_batch,
"masks": masks_batch,
"old_action_log_probs": old_action_log_probs_batch,
"adv_targ": adv_targ,
"norm_adv_targ": norm_adv_targ,
}
def unflatten_observations(self, flattened_batch: Memory) -> ObservationType:
result: ObservationType = {}
for name in flattened_batch:
full_path = self.flattened_to_unflattened["observations"][name]
cur_dict = result
for part in full_path[:-1]:
if part not in cur_dict:
cur_dict[part] = {}
cur_dict = cast(ObservationType, cur_dict[part])
cur_dict[full_path[-1]] = flattened_batch[name][0]
return result
def pick_observation_step(self, step: int) -> ObservationType:
return self.unflatten_observations(self.observations.step_select(step))
def pick_memory_step(self, step: int) -> Memory:
if self.only_store_first_and_last_in_memory and step > 0:
step = 1
return self.memory.step_squeeze(step)
def pick_prev_actions_step(self, step: int) -> ActionType:
return su.unflatten(self.action_space, self.prev_actions[step : step + 1])
| ask4help-main | allenact/algorithms/onpolicy_sync/storage.py |
import functools
from typing import Dict, cast, Sequence, Set
import torch
from allenact.algorithms.onpolicy_sync.losses.abstract_loss import (
AbstractActorCriticLoss,
)
from allenact.algorithms.onpolicy_sync.policy import ObservationType
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import ActorCriticOutput
class GroupedActionImitation(AbstractActorCriticLoss):
def __init__(
self, nactions: int, action_groups: Sequence[Set[int]], *args, **kwargs
):
super().__init__(*args, **kwargs)
assert (
sum(len(ag) for ag in action_groups) == nactions
and len(functools.reduce(lambda x, y: x | y, action_groups)) == nactions
), f"`action_groups` (==`{action_groups}`) must be a partition of `[0, 1, 2, ..., nactions - 1]`"
self.nactions = nactions
self.action_groups_mask = torch.FloatTensor(
[
[i in action_group for i in range(nactions)]
for action_group in action_groups
]
+ [[1] * nactions] # type:ignore
)
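# Worked example (illustrative values, an assumption for clarity): with
# nactions == 4 and action_groups == [{0, 1}, {2, 3}], `action_groups_mask` is
#   [[1, 1, 0, 0],
#    [0, 0, 1, 1],
#    [1, 1, 1, 1]]
# where the trailing all-ones row appears intended as a fallback when no expert
# group is available (cf. the commented-out re-mapping in `loss`).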
def loss( # type: ignore
self,
step_count: int,
batch: ObservationType,
actor_critic_output: ActorCriticOutput[CategoricalDistr],
*args,
**kwargs,
):
observations = cast(Dict[str, torch.Tensor], batch["observations"])
assert "expert_group_action" in observations
expert_group_actions = observations["expert_group_action"]
# expert_group_actions = expert_group_actions + (expert_group_actions == -1).long() * (
# 1 + self.action_groups_mask.shape[0]
# )
if self.action_groups_mask.get_device() != expert_group_actions.get_device():
self.action_groups_mask = cast(
torch.FloatTensor,
self.action_groups_mask.cuda(expert_group_actions.get_device()),
)
expert_group_actions_reshaped = expert_group_actions.view(-1, 1)
expert_group_actions_mask = self.action_groups_mask[
expert_group_actions_reshaped
]
probs_tensor = actor_critic_output.distributions.probs_tensor
expert_group_actions_mask = expert_group_actions_mask.view(probs_tensor.shape)
total_loss = -(
torch.log((probs_tensor * expert_group_actions_mask).sum(-1))
).mean()
return total_loss, {"grouped_action_cross_entropy": total_loss.item(),}
| ask4help-main | allenact/algorithms/onpolicy_sync/losses/grouped_action_imitation.py |
from .a2cacktr import A2C, ACKTR, A2CACKTR
from .ppo import PPO
| ask4help-main | allenact/algorithms/onpolicy_sync/losses/__init__.py |
"""Defining imitation losses for actor critic type models."""
from typing import Dict, cast, Optional
from collections import OrderedDict
import torch
from allenact.algorithms.onpolicy_sync.losses.abstract_loss import (
AbstractActorCriticLoss,
ObservationType,
)
from allenact.base_abstractions.distributions import (
Distr,
CategoricalDistr,
SequentialDistr,
ConditionalDistr,
)
from allenact.base_abstractions.misc import ActorCriticOutput
from allenact.base_abstractions.sensor import AbstractExpertSensor
import allenact.utils.spaces_utils as su
class Imitation(AbstractActorCriticLoss):
"""Expert imitation loss."""
def __init__(
self, expert_sensor: Optional[AbstractExpertSensor] = None, *args, **kwargs
):
super().__init__(*args, **kwargs)
self.expert_sensor = expert_sensor
def group_loss(
self,
distribution: CategoricalDistr,
expert_actions: torch.Tensor,
expert_actions_masks: torch.Tensor,
):
assert isinstance(distribution, CategoricalDistr) or (
isinstance(distribution, ConditionalDistr)
and isinstance(distribution.distr, CategoricalDistr)
), "This implementation only supports (groups of) `CategoricalDistr`"
expert_successes = expert_actions_masks.sum()
log_probs = distribution.log_prob(cast(torch.LongTensor, expert_actions))
assert (
log_probs.shape[: len(expert_actions_masks.shape)]
== expert_actions_masks.shape
)
# Add dimensions to `expert_actions_masks` on the right to allow for masking
# if necessary.
len_diff = len(log_probs.shape) - len(expert_actions_masks.shape)
assert len_diff >= 0
expert_actions_masks = expert_actions_masks.view(
*expert_actions_masks.shape, *((1,) * len_diff)
)
group_loss = -(expert_actions_masks * log_probs).sum() / torch.clamp(
expert_successes, min=1
)
return group_loss, expert_successes
def loss( # type: ignore
self,
step_count: int,
batch: ObservationType,
actor_critic_output: ActorCriticOutput[Distr],
*args,
**kwargs,
):
"""Computes the imitation loss.
# Parameters
batch : A batch of data corresponding to the information collected when rolling out (possibly many) agents
over a fixed number of steps. In particular this batch should have the same format as that returned by
`RolloutStorage.recurrent_generator`.
Here `batch["observations"]` must contain `"expert_action"` observations
or `"expert_policy"` observations. See `ExpertActionSensor` (or `ExpertPolicySensor`) for an example of
a sensor producing such observations.
actor_critic_output : The output of calling an ActorCriticModel on the observations in `batch`.
args : Extra args. Ignored.
kwargs : Extra kwargs. Ignored.
# Returns
A (0-dimensional) torch.FloatTensor corresponding to the computed loss. `.backward()` will be called on this
tensor in order to compute a gradient update to the ActorCriticModel's parameters.
"""
observations = cast(Dict[str, torch.Tensor], batch["observations"])
losses = OrderedDict()
should_report_loss = False
if "expert_action" in observations:
if self.expert_sensor is None or not self.expert_sensor.use_groups:
expert_actions_and_mask = observations["expert_action"]
assert expert_actions_and_mask.shape[-1] == 2
expert_actions_and_mask_reshaped = expert_actions_and_mask.view(-1, 2)
expert_actions = expert_actions_and_mask_reshaped[:, 0].view(
*expert_actions_and_mask.shape[:-1], 1
)
expert_actions_masks = (
expert_actions_and_mask_reshaped[:, 1]
.float()
.view(*expert_actions_and_mask.shape[:-1], 1)
)
total_loss, expert_successes = self.group_loss(
cast(CategoricalDistr, actor_critic_output.distributions),
expert_actions,
expert_actions_masks,
)
should_report_loss = expert_successes.item() != 0
else:
expert_actions = su.unflatten(
self.expert_sensor.observation_space, observations["expert_action"]
)
total_loss = 0
ready_actions = OrderedDict()
for group_name, cd in zip(
self.expert_sensor.group_spaces,
cast(
SequentialDistr, actor_critic_output.distributions
).conditional_distrs,
):
assert group_name == cd.action_group_name
cd.reset()
cd.condition_on_input(**ready_actions)
expert_action = expert_actions[group_name][
AbstractExpertSensor.ACTION_POLICY_LABEL
]
expert_action_masks = expert_actions[group_name][
AbstractExpertSensor.EXPERT_SUCCESS_LABEL
]
ready_actions[group_name] = expert_action
current_loss, expert_successes = self.group_loss(
cd, expert_action, expert_action_masks,
)
should_report_loss = (
expert_successes.item() != 0 or should_report_loss
)
cd.reset()
if expert_successes.item() != 0:
losses[group_name + "_cross_entropy"] = current_loss.item()
total_loss = total_loss + current_loss
elif "expert_policy" in observations:
if self.expert_sensor is None or not self.expert_sensor.use_groups:
assert isinstance(
actor_critic_output.distributions, CategoricalDistr
), "This implementation currently only supports `CategoricalDistr`"
expert_policies = cast(Dict[str, torch.Tensor], batch["observations"])[
"expert_policy"
][..., :-1]
expert_actions_masks = cast(
Dict[str, torch.Tensor], batch["observations"]
)["expert_policy"][..., -1:]
expert_successes = expert_actions_masks.sum()
if expert_successes.item() > 0:
should_report_loss = True
log_probs = cast(
CategoricalDistr, actor_critic_output.distributions
).log_probs_tensor
# Add dimensions to `expert_actions_masks` on the right to allow for masking
# if necessary.
len_diff = len(log_probs.shape) - len(expert_actions_masks.shape)
assert len_diff >= 0
expert_actions_masks = expert_actions_masks.view(
*expert_actions_masks.shape, *((1,) * len_diff)
)
total_loss = (
-(log_probs * expert_policies) * expert_actions_masks
).sum() / torch.clamp(expert_successes, min=1)
else:
raise NotImplementedError(
"This implementation currently only supports `CategoricalDistr`"
)
else:
raise NotImplementedError(
"Imitation loss requires either `expert_action` or `expert_policy`"
" sensor to be active."
)
return (
total_loss,
{"expert_cross_entropy": total_loss.item(), **losses}
if should_report_loss
else {},
)
| ask4help-main | allenact/algorithms/onpolicy_sync/losses/imitation.py |
"""Defining abstract loss classes for actor critic models."""
import abc
from typing import Dict, Tuple, Union
import torch
from allenact.algorithms.onpolicy_sync.policy import ObservationType
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import Loss, ActorCriticOutput
class AbstractActorCriticLoss(Loss):
"""Abstract class representing a loss function used to train an
ActorCriticModel."""
@abc.abstractmethod
def loss( # type: ignore
self,
step_count: int,
batch: ObservationType,
actor_critic_output: ActorCriticOutput[CategoricalDistr],
*args,
**kwargs,
) -> Union[
Tuple[torch.FloatTensor, Dict[str, float]],
Tuple[torch.FloatTensor, Dict[str, float], Dict[str, float]],
]:
"""Computes the loss.
# Parameters
batch : A batch of data corresponding to the information collected when rolling out (possibly many) agents
over a fixed number of steps. In particular this batch should have the same format as that returned by
`RolloutStorage.recurrent_generator`.
actor_critic_output : The output of calling an ActorCriticModel on the observations in `batch`.
args : Extra args.
kwargs : Extra kwargs.
# Returns
A (0-dimensional) torch.FloatTensor corresponding to the computed loss. `.backward()` will be called on this
tensor in order to compute a gradient update to the ActorCriticModel's parameters.
A Dict[str, float] with scalar values corresponding to sub-losses.
An optional Dict[str, float] with scalar values corresponding to extra info to be processed per epoch and
combined across epochs by the engine.
"""
# TODO: The above documentation is missing what the batch dimensions are.
raise NotImplementedError()
| ask4help-main | allenact/algorithms/onpolicy_sync/losses/abstract_loss.py |
"""Defining the PPO loss for actor critic type models."""
from typing import Dict, Optional, Callable, cast, Tuple
import torch
from allenact.algorithms.onpolicy_sync.losses.abstract_loss import (
AbstractActorCriticLoss,
ObservationType,
)
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import ActorCriticOutput
class PPO(AbstractActorCriticLoss):
"""Implementation of the Proximal Policy Optimization loss.
# Attributes
clip_param : The clipping parameter to use.
value_loss_coef : Weight of the value loss.
entropy_coef : Weight of the entropy (encouraging) loss.
use_clipped_value_loss : Whether or not to also clip the value loss.
clip_decay : Callable for clip param decay factor (function of the current number of steps)
entropy_method_name : Name of Distr's entropy method name. Default is `entropy`,
but we might use `conditional_entropy` for `SequentialDistr`
show_ratios : If True, adds tracking for the PPO ratio (linear, clamped, and used) in each
epoch to be logged by the engine.
normalize_advantage: Whether or not to use normalized advantage. Default is True.
"""
def __init__(
self,
clip_param: float,
value_loss_coef: float,
entropy_coef: float,
use_clipped_value_loss=True,
clip_decay: Optional[Callable[[int], float]] = None,
entropy_method_name: str = "entropy",
normalize_advantage: bool = True,
show_ratios: bool = False,
*args,
**kwargs
):
"""Initializer.
See the class documentation for parameter definitions.
"""
super().__init__(*args, **kwargs)
self.clip_param = clip_param
self.value_loss_coef = value_loss_coef
self.entropy_coef = entropy_coef
self.use_clipped_value_loss = use_clipped_value_loss
self.clip_decay = clip_decay if clip_decay is not None else (lambda x: 1.0)
self.entropy_method_name = entropy_method_name
self.show_ratios = show_ratios
if normalize_advantage:
self.adv_key = "norm_adv_targ"
else:
self.adv_key = "adv_targ"
def loss_per_step(
self,
step_count: int,
batch: ObservationType,
actor_critic_output: ActorCriticOutput[CategoricalDistr],
) -> Tuple[
Dict[str, Tuple[torch.Tensor, Optional[float]]], Dict[str, torch.Tensor]
]: # TODO tuple output
actions = cast(torch.LongTensor, batch["actions"])
values = actor_critic_output.values
action_log_probs = actor_critic_output.distributions.log_prob(actions)
dist_entropy: torch.FloatTensor = getattr(
actor_critic_output.distributions, self.entropy_method_name
)()
def add_trailing_dims(t: torch.Tensor):
assert len(t.shape) <= len(batch[self.adv_key].shape)
return t.view(
t.shape + ((1,) * (len(batch[self.adv_key].shape) - len(t.shape)))
)
dist_entropy = add_trailing_dims(dist_entropy)
clip_param = self.clip_param * self.clip_decay(step_count)
ratio = torch.exp(action_log_probs - batch["old_action_log_probs"])
ratio = add_trailing_dims(ratio)
clamped_ratio = torch.clamp(ratio, 1.0 - clip_param, 1.0 + clip_param)
surr1 = ratio * batch[self.adv_key]
surr2 = clamped_ratio * batch[self.adv_key]
use_clamped = surr2 < surr1
action_loss = -torch.where(cast(torch.Tensor, use_clamped), surr2, surr1)
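# Descriptive note: the lines above implement the clipped surrogate objective,
#   action_loss = -min(r_t * A_t, clamp(r_t, 1 - eps, 1 + eps) * A_t)
# with r_t = exp(log pi_new(a_t|s_t) - log pi_old(a_t|s_t)) and eps = clip_param
# (possibly decayed by `clip_decay`).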
if self.use_clipped_value_loss:
value_pred_clipped = batch["values"] + (values - batch["values"]).clamp(
-clip_param, clip_param
)
value_losses = (values - batch["returns"]).pow(2)
value_losses_clipped = (value_pred_clipped - batch["returns"]).pow(2)
value_loss = 0.5 * torch.max(value_losses, value_losses_clipped)
else:
value_loss = 0.5 * (cast(torch.FloatTensor, batch["returns"]) - values).pow(
2
)
# noinspection PyUnresolvedReferences
return (
{
"value": (value_loss, self.value_loss_coef),
"action": (action_loss, None),
"entropy": (dist_entropy.mul_(-1.0), self.entropy_coef), # type: ignore
},
{
"ratio": ratio,
"ratio_clamped": clamped_ratio,
"ratio_used": torch.where(
cast(torch.Tensor, use_clamped), clamped_ratio, ratio
),
}
if self.show_ratios
else {},
)
def loss( # type: ignore
self,
step_count: int,
batch: ObservationType,
actor_critic_output: ActorCriticOutput[CategoricalDistr],
*args,
**kwargs
):
losses_per_step, ratio_info = self.loss_per_step(
step_count=step_count, batch=batch, actor_critic_output=actor_critic_output,
)
losses = {
key: (loss.mean(), weight)
for (key, (loss, weight)) in losses_per_step.items()
}
total_loss = sum(
loss * weight if weight is not None else loss
for loss, weight in losses.values()
)
result = (
total_loss,
{
"ppo_total": cast(torch.Tensor, total_loss).item(),
**{key: loss.item() for key, (loss, _) in losses.items()},
"returns": batch["returns"].mean().item(),
},
{key: float(value.mean().item()) for key, value in ratio_info.items()},
)
return result if self.show_ratios else result[:2]
class PPOValue(AbstractActorCriticLoss):
"""Implementation of the Proximal Policy Optimization loss.
# Attributes
clip_param : The clipping parameter to use.
use_clipped_value_loss : Whether or not to also clip the value loss.
"""
def __init__(
self,
clip_param: float,
use_clipped_value_loss=True,
clip_decay: Optional[Callable[[int], float]] = None,
*args,
**kwargs
):
"""Initializer.
See the class documentation for parameter definitions.
"""
super().__init__(*args, **kwargs)
self.clip_param = clip_param
self.use_clipped_value_loss = use_clipped_value_loss
self.clip_decay = clip_decay if clip_decay is not None else (lambda x: 1.0)
def loss( # type: ignore
self,
step_count: int,
batch: ObservationType,
actor_critic_output: ActorCriticOutput[CategoricalDistr],
*args,
**kwargs
):
values = actor_critic_output.values
clip_param = self.clip_param * self.clip_decay(step_count)
if self.use_clipped_value_loss:
value_pred_clipped = batch["values"] + (values - batch["values"]).clamp(
-clip_param, clip_param
)
value_losses = (values - batch["returns"]).pow(2)
value_losses_clipped = (value_pred_clipped - batch["returns"]).pow(2)
value_loss = 0.5 * torch.max(value_losses, value_losses_clipped).mean()
else:
value_loss = (
0.5 * (cast(torch.FloatTensor, batch["returns"]) - values).pow(2).mean()
)
return (
value_loss,
{"value": value_loss.item(),},
)
PPOConfig = dict(clip_param=0.1, value_loss_coef=0.5, entropy_coef=0.01)
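# --- Editor's usage note (illustrative, not part of the original module). ---
# `PPOConfig` bundles default hyperparameters for the PPO loss defined in this file.
# Assuming the on-policy loss class defined above is named `PPO` (per this file's
# name), a typical experiment config would unpack it as:
# >>> ppo_loss = PPO(**PPOConfig)
# >>> value_only_loss = PPOValue(clip_param=PPOConfig["clip_param"])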
| ask4help-main | allenact/algorithms/onpolicy_sync/losses/ppo.py |
"""Implementation of the KFAC optimizer.
TODO: this code is not supported as it currently lacks an implementation for recurrent models.
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from allenact.base_abstractions.distributions import AddBias
# TODO: In order to make this code faster:
# 1) Implement _extract_patches as a single cuda kernel
# 2) Compute QR decomposition in a separate process
# 3) Actually make a general KFAC optimizer so it fits PyTorch
def _extract_patches(x, kernel_size, stride, padding):
if padding[0] + padding[1] > 0:
x = F.pad(
x, [padding[1], padding[1], padding[0], padding[0]]
).data # Actually check dims
x = x.unfold(2, kernel_size[0], stride[0])
x = x.unfold(3, kernel_size[1], stride[1])
x = x.transpose_(1, 2).transpose_(2, 3).contiguous()
x = x.view(x.size(0), x.size(1), x.size(2), x.size(3) * x.size(4) * x.size(5))
return x
def compute_cov_a(a, classname, layer_info, fast_cnn):
batch_size = a.size(0)
if classname == "Conv2d":
if fast_cnn:
a = _extract_patches(a, *layer_info)
a = a.view(a.size(0), -1, a.size(-1))
a = a.mean(1)
else:
a = _extract_patches(a, *layer_info)
a = a.view(-1, a.size(-1)).div_(a.size(1)).div_(a.size(2))
elif classname == "AddBias":
is_cuda = a.is_cuda
a = torch.ones(a.size(0), 1)
if is_cuda:
a = a.cuda()
return a.t() @ (a / batch_size)
def compute_cov_g(g, classname, layer_info, fast_cnn):
batch_size = g.size(0)
if classname == "Conv2d":
if fast_cnn:
g = g.view(g.size(0), g.size(1), -1)
g = g.sum(-1)
else:
g = g.transpose(1, 2).transpose(2, 3).contiguous()
g = g.view(-1, g.size(-1)).mul_(g.size(1)).mul_(g.size(2))
elif classname == "AddBias":
g = g.view(g.size(0), g.size(1), -1)
g = g.sum(-1)
g_ = g * batch_size
return g_.t() @ (g_ / g.size(0))
def update_running_stat(aa, m_aa, momentum):
# Do the trick to keep aa unchanged and not create any additional tensors
m_aa *= momentum / (1 - momentum)
m_aa += aa
m_aa *= 1 - momentum
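# Editor's note (added sketch, not in the original source): the three in-place ops
# above are algebraically equivalent to the exponential moving average
#     m_aa <- momentum * m_aa + (1 - momentum) * aa
# e.g. with momentum = 0.99:
#     m_aa *= 0.99 / 0.01    ->  99 * m_aa
#     m_aa += aa             ->  99 * m_aa + aa
#     m_aa *= 0.01           ->  0.99 * m_aa + 0.01 * aa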
class SplitBias(nn.Module):
def __init__(self, module):
super(SplitBias, self).__init__()
self.module = module
self.add_bias = AddBias(module.bias.data)
self.module.bias = None
def forward(self, x):
y = self.module(x)
y = self.add_bias(y)
return y
class KFACOptimizer(optim.Optimizer): # type: ignore
def __init__(
self,
model,
lr=0.25,
momentum=0.9,
stat_decay=0.99,
kl_clip=0.001,
damping=1e-2,
weight_decay=0,
fast_cnn=False,
Ts=1,
Tf=10,
):
defaults = dict()
def split_bias(module):
for mname, child in module.named_children():
if hasattr(child, "bias") and child.bias is not None:
# noinspection PyProtectedMember
module._modules[mname] = SplitBias(child)
else:
split_bias(child)
split_bias(model)
super(KFACOptimizer, self).__init__(model.parameters(), defaults)
self.known_modules = {"Linear", "Conv2d", "AddBias"}
self.modules = []
self.grad_outputs = {}
self.model = model
self._prepare_model()
self.steps = 0
self.m_aa, self.m_gg = {}, {}
self.Q_a, self.Q_g = {}, {}
self.d_a, self.d_g = {}, {}
self.momentum = momentum
self.stat_decay = stat_decay
self.lr = lr
self.kl_clip = kl_clip
self.damping = damping
self.weight_decay = weight_decay
self.fast_cnn = fast_cnn
self.Ts = Ts
self.Tf = Tf
self.optim = optim.SGD(
model.parameters(), lr=self.lr * (1 - self.momentum), momentum=self.momentum
)
def _save_input(self, module, input_to_save):
if torch.is_grad_enabled() and self.steps % self.Ts == 0:
classname = module.__class__.__name__
layer_info = None
if classname == "Conv2d":
layer_info = (module.kernel_size, module.stride, module.padding)
aa = compute_cov_a(
input_to_save[0].data, classname, layer_info, self.fast_cnn
)
# Initialize buffers
if self.steps == 0:
self.m_aa[module] = aa.clone()
update_running_stat(aa, self.m_aa[module], self.stat_decay)
def _save_grad_output(self, module, grad_input, grad_output):
# Accumulate statistics for Fisher matrices
if self.acc_stats:
classname = module.__class__.__name__
layer_info = None
if classname == "Conv2d":
layer_info = (module.kernel_size, module.stride, module.padding)
gg = compute_cov_g(
grad_output[0].data, classname, layer_info, self.fast_cnn
)
# Initialize buffers
if self.steps == 0:
self.m_gg[module] = gg.clone()
update_running_stat(gg, self.m_gg[module], self.stat_decay)
def _prepare_model(self):
for module in self.model.modules():
classname = module.__class__.__name__
if classname in self.known_modules:
assert not (
(classname in ["Linear", "Conv2d"]) and module.bias is not None
), "You must have a bias as a separate layer"
self.modules.append(module)
module.register_forward_pre_hook(self._save_input)
module.register_backward_hook(self._save_grad_output)
def step(self, closure=None):
# Add weight decay
if self.weight_decay > 0:
for p in self.model.parameters():
p.grad.data.add_(self.weight_decay, p.data)
updates = {}
for i, m in enumerate(self.modules):
assert (
len(list(m.parameters())) == 1
), "Can handle only one parameter at the moment"
classname = m.__class__.__name__
p = next(m.parameters())
la = self.damping + self.weight_decay
if self.steps % self.Tf == 0:
                # My asynchronous implementation exists; I will add it later.
                # Experimenting with different ways to do this in PyTorch.
self.d_a[m], self.Q_a[m] = torch.symeig(self.m_aa[m], eigenvectors=True)
self.d_g[m], self.Q_g[m] = torch.symeig(self.m_gg[m], eigenvectors=True)
self.d_a[m].mul_((self.d_a[m] > 1e-6).float())
self.d_g[m].mul_((self.d_g[m] > 1e-6).float())
if classname == "Conv2d":
p_grad_mat = p.grad.data.view(p.grad.data.size(0), -1)
else:
p_grad_mat = p.grad.data
v1 = self.Q_g[m].t() @ p_grad_mat @ self.Q_a[m]
v2 = v1 / (self.d_g[m].unsqueeze(1) * self.d_a[m].unsqueeze(0) + la)
v = self.Q_g[m] @ v2 @ self.Q_a[m].t()
v = v.view(p.grad.data.size())
updates[p] = v
vg_sum = 0
for p in self.model.parameters():
v = updates[p]
vg_sum += (v * p.grad.data * self.lr * self.lr).sum()
nu = min(1.0, math.sqrt(self.kl_clip / vg_sum))
for p in self.model.parameters():
v = updates[p]
p.grad.data.copy_(v)
p.grad.data.mul_(nu)
self.optim.step()
self.steps += 1
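# --- Editor's usage sketch (an illustrative assumption, not part of the original file). ---
# `self.acc_stats` is read in `_save_grad_output` but never set in `__init__`, so the
# caller is assumed to toggle it around the backward pass (as in the reference ACKTR
# code). A bias-free layer keeps the sketch minimal (Linear/Conv biases are otherwise
# split into separate `AddBias` layers by `split_bias`).
def _example_kfac_update() -> None:
    model = nn.Sequential(nn.Linear(4, 2, bias=False))
    optimizer = KFACOptimizer(model)
    loss = model(torch.randn(8, 4)).pow(2).mean()
    optimizer.acc_stats = True  # assumption: flag managed by the training loop
    loss.backward()
    optimizer.acc_stats = False
    optimizer.step()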
| ask4help-main | allenact/algorithms/onpolicy_sync/losses/kfac.py |
"""Implementation of A2C and ACKTR losses."""
from typing import cast, Tuple, Dict, Optional
import torch
from allenact.algorithms.onpolicy_sync.losses.abstract_loss import (
AbstractActorCriticLoss,
ObservationType,
)
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import ActorCriticOutput
from allenact.utils.system import get_logger
class A2CACKTR(AbstractActorCriticLoss):
"""Class implementing A2C and ACKTR losses.
# Attributes
    acktr : `True` if the ACKTR loss should be used (currently only partially supported), otherwise the A2C loss is used.
    value_loss_coef : Weight of the value loss.
    entropy_coef : Weight of the entropy (exploration-encouraging) loss.
    entropy_method_name : Name of the `Distr` entropy method to call. Defaults to `entropy`,
        but `conditional_entropy` may be used for `SequentialDistr`.
"""
def __init__(
self,
value_loss_coef,
entropy_coef,
acktr=False,
entropy_method_name: str = "entropy",
*args,
**kwargs,
):
"""Initializer.
See class documentation for parameter definitions.
"""
super().__init__(*args, **kwargs)
self.acktr = acktr
self.loss_key = "a2c_total" if not acktr else "aktr_total"
self.value_loss_coef = value_loss_coef
self.entropy_coef = entropy_coef
self.entropy_method_name = entropy_method_name
def loss_per_step( # type: ignore
self,
step_count: int,
batch: ObservationType,
actor_critic_output: ActorCriticOutput[CategoricalDistr],
) -> Dict[str, Tuple[torch.Tensor, Optional[float]]]:
actions = cast(torch.LongTensor, batch["actions"])
values = actor_critic_output.values
action_log_probs = actor_critic_output.distributions.log_prob(actions)
action_log_probs = action_log_probs.view(
action_log_probs.shape
+ (1,)
* (
len(cast(torch.Tensor, batch["adv_targ"]).shape)
- len(action_log_probs.shape)
)
)
dist_entropy: torch.FloatTensor = getattr(
actor_critic_output.distributions, self.entropy_method_name
)()
dist_entropy = dist_entropy.view(
dist_entropy.shape
+ ((1,) * (len(action_log_probs.shape) - len(dist_entropy.shape)))
)
value_loss = 0.5 * (cast(torch.FloatTensor, batch["returns"]) - values).pow(2)
# TODO: Decided not to use normalized advantages here,
# is this correct? (it's how it's done in Kostrikov's)
action_loss = -(
cast(torch.FloatTensor, batch["adv_targ"]).detach() * action_log_probs
)
if self.acktr:
# TODO: Currently acktr doesn't really work because of this natural gradient stuff
# that we should figure out how to integrate properly.
get_logger().warning("acktr is only partially supported.")
return {
"value": (value_loss, self.value_loss_coef),
"action": (action_loss, None),
"entropy": (dist_entropy.mul_(-1.0), self.entropy_coef), # type: ignore
}
def loss( # type: ignore
self,
step_count: int,
batch: ObservationType,
actor_critic_output: ActorCriticOutput[CategoricalDistr],
*args,
**kwargs,
):
losses_per_step = self.loss_per_step(
step_count=step_count, batch=batch, actor_critic_output=actor_critic_output,
)
losses = {
key: (loss.mean(), weight)
for (key, (loss, weight)) in losses_per_step.items()
}
total_loss = cast(
torch.Tensor,
sum(
loss * weight if weight is not None else loss
for loss, weight in losses.values()
),
)
return (
total_loss,
{
self.loss_key: total_loss.item(),
**{key: loss.item() for key, (loss, _) in losses.items()},
},
)
class A2C(A2CACKTR):
"""A2C Loss."""
def __init__(
self,
value_loss_coef,
entropy_coef,
entropy_method_name: str = "entropy",
*args,
**kwargs,
):
super().__init__(
value_loss_coef=value_loss_coef,
entropy_coef=entropy_coef,
acktr=False,
entropy_method_name=entropy_method_name,
*args,
**kwargs,
)
class ACKTR(A2CACKTR):
"""ACKTR Loss.
This code is not supported as it currently lacks an implementation
for recurrent models.
"""
def __init__(
self,
value_loss_coef,
entropy_coef,
entropy_method_name: str = "entropy",
*args,
**kwargs,
):
super().__init__(
value_loss_coef=value_loss_coef,
entropy_coef=entropy_coef,
acktr=True,
entropy_method_name=entropy_method_name,
*args,
**kwargs,
)
A2CConfig = dict(value_loss_coef=0.5, entropy_coef=0.01,)
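# --- Editor's usage note (illustrative, not part of the original module). ---
# `A2CConfig` matches the `A2C` constructor defined above:
# >>> a2c_loss = A2C(**A2CConfig)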
| ask4help-main | allenact/algorithms/onpolicy_sync/losses/a2cacktr.py |
| ask4help-main | allenact/algorithms/offpolicy_sync/__init__.py |
"""Defining abstract loss classes for actor critic models."""
import abc
from typing import Dict, Tuple, TypeVar, Generic
import torch
from allenact.algorithms.onpolicy_sync.policy import ObservationType
from allenact.base_abstractions.misc import Loss, Memory
ModelType = TypeVar("ModelType")
class AbstractOffPolicyLoss(Generic[ModelType], Loss):
"""Abstract class representing an off-policy loss function used to train a
model."""
@abc.abstractmethod
def loss( # type: ignore
self,
step_count: int,
model: ModelType,
batch: ObservationType,
memory: Memory,
*args,
**kwargs,
) -> Tuple[torch.FloatTensor, Dict[str, float], Memory, int]:
"""Computes the loss.
Loss after processing a batch of data with (part of) a model (possibly with memory).
# Parameters
model: model to run on data batch (both assumed to be on the same device)
batch: data to use as input for model (already on the same device as model)
memory: model memory before processing current data batch
# Returns
A tuple with:
current_loss: total loss
current_info: additional information about the current loss
memory: model memory after processing current data batch
bsize: batch size
"""
raise NotImplementedError()
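# --- Editor's sketch of a minimal subclass (an illustrative assumption, not part of
# the original file). It demonstrates the four-element return contract documented
# above; the model interface and batch keys below are hypothetical.
class _ExampleBCOffPolicyLoss(AbstractOffPolicyLoss[torch.nn.Module]):
    def loss(self, step_count, model, batch, memory, *args, **kwargs):
        logits = model(batch["observations"])  # hypothetical model call
        expert_actions = batch["expert_actions"]  # hypothetical batch key
        nll = -(
            torch.log_softmax(logits, dim=-1)
            .gather(-1, expert_actions.unsqueeze(-1))
            .mean()
        )
        bsize = expert_actions.shape[0]
        return nll, {"offpolicy_nll": nll.item()}, memory, bsize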
| ask4help-main | allenact/algorithms/offpolicy_sync/losses/abstract_offpolicy_loss.py |
| ask4help-main | allenact/algorithms/offpolicy_sync/losses/__init__.py |
"""Functions used to initialize and manipulate pytorch models."""
import hashlib
from typing import Sequence, Tuple, Union, Optional, Dict, Any, Callable
import numpy as np
import torch
import torch.nn as nn
from allenact.utils.misc_utils import md5_hash_str_as_int
def md5_hash_of_state_dict(state_dict: Dict[str, Any]):
hashables = []
for piece in sorted(state_dict.items()):
if isinstance(piece[1], (np.ndarray, torch.Tensor, nn.Parameter)):
hashables.append(piece[0])
if not isinstance(piece[1], np.ndarray):
p1 = piece[1].data.cpu().numpy()
else:
p1 = piece[1]
hashables.append(int(hashlib.md5(p1.tobytes()).hexdigest(), 16,))
else:
hashables.append(md5_hash_str_as_int(str(piece)))
return md5_hash_str_as_int(str(hashables))
class Flatten(nn.Module):
"""Flatten input tensor so that it is of shape (FLATTENED_BATCH x -1)."""
def forward(self, x):
"""Flatten input tensor.
# Parameters
x : Tensor of size (FLATTENED_BATCH x ...) to flatten to size (FLATTENED_BATCH x -1)
# Returns
Flattened tensor.
"""
return x.reshape(x.size(0), -1)
def init_linear_layer(
module: nn.Linear, weight_init: Callable, bias_init: Callable, gain=1
):
"""Initialize a torch.nn.Linear layer.
# Parameters
module : A torch linear layer.
weight_init : Function used to initialize the weight parameters of the linear layer. Should take the weight data
tensor and gain as input.
bias_init : Function used to initialize the bias parameters of the linear layer. Should take the bias data
tensor and gain as input.
gain : The gain to apply.
# Returns
The initialized linear layer.
"""
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
def grad_norm(parameters, norm_type=2):
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
norm_type = float(norm_type)
if norm_type == "inf":
total_norm = max(p.grad.data.abs().max() for p in parameters)
else:
total_norm = 0
for p in parameters:
param_norm = p.grad.data.norm(norm_type)
total_norm += param_norm.item() ** norm_type
total_norm = total_norm ** (1.0 / norm_type)
return total_norm
def make_cnn(
input_channels: int,
layer_channels: Sequence[int],
kernel_sizes: Sequence[Union[int, Tuple[int, int]]],
strides: Sequence[Union[int, Tuple[int, int]]],
paddings: Sequence[Union[int, Tuple[int, int]]],
dilations: Sequence[Union[int, Tuple[int, int]]],
output_height: int,
output_width: int,
output_channels: int,
flatten: bool = True,
output_relu: bool = True,
) -> nn.Module:
assert (
len(layer_channels)
== len(kernel_sizes)
== len(strides)
== len(paddings)
== len(dilations)
), "Mismatched sizes: layers {} kernels {} strides {} paddings {} dilations {}".format(
layer_channels, kernel_sizes, strides, paddings, dilations
)
net = nn.Sequential()
input_channels_list = [input_channels] + list(layer_channels)
for it, current_channels in enumerate(layer_channels):
net.add_module(
"conv_{}".format(it),
nn.Conv2d(
in_channels=input_channels_list[it],
out_channels=current_channels,
kernel_size=kernel_sizes[it],
stride=strides[it],
padding=paddings[it],
dilation=dilations[it],
),
)
if it < len(layer_channels) - 1:
net.add_module("relu_{}".format(it), nn.ReLU(inplace=True))
if flatten:
net.add_module("flatten", Flatten())
net.add_module(
"fc",
nn.Linear(
layer_channels[-1] * output_width * output_height, output_channels
),
)
if output_relu:
net.add_module("out_relu", nn.ReLU(True))
return net
def compute_cnn_output(
cnn: nn.Module,
cnn_input: torch.Tensor,
permute_order: Optional[Tuple[int, ...]] = (
0, # FLAT_BATCH (flattening steps, samplers and agents)
3, # CHANNEL
1, # ROW
2, # COL
), # from [FLAT_BATCH x ROW x COL x CHANNEL] flattened input
):
"""Computes CNN outputs for given inputs.
# Parameters
cnn : A torch CNN.
cnn_input: A torch Tensor with inputs.
permute_order: A permutation Tuple to provide PyTorch dimension order, default (0, 3, 1, 2), where 0 corresponds to
the flattened batch dimensions (combining step, sampler and agent)
# Returns
CNN output with dimensions [STEP, SAMPLER, AGENT, CHANNEL, (HEIGHT, WIDTH)].
"""
nsteps: int
nsamplers: int
nagents: int
assert len(cnn_input.shape) in [
5,
6,
], "CNN input must have shape [STEP, SAMPLER, (AGENT,) dim1, dim2, dim3]"
nagents: Optional[int] = None
if len(cnn_input.shape) == 6:
nsteps, nsamplers, nagents = cnn_input.shape[:3]
else:
nsteps, nsamplers = cnn_input.shape[:2]
# Make FLAT_BATCH = nsteps * nsamplers (* nagents)
cnn_input = cnn_input.view((-1,) + cnn_input.shape[2 + int(nagents is not None) :])
if permute_order is not None:
cnn_input = cnn_input.permute(*permute_order)
cnn_output = cnn(cnn_input)
if nagents is not None:
cnn_output = cnn_output.reshape(
(nsteps, nsamplers, nagents,) + cnn_output.shape[1:]
)
else:
cnn_output = cnn_output.reshape((nsteps, nsamplers,) + cnn_output.shape[1:])
return cnn_output
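# --- Editor's usage sketch (illustrative, not part of the original file). ---
# Shows how `make_cnn` and `compute_cnn_output` fit together. The 84x84 resolution and
# layer hyperparameters are assumptions, chosen so the spatial output is 9x9
# (84 -> (84 - 8) / 4 + 1 = 20 -> (20 - 4) / 2 + 1 = 9).
def _example_make_and_run_cnn() -> torch.Tensor:
    cnn = make_cnn(
        input_channels=3,
        layer_channels=[32, 64],
        kernel_sizes=[8, 4],
        strides=[4, 2],
        paddings=[0, 0],
        dilations=[1, 1],
        output_height=9,
        output_width=9,
        output_channels=512,
    )
    frames = torch.rand(5, 2, 84, 84, 3)  # [STEP, SAMPLER, HEIGHT, WIDTH, CHANNEL]
    out = compute_cnn_output(cnn, frames)
    assert out.shape == (5, 2, 512)  # [STEP, SAMPLER, CHANNEL]
    return out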
def simple_conv_and_linear_weights_init(m):
if type(m) in [
nn.Conv1d,
nn.Conv2d,
nn.Conv3d,
nn.ConvTranspose1d,
nn.ConvTranspose2d,
nn.ConvTranspose3d,
]:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
if m.bias is not None:
m.bias.data.fill_(0)
elif type(m) == nn.Linear:
simple_linear_weights_init(m)
def simple_linear_weights_init(m):
if type(m) == nn.Linear:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
if m.bias is not None:
m.bias.data.fill_(0)
class FeatureEmbedding(nn.Module):
"""A wrapper of nn.Embedding but support zero output Used for extracting
features for actions/rewards."""
def __init__(self, input_size, output_size):
super().__init__()
self.output_size = output_size
if self.output_size != 0:
self.fc = nn.Embedding(input_size, output_size)
        else:  # register a buffer so the (empty) output is automatically moved to the right device
self.null_embedding: torch.Tensor
self.register_buffer("null_embedding", torch.zeros(0,), persistent=False)
def forward(self, inputs):
if self.output_size != 0:
return self.fc(inputs)
else:
return self.null_embedding
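# --- Editor's usage note (illustrative, not part of the original file). ---
# >>> emb = FeatureEmbedding(input_size=6, output_size=16)
# >>> emb(torch.tensor([0, 3, 5])).shape
# torch.Size([3, 16])
# With output_size == 0 the module returns an empty tensor, which callers can
# concatenate as a no-op feature:
# >>> FeatureEmbedding(input_size=6, output_size=0)(torch.tensor([0, 3, 5])).shape
# torch.Size([0])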
| ask4help-main | allenact/utils/model_utils.py |
"""Utility classes and functions for running and designing experiments."""
import abc
import collections.abc
import copy
import random
from collections import OrderedDict, defaultdict
from typing import (
Callable,
NamedTuple,
Dict,
Any,
Union,
Iterator,
Optional,
List,
cast,
Sequence,
TypeVar,
Generic,
)
import numpy as np
import torch
import torch.optim as optim
from allenact.algorithms.offpolicy_sync.losses.abstract_offpolicy_loss import (
AbstractOffPolicyLoss,
Memory,
)
from allenact.algorithms.onpolicy_sync.losses.abstract_loss import (
AbstractActorCriticLoss,
)
from allenact.base_abstractions.misc import Loss
from allenact.utils.system import get_logger
from allenact.utils.misc_utils import prepare_locals_for_super
def evenly_distribute_count_into_bins(count: int, nbins: int) -> List[int]:
"""Distribute a count into a number of bins.
# Parameters
count: A positive integer to be distributed, should be `>= nbins`.
nbins: The number of bins.
# Returns
A list of positive integers which sum to `count`. These values will be
as close to equal as possible (may differ by at most 1).
"""
assert count >= nbins, f"count ({count}) < nbins ({nbins})"
res = [0] * nbins
for it in range(count):
res[it % nbins] += 1
return res
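# Editor's example (illustrative, not in the original source):
# >>> evenly_distribute_count_into_bins(10, 3)
# [4, 3, 3]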
def recursive_update(
original: Union[Dict, collections.abc.MutableMapping],
update: Union[Dict, collections.abc.MutableMapping],
):
"""Recursively updates original dictionary with entries form update dict.
# Parameters
original : Original dictionary to be updated.
update : Dictionary with additional or replacement entries.
# Returns
Updated original dictionary.
"""
for k, v in update.items():
if isinstance(v, collections.abc.MutableMapping):
original[k] = recursive_update(original.get(k, {}), v)
else:
original[k] = v
return original
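# Editor's example (illustrative, not in the original source):
# >>> recursive_update({"a": {"b": 1}, "c": 2}, {"a": {"d": 3}})
# {'a': {'b': 1, 'd': 3}, 'c': 2}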
ToBuildType = TypeVar("ToBuildType")
class Builder(tuple, Generic[ToBuildType]):
"""Used to instantiate a given class with (default) parameters.
Helper class that stores a class, default parameters for that
class, and key word arguments that (possibly) overwrite the defaults.
    Calling an object of the Builder class instantiates an object of type
    `class_type` with parameters specified by the attributes `default` and
    `kwargs` (and possibly additional, overwriting, keyword arguments).
# Attributes
class_type : The class to be instantiated when calling the object.
kwargs : Keyword arguments used to instantiate an object of type `class_type`.
default : Default parameters used when instantiating the class.
"""
class_type: ToBuildType
kwargs: Dict[str, Any]
default: Dict[str, Any]
# noinspection PyTypeChecker
def __new__(
cls,
class_type: ToBuildType,
kwargs: Optional[Dict[str, Any]] = None,
default: Optional[Dict[str, Any]] = None,
):
"""Create a new Builder.
For parameter descriptions see the class documentation. Note
that `kwargs` and `default` can be None in which case they are
set to be empty dictionaries.
"""
self = tuple.__new__(
cls,
(
class_type,
kwargs if kwargs is not None else {},
default if default is not None else {},
),
)
self.class_type = class_type
self.kwargs = self[1]
self.default = self[2]
return self
def __repr__(self) -> str:
return (
f"Group(class_type={self.class_type},"
f" kwargs={self.kwargs},"
f" default={self.default})"
)
def __call__(self, **kwargs) -> ToBuildType:
"""Build and return a new class.
# Parameters
kwargs : additional keyword arguments to use when instantiating
the object. These overwrite all arguments already in the `self.kwargs`
and `self.default` attributes.
# Returns
Class of type `self.class_type` with parameters
taken from `self.default`, `self.kwargs`, and
any keyword arguments additionally passed to `__call__`.
"""
allkwargs = copy.deepcopy(self.default)
recursive_update(allkwargs, self.kwargs)
recursive_update(allkwargs, kwargs)
return cast(Callable, self.class_type)(**allkwargs)
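# --- Editor's usage sketch (illustrative, not part of the original file; the learning
# rates below are arbitrary assumptions). A `Builder` defers instantiation, and
# call-time keyword arguments override the stored `kwargs`/`default` entries.
def _example_optimizer_builder() -> optim.Optimizer:
    adam_builder = Builder(optim.Adam, kwargs=dict(lr=1e-4))
    # The `lr` passed here overrides the `lr` stored above.
    return adam_builder(params=[torch.zeros(3, requires_grad=True)], lr=3e-4)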
class ScalarMeanTracker(object):
"""Track a collection `scalar key -> mean` pairs."""
def __init__(self) -> None:
self._sums: Dict[str, float] = OrderedDict()
self._counts: Dict[str, int] = OrderedDict()
def add_scalars(
self, scalars: Dict[str, Union[float, int]], n: Union[int, Dict[str, int]] = 1
) -> None:
"""Add additional scalars to track.
# Parameters
scalars : A dictionary of `scalar key -> value` pairs.
"""
ndict = cast(
Dict[str, int], (n if isinstance(n, Dict) else defaultdict(lambda: n)) # type: ignore
)
for k in scalars:
if k not in self._sums:
self._sums[k] = ndict[k] * scalars[k]
self._counts[k] = ndict[k]
else:
self._sums[k] += ndict[k] * scalars[k]
self._counts[k] += ndict[k]
def pop_and_reset(self) -> Dict[str, float]:
"""Return tracked means and reset.
On resetting all previously tracked values are discarded.
# Returns
A dictionary of `scalar key -> current mean` pairs corresponding to those
values added with `add_scalars`.
"""
means = OrderedDict(
[(k, float(self._sums[k] / self._counts[k])) for k in self._sums]
)
self.reset()
return means
def reset(self):
self._sums = OrderedDict()
self._counts = OrderedDict()
def sums(self):
return copy.copy(self._sums)
def counts(self) -> Dict[str, int]:
return copy.copy(self._counts)
def means(self) -> Dict[str, float]:
return OrderedDict(
[(k, float(self._sums[k] / self._counts[k])) for k in self._sums]
)
@property
def empty(self):
assert len(self._sums) == len(
self._counts
), "Mismatched length of _sums {} and _counts {}".format(
len(self._sums), len(self._counts)
)
return len(self._sums) == 0
class LoggingPackage(object):
"""Data package used for logging."""
def __init__(
self,
mode: str,
training_steps: Optional[int],
pipeline_stage: Optional[int] = None,
off_policy_steps: Optional[int] = None,
) -> None:
self.mode = mode
self.training_steps: int = training_steps
self.pipeline_stage = pipeline_stage
self.off_policy_steps: Optional[int] = off_policy_steps
self.metrics_tracker = ScalarMeanTracker()
self.train_info_tracker = ScalarMeanTracker()
self.metric_dicts: List[Any] = []
self.viz_data: Optional[Dict[str, List[Dict[str, Any]]]] = None
self.checkpoint_file_name: Optional[str] = None
self.num_empty_metrics_dicts_added: int = 0
@property
def num_non_empty_metrics_dicts_added(self) -> int:
return len(self.metric_dicts)
@staticmethod
def _metrics_dict_is_empty(
single_task_metrics_dict: Dict[str, Union[float, int]]
) -> bool:
return (
len(single_task_metrics_dict) == 0
or (
len(single_task_metrics_dict) == 1
and "task_info" in single_task_metrics_dict
)
or (
"success" in single_task_metrics_dict
and single_task_metrics_dict["success"] is None
)
)
def add_metrics_dict(
self, single_task_metrics_dict: Dict[str, Union[float, int]]
) -> bool:
if self._metrics_dict_is_empty(single_task_metrics_dict):
self.num_empty_metrics_dicts_added += 1
return False
self.metric_dicts.append(single_task_metrics_dict)
self.metrics_tracker.add_scalars(
{k: v for k, v in single_task_metrics_dict.items() if k != "task_info"}
)
return True
def add_train_info_dict(
self, train_info_dict: Dict[str, Union[int, float]], n: int
):
assert n >= 0
self.train_info_tracker.add_scalars(scalars=train_info_dict, n=n)
class LinearDecay(object):
"""Linearly decay between two values over some number of steps.
Obtain the value corresponding to the `i`-th step by calling
an instance of this class with the value `i`.
# Parameters
steps : The number of steps over which to decay.
startp : The starting value.
endp : The ending value.
"""
def __init__(self, steps: int, startp: float = 1.0, endp: float = 0.0) -> None:
"""Initializer.
See class documentation for parameter definitions.
"""
self.steps = steps
self.startp = startp
self.endp = endp
def __call__(self, epoch: int) -> float:
"""Get the decayed value for `epoch` number of steps.
# Parameters
epoch : The number of steps.
# Returns
Decayed value for `epoch` number of steps.
"""
epoch = max(min(epoch, self.steps), 0)
return self.startp + (self.endp - self.startp) * (epoch / float(self.steps))
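# Editor's example (illustrative, not in the original source):
# >>> decay = LinearDecay(steps=1000, startp=1.0, endp=0.0)
# >>> decay(0), decay(500), decay(2000)
# (1.0, 0.5, 0.0)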
class MultiLinearDecay(object):
"""Container for multiple stages of LinearDecay.
Obtain the value corresponding to the `i`-th step by calling
an instance of this class with the value `i`.
# Parameters
stages: List of `LinearDecay` objects to be sequentially applied
for the number of steps in each stage.
"""
def __init__(self, stages: Sequence[LinearDecay]) -> None:
"""Initializer.
See class documentation for parameter definitions.
"""
self.stages = stages
self.steps = np.cumsum([stage.steps for stage in self.stages])
self.total_steps = self.steps[-1]
self.stage_idx = -1
self.min_steps = 0
self.max_steps = 0
self.stage = None
def __call__(self, epoch: int) -> float:
"""Get the decayed value factor for `epoch` number of steps.
# Parameters
epoch : The number of steps.
# Returns
Decayed value for `epoch` number of steps.
"""
epoch = max(min(epoch, self.total_steps), 0)
while epoch >= self.max_steps and self.max_steps < self.total_steps:
self.stage_idx += 1
assert self.stage_idx < len(self.stages)
self.min_steps = self.max_steps
self.max_steps = self.steps[self.stage_idx]
self.stage = self.stages[self.stage_idx]
return self.stage(epoch - self.min_steps)
# noinspection PyTypeHints,PyUnresolvedReferences
def set_deterministic_cudnn() -> None:
"""Makes cudnn deterministic.
This may slow down computations.
"""
if torch.cuda.is_available():
torch.backends.cudnn.deterministic = True # type: ignore
torch.backends.cudnn.benchmark = False # type: ignore
def set_seed(seed: Optional[int] = None) -> None:
"""Set seeds for multiple (cpu) sources of randomness.
Sets seeds for (cpu) `pytorch`, base `random`, and `numpy`.
# Parameters
seed : The seed to set. If set to None, keep using the current seed.
"""
if seed is None:
return
torch.manual_seed(seed) # seeds the RNG for all devices (CPU and GPUs)
random.seed(seed)
np.random.seed(seed)
class EarlyStoppingCriterion(abc.ABC):
"""Abstract class for class who determines if training should stop early in
a particular pipeline stage."""
@abc.abstractmethod
def __call__(
self, stage_steps: int, total_steps: int, training_metrics: ScalarMeanTracker,
) -> bool:
"""Returns `True` if training should be stopped early.
# Parameters
stage_steps: Total number of steps taken in the current pipeline stage.
total_steps: Total number of steps taken during training so far (includes steps
taken in prior pipeline stages).
        training_metrics: Metrics accumulated over some fixed number of steps during
            training (see the `metric_accumulate_interval` attribute in the
            `TrainingPipeline` class).
"""
raise NotImplementedError
class NeverEarlyStoppingCriterion(EarlyStoppingCriterion):
"""Implementation of `EarlyStoppingCriterion` which never stops early."""
def __call__(
self, stage_steps: int, total_steps: int, training_metrics: ScalarMeanTracker,
) -> bool:
return False
class OffPolicyPipelineComponent(NamedTuple):
"""An off-policy component for a PipeLineStage.
# Attributes
data_iterator_builder: A function to instantiate a Data Iterator (with a __next__(self) method)
loss_names: list of unique names assigned to off-policy losses
updates: number of off-policy updates between on-policy rollout collections
loss_weights : A list of floating point numbers describing the relative weights
applied to the losses referenced by `loss_names`. Should be the same length
as `loss_names`. If this is `None`, all weights will be assumed to be one.
data_iterator_kwargs_generator: Optional generator of keyword arguments for data_iterator_builder (useful for
distributed training. It takes
a `cur_worker` int value,
a `rollouts_per_worker` list of number of samplers per training worker,
and an optional random `seed` shared by all workers, which can be None.
"""
data_iterator_builder: Callable[..., Iterator]
loss_names: List[str]
updates: int
loss_weights: Optional[Sequence[float]] = None
data_iterator_kwargs_generator: Callable[
[int, Sequence[int], Optional[int]], Dict
] = lambda cur_worker, rollouts_per_worker, seed: {}
class TrainingSettings(object):
"""Class defining parameters used for training (within a stage or the
entire pipeline).
# Attributes
num_mini_batch : The number of mini-batches to break a rollout into.
update_repeats : The number of times we will cycle through the mini-batches corresponding
to a single rollout doing gradient updates.
max_grad_norm : The maximum "inf" norm of any gradient step (gradients are clipped to not exceed this).
num_steps : Total number of steps a single agent takes in a rollout.
gamma : Discount factor applied to rewards (should be in [0, 1]).
use_gae : Whether or not to use generalized advantage estimation (GAE).
gae_lambda : The additional parameter used in GAE.
advance_scene_rollout_period: Optional number of rollouts before enforcing an advance scene in all samplers.
save_interval : The frequency with which to save (in total agent steps taken). If `None` then *no*
checkpoints will be saved. Otherwise, in addition to the checkpoints being saved every
`save_interval` steps, a checkpoint will *always* be saved at the end of each pipeline stage.
If `save_interval <= 0` then checkpoints will only be saved at the end of each pipeline stage.
metric_accumulate_interval : The frequency with which training/validation metrics are accumulated
(in total agent steps). Metrics accumulated in an interval are logged (if `should_log` is `True`)
and used by the stage's early stopping criterion (if any).
"""
num_mini_batch: Optional[int]
update_repeats: Optional[int]
max_grad_norm: Optional[float]
num_steps: Optional[int]
gamma: Optional[float]
use_gae: Optional[bool]
gae_lambda: Optional[float]
advance_scene_rollout_period: Optional[int]
save_interval: Optional[int]
metric_accumulate_interval: Optional[int]
# noinspection PyUnresolvedReferences
def __init__(
self,
num_mini_batch: Optional[int] = None,
update_repeats: Optional[int] = None,
max_grad_norm: Optional[float] = None,
num_steps: Optional[int] = None,
gamma: Optional[float] = None,
use_gae: Optional[bool] = None,
gae_lambda: Optional[float] = None,
advance_scene_rollout_period: Optional[int] = None,
save_interval: Optional[int] = None,
metric_accumulate_interval: Optional[int] = None,
**kwargs: Any,
):
all_vars = prepare_locals_for_super(locals(), ignore_kwargs=True)
for key, value in all_vars.items():
setattr(self, key, value)
_TRAINING_SETTINGS_NAMES: List[str] = list(TrainingSettings().__dict__.keys())
class PipelineStage(TrainingSettings):
"""A single stage in a training pipeline, possibly including overrides to
the global `TrainingSettings` in `TrainingPipeline`.
# Attributes
    loss_names : A collection of unique names assigned to losses. These will
        reference the `Loss` objects in a `TrainingPipeline` instance.
    max_stage_steps : Either the total number of steps agents should take in this stage or
        a Callable object (e.g. a function) returning that number.
    loss_weights : A list of floating point numbers describing the relative weights
        applied to the losses referenced by `loss_names`. Should be the same length
        as `loss_names`. If this is `None`, all weights will be assumed to be one.
teacher_forcing : If applicable, defines the probability an agent will take the
expert action (as opposed to its own sampled action) at a given time point.
early_stopping_criterion: An `EarlyStoppingCriterion` object which determines if
training in this stage should be stopped early. If `None` then no early stopping
occurs. If `early_stopping_criterion` is not `None` then we do not guarantee
reproducibility when restarting a model from a checkpoint (as the
`EarlyStoppingCriterion` object may store internal state which is not
saved in the checkpoint). Currently AllenAct only supports using early stopping
criterion when **not** using distributed training.
num_mini_batch : See docs for `TrainingSettings`.
update_repeats : See docs for `TrainingSettings`.
max_grad_norm : See docs for `TrainingSettings`.
num_steps : See docs for `TrainingSettings`.
gamma : See docs for `TrainingSettings`.
use_gae : See docs for `TrainingSettings`.
gae_lambda : See docs for `TrainingSettings`.
advance_scene_rollout_period: See docs for `TrainingSettings`.
save_interval : See docs for `TrainingSettings`.
metric_accumulate_interval : See docs for `TrainingSettings`.
"""
def __init__(
self,
*, # Disables positional arguments. Please provide arguments as keyword arguments.
loss_names: List[str],
max_stage_steps: Union[int, Callable],
loss_weights: Optional[Sequence[float]] = None,
loss_update_repeats: Optional[Sequence[int]] = None,
teacher_forcing: Optional[LinearDecay] = None,
offpolicy_component: Optional[OffPolicyPipelineComponent] = None,
early_stopping_criterion: Optional[EarlyStoppingCriterion] = None,
num_mini_batch: Optional[int] = None,
update_repeats: Optional[int] = None,
max_grad_norm: Optional[float] = None,
num_steps: Optional[int] = None,
gamma: Optional[float] = None,
use_gae: Optional[bool] = None,
gae_lambda: Optional[float] = None,
advance_scene_rollout_period: Optional[int] = None,
save_interval: Optional[int] = None,
metric_accumulate_interval: Optional[int] = None,
):
self._update_repeats: Optional[int] = None
# Populate TrainingSettings members
super().__init__(**prepare_locals_for_super(locals()))
self.loss_names = loss_names
self.max_stage_steps = max_stage_steps
self.loss_weights = loss_weights
self.loss_update_repeats = loss_update_repeats
assert self.loss_weights is None or len(self.loss_weights) == len(
self.loss_names
)
assert self.loss_update_repeats is None or (
len(self.loss_update_repeats) == len(self.loss_names)
and self._update_repeats is None
)
self.teacher_forcing = teacher_forcing
self.offpolicy_component = offpolicy_component
self.early_stopping_criterion = early_stopping_criterion
self.steps_taken_in_stage: int = 0
self.rollout_count = 0
self.early_stopping_criterion_met = False
self.named_losses: Optional[Dict[str, AbstractActorCriticLoss]] = None
self._named_loss_weights: Optional[Dict[str, float]] = None
self._named_loss_update_repeats: Optional[Dict[str, float]] = None
self.offpolicy_memory = Memory()
self.offpolicy_epochs: Optional[int] = None
self.offpolicy_named_losses: Optional[Dict[str, AbstractOffPolicyLoss]] = None
self._offpolicy_named_loss_weights: Optional[Dict[str, float]] = None
self.offpolicy_steps_taken_in_stage: int = 0
@property
def update_repeats(self) -> Optional[int]:
if self._update_repeats is None:
if self.loss_update_repeats is None:
return None
return max(self.loss_update_repeats)
else:
return self._update_repeats
@update_repeats.setter
def update_repeats(self, val: Optional[int]):
self._update_repeats = val
@property
def is_complete(self):
return (
self.early_stopping_criterion_met
or self.steps_taken_in_stage >= self.max_stage_steps
)
@property
def named_loss_update_repeats(self):
if self._named_loss_update_repeats is None:
loss_update_repeats = (
self.loss_update_repeats
if self.loss_update_repeats is not None
else [None] * len(self.loss_names)
)
self._named_loss_update_repeats = {
name: weight
for name, weight in zip(self.loss_names, loss_update_repeats)
}
return self._named_loss_update_repeats
@property
def named_loss_weights(self):
if self._named_loss_weights is None:
loss_weights = (
self.loss_weights
if self.loss_weights is not None
else [1.0] * len(self.loss_names)
)
self._named_loss_weights = {
name: weight for name, weight in zip(self.loss_names, loss_weights)
}
return self._named_loss_weights
@property
def offpolicy_named_loss_weights(self):
if self._offpolicy_named_loss_weights is None:
loss_weights = (
self.offpolicy_component.loss_weights
if self.offpolicy_component.loss_weights is not None
else [1.0] * len(self.offpolicy_component.loss_names)
)
self._offpolicy_named_loss_weights = {
name: weight
for name, weight in zip(
self.offpolicy_component.loss_names, loss_weights
)
}
return self._offpolicy_named_loss_weights
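# --- Editor's usage sketch (an illustrative assumption, not part of the original file). ---
# A single stage training with one named loss for 1M steps; "ppo_loss" is an assumed
# key which must match an entry of `named_losses` in the enclosing `TrainingPipeline`.
def _example_pipeline_stage() -> PipelineStage:
    return PipelineStage(
        loss_names=["ppo_loss"], max_stage_steps=int(1e6), loss_weights=[1.0],
    )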
class TrainingPipeline(TrainingSettings):
"""Class defining the stages (and global training settings) in a training
pipeline.
The training pipeline can be used as an iterator to go through the pipeline
stages in, for instance, a loop.
# Attributes
    named_losses : Dictionary mapping the name of a loss to either an instantiation
        of that loss or a `Builder` that, when called, will return that loss.
pipeline_stages : A list of PipelineStages. Each of these define how the agent
will be trained and are executed sequentially.
optimizer_builder : Builder object to instantiate the optimizer to use during training.
num_mini_batch : See docs for `TrainingSettings`.
update_repeats : See docs for `TrainingSettings`.
max_grad_norm : See docs for `TrainingSettings`.
num_steps : See docs for `TrainingSettings`.
gamma : See docs for `TrainingSettings`.
use_gae : See docs for `TrainingSettings`.
gae_lambda : See docs for `TrainingSettings`.
advance_scene_rollout_period: See docs for `TrainingSettings`.
save_interval : See docs for `TrainingSettings`.
metric_accumulate_interval : See docs for `TrainingSettings`.
should_log: `True` if metrics accumulated during training should be logged to the console as well
as to a tensorboard file.
lr_scheduler_builder : Optional builder object to instantiate the learning rate scheduler used
through the pipeline.
"""
# noinspection PyUnresolvedReferences
def __init__(
self,
named_losses: Dict[str, Union[Loss, Builder[Loss]]],
pipeline_stages: List[PipelineStage],
optimizer_builder: Builder[optim.Optimizer], # type: ignore
num_mini_batch: int,
update_repeats: Optional[int],
max_grad_norm: float,
num_steps: int,
gamma: float,
use_gae: bool,
gae_lambda: float,
advance_scene_rollout_period: Optional[int],
save_interval: Optional[int],
metric_accumulate_interval: int,
should_log: bool = True,
lr_scheduler_builder: Optional[Builder[optim.lr_scheduler._LRScheduler]] = None, # type: ignore
):
"""Initializer.
See class docstring for parameter definitions.
"""
all_vars = prepare_locals_for_super(locals())
# Populate TrainingSettings members
super().__init__(**all_vars)
self.optimizer_builder = optimizer_builder
self.lr_scheduler_builder = lr_scheduler_builder
self.named_losses = named_losses
self.should_log = should_log
self.pipeline_stages = pipeline_stages
if len(self.pipeline_stages) > len(set(id(ps) for ps in pipeline_stages)):
raise RuntimeError(
"Duplicate `PipelineStage` object instances found in the pipeline stages input"
" to `TrainingPipeline`. `PipelineStage` objects are not immutable, if you'd"
" like to have multiple pipeline stages of the same type, please instantiate"
" multiple separate instances."
)
self._current_stage: Optional[PipelineStage] = None
for sit, stage in enumerate(self.pipeline_stages):
# Forward all global `TrainingSettings` to all `PipelineStage`s unless overridden:
for var in _TRAINING_SETTINGS_NAMES:
if getattr(stage, var) is None:
setattr(stage, var, getattr(self, var))
assert (
stage.num_steps <= self.num_steps
), f"Stage {sit} has `num_steps` {stage.num_steps} > {self.num_steps} in pipeline."
self.rollout_count = 0
self.off_policy_epochs = None
self._refresh_current_stage(force_stage_search_from_start=True)
@property
def total_steps(self) -> int:
return sum(ps.steps_taken_in_stage for ps in self.pipeline_stages)
@property
def total_offpolicy_steps(self) -> int:
return sum(ps.offpolicy_steps_taken_in_stage for ps in self.pipeline_stages)
def _refresh_current_stage(
self, force_stage_search_from_start: bool = False
) -> Optional[PipelineStage]:
if force_stage_search_from_start:
self._current_stage = None
if self._current_stage is None or self._current_stage.is_complete:
if self._current_stage is None:
start_index = 0
else:
start_index = self.pipeline_stages.index(self._current_stage) + 1
self._current_stage = None
for ps in self.pipeline_stages[start_index:]:
if not ps.is_complete:
self._current_stage = ps
break
return self._current_stage
@property
def current_stage(self) -> Optional[PipelineStage]:
return self._current_stage
@property
def current_stage_index(self) -> Optional[int]:
if self.current_stage is None:
return None
return self.pipeline_stages.index(self.current_stage)
def before_rollout(self, train_metrics: Optional[ScalarMeanTracker] = None) -> bool:
if (
train_metrics is not None
and self.current_stage.early_stopping_criterion is not None
):
self.current_stage.early_stopping_criterion_met = self.current_stage.early_stopping_criterion(
stage_steps=self.current_stage.steps_taken_in_stage,
total_steps=self.total_steps,
training_metrics=train_metrics,
)
if self.current_stage.early_stopping_criterion_met:
get_logger().debug(
f"Early stopping criterion met after {self.total_steps} total steps "
f"({self.current_stage.steps_taken_in_stage} in current stage, stage index {self.current_stage_index})."
)
return self.current_stage is not self._refresh_current_stage(
force_stage_search_from_start=False
)
def restart_pipeline(self):
for ps in self.pipeline_stages:
ps.steps_taken_in_stage = 0
ps.early_stopping_criterion_met = False
self._current_stage = None
self._refresh_current_stage(force_stage_search_from_start=True)
def state_dict(self):
return dict(
stage_info_list=[
{
"early_stopping_criterion_met": ps.early_stopping_criterion_met,
"steps_taken_in_stage": ps.steps_taken_in_stage,
"offpolicy_steps_taken_in_stage": ps.offpolicy_steps_taken_in_stage,
}
for ps in self.pipeline_stages
],
rollout_count=self.rollout_count,
off_policy_epochs=self.off_policy_epochs,
)
def load_state_dict(self, state_dict: Dict[str, Any]):
for ps, stage_info in zip(self.pipeline_stages, state_dict["stage_info_list"]):
ps.early_stopping_criterion_met = stage_info["early_stopping_criterion_met"]
ps.steps_taken_in_stage = stage_info["steps_taken_in_stage"]
ps.offpolicy_steps_taken_in_stage = stage_info.get(
"offpolicy_steps_taken_in_stage", 0
)
self.rollout_count = state_dict["rollout_count"]
self.off_policy_epochs = state_dict.get("off_policy_epochs", 0)
self._refresh_current_stage(force_stage_search_from_start=True)
@property
def current_stage_losses(self) -> Dict[str, AbstractActorCriticLoss]:
if self.current_stage.named_losses is None:
for loss_name in self.current_stage.loss_names:
if isinstance(self.named_losses[loss_name], Builder):
self.named_losses[loss_name] = cast(
Builder["AbstractActorCriticLoss"],
self.named_losses[loss_name],
)()
self.current_stage.named_losses = {
loss_name: cast(AbstractActorCriticLoss, self.named_losses[loss_name])
for loss_name in self.current_stage.loss_names
}
return self.current_stage.named_losses
@property
def current_stage_offpolicy_losses(self) -> Dict[str, AbstractOffPolicyLoss]:
if self.current_stage.offpolicy_named_losses is None:
for loss_name in self.current_stage.offpolicy_component.loss_names:
if isinstance(self.named_losses[loss_name], Builder):
self.named_losses[loss_name] = cast(
Builder["AbstractOffPolicyLoss"], self.named_losses[loss_name],
)()
self.current_stage.offpolicy_named_losses = {
loss_name: cast(AbstractOffPolicyLoss, self.named_losses[loss_name])
for loss_name in self.current_stage.offpolicy_component.loss_names
}
return self.current_stage.offpolicy_named_losses
| ask4help-main | allenact/utils/experiment_utils.py |
# Original work Copyright (c) 2016 OpenAI (https://openai.com).
# Modified work Copyright (c) Allen Institute for AI
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Union, Tuple, List, cast, Iterable, Callable
from collections import OrderedDict
import numpy as np
import torch
from gym import spaces as gym
ActionType = Union[torch.Tensor, OrderedDict, Tuple, int]
def flatdim(space):
"""Return the number of dimensions a flattened equivalent of this space
would have.
Accepts a space and returns an integer. Raises
``NotImplementedError`` if the space is not defined in
``gym.spaces``.
"""
if isinstance(space, gym.Box):
return int(np.prod(space.shape))
elif isinstance(space, gym.Discrete):
return 1 # we do not expand to one-hot
elif isinstance(space, gym.Tuple):
return int(sum([flatdim(s) for s in space.spaces]))
elif isinstance(space, gym.Dict):
return int(sum([flatdim(s) for s in space.spaces.values()]))
elif isinstance(space, gym.MultiBinary):
return int(space.n)
elif isinstance(space, gym.MultiDiscrete):
return int(np.prod(space.shape))
else:
raise NotImplementedError
def flatten(space, torch_x):
"""Flatten data points from a space."""
if isinstance(space, gym.Box):
if len(space.shape) > 0:
return torch_x.view(torch_x.shape[: -len(space.shape)] + (-1,))
else:
return torch_x.view(torch_x.shape + (-1,))
elif isinstance(space, gym.Discrete):
# Assume tensor input does NOT contain a dimension for action
if isinstance(torch_x, torch.Tensor):
return torch_x.unsqueeze(-1)
else:
return torch.tensor(torch_x).view(1)
elif isinstance(space, gym.Tuple):
return torch.cat(
[flatten(s, x_part) for x_part, s in zip(torch_x, space.spaces)], dim=-1
)
elif isinstance(space, gym.Dict):
return torch.cat(
[flatten(s, torch_x[key]) for key, s in space.spaces.items()], dim=-1
)
elif isinstance(space, gym.MultiBinary):
return torch_x.view(torch_x.shape[: -len(space.shape)] + (-1,))
elif isinstance(space, gym.MultiDiscrete):
return torch_x.view(torch_x.shape[: -len(space.shape)] + (-1,))
else:
raise NotImplementedError
def unflatten(space, torch_x):
"""Unflatten a concatenated data points tensor from a space."""
if isinstance(space, gym.Box):
return torch_x.view(torch_x.shape[:-1] + space.shape).float()
elif isinstance(space, gym.Discrete):
res = torch_x.view(torch_x.shape[:-1] + space.shape).long()
return res if len(res.shape) > 0 else res.item()
elif isinstance(space, gym.Tuple):
dims = [flatdim(s) for s in space.spaces]
list_flattened = torch.split(torch_x, dims, dim=-1)
list_unflattened = [
unflatten(s, flattened)
for flattened, s in zip(list_flattened, space.spaces)
]
return tuple(list_unflattened)
elif isinstance(space, gym.Dict):
dims = [flatdim(s) for s in space.spaces.values()]
list_flattened = torch.split(torch_x, dims, dim=-1)
list_unflattened = [
(key, unflatten(s, flattened))
for flattened, (key, s) in zip(list_flattened, space.spaces.items())
]
return OrderedDict(list_unflattened)
elif isinstance(space, gym.MultiBinary):
return torch_x.view(torch_x.shape[:-1] + space.shape).byte()
elif isinstance(space, gym.MultiDiscrete):
return torch_x.view(torch_x.shape[:-1] + space.shape).long()
else:
raise NotImplementedError
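# --- Editor's usage sketch (illustrative, not part of the original file). Note that
# `gym` is aliased to `gym.spaces` at the top of this module. ---
def _example_dict_space_roundtrip() -> None:
    space = gym.Dict(
        OrderedDict(
            pos=gym.Box(low=0.0, high=1.0, shape=(3,)),
            vel=gym.Box(low=-1.0, high=1.0, shape=(2,)),
        )
    )
    pt = torch_point(space, space.sample())
    flat = flatten(space, pt)
    assert flat.shape == (flatdim(space),)  # == (5,)
    back = unflatten(space, flat)
    assert set(back.keys()) == {"pos", "vel"}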
def torch_point(space, np_x):
"""Convert numpy space point into torch."""
if isinstance(space, gym.Box):
return torch.from_numpy(np_x)
elif isinstance(space, gym.Discrete):
return np_x
elif isinstance(space, gym.Tuple):
return tuple([torch_point(s, x_part) for x_part, s in zip(np_x, space.spaces)])
elif isinstance(space, gym.Dict):
return OrderedDict(
[(key, torch_point(s, np_x[key])) for key, s in space.spaces.items()]
)
elif isinstance(space, gym.MultiBinary):
return torch.from_numpy(np_x)
elif isinstance(space, gym.MultiDiscrete):
return torch.from_numpy(np.asarray(np_x))
else:
raise NotImplementedError
def numpy_point(
space: gym.Space, torch_x: Union[int, torch.Tensor, OrderedDict, Tuple]
):
"""Convert torch space point into numpy."""
if isinstance(space, gym.Box):
return cast(torch.Tensor, torch_x).cpu().numpy()
elif isinstance(space, gym.Discrete):
return torch_x
elif isinstance(space, gym.Tuple):
return tuple(
[
numpy_point(s, x_part)
for x_part, s in zip(cast(Iterable, torch_x), space.spaces)
]
)
elif isinstance(space, gym.Dict):
return OrderedDict(
[
(key, numpy_point(s, cast(torch.Tensor, torch_x)[key]))
for key, s in space.spaces.items()
]
)
elif isinstance(space, gym.MultiBinary):
return cast(torch.Tensor, torch_x).cpu().numpy()
elif isinstance(space, gym.MultiDiscrete):
return cast(torch.Tensor, torch_x).cpu().numpy()
else:
raise NotImplementedError
def flatten_space(space: gym.Space):
if isinstance(space, gym.Box):
return gym.Box(space.low.flatten(), space.high.flatten())
if isinstance(space, gym.Discrete):
return gym.Box(low=0, high=space.n, shape=(1,))
if isinstance(space, gym.Tuple):
space = [flatten_space(s) for s in space.spaces]
return gym.Box(
low=np.concatenate([s.low for s in space]),
high=np.concatenate([s.high for s in space]),
)
if isinstance(space, gym.Dict):
space = [flatten_space(s) for s in space.spaces.values()]
return gym.Box(
low=np.concatenate([s.low for s in space]),
high=np.concatenate([s.high for s in space]),
)
if isinstance(space, gym.MultiBinary):
return gym.Box(low=0, high=1, shape=(space.n,))
if isinstance(space, gym.MultiDiscrete):
return gym.Box(low=np.zeros_like(space.nvec), high=space.nvec,)
raise NotImplementedError
def policy_space(
action_space: gym.Space, box_space_to_policy: Callable[[gym.Box], gym.Space] = None,
) -> gym.Space:
if isinstance(action_space, gym.Box):
if box_space_to_policy is None:
# policy = mean (default)
return action_space
else:
return box_space_to_policy(action_space)
if isinstance(action_space, gym.Discrete):
# policy = prob of each option
return gym.Box(
low=np.float32(0.0), high=np.float32(1.0), shape=(action_space.n,)
)
if isinstance(action_space, gym.Tuple):
# policy = tuple of sub-policies
spaces = [policy_space(s, box_space_to_policy) for s in action_space.spaces]
return gym.Tuple(spaces)
if isinstance(action_space, gym.Dict):
# policy = dict of sub-policies
spaces = [
(name, policy_space(s, box_space_to_policy),)
for name, s in action_space.spaces.items()
]
return gym.Dict(spaces)
if isinstance(action_space, gym.MultiBinary):
# policy = prob of 0, 1 in each entry
return gym.Box(
low=np.float32(0.0), high=np.float32(1.0), shape=(action_space.n, 2)
)
if isinstance(action_space, gym.MultiDiscrete):
# policy = Tuple of prob of each option for each discrete
return gym.Tuple(
[
gym.Box(low=np.float32(0.0), high=np.float32(1.0), shape=(n,))
for n in action_space.nvec
]
)
raise NotImplementedError
def action_list(
action_space: gym.Space, flat_actions: torch.Tensor
) -> List[ActionType]:
"""Convert flattened actions to list.
Assumes `flat_actions` are of shape `[step, sampler, flatdim]`.
"""
def tolist(action):
if isinstance(action, torch.Tensor):
return action.tolist()
if isinstance(action, Tuple):
actions = [tolist(ac) for ac in action]
return tuple(actions)
if isinstance(action, OrderedDict):
actions = [(key, tolist(action[key])) for key in action.keys()]
return OrderedDict(actions)
# else, it's a scalar
return action
return [tolist(unflatten(action_space, ac)) for ac in flat_actions[0]]
| ask4help-main | allenact/utils/spaces_utils.py |
import io
import logging
import os
import socket
import sys
from contextlib import closing
from typing import cast, Optional, Tuple
from torch import multiprocessing as mp
from allenact._constants import ALLENACT_INSTALL_DIR
HUMAN_LOG_LEVELS: Tuple[str, ...] = ("debug", "info", "warning", "error", "none")
"""
Available log levels: "debug", "info", "warning", "error", "none"
"""
_LOGGER: Optional[logging.Logger] = None
class ColoredFormatter(logging.Formatter):
"""Format a log string with colors.
This implementation taken (with modifications) from
https://stackoverflow.com/a/384125.
"""
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
COLORS = {
"WARNING": YELLOW,
"INFO": GREEN,
"DEBUG": BLUE,
"ERROR": RED,
"CRITICAL": MAGENTA,
}
def __init__(self, fmt: str, datefmt: Optional[str] = None, use_color=True):
super().__init__(fmt=fmt, datefmt=datefmt)
self.use_color = use_color
def format(self, record: logging.LogRecord) -> str:
levelname = record.levelname
if self.use_color and levelname in self.COLORS:
levelname_with_color = (
self.COLOR_SEQ % (30 + self.COLORS[levelname])
+ levelname
+ self.RESET_SEQ
)
record.levelname = levelname_with_color
            formatted_record = logging.Formatter.format(self, record)
            record.levelname = (
                levelname  # Resetting levelname as `record` might be used elsewhere
            )
            return formatted_record
else:
return logging.Formatter.format(self, record)
def get_logger() -> logging.Logger:
"""Get a `logging.Logger` to stderr. It can be called whenever we wish to
log some message. Messages can get mixed-up
(https://docs.python.org/3.6/library/multiprocessing.html#logging), but it
works well in most cases.
# Returns
logger: the `logging.Logger` object
"""
if _new_logger():
if mp.current_process().name == "MainProcess":
_new_logger(logging.DEBUG)
_set_log_formatter()
return _LOGGER
def _human_log_level_to_int(human_log_level):
human_log_level = human_log_level.lower().strip()
assert human_log_level in HUMAN_LOG_LEVELS, "unknown human_log_level {}".format(
human_log_level
)
if human_log_level == "debug":
log_level = logging.DEBUG
elif human_log_level == "info":
log_level = logging.INFO
elif human_log_level == "warning":
log_level = logging.WARNING
elif human_log_level == "error":
log_level = logging.ERROR
elif human_log_level == "none":
log_level = logging.CRITICAL + 1
else:
raise NotImplementedError(f"Unknown log level {human_log_level}.")
return log_level
def init_logging(human_log_level: str = "info") -> None:
"""Init the `logging.Logger`.
It should be called only once in the app (e.g. in `main`). It sets
the log_level to one of `HUMAN_LOG_LEVELS`. And sets up a handler
for stderr. The logging level is propagated to all subprocesses.
"""
_new_logger(_human_log_level_to_int(human_log_level))
_set_log_formatter()
def update_log_level(logger, human_log_level: str):
logger.setLevel(_human_log_level_to_int(human_log_level))
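# --- Editor's usage note (illustrative, not part of the original file). ---
# Typical entry-point setup: initialize once, then fetch the shared logger anywhere:
# >>> init_logging("info")
# >>> get_logger().info("logging initialized")
# >>> update_log_level(get_logger(), "warning")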
def find_free_port(address: str = "127.0.0.1") -> int:
"""Finds a free port for distributed training.
# Returns
port: port number that can be used to listen
"""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind((address, 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
port = s.getsockname()[1]
return port
def _new_logger(log_level: Optional[int] = None):
global _LOGGER
if _LOGGER is None:
_LOGGER = mp.get_logger()
if log_level is not None:
get_logger().setLevel(log_level)
return True
if log_level is not None:
get_logger().setLevel(log_level)
return False
def _set_log_formatter():
assert _LOGGER is not None
if _LOGGER.getEffectiveLevel() <= logging.CRITICAL:
add_style_to_logs = True # In case someone wants to turn this off manually.
if add_style_to_logs:
default_format = "$BOLD[%(asctime)s$RESET %(levelname)s$BOLD:]$RESET %(message)s\t[%(filename)s: %(lineno)d]"
default_format = default_format.replace(
"$BOLD", ColoredFormatter.BOLD_SEQ
).replace("$RESET", ColoredFormatter.RESET_SEQ)
else:
default_format = (
"%(asctime)s %(levelname)s: %(message)s\t[%(filename)s: %(lineno)d]"
)
short_date_format = "%m/%d %H:%M:%S"
log_format = "default"
if log_format == "default":
fmt = default_format
datefmt = short_date_format
elif log_format == "defaultMilliseconds":
fmt = default_format
datefmt = None
else:
fmt = log_format
datefmt = short_date_format
if add_style_to_logs:
formatter = ColoredFormatter(fmt=fmt, datefmt=datefmt,)
else:
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
ch = logging.StreamHandler()
ch.setFormatter(formatter)
ch.addFilter(cast(logging.Filter, _AllenActMessageFilter(os.getcwd())))
_LOGGER.addHandler(ch)
sys.excepthook = _excepthook
sys.stdout = cast(io.TextIOWrapper, _StreamToLogger())
return _LOGGER
class _StreamToLogger:
def __init__(self):
self.linebuf = ""
def write(self, buf):
temp_linebuf = self.linebuf + buf
self.linebuf = ""
for line in temp_linebuf.splitlines(True):
if line[-1] == "\n":
cast(logging.Logger, _LOGGER).info(line.rstrip())
else:
self.linebuf += line
def flush(self):
if self.linebuf != "":
cast(logging.Logger, _LOGGER).info(self.linebuf.rstrip())
self.linebuf = ""
def _excepthook(*args):
# noinspection PyTypeChecker
get_logger().error(msg="Uncaught exception:", exc_info=args)
class _AllenActMessageFilter:
def __init__(self, working_directory: str):
self.working_directory = working_directory
# noinspection PyMethodMayBeStatic
def filter(self, record):
# TODO: Does this work when pip-installing AllenAct?
return int(
self.working_directory in record.pathname
or ALLENACT_INSTALL_DIR in record.pathname
or "main" in record.pathname
)
class ImportChecker:
def __init__(self, msg=None):
self.msg = msg
def __enter__(self):
pass
def __exit__(self, exc_type, value, traceback):
if exc_type == ModuleNotFoundError and self.msg is not None:
value.msg += self.msg
return exc_type is None
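# Illustrative usage sketch (not part of the original code): `ImportChecker` appends `msg`
# to a `ModuleNotFoundError` raised inside the `with` block before letting it propagate.
# The package name below is a hypothetical placeholder.
#     with ImportChecker("\n\nPlease install `some_extra` (`pip install some_extra`)."):
#         import some_extra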
| ask4help-main | allenact/utils/system.py |
from typing import List, Any
import torch
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
from torchvision.models.detection.faster_rcnn import FasterRCNN
# noinspection PyProtectedMember
from torchvision.models.detection.faster_rcnn import model_urls
from torchvision.models.detection.rpn import AnchorGenerator
from torchvision.models.utils import load_state_dict_from_url
class CachelessAnchorGenerator(AnchorGenerator):
def forward(self, image_list: Any, feature_maps: Any):
        grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps]
image_size = image_list.tensors.shape[-2:]
strides = [
[int(image_size[0] / g[0]), int(image_size[1] / g[1])] for g in grid_sizes
]
dtype, device = feature_maps[0].dtype, feature_maps[0].device
self.set_cell_anchors(dtype, device)
anchors_over_all_feature_maps = self.grid_anchors(grid_sizes, strides)
anchors = torch.jit.annotate(List[List[torch.Tensor]], []) # type:ignore
for i, (image_height, image_width) in enumerate(image_list.image_sizes):
anchors_in_image = []
for anchors_per_feature_map in anchors_over_all_feature_maps:
anchors_in_image.append(anchors_per_feature_map)
anchors.append(anchors_in_image)
anchors = [torch.cat(anchors_per_image) for anchors_per_image in anchors]
return anchors
def fasterrcnn_resnet50_fpn(
pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, **kwargs
):
if pretrained:
# no need to download the backbone if pretrained is set
pretrained_backbone = False
backbone = resnet_fpn_backbone("resnet50", pretrained_backbone)
anchor_sizes = ((32,), (64,), (128,), (256,), (512,))
aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
rpn_anchor_generator = CachelessAnchorGenerator(anchor_sizes, aspect_ratios)
model = FasterRCNN(
backbone, num_classes, rpn_anchor_generator=rpn_anchor_generator, **kwargs
)
# min_size = 300
# max_size = 400
# anchor_sizes = ((12,), (24,), (48,), (96,), (192,))
# aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
# rpn_anchor_generator = CachelessAnchorGenerator(
# anchor_sizes, aspect_ratios
# )
# model = FasterRCNN(backbone, num_classes, rpn_anchor_generator=rpn_anchor_generator, min_size=min_size, max_size=max_size, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(
model_urls["fasterrcnn_resnet50_fpn_coco"], progress=progress
)
model.load_state_dict(state_dict)
return model
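# Illustrative usage sketch (not part of the original code): the factory above mirrors
# torchvision's `fasterrcnn_resnet50_fpn` but swaps in `CachelessAnchorGenerator`. The
# input size is arbitrary; `pretrained=True` downloads COCO weights.
#     model = fasterrcnn_resnet50_fpn(pretrained=True)
#     model.eval()
#     with torch.no_grad():
#         predictions = model([torch.rand(3, 300, 400)])  # list of dicts with "boxes"/"labels"/"scores"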
| ask4help-main | allenact/utils/cacheless_frcnn.py |
ask4help-main | allenact/utils/__init__.py |
|
import copy
import functools
import hashlib
import inspect
import json
import math
import os
import random
import subprocess
import urllib
import urllib.request
from collections import Counter
from contextlib import contextmanager
from typing import Sequence, List, Optional, Tuple, Hashable
import filelock
import numpy as np
import torch
from scipy.special import comb
from allenact.utils.system import get_logger
TABLEAU10_RGB = (
(31, 119, 180),
(255, 127, 14),
(44, 160, 44),
(214, 39, 40),
(148, 103, 189),
(140, 86, 75),
(227, 119, 194),
(127, 127, 127),
(188, 189, 34),
(23, 190, 207),
)
def multiprocessing_safe_download_file_from_url(url: str, save_path: str):
with filelock.FileLock(save_path + ".lock"):
if not os.path.isfile(save_path):
get_logger().info(f"Downloading file from {url} to {save_path}.")
urllib.request.urlretrieve(
url, save_path,
)
else:
get_logger().debug(f"{save_path} exists - skipping download.")
def experimental_api(to_decorate):
"""Decorate a function to note that it is part of the experimental API."""
have_warned = [False]
name = f"{inspect.getmodule(to_decorate).__name__}.{to_decorate.__qualname__}"
if to_decorate.__name__ == "__init__":
name = name.replace(".__init__", "")
@functools.wraps(to_decorate)
def decorated(*args, **kwargs):
if not have_warned[0]:
get_logger().warning(
f"'{name}' is a part of AllenAct's experimental API."
f" This means: (1) there are likely bugs present and (2)"
f" we may remove/change this functionality without warning."
f" USE AT YOUR OWN RISK.",
)
have_warned[0] = True
return to_decorate(*args, **kwargs)
return decorated
def deprecated(to_decorate):
"""Decorate a function to note that it has been deprecated."""
have_warned = [False]
name = f"{inspect.getmodule(to_decorate).__name__}.{to_decorate.__qualname__}"
if to_decorate.__name__ == "__init__":
name = name.replace(".__init__", "")
@functools.wraps(to_decorate)
def decorated(*args, **kwargs):
if not have_warned[0]:
get_logger().warning(
f"'{name}' has been deprecated and will soon be removed from AllenAct's API."
f" Please discontinue your use of this function.",
)
have_warned[0] = True
return to_decorate(*args, **kwargs)
return decorated
class NumpyJSONEncoder(json.JSONEncoder):
"""JSON encoder for numpy objects.
Based off the stackoverflow answer by Jie Yang here: https://stackoverflow.com/a/57915246.
The license for this code is [BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/).
"""
def default(self, obj):
if isinstance(obj, np.void):
return None
        elif isinstance(obj, np.bool_):  # `np.bool` is deprecated/removed in newer numpy
            return bool(obj)
elif isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NumpyJSONEncoder, self).default(obj)
@contextmanager
def tensor_print_options(**print_opts):
torch_print_opts = copy.deepcopy(torch._tensor_str.PRINT_OPTS)
np_print_opts = np.get_printoptions()
try:
torch.set_printoptions(**print_opts)
np.set_printoptions(**print_opts)
yield None
finally:
torch.set_printoptions(**{k: getattr(torch_print_opts, k) for k in print_opts})
np.set_printoptions(**np_print_opts)
def md5_hash_str_as_int(to_hash: str):
return int(hashlib.md5(to_hash.encode()).hexdigest(), 16,)
def get_git_diff_of_project() -> Tuple[str, str]:
short_sha = (
subprocess.check_output(["git", "describe", "--always"]).decode("utf-8").strip()
)
diff = subprocess.check_output(["git", "diff", short_sha]).decode("utf-8")
return short_sha, diff
class HashableDict(dict):
"""A dictionary which is hashable so long as all of its values are
hashable.
A HashableDict object will allow setting / deleting of items until
the first time that `__hash__()` is called on it after which
attempts to set or delete items will throw `RuntimeError`
exceptions.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._hash_has_been_called = False
def __key(self):
return tuple((k, self[k]) for k in sorted(self))
def __hash__(self):
self._hash_has_been_called = True
return hash(self.__key())
def __eq__(self, other):
return self.__key() == other.__key()
def __setitem__(self, *args, **kwargs):
if not self._hash_has_been_called:
return super(HashableDict, self).__setitem__(*args, **kwargs)
raise RuntimeError("Cannot set item in HashableDict after having called hash.")
def __delitem__(self, *args, **kwargs):
if not self._hash_has_been_called:
return super(HashableDict, self).__delitem__(*args, **kwargs)
raise RuntimeError(
"Cannot delete item in HashableDict after having called hash."
)
def partition_sequence(seq: Sequence, parts: int) -> List:
assert 0 < parts, f"parts [{parts}] must be greater > 0"
assert parts <= len(seq), f"parts [{parts}] > len(seq) [{len(seq)}]"
n = len(seq)
quotient = n // parts
remainder = n % parts
counts = [quotient + (i < remainder) for i in range(parts)]
inds = np.cumsum([0] + counts)
return [seq[ind0:ind1] for ind0, ind1 in zip(inds[:-1], inds[1:])]
def uninterleave(seq: Sequence, parts: int) -> List:
assert 0 < parts <= len(seq)
n = len(seq)
quotient = n // parts
return [
[seq[i + j * parts] for j in range(quotient + 1) if i + j * parts < len(seq)]
for i in range(parts)
]
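# Illustrative examples (not part of the original code) of the two partitioning helpers:
# `partition_sequence` keeps contiguous chunks while `uninterleave` deals elements out
# round-robin.
#     partition_sequence(list(range(5)), 2)  # -> [[0, 1, 2], [3, 4]]
#     uninterleave(list(range(5)), 2)        # -> [[0, 2, 4], [1, 3]]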
@functools.lru_cache(10000)
def cached_comb(n: int, m: int):
return comb(n, m)
def expected_max_of_subset_statistic(vals: List[float], m: int):
n = len(vals)
assert m <= n
vals_and_counts = list(Counter([round(val, 8) for val in vals]).items())
vals_and_counts.sort()
count_so_far = 0
logdenom = math.log(comb(n, m))
expected_max = 0.0
    for val, num_occurrences_of_val in vals_and_counts:
        count_so_far += num_occurrences_of_val
        if count_so_far < m:
            continue
        count_where_max = 0
        for i in range(1, min(num_occurrences_of_val, m) + 1):
            count_where_max += cached_comb(num_occurrences_of_val, i) * cached_comb(
                count_so_far - num_occurrences_of_val, m - i
            )
expected_max += val * math.exp(math.log(count_where_max) - logdenom)
return expected_max
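# Note (added comment): `expected_max_of_subset_statistic` returns the exact expectation of
# `max(S)` where `S` is a uniformly random size-`m` subset of `vals` drawn without
# replacement; `bootstrap_max_of_subset_statistic` below estimates the sampling variability
# of that quantity by recomputing it on bootstrap resamples of `vals`.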
def bootstrap_max_of_subset_statistic(
vals: List[float], m: int, reps=1000, seed: Optional[int] = None
):
rstate = None
if seed is not None:
rstate = random.getstate()
random.seed(seed)
results = []
for _ in range(reps):
results.append(
expected_max_of_subset_statistic(random.choices(vals, k=len(vals)), m)
)
if seed is not None:
random.setstate(rstate)
return results
def rand_float(low: float, high: float, shape):
assert low <= high
try:
return np.random.rand(*shape) * (high - low) + low
except TypeError as _:
return np.random.rand(shape) * (high - low) + low
def all_unique(seq: Sequence[Hashable]):
seen = set()
for s in seq:
if s in seen:
return False
seen.add(s)
return True
def all_equal(s: Sequence):
if len(s) <= 1:
return True
return all(s[0] == ss for ss in s[1:])
def prepare_locals_for_super(
local_vars, args_name="args", kwargs_name="kwargs", ignore_kwargs=False
):
assert (
args_name not in local_vars
), "`prepare_locals_for_super` does not support {}.".format(args_name)
new_locals = {k: v for k, v in local_vars.items() if k != "self" and "__" not in k}
if kwargs_name in new_locals:
if ignore_kwargs:
new_locals.pop(kwargs_name)
else:
kwargs = new_locals.pop(kwargs_name)
kwargs.update(new_locals)
new_locals = kwargs
return new_locals
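# Illustrative usage sketch (not part of the original code): `prepare_locals_for_super` is
# typically called at the top of an `__init__` to forward all constructor arguments to the
# parent class without re-listing them. The class names below are hypothetical.
#     class ChildConfig(ParentConfig):
#         def __init__(self, lr: float = 1e-3, **kwargs):
#             super().__init__(**prepare_locals_for_super(locals()))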
def partition_limits(num_items: int, num_parts: int):
return (
np.round(np.linspace(0, num_items, num_parts + 1, endpoint=True))
.astype(np.int32)
.tolist()
)
| ask4help-main | allenact/utils/misc_utils.py |
from typing import Sequence, Any
import numpy as np
from matplotlib import pyplot as plt, markers
from matplotlib.collections import LineCollection
from allenact.utils.viz_utils import TrajectoryViz
class MultiTrajectoryViz(TrajectoryViz):
def __init__(
self,
path_to_trajectory_prefix: Sequence[str] = ("task_info", "followed_path"),
agent_suffixes: Sequence[str] = ("1", "2"),
label: str = "trajectories",
trajectory_plt_colormaps: Sequence[str] = ("cool", "spring"),
marker_plt_colors: Sequence[Any] = ("blue", "orange"),
axes_equal: bool = True,
**other_base_kwargs,
):
super().__init__(label=label, **other_base_kwargs)
self.path_to_trajectory_prefix = list(path_to_trajectory_prefix)
self.agent_suffixes = list(agent_suffixes)
self.trajectory_plt_colormaps = list(trajectory_plt_colormaps)
self.marker_plt_colors = marker_plt_colors
self.axes_equal = axes_equal
def make_fig(self, episode, episode_id):
# From https://nbviewer.jupyter.org/github/dpsanders/matplotlib-examples/blob/master/colorline.ipynb
def colorline(
x,
y,
z=None,
cmap=plt.get_cmap("cool"),
norm=plt.Normalize(0.0, 1.0),
linewidth=2,
alpha=1.0,
zorder=1,
):
"""Plot a colored line with coordinates x and y.
Optionally specify colors in the array z
Optionally specify a colormap, a norm function and a line width.
"""
def make_segments(x, y):
"""Create list of line segments from x and y coordinates, in
the correct format for LineCollection:
an array of the form numlines x (points per line) x 2
(x and y) array
"""
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
return segments
# Default colors equally spaced on [0,1]:
if z is None:
z = np.linspace(0.0, 1.0, len(x))
# Special case if a single number:
if not hasattr(
z, "__iter__"
): # to check for numerical input -- this is a hack
z = np.array([z])
z = np.asarray(z)
segments = make_segments(x, y)
lc = LineCollection(
segments,
array=z,
cmap=cmap,
norm=norm,
linewidth=linewidth,
alpha=alpha,
zorder=zorder,
)
ax = plt.gca()
ax.add_collection(lc)
return lc
fig, ax = plt.subplots(figsize=self.figsize)
for agent, cmap, marker_color in zip(
self.agent_suffixes, self.trajectory_plt_colormaps, self.marker_plt_colors
):
path = self.path_to_trajectory_prefix[:]
path[-1] = path[-1] + agent
trajectory = self._access(episode, path)
x, y = [], []
for xy in trajectory:
x.append(float(self._access(xy, self.x)))
y.append(float(self._access(xy, self.y)))
colorline(x, y, zorder=1, cmap=cmap)
start_marker = markers.MarkerStyle(marker=self.start_marker_shape)
if self.path_to_rot_degrees is not None:
rot_degrees = float(
self._access(trajectory[0], self.path_to_rot_degrees)
)
if self.adapt_rotation is not None:
rot_degrees = self.adapt_rotation(rot_degrees)
start_marker._transform = start_marker.get_transform().rotate_deg(
rot_degrees
)
ax.scatter(
[x[0]],
[y[0]],
marker=start_marker,
zorder=2,
s=self.start_marker_scale,
color=marker_color,
)
ax.scatter(
[x[-1]], [y[-1]], marker="s", color=marker_color
) # stop (square)
if self.axes_equal:
ax.set_aspect("equal", "box")
ax.set_title(episode_id, fontsize=self.fontsize)
ax.tick_params(axis="x", labelsize=self.fontsize)
ax.tick_params(axis="y", labelsize=self.fontsize)
return fig
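# Illustrative construction sketch (not part of the original code): the defaults above assume
# two agents whose paths are stored under "followed_path1"/"followed_path2" in `task_info`;
# other suffixes and colormaps can be supplied explicitly.
#     viz = MultiTrajectoryViz(agent_suffixes=("1", "2"), trajectory_plt_colormaps=("cool", "spring"))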
| ask4help-main | allenact/utils/multi_agent_viz_utils.py |
import os
from collections import defaultdict
import abc
import json
from typing import (
Dict,
Any,
Union,
Optional,
List,
Tuple,
Sequence,
Callable,
cast,
Set,
)
import sys
import numpy as np
from allenact.utils.experiment_utils import Builder
from allenact.utils.tensor_utils import SummaryWriter, tile_images, process_video
try:
# Tensorflow not installed for testing
from tensorflow.core.util import event_pb2
from tensorflow.python.lib.io import tf_record
_TF_AVAILABLE = True
except ImportError as _:
_TF_AVAILABLE = False
try:
# When debugging we don't want to use the interactive version of matplotlib
# as it causes all sorts of problems.
import pydevd
import matplotlib
matplotlib.use("agg")
except ImportError as _:
pass
from matplotlib import pyplot as plt, markers
from matplotlib.collections import LineCollection
from matplotlib.figure import Figure
import cv2
from allenact.utils.system import get_logger
class AbstractViz:
def __init__(
self,
label: Optional[str] = None,
vector_task_sources: Sequence[Tuple[str, Dict[str, Any]]] = (),
rollout_sources: Sequence[Union[str, Sequence[str]]] = (),
actor_critic_source: bool = False,
**kwargs, # accepts `max_episodes_in_group`
):
self.label = label
self.vector_task_sources = list(vector_task_sources)
self.rollout_sources = [
[entry] if isinstance(entry, str) else list(entry)
for entry in rollout_sources
]
self.actor_critic_source = actor_critic_source
self.mode: Optional[str] = None
self.path_to_id: Optional[Sequence[str]] = None
self.episode_ids: Optional[List[Sequence[str]]] = None
if "max_episodes_in_group" in kwargs:
self.max_episodes_in_group = kwargs["max_episodes_in_group"]
self.assigned_max_eps_in_group = True
else:
self.max_episodes_in_group = 8
self.assigned_max_eps_in_group = False
@staticmethod
def _source_to_str(source, is_vector_task):
source_type = "vector_task" if is_vector_task else "rollout_or_actor_critic"
return "{}__{}".format(
source_type,
"__{}_sep__".format(source_type).join(["{}".format(s) for s in source]),
)
@staticmethod
def _access(dictionary, path):
path = path[::-1]
while len(path) > 0:
dictionary = dictionary[path.pop()]
return dictionary
def _auto_viz_order(self, task_outputs):
if task_outputs is None:
return None, None
all_episodes = {
self._access(episode, self.path_to_id): episode for episode in task_outputs
}
if self.episode_ids is None:
all_episode_keys = list(all_episodes.keys())
viz_order = []
for page_start in range(
0, len(all_episode_keys), self.max_episodes_in_group
):
viz_order.append(
all_episode_keys[
page_start : page_start + self.max_episodes_in_group
]
)
get_logger().debug("visualizing with order {}".format(viz_order))
else:
viz_order = self.episode_ids
return viz_order, all_episodes
def _setup(
self,
mode: str,
path_to_id: Sequence[str],
episode_ids: Optional[Sequence[Union[Sequence[str], str]]],
max_episodes_in_group: int,
force: bool = False,
):
self.mode = mode
self.path_to_id = list(path_to_id)
if (self.episode_ids is None or force) and episode_ids is not None:
self.episode_ids = (
list(episode_ids)
if not isinstance(episode_ids[0], str)
else [list(cast(List[str], episode_ids))]
)
if not self.assigned_max_eps_in_group or force:
self.max_episodes_in_group = max_episodes_in_group
@abc.abstractmethod
def log(
self,
log_writer: SummaryWriter,
task_outputs: Optional[List[Any]],
render: Optional[Dict[str, List[Dict[str, Any]]]],
num_steps: int,
):
raise NotImplementedError()
class TrajectoryViz(AbstractViz):
def __init__(
self,
path_to_trajectory: Sequence[str] = ("task_info", "followed_path"),
path_to_target_location: Optional[Sequence[str]] = (
"task_info",
"target_position",
),
path_to_x: Sequence[str] = ("x",),
path_to_y: Sequence[str] = ("z",),
path_to_rot_degrees: Optional[Sequence[str]] = ("rotation", "y"),
adapt_rotation: Optional[Callable[[float], float]] = None,
label: str = "trajectory",
figsize: Tuple[float, float] = (2, 2),
fontsize: float = 5,
start_marker_shape: str = r"$\spadesuit$",
start_marker_scale: int = 100,
**other_base_kwargs,
):
super().__init__(label, **other_base_kwargs)
self.path_to_trajectory = list(path_to_trajectory)
self.path_to_target_location = (
list(path_to_target_location)
if path_to_target_location is not None
else None
)
self.adapt_rotation = adapt_rotation
self.x = list(path_to_x)
self.y = list(path_to_y)
self.path_to_rot_degrees = (
list(path_to_rot_degrees) if path_to_rot_degrees is not None else None
)
self.figsize = figsize
self.fontsize = fontsize
self.start_marker_shape = start_marker_shape
self.start_marker_scale = start_marker_scale
def log(
self,
log_writer: SummaryWriter,
task_outputs: Optional[List[Any]],
render: Optional[Dict[str, List[Dict[str, Any]]]],
num_steps: int,
):
viz_order, all_episodes = self._auto_viz_order(task_outputs)
if viz_order is None:
get_logger().debug("trajectory viz returning without visualizing")
return
for page, current_ids in enumerate(viz_order):
figs = []
for episode_id in current_ids:
# assert episode_id in all_episodes
if episode_id not in all_episodes:
get_logger().warning(
"skipping viz for missing episode {}".format(episode_id)
)
continue
figs.append(self.make_fig(all_episodes[episode_id], episode_id))
if len(figs) == 0:
continue
log_writer.add_figure(
"{}/{}_group{}".format(self.mode, self.label, page),
figs,
global_step=num_steps,
)
plt.close(
"all"
) # close all current figures (SummaryWriter already closes all figures we log)
def make_fig(self, episode, episode_id):
# From https://nbviewer.jupyter.org/github/dpsanders/matplotlib-examples/blob/master/colorline.ipynb
def colorline(
x,
y,
z=None,
cmap=plt.get_cmap("cool"),
norm=plt.Normalize(0.0, 1.0),
linewidth=2,
alpha=1.0,
zorder=1,
):
"""Plot a colored line with coordinates x and y.
Optionally specify colors in the array z
Optionally specify a colormap, a norm function and a line width.
"""
def make_segments(x, y):
"""Create list of line segments from x and y coordinates, in
the correct format for LineCollection:
an array of the form numlines x (points per line) x 2
(x and y) array
"""
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
return segments
# Default colors equally spaced on [0,1]:
if z is None:
z = np.linspace(0.0, 1.0, len(x))
# Special case if a single number:
if not hasattr(
z, "__iter__"
): # to check for numerical input -- this is a hack
z = np.array([z])
z = np.asarray(z)
segments = make_segments(x, y)
lc = LineCollection(
segments,
array=z,
cmap=cmap,
norm=norm,
linewidth=linewidth,
alpha=alpha,
zorder=zorder,
)
ax = plt.gca()
ax.add_collection(lc)
return lc
trajectory = self._access(episode, self.path_to_trajectory)
x, y = [], []
for xy in trajectory:
x.append(float(self._access(xy, self.x)))
y.append(float(self._access(xy, self.y)))
fig, ax = plt.subplots(figsize=self.figsize)
colorline(x, y, zorder=1)
start_marker = markers.MarkerStyle(marker=self.start_marker_shape)
if self.path_to_rot_degrees is not None:
rot_degrees = float(self._access(trajectory[0], self.path_to_rot_degrees))
if self.adapt_rotation is not None:
rot_degrees = self.adapt_rotation(rot_degrees)
start_marker._transform = start_marker.get_transform().rotate_deg(
rot_degrees
)
ax.scatter(
[x[0]], [y[0]], marker=start_marker, zorder=2, s=self.start_marker_scale
)
ax.scatter([x[-1]], [y[-1]], marker="s") # stop
if self.path_to_target_location is not None:
target = self._access(episode, self.path_to_target_location)
ax.scatter(
[float(self._access(target, self.x))],
[float(self._access(target, self.y))],
marker="*",
)
ax.set_title(episode_id, fontsize=self.fontsize)
ax.tick_params(axis="x", labelsize=self.fontsize)
ax.tick_params(axis="y", labelsize=self.fontsize)
return fig
class AgentViewViz(AbstractViz):
def __init__(
self,
label: str = "agent_view",
max_clip_length: int = 100, # control memory used when converting groups of images into clips
max_video_length: int = -1, # no limit, if > 0, limit the maximum video length (discard last frames)
vector_task_source: Tuple[str, Dict[str, Any]] = (
"render",
{"mode": "raw_rgb_list"},
),
episode_ids: Optional[Sequence[Union[Sequence[str], str]]] = None,
fps: int = 4,
max_render_size: int = 400,
**other_base_kwargs,
):
super().__init__(
label, vector_task_sources=[vector_task_source], **other_base_kwargs,
)
self.max_clip_length = max_clip_length
self.max_video_length = max_video_length
self.fps = fps
self.max_render_size = max_render_size
self.episode_ids = (
(
list(episode_ids)
if not isinstance(episode_ids[0], str)
else [list(cast(List[str], episode_ids))]
)
if episode_ids is not None
else None
)
def log(
self,
log_writer: SummaryWriter,
task_outputs: Optional[List[Any]],
render: Optional[Dict[str, List[Dict[str, Any]]]],
num_steps: int,
):
if render is None:
return
datum_id = self._source_to_str(self.vector_task_sources[0], is_vector_task=True)
viz_order, _ = self._auto_viz_order(task_outputs)
if viz_order is None:
get_logger().debug("agent view viz returning without visualizing")
return
for page, current_ids in enumerate(viz_order):
images = [] # list of lists of rgb frames
for episode_id in current_ids:
# assert episode_id in render
if episode_id not in render:
get_logger().warning(
"skipping viz for missing episode {}".format(episode_id)
)
continue
images.append(
[
self._overlay_label(step[datum_id], episode_id)
for step in render[episode_id]
]
)
if len(images) == 0:
continue
vid = self.make_vid(images)
if vid is not None:
log_writer.add_vid(
"{}/{}_group{}".format(self.mode, self.label, page),
vid,
global_step=num_steps,
)
@staticmethod
def _overlay_label(
img,
text,
pos=(0, 0),
bg_color=(255, 255, 255),
fg_color=(0, 0, 0),
scale=0.4,
thickness=1,
margin=2,
font_face=cv2.FONT_HERSHEY_SIMPLEX,
):
txt_size = cv2.getTextSize(text, font_face, scale, thickness)
end_x = pos[0] + txt_size[0][0] + margin
end_y = pos[1]
pos = (pos[0], pos[1] + txt_size[0][1] + margin)
cv2.rectangle(img, pos, (end_x, end_y), bg_color, cv2.FILLED)
cv2.putText(
img=img,
text=text,
org=pos,
fontFace=font_face,
fontScale=scale,
color=fg_color,
thickness=thickness,
lineType=cv2.LINE_AA,
)
return img
def make_vid(self, images):
max_length = max([len(ep) for ep in images])
if max_length == 0:
return None
valid_im = None
for ep in images:
if len(ep) > 0:
valid_im = ep[0]
break
frames = []
for it in range(max_length):
current_images = []
for ep in images:
if it < len(ep):
current_images.append(ep[it])
else:
if it == 0:
current_images.append(np.zeros_like(valid_im))
else:
gray = ep[-1].copy()
gray[:, :, 0] = gray[:, :, 2] = gray[:, :, 1]
current_images.append(gray)
frames.append(tile_images(current_images))
return process_video(
frames, self.max_clip_length, self.max_video_length, fps=self.fps
)
class AbstractTensorViz(AbstractViz):
def __init__(
self,
rollout_source: Union[str, Sequence[str]],
label: Optional[str] = None,
figsize: Tuple[float, float] = (3, 3),
**other_base_kwargs,
):
if label is None:
if isinstance(rollout_source, str):
label = rollout_source[:]
else:
label = "/".join(rollout_source)
super().__init__(label, rollout_sources=[rollout_source], **other_base_kwargs)
self.figsize = figsize
self.datum_id = self._source_to_str(
self.rollout_sources[0], is_vector_task=False
)
def log(
self,
log_writer: SummaryWriter,
task_outputs: Optional[List[Any]],
render: Optional[Dict[str, List[Dict[str, Any]]]],
num_steps: int,
):
if render is None:
return
viz_order, _ = self._auto_viz_order(task_outputs)
if viz_order is None:
get_logger().debug("tensor viz returning without visualizing")
return
for page, current_ids in enumerate(viz_order):
figs = []
for episode_id in current_ids:
if episode_id not in render or len(render[episode_id]) == 0:
get_logger().warning(
"skipping viz for missing or 0-length episode {}".format(
episode_id
)
)
continue
episode_src = [
step[self.datum_id]
for step in render[episode_id]
if self.datum_id in step
]
if len(episode_src) > 0:
                    # If the last episode for an inference worker has length 1, there are no captured rollout sources
figs.append(self.make_fig(episode_src, episode_id))
if len(figs) == 0:
continue
log_writer.add_figure(
"{}/{}_group{}".format(self.mode, self.label, page),
figs,
global_step=num_steps,
)
plt.close(
"all"
) # close all current figures (SummaryWriter already closes all figures we log)
@abc.abstractmethod
def make_fig(self, episode_src: Sequence[np.ndarray], episode_id: str) -> Figure:
raise NotImplementedError()
class TensorViz1D(AbstractTensorViz):
def __init__(
self,
rollout_source: Union[str, Sequence[str]] = "action_log_probs",
label: Optional[str] = None,
figsize: Tuple[float, float] = (3, 3),
**other_base_kwargs,
):
super().__init__(rollout_source, label, figsize, **other_base_kwargs)
def make_fig(self, episode_src, episode_id):
assert episode_src[0].size == 1
# Concatenate along step axis (0)
seq = np.concatenate(episode_src, axis=0).squeeze() # remove all singleton dims
fig, ax = plt.subplots(figsize=self.figsize)
ax.plot(seq)
ax.set_title(episode_id)
ax.set_aspect("auto")
plt.tight_layout()
return fig
class TensorViz2D(AbstractTensorViz):
def __init__(
self,
rollout_source: Union[str, Sequence[str]] = ("memory", "rnn"),
label: Optional[str] = None,
figsize: Tuple[float, float] = (10, 10),
fontsize: float = 5,
**other_base_kwargs,
):
super().__init__(rollout_source, label, figsize, **other_base_kwargs)
self.fontsize = fontsize
def make_fig(self, episode_src, episode_id):
# Concatenate along step axis (0)
seq = np.concatenate(
episode_src, axis=0
).squeeze() # remove num_layers if it's equal to 1, else die
assert len(seq.shape) == 2, "No support for higher-dimensions"
# get_logger().debug("basic {} h render {}".format(episode_id, seq[:10, 0]))
fig, ax = plt.subplots(figsize=self.figsize)
ax.matshow(seq)
ax.set_xlabel(episode_id, fontsize=self.fontsize)
ax.tick_params(axis="x", labelsize=self.fontsize)
ax.tick_params(axis="y", labelsize=self.fontsize)
ax.tick_params(bottom=False)
ax.set_aspect("auto")
plt.tight_layout()
return fig
class ActorViz(AbstractViz):
def __init__(
self,
label: str = "action_probs",
action_names_path: Optional[Sequence[str]] = ("task_info", "action_names"),
figsize: Tuple[float, float] = (1, 5),
fontsize: float = 5,
**other_base_kwargs,
):
super().__init__(label, actor_critic_source=True, **other_base_kwargs)
self.action_names_path: Optional[Sequence[str]] = (
list(action_names_path) if action_names_path is not None else None
)
self.figsize = figsize
self.fontsize = fontsize
self.action_names: Optional[List[str]] = None
def log(
self,
log_writer: SummaryWriter,
task_outputs: Optional[List[Any]],
render: Optional[Dict[str, List[Dict[str, Any]]]],
num_steps: int,
):
if render is None:
return
if (
self.action_names is None
and task_outputs is not None
and len(task_outputs) > 0
and self.action_names_path is not None
):
self.action_names = list(
self._access(task_outputs[0], self.action_names_path)
)
viz_order, _ = self._auto_viz_order(task_outputs)
if viz_order is None:
get_logger().debug("actor viz returning without visualizing")
return
for page, current_ids in enumerate(viz_order):
figs = []
for episode_id in current_ids:
# assert episode_id in render
if episode_id not in render:
get_logger().warning(
"skipping viz for missing episode {}".format(episode_id)
)
continue
episode_src = [
step["actor_probs"]
for step in render[episode_id]
if "actor_probs" in step
]
assert len(episode_src) == len(render[episode_id])
figs.append(self.make_fig(episode_src, episode_id))
if len(figs) == 0:
continue
log_writer.add_figure(
"{}/{}_group{}".format(self.mode, self.label, page),
figs,
global_step=num_steps,
)
plt.close(
"all"
) # close all current figures (SummaryWriter already closes all figures we log)
def make_fig(self, episode_src, episode_id):
# Concatenate along step axis (0, reused from kept sampler axis)
mat = np.concatenate(episode_src, axis=0)
fig, ax = plt.subplots(figsize=self.figsize)
ax.matshow(mat)
if self.action_names is not None:
assert len(self.action_names) == mat.shape[-1]
ax.set_xticklabels([""] + self.action_names, rotation="vertical")
ax.set_xlabel(episode_id, fontsize=self.fontsize)
ax.tick_params(axis="x", labelsize=self.fontsize)
ax.tick_params(axis="y", labelsize=self.fontsize)
ax.tick_params(bottom=False)
# Gridlines based on minor ticks
ax.set_yticks(np.arange(-0.5, mat.shape[0], 1), minor=True)
ax.set_xticks(np.arange(-0.5, mat.shape[1], 1), minor=True)
ax.grid(which="minor", color="w", linestyle="-", linewidth=0.05)
ax.tick_params(
axis="both", which="minor", left=False, top=False, right=False, bottom=False
)
ax.set_aspect("auto")
plt.tight_layout()
return fig
class VizSuite(AbstractViz):
def __init__(
self,
episode_ids: Optional[Sequence[Union[Sequence[str], str]]] = None,
path_to_id: Sequence[str] = ("task_info", "id"),
mode: str = "valid",
force_episodes_and_max_episodes_in_group: bool = False,
max_episodes_in_group: int = 8,
*viz,
**kw_viz,
):
super().__init__(max_episodes_in_group=max_episodes_in_group)
self._setup(
mode=mode,
path_to_id=path_to_id,
episode_ids=episode_ids,
max_episodes_in_group=max_episodes_in_group,
)
self.force_episodes_and_max_episodes_in_group = (
force_episodes_and_max_episodes_in_group
)
self.all_episode_ids = self._episodes_set()
self.viz = [
v() if isinstance(v, Builder) else v
for v in viz
if isinstance(v, Builder) or isinstance(v, AbstractViz)
] + [
v() if isinstance(v, Builder) else v
for k, v in kw_viz.items()
if isinstance(v, Builder) or isinstance(v, AbstractViz)
]
self.max_render_size: Optional[int] = None
(
self.rollout_sources,
self.vector_task_sources,
self.actor_critic_source,
) = self._setup_sources()
self.data: Dict[
str, List[Dict]
] = {} # dict of episode id to list of dicts with collected data
self.last_it2epid: List[str] = []
def _setup_sources(self):
rollout_sources, vector_task_sources = [], []
labels = []
actor_critic_source = False
new_episodes = []
for v in self.viz:
labels.append(v.label)
rollout_sources += v.rollout_sources
vector_task_sources += v.vector_task_sources
actor_critic_source |= v.actor_critic_source
if (
v.episode_ids is not None
and not self.force_episodes_and_max_episodes_in_group
):
cur_episodes = self._episodes_set(v.episode_ids)
for ep in cur_episodes:
if (
self.all_episode_ids is not None
and ep not in self.all_episode_ids
):
new_episodes.append(ep)
get_logger().info(
"Added new episode {} from {}".format(ep, v.label)
)
v._setup(
mode=self.mode,
path_to_id=self.path_to_id,
episode_ids=self.episode_ids,
max_episodes_in_group=self.max_episodes_in_group,
force=self.force_episodes_and_max_episodes_in_group,
)
if isinstance(v, AgentViewViz):
self.max_render_size = v.max_render_size
get_logger().info("Logging labels {}".format(labels))
if len(new_episodes) > 0:
get_logger().info("Added new episodes {}".format(new_episodes))
self.episode_ids.append(new_episodes) # new group with all added episodes
self.all_episode_ids = self._episodes_set()
rol_flat = {json.dumps(src, sort_keys=True): src for src in rollout_sources}
vt_flat = {json.dumps(src, sort_keys=True): src for src in vector_task_sources}
rol_keys = list(set(rol_flat.keys()))
vt_keys = list(set(vt_flat.keys()))
return (
[rol_flat[k] for k in rol_keys],
[vt_flat[k] for k in vt_keys],
actor_critic_source,
)
def _episodes_set(self, episode_list=None) -> Optional[Set[str]]:
source = self.episode_ids if episode_list is None else episode_list
if source is None:
return None
all_episode_ids: List[str] = []
for group in source:
all_episode_ids += group
return set(all_episode_ids)
def empty(self):
return len(self.data) == 0
def _update(self, collected_data):
for epid in collected_data:
assert epid in self.data
self.data[epid][-1].update(collected_data[epid])
def _append(self, vector_task_data):
for epid in vector_task_data:
if epid in self.data:
self.data[epid].append(vector_task_data[epid])
else:
self.data[epid] = [vector_task_data[epid]]
def _collect_actor_critic(self, actor_critic):
actor_critic_data = {
epid: dict()
for epid in self.last_it2epid
if self.all_episode_ids is None or epid in self.all_episode_ids
}
if len(actor_critic_data) > 0 and actor_critic is not None:
if self.actor_critic_source:
# TODO this code only supports Discrete action spaces!
probs = (
actor_critic.distributions.probs
) # step (=1) x sampler x agent (=1) x action
values = actor_critic.values # step x sampler x agent x 1
for it, epid in enumerate(self.last_it2epid):
if epid in actor_critic_data:
# Select current episode (sampler axis will be reused as step axis)
prob = (
# probs.narrow(dim=0, start=it, length=1) # works for sampler x action
probs.narrow(
dim=1, start=it, length=1
) # step x sampler x agent x action -> step x 1 x agent x action
.squeeze(
0
) # step x 1 x agent x action -> 1 x agent x action
# .squeeze(-2) # 1 x agent x action -> 1 x action
.to("cpu")
.detach()
.numpy()
)
assert "actor_probs" not in actor_critic_data[epid]
actor_critic_data[epid]["actor_probs"] = prob
val = (
# values.narrow(dim=0, start=it, length=1) # works for sampler x 1
values.narrow(
dim=1, start=it, length=1
) # step x sampler x agent x 1 -> step x 1 x agent x 1
.squeeze(0) # step x 1 x agent x 1 -> 1 x agent x 1
# .squeeze(-2) # 1 x agent x 1 -> 1 x 1
.to("cpu")
.detach()
.numpy()
)
assert "critic_value" not in actor_critic_data[epid]
actor_critic_data[epid]["critic_value"] = val
self._update(actor_critic_data)
def _collect_rollout(self, rollout, alive):
alive_set = set(alive)
assert len(alive_set) == len(alive)
alive_it2epid = [
epid for it, epid in enumerate(self.last_it2epid) if it in alive_set
]
rollout_data = {
epid: dict()
for epid in alive_it2epid
if self.all_episode_ids is None or epid in self.all_episode_ids
}
if len(rollout_data) > 0 and rollout is not None:
for source in self.rollout_sources:
datum_id = self._source_to_str(source, is_vector_task=False)
storage, path = source[0], source[1:]
# Access storage
res = getattr(rollout, storage)
episode_dim = rollout.dim_names.index("sampler")
# Access sub-storage if path not empty
if len(path) > 0:
flattened_name = rollout.unflattened_to_flattened[storage][
tuple(path)
]
# for path_step in path:
# res = res[path_step]
res = res[flattened_name]
res, episode_dim = res
if rollout.step > 0:
if rollout.step > res.shape[0]:
# e.g. rnn with only latest memory saved
rollout_step = res.shape[0] - 1
else:
rollout_step = rollout.step - 1
else:
if rollout.num_steps - 1 < res.shape[0]:
rollout_step = rollout.num_steps - 1
else:
# e.g. rnn with only latest memory saved
rollout_step = res.shape[0] - 1
# Select latest step
res = res.narrow(
dim=0, start=rollout_step, length=1, # step dimension
) # 1 x ... x sampler x ...
# get_logger().debug("basic collect h {}".format(res[..., 0]))
for it, epid in enumerate(alive_it2epid):
if epid in rollout_data:
# Select current episode and remove episode/sampler axis
datum = (
res.narrow(dim=episode_dim, start=it, length=1)
.squeeze(axis=episode_dim)
.to("cpu")
.detach()
.numpy()
) # 1 x ... (no sampler dim)
# get_logger().debug("basic collect ep {} h {}".format(epid, res[..., 0]))
assert datum_id not in rollout_data[epid]
rollout_data[epid][
datum_id
] = datum.copy() # copy needed when running on CPU!
self._update(rollout_data)
def _collect_vector_task(self, vector_task):
it2epid = [
self._access(info, self.path_to_id[1:])
for info in vector_task.attr("task_info")
]
# get_logger().debug("basic epids {}".format(it2epid))
def limit_spatial_res(data: np.ndarray, max_size=400):
if data.shape[0] <= max_size and data.shape[1] <= max_size:
return data
else:
f = float(max_size) / max(data.shape[0], data.shape[1])
size = (int(data.shape[1] * f), int(data.shape[0] * f))
return cv2.resize(data, size, 0, 0, interpolation=cv2.INTER_AREA)
vector_task_data = {
epid: dict()
for epid in it2epid
if self.all_episode_ids is None or epid in self.all_episode_ids
}
if len(vector_task_data) > 0:
for (
source
) in self.vector_task_sources: # these are observations for next step!
datum_id = self._source_to_str(source, is_vector_task=True)
method, kwargs = source
res = getattr(vector_task, method)(**kwargs)
if not isinstance(res, Sequence):
assert len(it2epid) == 1
res = [res]
if method == "render":
res = [limit_spatial_res(r, self.max_render_size) for r in res]
assert len(res) == len(it2epid)
for datum, epid in zip(res, it2epid):
if epid in vector_task_data:
assert datum_id not in vector_task_data[epid]
vector_task_data[epid][datum_id] = datum
self._append(vector_task_data)
return it2epid
# to be called by engine
def collect(self, vector_task=None, alive=None, rollout=None, actor_critic=None):
if actor_critic is not None:
# in phase with last_it2epid
try:
self._collect_actor_critic(actor_critic)
except (AssertionError, RuntimeError):
get_logger().debug(
msg=f"Failed collect (actor_critic) for viz due to exception:",
exc_info=sys.exc_info(),
)
get_logger().error(f"Failed collect (actor_critic) for viz")
if alive is not None and rollout is not None:
# in phase with last_it2epid that stay alive
try:
self._collect_rollout(rollout, alive)
except (AssertionError, RuntimeError):
get_logger().debug(
msg=f"Failed collect (rollout) for viz due to exception:",
exc_info=sys.exc_info(),
)
get_logger().error(f"Failed collect (rollout) for viz")
# Always call this one last!
if vector_task is not None:
# in phase with identifiers of current episodes from vector_task
try:
self.last_it2epid = self._collect_vector_task(vector_task)
except (AssertionError, RuntimeError):
get_logger().debug(
msg=f"Failed collect (vector_task) for viz due to exception:",
exc_info=sys.exc_info(),
)
get_logger().error(f"Failed collect (vector_task) for viz")
def read_and_reset(self) -> Dict[str, List[Dict[str, Any]]]:
res = self.data
self.data = {}
# get_logger().debug("Returning episodes {}".format(list(res.keys())))
return res
# to be called by logger
def log(
self,
log_writer: SummaryWriter,
task_outputs: Optional[List[Any]],
render: Optional[Dict[str, List[Dict[str, Any]]]],
num_steps: int,
):
for v in self.viz:
try:
v.log(log_writer, task_outputs, render, num_steps)
except (AssertionError, RuntimeError):
get_logger().debug(
msg=f"Dropped {v.label} viz due to exception:",
exc_info=sys.exc_info(),
)
get_logger().error(f"Dropped {v.label} viz")
class TensorboardSummarizer:
"""Assumption: tensorboard tags/labels include a valid/test/train substr indicating the data modality"""
def __init__(
self,
experiment_to_train_events_paths_map: Dict[str, Sequence[str]],
experiment_to_test_events_paths_map: Dict[str, Sequence[str]],
eval_min_mega_steps: Optional[Sequence[float]] = None,
tensorboard_tags_to_labels_map: Optional[Dict[str, str]] = None,
tensorboard_output_summary_folder: str = "tensorboard_plotter_output",
):
if not _TF_AVAILABLE:
raise ImportError(
"Please install tensorflow e.g. with `pip install tensorflow` to enable TensorboardSummarizer"
)
self.experiment_to_train_events_paths_map = experiment_to_train_events_paths_map
self.experiment_to_test_events_paths_map = experiment_to_test_events_paths_map
train_experiments = set(list(experiment_to_train_events_paths_map.keys()))
test_experiments = set(list(experiment_to_test_events_paths_map.keys()))
assert (train_experiments - test_experiments) in [set(), train_experiments,], (
f"`experiment_to_test_events_paths_map` must have identical keys (experiment names) to those"
f" in `experiment_to_train_events_paths_map`, or be empty."
f" Got {train_experiments} train keys and {test_experiments} test keys."
)
self.eval_min_mega_steps = eval_min_mega_steps
self.tensorboard_tags_to_labels_map = tensorboard_tags_to_labels_map
if self.tensorboard_tags_to_labels_map is not None:
for tag, label in self.tensorboard_tags_to_labels_map.items():
assert ("valid" in label) + ("train" in label) + (
"test" in label
) == 1, (
f"One (and only one) of {'train', 'valid', 'test'} must be part of the label for"
f" tag {tag} ({label} given)."
)
self.tensorboard_output_summary_folder = tensorboard_output_summary_folder
self.train_data = self._read_tensorflow_experiment_events(
self.experiment_to_train_events_paths_map
)
self.test_data = self._read_tensorflow_experiment_events(
self.experiment_to_test_events_paths_map
)
def _read_tensorflow_experiment_events(
self, experiment_to_events_paths_map, skip_map=False
):
def my_summary_iterator(path):
try:
for r in tf_record.tf_record_iterator(path):
yield event_pb2.Event.FromString(r)
except IOError:
get_logger().debug(f"IOError for path {path}")
return None
collected_data = {}
for experiment_name, path_list in experiment_to_events_paths_map.items():
experiment_data = defaultdict(list)
for filename_path in path_list:
for event in my_summary_iterator(filename_path):
if event is None:
break
for value in event.summary.value:
if self.tensorboard_tags_to_labels_map is None or skip_map:
label = value.tag
elif value.tag in self.tensorboard_tags_to_labels_map:
label = self.tensorboard_tags_to_labels_map[value.tag]
else:
continue
experiment_data[label].append(
dict(
score=value.simple_value,
time=event.wall_time,
steps=event.step,
)
)
collected_data[experiment_name] = experiment_data
return collected_data
def _eval_vs_train_time_steps(self, eval_data, train_data):
min_mega_steps = self.eval_min_mega_steps
if min_mega_steps is None:
min_mega_steps = [(item["steps"] - 1) / 1e6 for item in eval_data]
scores, times, steps = [], [], []
i, t, last_i = 0, 0, -1
while len(times) < len(min_mega_steps):
while eval_data[i]["steps"] / min_mega_steps[len(times)] / 1e6 < 1:
i += 1
while train_data[t]["steps"] / min_mega_steps[len(times)] / 1e6 < 1:
t += 1
# step might be missing in valid! (and would duplicate future value at previous steps!)
# solution: move forward last entry's time if no change in i (instead of new entry)
if i == last_i:
times[-1] = train_data[t]["time"]
else:
scores.append(eval_data[i]["score"])
times.append(train_data[t]["time"])
steps.append(eval_data[i]["steps"])
last_i = i
scores.insert(0, train_data[0]["score"])
times.insert(0, train_data[0]["time"])
steps.insert(0, 0)
return scores, times, steps
def _train_vs_time_steps(self, train_data):
last_eval_step = (
self.eval_min_mega_steps[-1] * 1e6
if self.eval_min_mega_steps is not None
else float("inf")
)
scores = [train_data[0]["score"]]
times = [train_data[0]["time"]]
steps = [train_data[0]["steps"]]
t = 1
while steps[-1] < last_eval_step and t < len(train_data):
scores.append(train_data[t]["score"])
times.append(train_data[t]["time"])
steps.append(train_data[t]["steps"])
t += 1
return scores, times, steps
def make_tensorboard_summary(self):
all_experiments = list(self.experiment_to_train_events_paths_map.keys())
for experiment_name in all_experiments:
summary_writer = SummaryWriter(
os.path.join(self.tensorboard_output_summary_folder, experiment_name)
)
test_labels = (
sorted(list(self.test_data[experiment_name].keys()))
if len(self.test_data) > 0
else []
)
for test_label in test_labels:
train_label = test_label.replace("valid", "test").replace(
"test", "train"
)
if train_label not in self.train_data[experiment_name]:
print(
f"Missing matching 'train' label {train_label} for eval label {test_label}. Skipping"
)
continue
train_data = self.train_data[experiment_name][train_label]
test_data = self.test_data[experiment_name][test_label]
scores, times, steps = self._eval_vs_train_time_steps(
test_data, train_data
)
for score, t, step in zip(scores, times, steps):
summary_writer.add_scalar(
test_label, score, global_step=step, walltime=t
)
valid_labels = sorted(
[
key
for key in list(self.train_data[experiment_name].keys())
if "valid" in key
]
)
for valid_label in valid_labels:
train_label = valid_label.replace("valid", "train")
assert (
train_label in self.train_data[experiment_name]
), f"Missing matching 'train' label {train_label} for valid label {valid_label}"
train_data = self.train_data[experiment_name][train_label]
valid_data = self.train_data[experiment_name][valid_label]
scores, times, steps = self._eval_vs_train_time_steps(
valid_data, train_data
)
for score, t, step in zip(scores, times, steps):
summary_writer.add_scalar(
valid_label, score, global_step=step, walltime=t
)
train_labels = sorted(
[
key
for key in list(self.train_data[experiment_name].keys())
if "train" in key
]
)
for train_label in train_labels:
scores, times, steps = self._train_vs_time_steps(
self.train_data[experiment_name][train_label]
)
for score, t, step in zip(scores, times, steps):
summary_writer.add_scalar(
train_label, score, global_step=step, walltime=t
)
summary_writer.close()
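# Illustrative usage sketch (not part of the original code, requires tensorflow to be
# installed): the event-file paths and experiment name below are hypothetical placeholders.
#     summarizer = TensorboardSummarizer(
#         experiment_to_train_events_paths_map={"exp1": ["path/to/train/events.out.tfevents.0"]},
#         experiment_to_test_events_paths_map={"exp1": ["path/to/test/events.out.tfevents.0"]},
#     )
#     summarizer.make_tensorboard_summary()  # writes merged scalars under tensorboard_plotter_output/exp1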
| ask4help-main | allenact/utils/viz_utils.py |
"""Functions used to manipulate pytorch tensors and numpy arrays."""
import numbers
import os
import tempfile
from collections import defaultdict
from typing import List, Dict, Optional, DefaultDict, Union, Any, cast
import PIL
import numpy as np
import torch
from PIL import Image
from moviepy import editor as mpy
from moviepy.editor import concatenate_videoclips
from tensorboardX import SummaryWriter as TBXSummaryWriter, summary as tbxsummary
from tensorboardX.proto.summary_pb2 import Summary as TBXSummary
# noinspection PyProtectedMember
from tensorboardX.utils import _prepare_video as tbx_prepare_video
from tensorboardX.x2num import make_np as tbxmake_np
from allenact.utils.system import get_logger
def to_device_recursively(
input: Any, device: Union[str, torch.device, int], inplace: bool = True
):
"""Recursively places tensors on the appropriate device."""
if input is None:
return input
elif isinstance(input, torch.Tensor):
return input.to(device) # type: ignore
elif isinstance(input, tuple):
return tuple(
to_device_recursively(input=subinput, device=device, inplace=inplace)
for subinput in input
)
elif isinstance(input, list):
if inplace:
for i in range(len(input)):
input[i] = to_device_recursively(
input=input[i], device=device, inplace=inplace
)
return input
else:
return [
to_device_recursively(input=subpart, device=device, inplace=inplace)
for subpart in input
]
elif isinstance(input, dict):
if inplace:
for key in input:
input[key] = to_device_recursively(
input=input[key], device=device, inplace=inplace
)
return input
else:
return {
k: to_device_recursively(input=input[k], device=device, inplace=inplace)
for k in input
}
elif isinstance(input, set):
if inplace:
            for element in list(input):
                input.remove(element)
                input.add(
                    to_device_recursively(element, device=device, inplace=inplace)
                )
            return input
else:
return set(
to_device_recursively(k, device=device, inplace=inplace) for k in input
)
elif isinstance(input, np.ndarray) or np.isscalar(input) or isinstance(input, str):
return input
elif hasattr(input, "to"):
# noinspection PyCallingNonCallable
return input.to(device=device, inplace=inplace)
else:
raise NotImplementedError(
"Sorry, value of type {} is not supported.".format(type(input))
)
def detach_recursively(input: Any, inplace=True):
"""Recursively detaches tensors in some data structure from their
computation graph."""
if input is None:
return input
elif isinstance(input, torch.Tensor):
return input.detach()
elif isinstance(input, tuple):
return tuple(
detach_recursively(input=subinput, inplace=inplace) for subinput in input
)
elif isinstance(input, list):
if inplace:
for i in range(len(input)):
input[i] = detach_recursively(input[i], inplace=inplace)
return input
else:
return [
detach_recursively(input=subinput, inplace=inplace)
for subinput in input
]
elif isinstance(input, dict):
if inplace:
for key in input:
input[key] = detach_recursively(input[key], inplace=inplace)
return input
else:
return {k: detach_recursively(input[k], inplace=inplace) for k in input}
elif isinstance(input, set):
if inplace:
            for element in list(input):
                input.remove(element)
                input.add(detach_recursively(element, inplace=inplace))
            return input
else:
return set(detach_recursively(k, inplace=inplace) for k in input)
elif isinstance(input, np.ndarray) or np.isscalar(input) or isinstance(input, str):
return input
elif hasattr(input, "detach_recursively"):
# noinspection PyCallingNonCallable
return input.detach_recursively(inplace=inplace)
else:
raise NotImplementedError(
"Sorry, hidden state of type {} is not supported.".format(type(input))
)
def batch_observations(
observations: List[Dict], device: Optional[torch.device] = None
) -> Dict[str, Union[Dict, torch.Tensor]]:
"""Transpose a batch of observation dicts to a dict of batched
observations.
    # Parameters
    observations : List of dicts of observations.
    device : The torch.device to put the resulting tensors on.
        Will not move the tensors if None.
    # Returns
    Dict of batched observations (tensors stacked along a new leading dimension).
"""
def dict_from_observation(
observation: Dict[str, Any]
) -> Dict[str, Union[Dict, List]]:
batch_dict: DefaultDict = defaultdict(list)
for sensor in observation:
if isinstance(observation[sensor], Dict):
batch_dict[sensor] = dict_from_observation(observation[sensor])
else:
batch_dict[sensor].append(to_tensor(observation[sensor]))
return batch_dict
def fill_dict_from_observations(
input_batch: Any, observation: Dict[str, Any]
) -> None:
for sensor in observation:
if isinstance(observation[sensor], Dict):
fill_dict_from_observations(input_batch[sensor], observation[sensor])
else:
input_batch[sensor].append(to_tensor(observation[sensor]))
def dict_to_batch(input_batch: Any) -> None:
for sensor in input_batch:
if isinstance(input_batch[sensor], Dict):
dict_to_batch(input_batch[sensor])
else:
input_batch[sensor] = torch.stack(
[batch.to(device=device) for batch in input_batch[sensor]], dim=0
)
if len(observations) == 0:
return cast(Dict[str, Union[Dict, torch.Tensor]], observations)
batch = dict_from_observation(observations[0])
for obs in observations[1:]:
fill_dict_from_observations(batch, obs)
dict_to_batch(batch)
return cast(Dict[str, Union[Dict, torch.Tensor]], batch)
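# Illustrative example (not part of the original code): two single-step observations are
# stacked along a new leading batch dimension. The sensor name and shapes are arbitrary.
#     obs = [{"rgb": np.zeros((4, 4, 3))}, {"rgb": np.ones((4, 4, 3))}]
#     batch = batch_observations(obs)
#     batch["rgb"].shape  # -> torch.Size([2, 4, 4, 3])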
def to_tensor(v) -> torch.Tensor:
"""Return a torch.Tensor version of the input.
# Parameters
v : Input values that can be coerced into being a tensor.
# Returns
A tensor version of the input.
"""
if torch.is_tensor(v):
return v
elif isinstance(v, np.ndarray):
return torch.from_numpy(v)
else:
return torch.tensor(
v, dtype=torch.int64 if isinstance(v, numbers.Integral) else torch.float
)
def tile_images(images: List[np.ndarray]) -> np.ndarray:
"""Tile multiple images into single image.
# Parameters
images : list of images where each image has dimension
(height x width x channels)
# Returns
Tiled image (new_height x width x channels).
"""
assert len(images) > 0, "empty list of images"
np_images = np.asarray(images)
n_images, height, width, n_channels = np_images.shape
new_height = int(np.ceil(np.sqrt(n_images)))
new_width = int(np.ceil(float(n_images) / new_height))
# pad with empty images to complete the rectangle
np_images = np.array(
images + [images[0] * 0 for _ in range(n_images, new_height * new_width)]
)
# img_HWhwc
out_image = np_images.reshape((new_height, new_width, height, width, n_channels))
# img_HhWwc
out_image = out_image.transpose(0, 2, 1, 3, 4)
# img_Hh_Ww_c
out_image = out_image.reshape((new_height * height, new_width * width, n_channels))
return out_image
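# Illustrative example (not part of the original code): three 4x4 RGB frames are tiled onto
# a 2x2 grid (the empty cell is filled with zeros), giving an 8x8 image.
#     frames = [np.random.randint(0, 255, (4, 4, 3), dtype=np.uint8) for _ in range(3)]
#     tile_images(frames).shape  # -> (8, 8, 3)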
class SummaryWriter(TBXSummaryWriter):
@staticmethod
def _video(tag, vid):
# noinspection PyProtectedMember
tag = tbxsummary._clean_tag(tag)
return TBXSummary(value=[TBXSummary.Value(tag=tag, image=vid)])
def add_vid(self, tag, vid, global_step=None, walltime=None):
self._get_file_writer().add_summary(
self._video(tag, vid), global_step, walltime
)
def add_image(
self, tag, img_tensor, global_step=None, walltime=None, dataformats="CHW"
):
self._get_file_writer().add_summary(
image(tag, img_tensor, dataformats=dataformats), global_step, walltime
)
def image(tag, tensor, rescale=1, dataformats="CHW"):
"""Outputs a `Summary` protocol buffer with images. The summary has up to
`max_images` summary values containing images. The images are built from
`tensor` which must be 3-D with shape `[height, width, channels]` and where
`channels` can be:
* 1: `tensor` is interpreted as Grayscale.
* 3: `tensor` is interpreted as RGB.
* 4: `tensor` is interpreted as RGBA.
# Parameters
tag: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A 3-D `uint8` or `float32` `Tensor` of shape `[height, width,
channels]` where `channels` is 1, 3, or 4.
'tensor' can either have values in [0, 1] (float32) or [0, 255] (uint8).
The image() function will scale the image values to [0, 255] by applying
a scale factor of either 1 (uint8) or 255 (float32).
rescale: The scale.
dataformats: Input image shape format.
# Returns
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
# noinspection PyProtectedMember
tag = tbxsummary._clean_tag(tag)
tensor = tbxmake_np(tensor)
tensor = convert_to_HWC(tensor, dataformats)
# Do not assume that user passes in values in [0, 255], use data type to detect
if tensor.dtype != np.uint8:
tensor = (tensor * 255.0).astype(np.uint8)
image = tbxsummary.make_image(tensor, rescale=rescale)
return TBXSummary(value=[TBXSummary.Value(tag=tag, image=image)])
def convert_to_HWC(tensor, input_format): # tensor: numpy array
assert len(set(input_format)) == len(
input_format
), "You can not use the same dimension shordhand twice. \
input_format: {}".format(
input_format
)
assert len(tensor.shape) == len(
input_format
), "size of input tensor and input format are different. \
tensor shape: {}, input_format: {}".format(
tensor.shape, input_format
)
input_format = input_format.upper()
if len(input_format) == 4:
index = [input_format.find(c) for c in "NCHW"]
tensor_NCHW = tensor.transpose(index)
tensor_CHW = make_grid(tensor_NCHW)
# noinspection PyTypeChecker
return tensor_CHW.transpose(1, 2, 0)
if len(input_format) == 3:
index = [input_format.find(c) for c in "HWC"]
tensor_HWC = tensor.transpose(index)
if tensor_HWC.shape[2] == 1:
tensor_HWC = np.concatenate([tensor_HWC, tensor_HWC, tensor_HWC], 2)
return tensor_HWC
if len(input_format) == 2:
index = [input_format.find(c) for c in "HW"]
tensor = tensor.transpose(index)
tensor = np.stack([tensor, tensor, tensor], 2)
return tensor
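# Illustrative sketch added for documentation; this helper is not part of the original
# module. It shows how a 3-letter dimension-shorthand string is interpreted; the
# 3x32x32 tensor is an arbitrary example.
def _convert_to_HWC_example() -> np.ndarray:
    chw = np.zeros((3, 32, 32), dtype=np.uint8)  # channels-first image
    hwc = convert_to_HWC(chw, "CHW")  # reordered to height, width, channels
    assert hwc.shape == (32, 32, 3)
    return hwc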
def make_grid(I, ncols=8):
# I: N1HW or N3HW
assert isinstance(I, np.ndarray), "plugin error, should pass numpy array here"
if I.shape[1] == 1:
I = np.concatenate([I, I, I], 1)
    assert I.ndim == 4 and (I.shape[1] == 3 or I.shape[1] == 4)
nimg = I.shape[0]
H = I.shape[2]
W = I.shape[3]
ncols = min(nimg, ncols)
nrows = int(np.ceil(float(nimg) / ncols))
canvas = np.zeros((I.shape[1], H * nrows, W * ncols), dtype=I.dtype)
i = 0
for y in range(nrows):
for x in range(ncols):
if i >= nimg:
break
canvas[:, y * H : (y + 1) * H, x * W : (x + 1) * W] = I[i]
i = i + 1
return canvas
def tensor_to_video(tensor, fps=4):
tensor = tbxmake_np(tensor)
tensor = tbx_prepare_video(tensor)
# If user passes in uint8, then we don't need to rescale by 255
if tensor.dtype != np.uint8:
tensor = (tensor * 255.0).astype(np.uint8)
return tbxsummary.make_video(tensor, fps)
def tensor_to_clip(tensor, fps=4):
tensor = tbxmake_np(tensor)
tensor = tbx_prepare_video(tensor)
# If user passes in uint8, then we don't need to rescale by 255
if tensor.dtype != np.uint8:
tensor = (tensor * 255.0).astype(np.uint8)
t, h, w, c = tensor.shape
clip = mpy.ImageSequenceClip(list(tensor), fps=fps)
return clip, (h, w, c)
def clips_to_video(clips, h, w, c):
# encode sequence of images into gif string
clip = concatenate_videoclips(clips)
filename = tempfile.NamedTemporaryFile(suffix=".gif", delete=False).name
    # moviepy >= 1.0.0 uses logger=None to suppress its progress-bar output.
try:
clip.write_gif(filename, verbose=False, logger=None)
except TypeError:
get_logger().warning(
"Upgrade to moviepy >= 1.0.0 to suppress the progress bar."
)
clip.write_gif(filename, verbose=False)
with open(filename, "rb") as f:
tensor_string = f.read()
try:
os.remove(filename)
except OSError:
get_logger().warning("The temporary file used by moviepy cannot be deleted.")
return TBXSummary.Image(
height=h, width=w, colorspace=c, encoded_image_string=tensor_string
)
def process_video(render, max_clip_len=500, max_video_len=-1, fps=4):
output = []
hwc = None
if len(render) > 0:
if len(render) > max_video_len > 0:
get_logger().warning(
"Clipping video to first {} frames out of {} original frames".format(
max_video_len, len(render)
)
)
render = render[:max_video_len]
for clipstart in range(0, len(render), max_clip_len):
clip = render[clipstart : clipstart + max_clip_len]
try:
current = np.stack(clip, axis=0) # T, H, W, C
current = current.transpose((0, 3, 1, 2)) # T, C, H, W
current = np.expand_dims(current, axis=0) # 1, T, C, H, W
current, cur_hwc = tensor_to_clip(current, fps=fps)
if hwc is None:
hwc = cur_hwc
else:
assert (
hwc == cur_hwc
), "Inconsistent clip shape: previous {} current {}".format(
hwc, cur_hwc
)
output.append(current)
except MemoryError:
get_logger().error(
"Skipping video due to memory error with clip of length {}".format(
len(clip)
)
)
return None
else:
get_logger().warning("Calling process_video with 0 frames")
return None
assert len(output) > 0, "No clips to concatenate"
assert hwc is not None, "No tensor dims assigned"
try:
result = clips_to_video(output, *hwc)
except MemoryError:
get_logger().error("Skipping video due to memory error calling clips_to_video")
result = None
return result
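# Illustrative sketch added for documentation; this helper is not part of the original
# module. `process_video` expects a list of HWC uint8 frames; the 8 random 48x48 frames
# and the clip length of 4 are arbitrary choices (moviepy is required at call time).
def _process_video_example():
    frames = [
        np.random.randint(0, 256, size=(48, 48, 3), dtype=np.uint8) for _ in range(8)
    ]
    # Returns a TBXSummary.Image wrapping the encoded gif, or None on memory errors.
    return process_video(frames, max_clip_len=4, fps=2)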
class ScaleBothSides(object):
"""Rescales the input PIL.Image to the given 'width' and `height`.
Attributes
width: new width
height: new height
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, width: int, height: int, interpolation=Image.BILINEAR):
self.width = width
self.height = height
self.interpolation = interpolation
def __call__(self, img: PIL.Image) -> PIL.Image:
return img.resize((self.width, self.height), self.interpolation)
| ask4help-main | allenact/utils/tensor_utils.py |
import math
from typing import Dict, Any, Union, Callable, Optional
from allenact.utils.system import get_logger
def pos_to_str_for_cache(pos: Dict[str, float]) -> str:
return "_".join([str(pos["x"]), str(pos["y"]), str(pos["z"])])
def str_to_pos_for_cache(s: str) -> Dict[str, float]:
split = s.split("_")
return {"x": float(split[0]), "y": float(split[1]), "z": float(split[2])}
def get_distance(
cache: Dict[str, Any], pos: Dict[str, float], target: Dict[str, float]
) -> float:
pos = {
"x": 0.25 * math.ceil(pos["x"] / 0.25),
"y": pos["y"],
"z": 0.25 * math.ceil(pos["z"] / 0.25),
}
sp = _get_shortest_path_distance_from_cache(cache, pos, target)
if sp == -1.0:
pos = {
"x": 0.25 * math.floor(pos["x"] / 0.25),
"y": pos["y"],
"z": 0.25 * math.ceil(pos["z"] / 0.25),
}
sp = _get_shortest_path_distance_from_cache(cache, pos, target)
if sp == -1.0:
pos = {
"x": 0.25 * math.ceil(pos["x"] / 0.25),
"y": pos["y"],
"z": 0.25 * math.floor(pos["z"] / 0.25),
}
sp = _get_shortest_path_distance_from_cache(cache, pos, target)
if sp == -1.0:
pos = {
"x": 0.25 * math.floor(pos["x"] / 0.25),
"y": pos["y"],
"z": 0.25 * math.floor(pos["z"] / 0.25),
}
sp = _get_shortest_path_distance_from_cache(cache, pos, target)
if sp == -1.0:
pos = find_nearest_point_in_cache(cache, pos)
sp = _get_shortest_path_distance_from_cache(cache, pos, target)
if sp == -1.0:
target = find_nearest_point_in_cache(cache, target)
sp = _get_shortest_path_distance_from_cache(cache, pos, target)
if sp == -1.0:
print("Your cache is incomplete!")
exit()
return sp
def get_distance_to_object(
cache: Dict[str, Any], pos: Dict[str, float], target_class: str
) -> float:
dists = []
weights = []
for rounder_func_0 in [math.ceil, math.floor]:
for rounder_func_1 in [math.ceil, math.floor]:
rounded_pos = {
"x": 0.25 * rounder_func_0(pos["x"] / 0.25),
"y": pos["y"],
"z": 0.25 * rounder_func_1(pos["z"] / 0.25),
}
dist = _get_shortest_path_distance_to_object_from_cache(
cache, rounded_pos, target_class
)
if dist >= 0:
dists.append(dist)
weights.append(
1.0
/ (
math.sqrt(
(pos["x"] - rounded_pos["x"]) ** 2
+ (pos["z"] - rounded_pos["z"]) ** 2
)
                    + 1e-6  # small epsilon to avoid division by zero on exact grid points
)
)
if len(dists) == 0:
raise RuntimeError("Your cache is incomplete!")
total_weight = sum(weights)
weights = [w / total_weight for w in weights]
return sum(d * w for d, w in zip(dists, weights))
def _get_shortest_path_distance_from_cache(
cache: Dict[str, Any], position: Dict[str, float], target: Dict[str, float]
) -> float:
try:
return cache[pos_to_str_for_cache(position)][pos_to_str_for_cache(target)][
"distance"
]
except KeyError:
return -1.0
def _get_shortest_path_distance_to_object_from_cache(
cache: Dict[str, Any], position: Dict[str, float], target_class: str
) -> float:
try:
return cache[pos_to_str_for_cache(position)][target_class]["distance"]
except KeyError:
return -1.0
def find_nearest_point_in_cache(
cache: Dict[str, Any], point: Dict[str, float]
) -> Dict[str, float]:
best_delta = float("inf")
closest_point: Dict[str, float] = {}
for p in cache:
pos = str_to_pos_for_cache(p)
delta = (
abs(point["x"] - pos["x"])
+ abs(point["y"] - pos["y"])
+ abs(point["z"] - pos["z"])
)
if delta < best_delta:
best_delta = delta
closest_point = pos
return closest_point
class DynamicDistanceCache(object):
def __init__(self, rounding: Optional[int] = None):
self.cache: Dict[str, Any] = {}
self.rounding = rounding
self.hits = 0
self.misses = 0
self.num_accesses = 0
def find_distance(
self,
scene_name: str,
position: Dict[str, Any],
target: Union[Dict[str, Any], str],
native_distance_function: Callable[
[Dict[str, Any], Union[Dict[str, Any], str]], float
],
) -> float:
# Convert the position to its rounded string representation
position_str = scene_name + self._pos_to_str(position)
# If the target is also a position, convert it to its rounded string representation
if isinstance(target, str):
target_str = target
else:
target_str = self._pos_to_str(target)
if position_str not in self.cache:
self.cache[position_str] = {}
if target_str not in self.cache[position_str]:
self.cache[position_str][target_str] = native_distance_function(
position, target
)
self.misses += 1
else:
self.hits += 1
self.num_accesses += 1
if self.num_accesses % 1000 == 0:
get_logger().debug("Cache Miss-Hit Ratio: %.4f" % (self.misses / self.hits))
return self.cache[position_str][target_str]
def invalidate(self):
        self.cache = {}
def _pos_to_str(self, pos: Dict[str, Any]) -> str:
if self.rounding:
pos = {k: round(v, self.rounding) for k, v in pos.items()}
return str(pos)
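# Illustrative sketch added for documentation; this helper is not part of the original
# module. The scene name and the Euclidean stand-in for the (typically expensive)
# native distance function are made up for the example.
def _dynamic_distance_cache_example() -> float:
    cache = DynamicDistanceCache(rounding=2)

    def euclidean(p: Dict[str, Any], t: Dict[str, Any]) -> float:
        return (
            (p["x"] - t["x"]) ** 2 + (p["y"] - t["y"]) ** 2 + (p["z"] - t["z"]) ** 2
        ) ** 0.5

    pos = {"x": 0.0, "y": 0.0, "z": 0.0}
    target = {"x": 3.0, "y": 0.0, "z": 4.0}
    d1 = cache.find_distance("DummyScene", pos, target, euclidean)  # miss: computed
    d2 = cache.find_distance("DummyScene", pos, target, euclidean)  # hit: cached
    assert d1 == d2 == 5.0
    return d1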
| ask4help-main | allenact/utils/cache_utils.py |
import os
import sys
from pathlib import Path
from subprocess import getoutput
def make_package(name, verbose=False):
"""Prepares sdist for allenact or allenact_plugins."""
orig_dir = os.getcwd()
base_dir = os.path.join(os.path.abspath(os.path.dirname(Path(__file__))), "..")
os.chdir(base_dir)
with open(".VERSION", "r") as f:
__version__ = f.readline().strip()
# generate sdist via setuptools
output = getoutput(f"{sys.executable} {name}/setup.py sdist")
if verbose:
print(output)
os.chdir(os.path.join(base_dir, "dist"))
# uncompress the tar.gz sdist
output = getoutput(f"tar zxvf {name}-{__version__}.tar.gz")
if verbose:
print(output)
# copy setup.py to the top level of the package (required by pip install)
output = getoutput(
f"cp {name}-{__version__}/{name}/setup.py {name}-{__version__}/setup.py"
)
if verbose:
print(output)
# create new source file with version
getoutput(
f"printf '__version__ = \"{__version__}\"\n' >> {name}-{__version__}/{name}/_version.py"
)
# include it in sources
getoutput(
f'printf "\n{name}/_version.py" >> {name}-{__version__}/{name}.egg-info/SOURCES.txt'
)
# recompress tar.gz
output = getoutput(f"tar zcvf {name}-{__version__}.tar.gz {name}-{__version__}/")
if verbose:
print(output)
# remove temporary directory
output = getoutput(f"rm -r {name}-{__version__}")
if verbose:
print(output)
os.chdir(orig_dir)
if __name__ == "__main__":
verbose = False
make_package("allenact", verbose)
make_package("allenact_plugins", verbose)
| ask4help-main | scripts/release.py |
#!/usr/bin/env python3
"""Tool to run command on multiple nodes through SSH."""
import os
import argparse
import glob
def get_argument_parser():
"""Creates the argument parser."""
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
description="dcommand", formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--runs_on",
required=False,
type=str,
default=None,
help="Comma-separated IP addresses of machines. If empty, the tool will scan for lists of IP addresses"
" in `screen_ids_file`s in the `~/.allenact` directory.",
)
parser.add_argument(
"--ssh_cmd",
required=False,
type=str,
default="ssh {addr}",
help="SSH command. Useful to utilize a pre-shared key with 'ssh -i path/to/mykey.pem ubuntu@{addr}'.",
)
parser.add_argument(
"--command",
required=False,
default="nvidia-smi | head -n 35",
type=str,
help="Command to be run through ssh onto each machine",
)
return parser
def get_args():
"""Creates the argument parser and parses any input arguments."""
parser = get_argument_parser()
args = parser.parse_args()
return args
def wrap_double(text):
return f'"{text}"'
def wrap_single(text):
return f"'{text}'"
def wrap_single_nested(text, quote=r"'\''"):
return f"{quote}{text}{quote}"
if __name__ == "__main__":
args = get_args()
all_addresses = []
if args.runs_on is not None:
all_addresses = args.runs_on.split(",")
else:
all_files = sorted(
glob.glob(os.path.join(os.path.expanduser("~"), ".allenact", "*.killfile")),
reverse=True,
)
if len(all_files) == 0:
print(
f"No screen_ids_file found under {os.path.join(os.path.expanduser('~'), '.allenact')}"
)
for killfile in all_files:
with open(killfile, "r") as f:
# Each line contains 'IP_address screen_ID'
nodes = [tuple(line[:-1].split(" ")) for line in f.readlines()]
all_addresses = [node[0] for node in nodes]
use_addresses = ""
while use_addresses not in ["y", "n"]:
use_addresses = input(
f"Run on {all_addresses} from {killfile}? [Y/n] "
).lower()
if use_addresses == "":
use_addresses = "y"
if use_addresses == "n":
all_addresses = []
else:
break
print(f"Running on IP addresses {all_addresses}")
for it, addr in enumerate(all_addresses):
ssh_command = f"{args.ssh_cmd.format(addr=addr)} {wrap_single(args.command)}"
print(f"{it} {addr} SSH command {ssh_command}")
os.system(ssh_command)
print("DONE")
| ask4help-main | scripts/dcommand.py |
import glob
import os
import shutil
import sys
from pathlib import Path
from subprocess import check_output
from threading import Thread
from typing import Dict, Union, Optional, Set, List, Sequence, Mapping
from git import Git
from ruamel.yaml import YAML # type: ignore
from constants import ABS_PATH_OF_TOP_LEVEL_DIR
# TODO: the scripts directory shouldn't be a module (as it conflicts with
# some local development workflows) but we do want to import scripts/literate.py.
# Temporary solution is just to modify the sys.path when this script is run.
sys.path.append(os.path.abspath(os.path.dirname(Path(__file__))))
from literate import literate_python_to_markdown
class StringColors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
exclude_files = [
".DS_Store",
"__init__.py",
"__init__.pyc",
"README.md",
"version.py",
"run.py",
"setup.py",
"main.py",
]
def render_file(
relative_src_path: str, src_file: str, to_file: str, modifier=""
) -> None:
"""Shells out to pydocmd, which creates a .md file from the docstrings of
python functions and classes in the file we specify.
The modifer specifies the depth at which to generate docs for
classes and functions in the file. More information here:
https://pypi.org/project/pydoc-markdown/
"""
# First try literate
was_literate = False
try:
was_literate = literate_python_to_markdown(
path=os.path.join(relative_src_path, src_file)
)
except Exception as _:
pass
if was_literate:
return
# Now do standard pydocmd
relative_src_namespace = relative_src_path.replace("/", ".")
src_base = src_file.replace(".py", "")
if relative_src_namespace == "":
namespace = f"{src_base}{modifier}"
else:
namespace = f"{relative_src_namespace}.{src_base}{modifier}"
pydoc_config = """'{
renderer: {
type: markdown,
code_headers: true,
descriptive_class_title: false,
add_method_class_prefix: true,
source_linker: {type: github, repo: allenai/allenact},
header_level_by_type: {
Module: 1,
Class: 2,
Method: 3,
Function: 3,
Data: 3,
}
}
}'"""
pydoc_config = " ".join(pydoc_config.split())
args = ["pydoc-markdown", "-m", namespace, pydoc_config]
try:
call_result = check_output([" ".join(args)], shell=True, env=os.environ).decode(
"utf-8"
)
# noinspection PyShadowingNames
with open(to_file, "w") as f:
doc_split = call_result.split("\n")
# github_path = "https://github.com/allenai/allenact/tree/master/"
# path = (
# github_path + namespace.replace(".", "/") + ".py"
# )
# mdlink = "[[source]]({})".format(path)
mdlink = "" # Removing the above source link for now.
call_result = "\n".join([doc_split[0] + " " + mdlink] + doc_split[1:])
call_result = call_result.replace("_DOC_COLON_", ":")
f.write(call_result)
print(
f"{StringColors.OKGREEN}[SUCCESS]{StringColors.ENDC} built docs for {src_file} -> {to_file}."
)
except Exception as _:
cmd = " ".join(args)
print(
f"{StringColors.WARNING}[SKIPPING]{StringColors.ENDC} could not"
f" build docs for {src_file} (missing an import?). CMD: '{cmd}'"
)
# noinspection PyShadowingNames
def build_docs_for_file(
relative_path: str, file_name: str, docs_dir: str, threads: List
) -> Dict[str, str]:
"""Build docs for an individual python file."""
clean_filename = file_name.replace(".py", "")
markdown_filename = f"{clean_filename}.md"
output_path = os.path.join(docs_dir, relative_path, markdown_filename)
nav_path = os.path.join("api", relative_path, markdown_filename)
thread = Thread(target=render_file, args=(relative_path, file_name, output_path))
thread.start()
threads.append(thread)
return {os.path.basename(clean_filename): nav_path}
# noinspection PyShadowingNames
def build_docs(
base_dir: Union[Path, str],
root_path: Union[Path, str],
docs_dir: Union[Path, str],
threads: List,
allowed_dirs: Optional[Set[str]] = None,
):
base_dir, root_path, docs_dir = str(base_dir), str(root_path), str(docs_dir)
nav_root = []
for child in os.listdir(root_path):
relative_path = os.path.join(root_path, child)
if (
(allowed_dirs is not None)
and (os.path.isdir(relative_path))
and (os.path.abspath(relative_path) not in allowed_dirs)
# or ".git" in relative_path
# or ".idea" in relative_path
# or "__pycache__" in relative_path
# or "tests" in relative_path
# or "mypy_cache" in relative_path
):
print("SKIPPING {}".format(relative_path))
continue
# without_allenact = str(root_path).replace("allenact/", "")
new_path = os.path.relpath(root_path, base_dir).replace(".", "")
target_dir = os.path.join(docs_dir, new_path)
if not os.path.exists(target_dir):
os.mkdir(target_dir)
if os.path.isdir(relative_path):
nav_subsection = build_docs(
base_dir,
relative_path,
docs_dir,
threads=threads,
allowed_dirs=allowed_dirs,
)
if not nav_subsection:
continue
nav_root.append({child: nav_subsection})
else:
if child in exclude_files or not child.endswith(".py"):
continue
nav = build_docs_for_file(new_path, child, docs_dir, threads=threads)
nav_root.append(nav)
return nav_root
def project_readme_paths_to_nav_structure(project_readmes):
nested_dict = {}
for fp in project_readmes:
has_seen_project_dir = False
sub_nested_dict = nested_dict
split_fp = os.path.dirname(fp).split("/")
for i, yar in enumerate(split_fp):
has_seen_project_dir = has_seen_project_dir or yar == "projects"
if not has_seen_project_dir or yar == "projects":
continue
if yar not in sub_nested_dict:
if i == len(split_fp) - 1:
sub_nested_dict[yar] = fp.replace("docs/", "")
break
else:
sub_nested_dict[yar] = {}
sub_nested_dict = sub_nested_dict[yar]
def recursively_create_nav_structure(nested_dict):
if isinstance(nested_dict, str):
return nested_dict
to_return = []
for key in nested_dict:
to_return.append({key: recursively_create_nav_structure(nested_dict[key])})
return to_return
return recursively_create_nav_structure(nested_dict)
def pruned_nav_entries(nav_entries):
if isinstance(nav_entries, str):
if os.path.exists(os.path.join("docs", nav_entries)):
return nav_entries
else:
return None
elif isinstance(nav_entries, Sequence):
new_entries = []
for entry in nav_entries:
entry = pruned_nav_entries(entry)
if entry:
new_entries.append(entry)
return new_entries
elif isinstance(nav_entries, Mapping):
new_entries = {}
for k, entry in nav_entries.items():
entry = pruned_nav_entries(entry)
if entry:
new_entries[k] = entry
return new_entries
else:
raise NotImplementedError()
def main():
os.chdir(ABS_PATH_OF_TOP_LEVEL_DIR)
print("Copying all README.md files to docs.")
with open("README.md") as f:
readme_content = f.readlines()
readme_content = [x.replace("docs/", "") for x in readme_content]
with open("docs/index.md", "w") as f:
f.writelines(readme_content)
project_readmes = []
for readme_file_path in glob.glob("projects/**/README.md", recursive=True):
if "docs/" not in readme_file_path:
new_path = os.path.join("docs", readme_file_path)
os.makedirs(os.path.dirname(new_path), exist_ok=True)
shutil.copy(readme_file_path, new_path)
project_readmes.append(new_path)
print("Copying LICENSE file to docs.")
shutil.copy("LICENSE", "docs/LICENSE.md")
print("Copying CONTRIBUTING.md file to docs.")
shutil.copy("CONTRIBUTING.md", "docs/CONTRIBUTING.md")
# print("Copying CNAME file to docs.")
# shutil.copy("CNAME", "docs/CNAME")
print("Building the docs.")
parent_folder_path = Path(__file__).parent.parent
yaml_path = parent_folder_path / "mkdocs.yml"
source_path = parent_folder_path
docs_dir = str(parent_folder_path / "docs" / "api")
if not os.path.exists(docs_dir):
os.mkdir(docs_dir)
# Adding project readmes to the yaml
yaml = YAML()
mkdocs_yaml = yaml.load(yaml_path)
site_nav = mkdocs_yaml["nav"]
# TODO Find a way to do the following in a way that results in nice titles.
# projects_key = "Projects using allenact"
# nav_obj = None
# for obj in site_nav:
# if projects_key in obj:
# nav_obj = obj
# break
# nav_obj[projects_key] = project_readme_paths_to_nav_structure(project_readmes)
with open(yaml_path, "w") as f:
yaml.dump(mkdocs_yaml, f)
# Get directories to ignore
git_dirs = set(
os.path.abspath(os.path.split(p)[0]) for p in Git(".").ls_files().split("\n")
)
ignore_rel_dirs = [
"docs",
"scripts",
"experiments",
"src",
".pip_src",
"dist",
"build",
]
ignore_abs_dirs = set(
os.path.abspath(os.path.join(str(parent_folder_path), rel_dir))
for rel_dir in ignore_rel_dirs
)
for d in ignore_abs_dirs:
if d in git_dirs:
git_dirs.remove(d)
threads: List = []
nav_entries = build_docs(
parent_folder_path,
source_path,
docs_dir,
threads=threads,
allowed_dirs=git_dirs,
)
nav_entries.sort(key=lambda x: list(x)[0], reverse=False)
for thread in threads:
thread.join()
nav_entries = pruned_nav_entries(nav_entries)
docs_key = "API"
# Find the yaml corresponding to the API
nav_obj = None
for obj in site_nav:
if docs_key in obj:
nav_obj = obj
break
nav_obj[docs_key] = nav_entries
with open(yaml_path, "w") as f:
yaml.dump(mkdocs_yaml, f)
if __name__ == "__main__":
main()
| ask4help-main | scripts/build_docs.py |
#!/usr/bin/env python3
import os
import argparse
def get_argument_parser():
"""Creates the argument parser."""
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
description="dconfig", formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--runs_on",
required=True,
type=str,
help="Comma-separated IP addresses of machines",
)
parser.add_argument(
"--config_script",
required=True,
type=str,
help="Path to bash script with configuration",
)
parser.add_argument(
"--ssh_cmd",
required=False,
type=str,
default="ssh -f {addr}",
help="SSH command. Useful to utilize a pre-shared key with 'ssh -i path/to/mykey.pem -f ubuntu@{addr}'. "
"The option `-f` should be used, since we want a non-interactive session",
)
parser.add_argument(
"--distribute_public_rsa_key",
dest="distribute_public_rsa_key",
action="store_true",
required=False,
help="if you pass the `--distribute_public_rsa_key` flag, the manager node's public key will be added to the "
"authorized keys of all workers (this is necessary in default-configured EC2 instances to use "
"`scripts/dmain.py`)",
)
parser.set_defaults(distribute_public_rsa_key=False)
return parser
def get_args():
"""Creates the argument parser and parses any input arguments."""
parser = get_argument_parser()
args = parser.parse_args()
return args
def wrap_double(text):
return f'"{text}"'
def wrap_single(text):
return f"'{text}'"
def wrap_single_nested(text, quote=r"'\''"):
return f"{quote}{text}{quote}"
if __name__ == "__main__":
args = get_args()
all_addresses = args.runs_on.split(",")
print(f"Running on addresses {all_addresses}")
remote_config_script = f"{args.config_script}.distributed"
for it, addr in enumerate(all_addresses):
if args.distribute_public_rsa_key:
key_command = (
f"{args.ssh_cmd.format(addr=addr)} "
f"{wrap_double('echo $(cat ~/.ssh/id_rsa.pub) >> ~/.ssh/authorized_keys')}"
)
print(f"Key command {key_command}")
os.system(f"{key_command}")
scp_cmd = (
args.ssh_cmd.replace("ssh ", "scp ")
.replace("-f", args.config_script)
.format(addr=addr)
)
print(f"SCP command {scp_cmd}:{remote_config_script}")
os.system(f"{scp_cmd}:{remote_config_script}")
screen_name = f"allenact_config_machine{it}"
bash_command = wrap_single_nested(
f"source {remote_config_script} &>> log_allenact_distributed_config"
)
screen_command = wrap_single(
f"screen -S {screen_name} -dm bash -c {bash_command}"
)
ssh_command = f"{args.ssh_cmd.format(addr=addr)} {screen_command}"
print(f"SSH command {ssh_command}")
os.system(ssh_command)
print(f"{addr} {screen_name}")
print("DONE")
| ask4help-main | scripts/dconfig.py |
#!/usr/bin/env python3
"""Tool to terminate multi-node (distributed) training."""
import os
import argparse
import glob
def get_argument_parser():
"""Creates the argument parser."""
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
description="dkill", formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--screen_ids_file",
required=False,
type=str,
default=None,
help="Path to file generated by dmain.py with IPs and screen ids for nodes running process."
" If empty, the tool will scan the `~/.allenact` directory for `screen_ids_file`s.",
)
parser.add_argument(
"--ssh_cmd",
required=False,
type=str,
default="ssh {addr}",
help="SSH command. Useful to utilize a pre-shared key with 'ssh -i mykey.pem ubuntu@{addr}'. ",
)
return parser
def get_args():
"""Creates the argument parser and parses any input arguments."""
parser = get_argument_parser()
args = parser.parse_args()
return args
if __name__ == "__main__":
args = get_args()
all_files = (
[args.screen_ids_file]
if args.screen_ids_file is not None
else sorted(
glob.glob(os.path.join(os.path.expanduser("~"), ".allenact", "*.killfile")),
reverse=True,
)
)
if len(all_files) == 0:
print(
f"No screen_ids_file found under {os.path.join(os.path.expanduser('~'), '.allenact')}"
)
for killfile in all_files:
with open(killfile, "r") as f:
nodes = [tuple(line[:-1].split(" ")) for line in f.readlines()]
do_kill = ""
while do_kill not in ["y", "n"]:
do_kill = input(
f"Stopping processes on {nodes} from {killfile}? [y/N] "
).lower()
if do_kill == "":
do_kill = "n"
if do_kill == "y":
for it, node in enumerate(nodes):
addr, screen_name = node
print(f"Killing screen {screen_name} on {addr}")
ssh_command = (
f"{args.ssh_cmd.format(addr=addr)} '"
f"screen -S {screen_name} -p 0 -X quit ; "
f"sleep 1 ; "
f"echo Master processes left running: ; "
f"ps aux | grep Master: | grep -v grep ; "
f"echo ; "
f"'"
)
# print(f"SSH command {ssh_command}")
os.system(ssh_command)
do_delete = ""
while do_delete not in ["y", "n"]:
do_delete = input(f"Delete file {killfile}? [y/N] ").lower()
if do_delete == "":
do_delete = "n"
if do_delete == "y":
os.system(f"rm {killfile}")
print(f"Deleted {killfile}")
print("DONE")
| ask4help-main | scripts/dkill.py |
"""Helper functions used to create literate documentation from python files."""
import importlib
import inspect
import os
from typing import Optional, Sequence, List, cast, TextIO
from constants import ABS_PATH_OF_DOCS_DIR, ABS_PATH_OF_TOP_LEVEL_DIR
def get_literate_output_path(file: TextIO) -> Optional[str]:
for l in file:
l = l.strip()
if l != "":
if l.lower().startswith(("# literate", "#literate")):
parts = l.split(":")
if len(parts) == 1:
assert (
file.name[-3:].lower() == ".py"
), "Can only run literate on python (*.py) files."
return file.name[:-3] + ".md"
elif len(parts) == 2:
rel_outpath = parts[1].strip()
outpath = os.path.abspath(
os.path.join(ABS_PATH_OF_DOCS_DIR, rel_outpath)
)
assert outpath.startswith(
ABS_PATH_OF_DOCS_DIR
), f"Path {outpath} is not allowed, must be within {ABS_PATH_OF_DOCS_DIR}."
return outpath
else:
raise NotImplementedError(
f"Line '{l}' is not of the correct format."
)
else:
return None
return None
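# Illustrative sketch added for documentation; this helper is not part of the original
# module. A python file opts into literate rendering by starting with a directive such
# as `# literate: tutorials/my_tutorial.md` (a path under ALLENACT_BASE_DIR/docs), or a
# bare `# literate` to write a sibling *.md file; the path argument below is hypothetical.
def _literate_directive_example(path_to_py_file: str) -> Optional[str]:
    """Return the markdown output path implied by the file's literate header, if any."""
    with open(path_to_py_file, "r") as f:
        return get_literate_output_path(f)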
def source_to_markdown(dot_path: str, summarize: bool = False):
importlib.invalidate_caches()
module_path, obj_name = ".".join(dot_path.split(".")[:-1]), dot_path.split(".")[-1]
module = importlib.import_module(module_path)
obj = getattr(module, obj_name)
source = inspect.getsource(obj)
if not summarize:
return source
elif inspect.isclass(obj):
lines = source.split("\n")
newlines = [lines[0]]
whitespace_len = float("inf")
k = 1
started = False
while k < len(lines):
l = lines[k]
lstripped = l.lstrip()
if started:
newlines.append(l)
started = "):" not in l and "->" not in l
if not started:
newlines.append(l[: cast(int, whitespace_len)] + " ...\n")
if (
l.lstrip().startswith("def ")
and len(l) - len(lstripped) <= whitespace_len
):
whitespace_len = len(l) - len(lstripped)
newlines.append(l)
started = "):" not in l and "->" not in l
if not started:
newlines.append(l[:whitespace_len] + " ...\n")
k += 1
return "\n".join(newlines).strip()
elif inspect.isfunction(obj):
return source.split("\n")[0] + "\n ..."
else:
return
def _strip_empty_lines(lines: Sequence[str]) -> List[str]:
lines = list(lines)
if len(lines) == 0:
return lines
for i in range(len(lines)):
if lines[i].strip() != "":
lines = lines[i:]
break
for i in reversed(list(range(len(lines)))):
if lines[i].strip() != "":
lines = lines[: i + 1]
break
return lines
def literate_python_to_markdown(path: str) -> bool:
assert path[-3:].lower() == ".py", "Can only run literate on python (*.py) files."
with open(path, "r") as file:
output_path = get_literate_output_path(file)
if output_path is None:
return False
output_lines = [
f"<!-- DO NOT EDIT THIS FILE. --> ",
f"<!-- THIS FILE WAS AUTOGENERATED FROM"
f" 'ALLENACT_BASE_DIR/{os.path.relpath(path, ABS_PATH_OF_TOP_LEVEL_DIR)}', EDIT IT INSTEAD. -->\n",
]
md_lines: List[str] = []
code_lines = md_lines
lines = file.readlines()
mode = None
for line in lines:
line = line.rstrip()
stripped_line = line.strip()
if (mode is None or mode == "change") and line.strip() == "":
continue
if mode == "markdown":
if stripped_line in ['"""', "'''"]:
output_lines.extend(_strip_empty_lines(md_lines) + [""])
md_lines.clear()
mode = None
elif stripped_line.endswith(('"""', "'''")):
output_lines.extend(
_strip_empty_lines(md_lines) + [stripped_line[:-3]]
)
md_lines.clear()
mode = None
# TODO: Does not account for the case where a string is ended with a comment.
else:
md_lines.append(line.strip())
elif stripped_line.startswith(("# %%", "#%%")):
last_mode = mode
mode = "change"
if last_mode == "code":
output_lines.extend(
["```python"] + _strip_empty_lines(code_lines) + ["```"]
)
code_lines.clear()
if " import " in stripped_line:
path = stripped_line.split(" import ")[-1].strip()
output_lines.append(
"```python\n" + source_to_markdown(path) + "\n```"
)
elif " import_summary " in stripped_line:
path = stripped_line.split(" import_summary ")[-1].strip()
output_lines.append(
"```python\n"
+ source_to_markdown(path, summarize=True)
+ "\n```"
)
elif " hide" in stripped_line:
mode = "hide"
elif mode == "hide":
continue
elif mode == "change":
if stripped_line.startswith(('"""', "'''")):
mode = "markdown"
if len(stripped_line) != 3:
if stripped_line.endswith(('"""', "'''")):
output_lines.append(stripped_line[3:-3])
mode = "change"
else:
output_lines.append(stripped_line[3:])
else:
mode = "code"
code_lines.append(line)
elif mode == "code":
code_lines.append(line)
else:
raise NotImplementedError(
f"mode {mode} is not implemented. Last 5 lines: "
+ "\n".join(output_lines[-5:])
)
if mode == "code" and len(code_lines) != 0:
output_lines.extend(
["```python"] + _strip_empty_lines(code_lines) + ["```"]
)
with open(output_path, "w") as f:
f.writelines([l + "\n" for l in output_lines])
return True
if __name__ == "__main__":
# print(
# source_to_markdown(
# "allenact_plugins.minigrid_plugin.minigrid_offpolicy.ExpertTrajectoryIterator",
# True
# )
# )
literate_python_to_markdown(
os.path.join(
ABS_PATH_OF_TOP_LEVEL_DIR,
"projects/tutorials/training_a_pointnav_model.py",
)
)
| ask4help-main | scripts/literate.py |
import atexit
import os
import platform
import re
import shlex
import subprocess
import tempfile
# Turning off automatic black formatting for this script as it breaks quotes.
# fmt: off
def pci_records():
records = []
command = shlex.split("lspci -vmm")
output = subprocess.check_output(command).decode()
for devices in output.strip().split("\n\n"):
record = {}
records.append(record)
for row in devices.split("\n"):
key, value = row.split("\t")
record[key.split(":")[0]] = value
return records
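# Illustrative sketch added for documentation; this helper is not part of the original
# module. `lspci -vmm` emits blank-line-separated records of tab-separated "Key:<TAB>Value"
# rows; `pci_records()` turns each record into a dict roughly like the made-up one below.
def _example_pci_record():
    return {
        "Slot": "00:1e.0",
        "Class": "3D controller",
        "Vendor": "NVIDIA Corporation",
    }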
def generate_xorg_conf(devices):
xorg_conf = []
device_section = """
Section "Device"
Identifier "Device{device_id}"
Driver "nvidia"
VendorName "NVIDIA Corporation"
BusID "{bus_id}"
EndSection
"""
server_layout_section = """
Section "ServerLayout"
Identifier "Layout0"
{screen_records}
EndSection
"""
screen_section = """
Section "Screen"
Identifier "Screen{screen_id}"
Device "Device{device_id}"
DefaultDepth 24
Option "AllowEmptyInitialConfiguration" "True"
SubSection "Display"
Depth 24
Virtual 1024 768
EndSubSection
EndSection
"""
screen_records = []
for i, bus_id in enumerate(devices):
xorg_conf.append(device_section.format(device_id=i, bus_id=bus_id))
xorg_conf.append(screen_section.format(device_id=i, screen_id=i))
screen_records.append('Screen {screen_id} "Screen{screen_id}" 0 0'.format(screen_id=i))
xorg_conf.append(server_layout_section.format(screen_records="\n ".join(screen_records)))
output = "\n".join(xorg_conf)
return output
def startx(display=0):
if platform.system() != "Linux":
raise Exception("Can only run startx on linux")
devices = []
for r in pci_records():
if r.get("Vendor", "") == "NVIDIA Corporation"\
and r["Class"] in ["VGA compatible controller", "3D controller"]:
bus_id = "PCI:" + ":".join(map(lambda x: str(int(x, 16)), re.split(r"[:\.]", r["Slot"])))
devices.append(bus_id)
if not devices:
raise Exception("no nvidia cards found")
try:
fd, path = tempfile.mkstemp()
with open(path, "w") as f:
f.write(generate_xorg_conf(devices))
command = shlex.split("Xorg -noreset +extension GLX +extension RANDR +extension RENDER -config %s :%s" % (path, display))
proc = subprocess.Popen(command)
atexit.register(lambda: proc.poll() is None and proc.kill())
proc.wait()
finally:
os.close(fd)
os.unlink(path)
# fmt: on
if __name__ == "__main__":
startx()
| ask4help-main | scripts/startx.py |
#!/usr/bin/env python3
"""Entry point to multi-node (distributed) training for a user given experiment
name."""
import sys
import os
import time
import random
import string
from pathlib import Path
from typing import Optional
import subprocess
# Add to PYTHONPATH the path of the parent directory of the current file's directory
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(Path(__file__)))))
from allenact.main import get_argument_parser as get_main_arg_parser
from allenact.utils.system import init_logging, get_logger
from constants import ABS_PATH_OF_TOP_LEVEL_DIR
def get_argument_parser():
"""Creates the argument parser."""
parser = get_main_arg_parser()
parser.description = f"distributed {parser.description}"
parser.add_argument(
"--runs_on",
required=True,
type=str,
help="Comma-separated IP addresses of machines",
)
parser.add_argument(
"--ssh_cmd",
required=False,
type=str,
default="ssh -f {addr}",
help="SSH command. Useful to utilize a pre-shared key with 'ssh -i mykey.pem -f ubuntu@{addr}'. "
"The option `-f` should be used for non-interactive session",
)
parser.add_argument(
"--env_activate_path",
required=True,
type=str,
help="Path to the virtual environment's `activate` script. It must be the same across all machines",
)
parser.add_argument(
"--allenact_path",
required=False,
type=str,
default="allenact",
help="Path to allenact top directory. It must be the same across all machines",
)
# Required distributed_ip_and_port
idx = [a.dest for a in parser._actions].index("distributed_ip_and_port")
parser._actions[idx].required = True
return parser
def get_args():
"""Creates the argument parser and parses any input arguments."""
parser = get_argument_parser()
args = parser.parse_args()
return args
def get_raw_args():
raw_args = sys.argv[1:]
filtered_args = []
remove: Optional[str] = None
enclose_in_quotes: Optional[str] = None
for arg in raw_args:
if remove is not None:
remove = None
elif enclose_in_quotes is not None:
# Within backslash expansion: close former single, open double, create single, close double, reopen single
inner_quote = r"\'\"\'\"\'"
# Convert double quotes into backslash double for later expansion
filtered_args.append(
inner_quote + arg.replace('"', r"\"").replace("'", r"\"") + inner_quote
)
enclose_in_quotes = None
elif arg in [
"--runs_on",
"--ssh_cmd",
"--env_activate_path",
"--allenact_path",
"--extra_tag",
"--machine_id",
]:
remove = arg
elif arg == "--config_kwargs":
enclose_in_quotes = arg
filtered_args.append(arg)
else:
filtered_args.append(arg)
return filtered_args
def wrap_single(text):
return f"'{text}'"
def wrap_single_nested(text):
# Close former single, start backslash expansion (via $), create new single quote for expansion:
quote_enter = r"'$'\'"
# New closing single quote for expansion, close backslash expansion, reopen former single:
quote_leave = r"\'''"
return f"{quote_enter}{text}{quote_leave}"
def wrap_double(text):
return f'"{text}"'
def id_generator(size=4, chars=string.ascii_uppercase + string.digits):
return "".join(random.choice(chars) for _ in range(size))
# Assume we can ssh into each of the `runs_on` machines through port 22
if __name__ == "__main__":
# Tool must be called from AllenAct project's root directory
cwd = os.path.abspath(os.getcwd())
assert cwd == ABS_PATH_OF_TOP_LEVEL_DIR, (
f"`dmain.py` called from {cwd}."
f"\nIt should be called from AllenAct's top level directory {ABS_PATH_OF_TOP_LEVEL_DIR}."
)
args = get_args()
init_logging(args.log_level)
raw_args = get_raw_args()
if args.seed is None:
seed = random.randint(0, 2 ** 31 - 1)
raw_args.extend(["-s", f"{seed}"])
get_logger().info(f"Using random seed {seed} in all workers (none was given)")
all_addresses = args.runs_on.split(",")
get_logger().info(f"Running on IP addresses {all_addresses}")
assert args.distributed_ip_and_port.split(":")[0] in all_addresses, (
f"Missing listener IP address {args.distributed_ip_and_port.split(':')[0]}"
f" in list of worker addresses {all_addresses}"
)
time_str = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime(time.time()))
global_job_id = id_generator()
killfilename = os.path.join(
os.path.expanduser("~"), ".allenact", f"{time_str}_{global_job_id}.killfile"
)
os.makedirs(os.path.dirname(killfilename), exist_ok=True)
code_src = "."
with open(killfilename, "w") as killfile:
for it, addr in enumerate(all_addresses):
code_tget = f"{addr}:{args.allenact_path}/"
get_logger().info(f"rsync {code_src} to {code_tget}")
os.system(f"rsync -rz {code_src} {code_tget}")
job_id = id_generator()
command = " ".join(
["python", "main.py"]
+ raw_args
+ [
"--extra_tag",
f"{args.extra_tag}{'__' if len(args.extra_tag) > 0 else ''}machine{it}",
]
+ ["--machine_id", f"{it}"]
)
logfile = (
f"{args.output_dir}/log_{time_str}_{global_job_id}_{job_id}_machine{it}"
)
env_and_command = wrap_single_nested(
f"for NCCL_SOCKET_IFNAME in $(route | grep default) ; do : ; done && export NCCL_SOCKET_IFNAME"
f" && cd {args.allenact_path}"
f" && mkdir -p {args.output_dir}"
f" && source {args.env_activate_path} &>> {logfile}"
f" && echo pwd=$(pwd) &>> {logfile}"
f" && echo output_dir={args.output_dir} &>> {logfile}"
f" && echo python_version=$(python --version) &>> {logfile}"
f" && echo python_path=$(which python) &>> {logfile}"
f" && set | grep NCCL_SOCKET_IFNAME &>> {logfile}"
f" && echo &>> {logfile}"
f" && {command} &>> {logfile}"
)
screen_name = f"allenact_{time_str}_{global_job_id}_{job_id}_machine{it}"
screen_command = wrap_single(
f"screen -S {screen_name} -dm bash -c {env_and_command}"
)
ssh_command = f"{args.ssh_cmd.format(addr=addr)} {screen_command}"
get_logger().debug(f"SSH command {ssh_command}")
subprocess.run(ssh_command, shell=True, executable="/bin/bash")
get_logger().info(f"{addr} {screen_name}")
killfile.write(f"{addr} {screen_name}\n")
get_logger().info("")
get_logger().info(f"Running screen ids saved to {killfilename}")
get_logger().info("")
get_logger().info("DONE")
| ask4help-main | scripts/dmain.py |
try:
from allenact_plugins._version import __version__
except ModuleNotFoundError:
__version__ = None
| ask4help-main | allenact_plugins/__init__.py |
import glob
import os
from pathlib import Path
from setuptools import find_packages, setup
def parse_req_file(fname, initial=None):
"""Reads requires.txt file generated by setuptools and outputs a
new/updated dict of extras as keys and corresponding lists of dependencies
as values.
The input file's contents are similar to a `ConfigParser` file, e.g.
pkg_1
pkg_2
pkg_3
[extras1]
pkg_4
pkg_5
[extras2]
pkg_6
pkg_7
"""
reqs = {} if initial is None else initial
cline = None
with open(fname, "r") as f:
for line in f.readlines():
line = line[:-1].strip()
if len(line) == 0:
continue
if line[0] == "[":
# Add new key for current extras (if missing in dict)
cline = line[1:-1].strip()
if cline not in reqs:
reqs[cline] = []
else:
# Only keep dependencies from extras
if cline is not None:
reqs[cline].append(line)
return reqs
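# Illustrative sketch added for documentation; this helper is not part of the original
# module. Note that dependencies listed before the first `[extras]` header are ignored
# and that `parse_req_file` strips the final character of every line, so the file must
# end with a newline.
def _parse_req_file_example():
    import tempfile

    content = "pkg_1\n\n[extras1]\npkg_4\npkg_5\n"
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
        tmp.write(content)
        path = tmp.name
    reqs = parse_req_file(path)
    os.remove(path)
    assert reqs == {"extras1": ["pkg_4", "pkg_5"]}
    return reqs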
def get_version(fname):
"""Reads PKG-INFO file generated by setuptools and extracts the Version
number."""
res = "UNK"
with open(fname, "r") as f:
for line in f.readlines():
line = line[:-1]
if line.startswith("Version:"):
res = line.replace("Version:", "").strip()
break
if res in ["UNK", ""]:
raise ValueError(f"Missing Version number in {fname}")
return res
if __name__ == "__main__":
base_dir = os.path.abspath(os.path.dirname(Path(__file__)))
if not os.path.exists(
os.path.join(base_dir, "allenact_plugins.egg-info/dependency_links.txt")
):
# Build mode for sdist
# Extra dependencies required for various plugins
extras = {}
for plugin_path in glob.glob(os.path.join(base_dir, "*_plugin")):
plugin_name = os.path.basename(plugin_path).replace("_plugin", "")
extra_reqs_path = os.path.join(plugin_path, "extra_requirements.txt")
if os.path.exists(extra_reqs_path):
with open(extra_reqs_path, "r") as f:
# Filter out non-PyPI dependencies
extras[plugin_name] = [
clean_dep
for clean_dep in (dep.strip() for dep in f.readlines())
if clean_dep != ""
and not clean_dep.startswith("#")
and "@ git+https://github.com/" not in clean_dep
]
extras["all"] = sum(extras.values(), [])
os.chdir(os.path.join(base_dir, ".."))
with open(".VERSION", "r") as f:
__version__ = f.readline().strip()
else:
# Install mode from sdist
__version__ = get_version(
os.path.join(base_dir, "allenact_plugins.egg-info/PKG-INFO")
)
extras = parse_req_file(
os.path.join(base_dir, "allenact_plugins.egg-info/requires.txt")
)
setup(
name="allenact_plugins",
version=__version__,
description="Plugins for the AllenAct framework",
long_description=(
"A collection of plugins/extensions for use within the AllenAct framework."
),
classifiers=[
"Intended Audience :: Science/Research",
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
keywords=["reinforcement learning", "embodied-AI", "AI", "RL", "SLAM"],
url="https://github.com/allenai/allenact",
author="Allen Institute for Artificial Intelligence",
author_email="[email protected]",
license="MIT",
packages=find_packages(include=["allenact_plugins", "allenact_plugins.*"]),
install_requires=[
"gym>=0.17.0,<0.18.0",
"torch>=1.6.0,!=1.8.0,<1.9.0",
"torchvision>=0.7.0,<0.10.0",
"numpy>=1.19.1",
"wheel>=0.36.2",
f"allenact=={__version__}",
],
setup_requires=["pytest-runner"],
tests_require=["pytest", "pytest-cov"],
extras_require=extras,
)
| ask4help-main | allenact_plugins/setup.py |
import os
if os.path.exists(os.path.join(os.getcwd(), "habitat", "habitat-lab")):
# Old directory structure (not recommended)
HABITAT_DATA_BASE = os.path.join(os.getcwd(), "habitat/habitat-lab/data")
else:
# New directory structure
HABITAT_DATA_BASE = os.path.join(os.getcwd(), "datasets", "habitat",)
HABITAT_DATASETS_DIR = os.path.join(HABITAT_DATA_BASE, "datasets")
HABITAT_SCENE_DATASETS_DIR = os.path.join(HABITAT_DATA_BASE, "scene_datasets")
HABITAT_CONFIGS_DIR = os.path.join(HABITAT_DATA_BASE, "configs")
MOVE_AHEAD = "MOVE_FORWARD"
ROTATE_LEFT = "TURN_LEFT"
ROTATE_RIGHT = "TURN_RIGHT"
LOOK_DOWN = "LOOK_DOWN"
LOOK_UP = "LOOK_UP"
END = "STOP"
| ask4help-main | allenact_plugins/habitat_plugin/habitat_constants.py |
from abc import ABC
from typing import Tuple, List, Dict, Any, Optional, Union, Sequence, cast
import gym
import numpy as np
from habitat.sims.habitat_simulator.actions import HabitatSimActions
from habitat.sims.habitat_simulator.habitat_simulator import HabitatSim
from habitat.tasks.nav.shortest_path_follower import ShortestPathFollower
from allenact.base_abstractions.misc import RLStepResult
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import Task
from allenact.utils.system import get_logger
from allenact_plugins.habitat_plugin.habitat_constants import (
MOVE_AHEAD,
ROTATE_LEFT,
ROTATE_RIGHT,
END,
LOOK_UP,
LOOK_DOWN,
)
from allenact_plugins.habitat_plugin.habitat_environment import HabitatEnvironment
class HabitatTask(Task[HabitatEnvironment], ABC):
def __init__(
self,
env: HabitatEnvironment,
sensors: List[Sensor],
task_info: Dict[str, Any],
max_steps: int,
**kwargs
) -> None:
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self._last_action: Optional[str] = None
self._last_action_ind: Optional[int] = None
self._last_action_success: Optional[bool] = None
self._actions_taken: List[str] = []
self._positions = []
pos = self.get_observations()["agent_position_and_rotation"]
self._positions.append(
{"x": pos[0], "y": pos[1], "z": pos[2], "rotation": pos[3]}
)
ep = self.env.get_current_episode()
# Extract the scene name from the scene path and append the episode id to generate
# a globally unique episode_id
self._episode_id = ep.scene_id[-15:-4] + "_" + ep.episode_id
@property
def last_action(self):
return self._last_action
@last_action.setter
def last_action(self, value: str):
self._last_action = value
@property
def last_action_success(self):
return self._last_action_success
@last_action_success.setter
def last_action_success(self, value: Optional[bool]):
self._last_action_success = value
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
if mode == "rgb":
return self.env.current_frame["rgb"]
elif mode == "depth":
return self.env.current_frame["depth"]
else:
raise NotImplementedError()
class PointNavTask(Task[HabitatEnvironment]):
_actions = (MOVE_AHEAD, ROTATE_LEFT, ROTATE_RIGHT, END)
def __init__(
self,
env: HabitatEnvironment,
sensors: List[Sensor],
task_info: Dict[str, Any],
max_steps: int,
failed_end_reward: float = 0.0,
**kwargs
) -> None:
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self._took_end_action: bool = False
self._success: Optional[bool] = False
self._subsampled_locations_from_which_obj_visible = None
        # Get the geodesic distance to target from the environment and make sure it is
# a valid value
self.last_geodesic_distance = self.current_geodesic_dist_to_target()
self.start_distance = self.last_geodesic_distance
assert self.last_geodesic_distance is not None
# noinspection PyProtectedMember
self._shortest_path_follower = ShortestPathFollower(
cast(HabitatSim, env.env.sim), env.env._config.TASK.SUCCESS_DISTANCE, False
)
self._shortest_path_follower.mode = "geodesic_path"
self._rewards: List[float] = []
self._metrics = None
self.failed_end_reward = failed_end_reward
def current_geodesic_dist_to_target(self) -> Optional[float]:
metrics = self.env.env.get_metrics()
if metrics["distance_to_goal"] is None:
habitat_env = self.env.env
habitat_env.task.measurements.update_measures(
episode=habitat_env.current_episode, action=None, task=habitat_env.task
)
metrics = self.env.env.get_metrics()
return metrics["distance_to_goal"]
@property
def action_space(self):
return gym.spaces.Discrete(len(self._actions))
def reached_terminal_state(self) -> bool:
return self.env.env.episode_over
@classmethod
def class_action_names(cls, **kwargs) -> Tuple[str, ...]:
return cls._actions
def close(self) -> None:
self.env.stop()
def _step(self, action: Union[int, Sequence[int]]) -> RLStepResult:
assert isinstance(action, int)
action = cast(int, action)
action_str = self.class_action_names()[action]
self.env.step({"action": action_str})
if action_str == END:
self._took_end_action = True
self._success = self._is_goal_in_range()
self.last_action_success = self._success
else:
self.last_action_success = self.env.last_action_success
step_result = RLStepResult(
observation=self.get_observations(),
reward=self.judge(),
done=self.is_done(),
info={"last_action_success": self.last_action_success},
)
return step_result
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
assert mode in ["rgb", "depth"], "only rgb and depth rendering is implemented"
return self.env.current_frame["rgb"]
def _is_goal_in_range(self) -> bool:
return (
self.current_geodesic_dist_to_target() <= self.task_info["distance_to_goal"]
)
def judge(self) -> float:
reward = -0.01
new_geodesic_distance = self.current_geodesic_dist_to_target()
if self.last_geodesic_distance is None:
self.last_geodesic_distance = new_geodesic_distance
if self.last_geodesic_distance is not None:
if (
new_geodesic_distance is None
or new_geodesic_distance in [float("-inf"), float("inf")]
or np.isnan(new_geodesic_distance)
):
new_geodesic_distance = self.last_geodesic_distance
delta_distance_reward = self.last_geodesic_distance - new_geodesic_distance
reward += delta_distance_reward
self.last_geodesic_distance = new_geodesic_distance
if self.is_done():
reward += 10.0 if self._success else self.failed_end_reward
else:
get_logger().warning("Could not get geodesic distance from habitat env.")
self._rewards.append(float(reward))
return float(reward)
def metrics(self) -> Dict[str, Any]:
if not self.is_done():
return {}
else:
_metrics = self.env.env.get_metrics()
metrics = {
"success": 1 * self._success,
"ep_length": self.num_steps_taken(),
"reward": np.sum(self._rewards),
"spl": _metrics["spl"] if _metrics["spl"] is not None else 0.0,
"dist_to_target": self.current_geodesic_dist_to_target(),
}
self._rewards = []
return metrics
def query_expert(self, **kwargs) -> Tuple[int, bool]:
if self._is_goal_in_range():
return self.class_action_names().index(END), True
target = self.task_info["target"]
habitat_action = self._shortest_path_follower.get_next_action(target)
if habitat_action == HabitatSimActions.MOVE_FORWARD:
return self.class_action_names().index(MOVE_AHEAD), True
elif habitat_action == HabitatSimActions.TURN_LEFT:
return self.class_action_names().index(ROTATE_LEFT), True
elif habitat_action == HabitatSimActions.TURN_RIGHT:
return self.class_action_names().index(ROTATE_RIGHT), True
else:
return 0, False
class ObjectNavTask(HabitatTask):
_actions = (MOVE_AHEAD, ROTATE_LEFT, ROTATE_RIGHT, END, LOOK_UP, LOOK_DOWN)
def __init__(
self,
env: HabitatEnvironment,
sensors: List[Sensor],
task_info: Dict[str, Any],
max_steps: int,
**kwargs
) -> None:
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self._took_end_action: bool = False
self._success: Optional[bool] = False
self._subsampled_locations_from_which_obj_visible = None
        # Get the geodesic distance to target from the environment and make sure it is
# a valid value
self.last_geodesic_distance = self.current_geodesic_dist_to_target()
assert not (
self.last_geodesic_distance is None
or self.last_geodesic_distance in [float("-inf"), float("inf")]
or np.isnan(self.last_geodesic_distance)
), "Bad geodesic distance"
self._min_distance_to_goal = self.last_geodesic_distance
self._num_invalid_actions = 0
# noinspection PyProtectedMember
self._shortest_path_follower = ShortestPathFollower(
env.env.sim, env.env._config.TASK.SUCCESS_DISTANCE, False
)
self._shortest_path_follower.mode = "geodesic_path"
self._rewards: List[float] = []
self._metrics = None
self.task_info["episode_id"] = self._episode_id
self.task_info["target_position"] = {
"x": self.task_info["target"][0],
"y": self.task_info["target"][1],
"z": self.task_info["target"][2],
}
self._coverage_map = np.zeros((150, 150))
@property
def action_space(self):
return gym.spaces.Discrete(len(self._actions))
def reached_terminal_state(self) -> bool:
return self.env.env.episode_over
@classmethod
def class_action_names(cls, **kwargs) -> Tuple[str, ...]:
return cls._actions
def action_names(self, **kwargs) -> Tuple[str, ...]:
return self._actions
def close(self) -> None:
self.env.stop()
def current_geodesic_dist_to_target(self) -> Optional[float]:
metrics = self.env.env.get_metrics()
if metrics["distance_to_goal"] is None:
habitat_env = self.env.env
habitat_env.task.measurements.update_measures(
episode=habitat_env.current_episode, action=None, task=habitat_env.task
)
metrics = self.env.env.get_metrics()
return metrics["distance_to_goal"]
def _step(self, action: Union[int, Sequence[int]]) -> RLStepResult:
assert isinstance(action, int)
action = cast(int, action)
old_pos = self.get_observations()["agent_position_and_rotation"]
action_str = self.action_names()[action]
self._actions_taken.append(action_str)
self.env.step({"action": action_str})
# if action_str != END:
# self.env.step({"action": action_str})
# if self.env.env.get_metrics()['distance_to_goal'] <= 0.2:
# self._took_end_action = True
# self._success = self.env.env.get_metrics()['distance_to_goal'] <= 0.2
# self.last_action_success = self._success
# else:
# self.last_action_success = self.env.last_action_success
if action_str == END:
self._took_end_action = True
self._success = self._is_goal_in_range()
self.last_action_success = self._success
else:
self.last_action_success = self.env.last_action_success
step_result = RLStepResult(
observation=self.get_observations(),
reward=self.judge(),
done=self.is_done(),
info={"last_action_success": self.last_action_success},
)
new_pos = self.get_observations()["agent_position_and_rotation"]
if np.all(old_pos == new_pos):
self._num_invalid_actions += 1
pos = self.get_observations()["agent_position_and_rotation"]
self._positions.append(
{"x": pos[0], "y": pos[1], "z": pos[2], "rotation": pos[3]}
)
return step_result
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
assert mode in ["rgb", "depth"], "only rgb and depth rendering is implemented"
return self.env.current_frame["rgb"]
def _is_goal_in_range(self) -> bool:
# The habitat simulator will return an SPL value of 0.0 whenever the goal is not in range
return bool(self.env.env.get_metrics()["spl"])
def judge(self) -> float:
# Set default reward
reward = -0.01
# Get geodesic distance reward
new_geodesic_distance = self.current_geodesic_dist_to_target()
self._min_distance_to_goal = min(
new_geodesic_distance, self._min_distance_to_goal
)
if (
new_geodesic_distance is None
or new_geodesic_distance in [float("-inf"), float("inf")]
or np.isnan(new_geodesic_distance)
):
new_geodesic_distance = self.last_geodesic_distance
delta_distance_reward = self.last_geodesic_distance - new_geodesic_distance
reward += delta_distance_reward
if self._took_end_action:
reward += 10.0 if self._success else 0.0
# Get success reward
self._rewards.append(float(reward))
self.last_geodesic_distance = new_geodesic_distance
# # Get coverage reward
# pos = self.get_observations()["agent_position_and_rotation"]
# # align current position with center of map
# x = int(pos[0] + 75)
# y = int(pos[2] + 75)
# if self._coverage_map[x, y] == 0:
# self._coverage_map[x, y] = 1
# reward += 0.1
# else:
# reward -= 0.0
return float(reward)
def metrics(self) -> Dict[str, Any]:
self.task_info["taken_actions"] = self._actions_taken
self.task_info["action_names"] = self.action_names()
self.task_info["followed_path"] = self._positions
if not self.is_done():
return {}
else:
_metrics = self.env.env.get_metrics()
metrics = {
"success": self._success,
"ep_length": self.num_steps_taken(),
"total_reward": np.sum(self._rewards),
"spl": _metrics["spl"] if _metrics["spl"] is not None else 0.0,
"min_distance_to_target": self._min_distance_to_goal,
"num_invalid_actions": self._num_invalid_actions,
"task_info": self.task_info,
}
self._rewards = []
return metrics
def query_expert(self, **kwargs) -> Tuple[int, bool]:
if self._is_goal_in_range():
return self.class_action_names().index(END), True
target = self.task_info["target"]
action = self._shortest_path_follower.get_next_action(target)
return action, action is not None
| ask4help-main | allenact_plugins/habitat_plugin/habitat_tasks.py |
from allenact.utils.system import ImportChecker
with ImportChecker(
"\n\nPlease install habitat following\n\n"
"https://allenact.org/installation/installation-framework/#installation-of-habitat\n\n"
):
import habitat
import habitat_sim
| ask4help-main | allenact_plugins/habitat_plugin/__init__.py |
from typing import Any, Optional, Tuple
import gym
import numpy as np
from pyquaternion import Quaternion
from allenact.base_abstractions.sensor import Sensor
from allenact.embodiedai.sensors.vision_sensors import RGBSensor, DepthSensor
from allenact.base_abstractions.task import Task
from allenact.utils.misc_utils import prepare_locals_for_super
from allenact_plugins.habitat_plugin.habitat_environment import HabitatEnvironment
from allenact_plugins.habitat_plugin.habitat_tasks import PointNavTask # type: ignore
class RGBSensorHabitat(RGBSensor[HabitatEnvironment, Task[HabitatEnvironment]]):
# For backwards compatibility
def __init__(
self,
use_resnet_normalization: bool = False,
mean: Optional[np.ndarray] = np.array(
[[[0.485, 0.456, 0.406]]], dtype=np.float32
),
stdev: Optional[np.ndarray] = np.array(
[[[0.229, 0.224, 0.225]]], dtype=np.float32
),
height: Optional[int] = None,
width: Optional[int] = None,
uuid: str = "rgb",
output_shape: Optional[Tuple[int, ...]] = None,
output_channels: int = 3,
unnormalized_infimum: float = 0.0,
unnormalized_supremum: float = 1.0,
scale_first: bool = True,
**kwargs: Any
):
super().__init__(**prepare_locals_for_super(locals()))
def frame_from_env(
self, env: HabitatEnvironment, task: Optional[Task[HabitatEnvironment]]
) -> np.ndarray:
return env.current_frame["rgb"].copy()
class DepthSensorHabitat(DepthSensor[HabitatEnvironment, Task[HabitatEnvironment]]):
# For backwards compatibility
def __init__(
self,
use_resnet_normalization: Optional[bool] = None,
use_normalization: Optional[bool] = None,
mean: Optional[np.ndarray] = np.array([[0.5]], dtype=np.float32),
stdev: Optional[np.ndarray] = np.array([[0.25]], dtype=np.float32),
height: Optional[int] = None,
width: Optional[int] = None,
uuid: str = "depth",
output_shape: Optional[Tuple[int, ...]] = None,
output_channels: int = 1,
unnormalized_infimum: float = 0.0,
unnormalized_supremum: float = 5.0,
scale_first: bool = False,
**kwargs: Any
):
# Give priority to use_normalization, but use_resnet_normalization for backward compat. if not set
if use_resnet_normalization is not None and use_normalization is None:
use_normalization = use_resnet_normalization
elif use_normalization is None:
use_normalization = False
super().__init__(**prepare_locals_for_super(locals()))
def frame_from_env(
self, env: HabitatEnvironment, task: Optional[Task[HabitatEnvironment]]
) -> np.ndarray:
return env.current_frame["depth"].copy()
class TargetCoordinatesSensorHabitat(Sensor[HabitatEnvironment, PointNavTask]):
def __init__(
self, coordinate_dims: int, uuid: str = "target_coordinates_ind", **kwargs: Any
):
self.coordinate_dims = coordinate_dims
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self):
# Distance is a non-negative real and angle is normalized to the range (-Pi, Pi] or [-Pi, Pi)
return gym.spaces.Box(
np.float32(-3.15), np.float32(1000), shape=(self.coordinate_dims,)
)
def get_observation(
self,
env: HabitatEnvironment,
task: Optional[PointNavTask],
*args: Any,
**kwargs: Any
) -> Any:
frame = env.current_frame
goal = frame["pointgoal_with_gps_compass"]
return goal
class TargetObjectSensorHabitat(Sensor[HabitatEnvironment, PointNavTask]):
def __init__(self, uuid: str = "target_object_id", **kwargs: Any):
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self):
return gym.spaces.Discrete(38)
def get_observation(
self,
env: HabitatEnvironment,
task: Optional[PointNavTask],
*args: Any,
**kwargs: Any
) -> Any:
frame = env.current_frame
goal = frame["objectgoal"][0]
return goal
class AgentCoordinatesSensorHabitat(Sensor[HabitatEnvironment, PointNavTask]):
def __init__(self, uuid: str = "agent_position_and_rotation", **kwargs: Any):
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self):
return gym.spaces.Box(np.float32(-1000), np.float32(1000), shape=(4,))
def get_observation(
self,
env: HabitatEnvironment,
task: Optional[PointNavTask],
*args: Any,
**kwargs: Any
) -> Any:
position = env.env.sim.get_agent_state().position
quaternion = Quaternion(env.env.sim.get_agent_state().rotation.components)
return np.array([position[0], position[1], position[2], quaternion.radians])
| ask4help-main | allenact_plugins/habitat_plugin/habitat_sensors.py |
"""A wrapper for interacting with the Habitat environment."""
from typing import Dict, Union, List, Optional
import habitat
import numpy as np
from habitat.config import Config
from habitat.core.dataset import Dataset
from habitat.core.simulator import Observations, AgentState, ShortestPathPoint
from habitat.tasks.nav.nav import NavigationEpisode as HabitatNavigationEpisode
from allenact.utils.cache_utils import DynamicDistanceCache
from allenact.utils.system import get_logger
class HabitatEnvironment(object):
    def __init__(self, config: Config, dataset: Dataset, x_display: Optional[str] = None) -> None:
# print("habitat_plugin env constructor")
self.x_display = x_display
self.env = habitat.Env(config=config, dataset=dataset)
# Set the target to a random goal from the provided list for this episode
self.goal_index = 0
self.last_geodesic_distance = None
self.distance_cache = DynamicDistanceCache(rounding=1)
self._current_frame: Optional[np.ndarray] = None
@property
def scene_name(self) -> str:
return self.env.current_episode.scene_id
@property
def current_frame(self) -> np.ndarray:
assert self._current_frame is not None
return self._current_frame
def step(self, action_dict: Dict[str, Union[str, int]]) -> Observations:
obs = self.env.step(action_dict["action"])
self._current_frame = obs
return obs
# def get_distance_to_target(self) -> float:
# curr = self.get_location()
# goal = self.get_current_episode().goals[0].view_points[0].agent_state.position
# return self.env.sim.geodesic_distance(curr, goal)
def get_location(self) -> Optional[np.ndarray]:
return self.env.sim.get_agent_state().position
def get_rotation(self) -> Optional[List[float]]:
return self.env.sim.get_agent_state().rotation
def get_shortest_path(
self, source_state: AgentState, target_state: AgentState,
) -> List[ShortestPathPoint]:
return self.env.sim.action_space_shortest_path(source_state, [target_state])
def get_current_episode(self) -> HabitatNavigationEpisode:
return self.env.current_episode # type: ignore
# noinspection PyMethodMayBeStatic
def start(self):
get_logger().debug("No need to start a habitat_plugin env")
def stop(self):
self.env.close()
def reset(self):
self._current_frame = self.env.reset()
@property
def last_action_success(self) -> bool:
# For now we can not have failure of actions
return True
@property
def num_episodes(self) -> int:
ep_iterator = self.env.episode_iterator
assert isinstance(ep_iterator, habitat.core.dataset.EpisodeIterator)
return len(ep_iterator.episodes)
| ask4help-main | allenact_plugins/habitat_plugin/habitat_environment.py |
from typing import Any
from allenact.embodiedai.preprocessors.resnet import ResNetPreprocessor
from allenact.utils.system import get_logger
class ResnetPreProcessorHabitat(ResNetPreprocessor):
"""Preprocess RGB or depth image using a ResNet model."""
def __init__(self, *args, **kwargs: Any):
super().__init__(*args, **kwargs)
get_logger().warning(
"`ResnetPreProcessorHabitat` is deprecated, use `ResNetPreprocessor` instead."
)
| ask4help-main | allenact_plugins/habitat_plugin/habitat_preprocessors.py |
from typing import List, Optional, Union, Callable
import gym
import habitat
from habitat.config import Config
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import TaskSampler
from allenact_plugins.habitat_plugin.habitat_environment import HabitatEnvironment
from allenact_plugins.habitat_plugin.habitat_tasks import PointNavTask, ObjectNavTask # type: ignore
class PointNavTaskSampler(TaskSampler):
def __init__(
self,
env_config: Config,
sensors: List[Sensor],
max_steps: int,
action_space: gym.Space,
distance_to_goal: float,
filter_dataset_func: Optional[
Callable[[habitat.Dataset], habitat.Dataset]
] = None,
**task_init_kwargs,
) -> None:
self.grid_size = 0.25
self.env: Optional[HabitatEnvironment] = None
self.max_tasks: Optional[int] = None
self.reset_tasks: Optional[int] = None
self.sensors = sensors
self.max_steps = max_steps
self._action_space = action_space
self.env_config = env_config
self.distance_to_goal = distance_to_goal
self.seed: Optional[int] = None
self.filter_dataset_func = filter_dataset_func
self._last_sampled_task: Optional[PointNavTask] = None
self.task_init_kwargs = task_init_kwargs
def _create_environment(self) -> HabitatEnvironment:
dataset = habitat.make_dataset(
self.env_config.DATASET.TYPE, config=self.env_config.DATASET
)
if len(dataset.episodes) == 0:
raise RuntimeError("Empty input dataset.")
if self.filter_dataset_func is not None:
dataset = self.filter_dataset_func(dataset)
if len(dataset.episodes) == 0:
raise RuntimeError("Empty dataset after filtering.")
env = HabitatEnvironment(config=self.env_config, dataset=dataset)
self.max_tasks = (
None if self.env_config.MODE == "train" else env.num_episodes
) # env.num_episodes
self.reset_tasks = self.max_tasks
return env
@property
def length(self) -> Union[int, float]:
"""
@return: Number of total tasks remaining that can be sampled. Can be float('inf').
"""
return float("inf") if self.max_tasks is None else self.max_tasks
@property
def total_unique(self) -> Union[int, float, None]:
return self.env.num_episodes
@property
def last_sampled_task(self) -> Optional[PointNavTask]:
return self._last_sampled_task
def close(self) -> None:
if self.env is not None:
self.env.stop()
@property
def all_observation_spaces_equal(self) -> bool:
"""
@return: True if all Tasks that can be sampled by this sampler have the
same observation space. Otherwise False.
"""
return True
def next_task(self, force_advance_scene=False) -> Optional[PointNavTask]:
if self.max_tasks is not None and self.max_tasks <= 0:
return None
if self.env is not None:
self.env.reset()
else:
self.env = self._create_environment()
self.env.reset()
ep_info = self.env.get_current_episode()
target = ep_info.goals[0].position
task_info = {
"target": target,
"distance_to_goal": self.distance_to_goal,
}
self._last_sampled_task = PointNavTask(
env=self.env,
sensors=self.sensors,
task_info=task_info,
max_steps=self.max_steps,
action_space=self._action_space,
**self.task_init_kwargs,
)
if self.max_tasks is not None:
self.max_tasks -= 1
return self._last_sampled_task
def reset(self):
self.max_tasks = self.reset_tasks
def set_seed(self, seed: int):
self.seed = seed
if seed is not None:
self.env.env.seed(seed)
class ObjectNavTaskSampler(TaskSampler):
def __init__(
self,
env_config: Config,
sensors: List[Sensor],
max_steps: int,
action_space: gym.Space,
distance_to_goal: float,
**kwargs,
) -> None:
self.grid_size = 0.25
self.env: Optional[HabitatEnvironment] = None
self.max_tasks: Optional[int] = None
self.reset_tasks: Optional[int] = None
self.sensors = sensors
self.max_steps = max_steps
self._action_space = action_space
self.env_config = env_config
self.distance_to_goal = distance_to_goal
self.seed: Optional[int] = None
self._last_sampled_task: Optional[ObjectNavTask] = None
def _create_environment(self) -> HabitatEnvironment:
dataset = habitat.make_dataset(
self.env_config.DATASET.TYPE, config=self.env_config.DATASET
)
env = HabitatEnvironment(config=self.env_config, dataset=dataset)
self.max_tasks = (
None if self.env_config.MODE == "train" else env.num_episodes
) # mp3d objectnav val -> 2184
self.reset_tasks = self.max_tasks
return env
@property
def length(self) -> Union[int, float]:
"""
@return: Number of total tasks remaining that can be sampled. Can be float('inf').
"""
return float("inf") if self.max_tasks is None else self.max_tasks
@property
def total_unique(self) -> Union[int, float, None]:
return self.env.num_episodes
@property
def last_sampled_task(self) -> Optional[ObjectNavTask]:
return self._last_sampled_task
def close(self) -> None:
if self.env is not None:
self.env.stop()
@property
def all_observation_spaces_equal(self) -> bool:
"""
@return: True if all Tasks that can be sampled by this sampler have the
same observation space. Otherwise False.
"""
return True
def next_task(self, force_advance_scene=False) -> Optional[ObjectNavTask]:
if self.max_tasks is not None and self.max_tasks <= 0:
return None
if self.env is not None:
self.env.reset()
else:
self.env = self._create_environment()
self.env.reset()
ep_info = self.env.get_current_episode()
target = ep_info.goals[0].position
task_info = {
"target": target,
"distance_to_goal": self.distance_to_goal,
}
self._last_sampled_task = ObjectNavTask(
env=self.env,
sensors=self.sensors,
task_info=task_info,
max_steps=self.max_steps,
action_space=self._action_space,
)
if self.max_tasks is not None:
self.max_tasks -= 1
return self._last_sampled_task
def reset(self):
self.max_tasks = self.reset_tasks
def set_seed(self, seed: int):
self.seed = seed
if seed is not None:
self.env.env.seed(seed)
| ask4help-main | allenact_plugins/habitat_plugin/habitat_task_samplers.py |
import glob
import os
import shutil
from typing import List
import habitat
from habitat import Config
from allenact.utils.system import get_logger
from allenact_plugins.habitat_plugin.habitat_constants import (
HABITAT_DATA_BASE,
HABITAT_CONFIGS_DIR,
)
def construct_env_configs(
config: Config, allow_scene_repeat: bool = False,
) -> List[Config]:
"""Create list of Habitat Configs for training on multiple processes To
allow better performance, dataset are split into small ones for each
individual env, grouped by scenes.
# Parameters
config : configs that contain num_processes as well as information
necessary to create individual environments.
allow_scene_repeat: if `True` and the number of distinct scenes
in the dataset is less than the total number of processes this will
result in scenes being repeated across processes. If `False`, then
if the total number of processes is greater than the number of scenes,
this will result in a RuntimeError exception being raised.
# Returns
List of Configs, one for each process.
"""
config.freeze()
num_processes = config.NUM_PROCESSES
configs = []
dataset = habitat.make_dataset(config.DATASET.TYPE)
scenes = dataset.get_scenes_to_load(config.DATASET)
if len(scenes) > 0:
if len(scenes) < num_processes:
if not allow_scene_repeat:
raise RuntimeError(
"reduce the number of processes as there aren't enough number of scenes."
)
else:
scenes = (scenes * (1 + (num_processes // len(scenes))))[:num_processes]
scene_splits: List[List] = [[] for _ in range(num_processes)]
for idx, scene in enumerate(scenes):
scene_splits[idx % len(scene_splits)].append(scene)
assert sum(map(len, scene_splits)) == len(scenes)
for i in range(num_processes):
task_config = config.clone()
task_config.defrost()
if len(scenes) > 0:
task_config.DATASET.CONTENT_SCENES = scene_splits[i]
if len(config.SIMULATOR_GPU_IDS) == 0:
task_config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = -1
else:
task_config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = config.SIMULATOR_GPU_IDS[
i % len(config.SIMULATOR_GPU_IDS)
]
task_config.freeze()
configs.append(task_config.clone())
return configs
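# Illustrative usage sketch (not part of the original module); assumes `my_config`
# is a habitat Config exposing the NUM_PROCESSES, DATASET and SIMULATOR_GPU_IDS
# fields described above:
#
#   per_process_configs = construct_env_configs(my_config, allow_scene_repeat=True)
#   assert len(per_process_configs) == my_config.NUM_PROCESSES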
def construct_env_configs_mp3d(config: Config) -> List[Config]:
r"""Create list of Habitat Configs for training on multiple processes
To allow better performance, dataset are split into small ones for
each individual env, grouped by scenes.
Args:
config: configs that contain num_processes as well as information
necessary to create individual environments.
Returns:
List of Configs, one for each process
"""
config.freeze()
num_processes = config.NUM_PROCESSES
configs = []
# dataset = habitat.make_dataset(config.DATASET.TYPE)
# scenes = dataset.get_scenes_to_load(config.DATASET)
if num_processes == 1:
scene_splits = [["pRbA3pwrgk9"]]
else:
small = [
"rPc6DW4iMge",
"e9zR4mvMWw7",
"uNb9QFRL6hY",
"qoiz87JEwZ2",
"sKLMLpTHeUy",
"s8pcmisQ38h",
"759xd9YjKW5",
"XcA2TqTSSAj",
"SN83YJsR3w2",
"8WUmhLawc2A",
"JeFG25nYj2p",
"17DRP5sb8fy",
"Uxmj2M2itWa",
"XcA2TqTSSAj",
"SN83YJsR3w2",
"8WUmhLawc2A",
"JeFG25nYj2p",
"17DRP5sb8fy",
"Uxmj2M2itWa",
"D7N2EKCX4Sj",
"b8cTxDM8gDG",
"sT4fr6TAbpF",
"S9hNv5qa7GM",
"82sE5b5pLXE",
"pRbA3pwrgk9",
"aayBHfsNo7d",
"cV4RVeZvu5T",
"i5noydFURQK",
"YmJkqBEsHnH",
"jh4fc5c5qoQ",
"VVfe2KiqLaN",
"29hnd4uzFmX",
"Pm6F8kyY3z2",
"JF19kD82Mey",
"GdvgFV5R1Z5",
"HxpKQynjfin",
"vyrNrziPKCB",
]
med = [
"V2XKFyX4ASd",
"VFuaQ6m2Qom",
"ZMojNkEp431",
"5LpN3gDmAk7",
"r47D5H71a5s",
"ULsKaCPVFJR",
"E9uDoFAP3SH",
"kEZ7cmS4wCh",
"ac26ZMwG7aT",
"dhjEzFoUFzH",
"mJXqzFtmKg4",
"p5wJjkQkbXX",
"Vvot9Ly1tCj",
"EDJbREhghzL",
"VzqfbhrpDEA",
"7y3sRwLe3Va",
]
scene_splits = [[] for _ in range(config.NUM_PROCESSES)]
distribute(
small,
scene_splits,
num_gpus=8,
procs_per_gpu=3,
proc_offset=1,
scenes_per_process=2,
)
distribute(
med,
scene_splits,
num_gpus=8,
procs_per_gpu=3,
proc_offset=0,
scenes_per_process=1,
)
# gpu0 = [['pRbA3pwrgk9', '82sE5b5pLXE', 'S9hNv5qa7GM'],
# ['Uxmj2M2itWa', '17DRP5sb8fy', 'JeFG25nYj2p'],
# ['5q7pvUzZiYa', '759xd9YjKW5', 's8pcmisQ38h'],
# ['e9zR4mvMWw7', 'rPc6DW4iMge', 'vyrNrziPKCB']]
# gpu1 = [['sT4fr6TAbpF', 'b8cTxDM8gDG', 'D7N2EKCX4Sj'],
# ['8WUmhLawc2A', 'SN83YJsR3w2', 'XcA2TqTSSAj'],
# ['sKLMLpTHeUy', 'qoiz87JEwZ2', 'uNb9QFRL6hY'],
# ['V2XKFyX4ASd', 'VFuaQ6m2Qom', 'ZMojNkEp431']]
# gpu2 = [['5LpN3gDmAk7', 'r47D5H71a5s', 'ULsKaCPVFJR', 'E9uDoFAP3SH'],
# ['VVfe2KiqLaN', 'jh4fc5c5qoQ', 'YmJkqBEsHnH'], # small
# ['i5noydFURQK', 'cV4RVeZvu5T', 'aayBHfsNo7d']] # small
# gpu3 = [['kEZ7cmS4wCh', 'ac26ZMwG7aT', 'dhjEzFoUFzH'],
# ['mJXqzFtmKg4', 'p5wJjkQkbXX', 'Vvot9Ly1tCj']]
# gpu4 = [['EDJbREhghzL', 'VzqfbhrpDEA', '7y3sRwLe3Va'],
# ['ur6pFq6Qu1A', 'PX4nDJXEHrG', 'PuKPg4mmafe']]
# gpu5 = [['r1Q1Z4BcV1o', 'gTV8FGcVJC9', '1pXnuDYAj8r'],
# ['JF19kD82Mey', 'Pm6F8kyY3z2', '29hnd4uzFmX']] # small
# gpu6 = [['VLzqgDo317F', '1LXtFkjw3qL'],
# ['HxpKQynjfin', 'gZ6f7yhEvPG', 'GdvgFV5R1Z5']] # small
# gpu7 = [['D7G3Y4RVNrH', 'B6ByNegPMKs']]
#
# scene_splits = gpu0 + gpu1 + gpu2 + gpu3 + gpu4 + gpu5 + gpu6 + gpu7
for i in range(num_processes):
task_config = config.clone()
task_config.defrost()
task_config.DATASET.CONTENT_SCENES = scene_splits[i]
task_config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = config.SIMULATOR_GPU_IDS[
i % len(config.SIMULATOR_GPU_IDS)
]
task_config.freeze()
configs.append(task_config.clone())
return configs
def distribute(
data: List[str],
scene_splits: List[List],
num_gpus=8,
procs_per_gpu=4,
proc_offset=0,
scenes_per_process=1,
) -> None:
for idx, scene in enumerate(data):
i = (idx // num_gpus) % scenes_per_process
j = idx % num_gpus
scene_splits[j * procs_per_gpu + i + proc_offset].append(scene)
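# Worked example for `distribute` (illustrative, not part of the original module):
# with num_gpus=8, procs_per_gpu=3, proc_offset=1 and scenes_per_process=2, scene 0
# lands in scene_splits[0 * 3 + 0 + 1] = scene_splits[1], scene 1 in scene_splits[4],
# and scene 8 (the first to wrap past the 8 GPUs) in scene_splits[0 * 3 + 1 + 1] =
# scene_splits[2]; i.e. scenes are spread across GPUs first and only then stacked
# onto additional per-process slots.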
def get_habitat_config(path: str, allow_download: bool = True):
assert (
path[-4:].lower() == ".yml" or path[-5:].lower() == ".yaml"
), f"path ({path}) must be a .yml or .yaml file."
if not os.path.exists(path):
if not allow_download:
            raise IOError(
                f"Path {path} does not exist and we do not wish to try downloading it."
            )
get_logger().info(
f"Attempting to load config at path {path}. This path does not exist, attempting to"
f"download habitat configs and will try again. Downloading..."
)
os.chdir(HABITAT_DATA_BASE)
output_archive_name = "__TO_OVERWRITE__.zip"
deletable_dir_name = "__TO_DELETE__"
url = "https://github.com/facebookresearch/habitat-lab/archive/7c4286653211bbfaca59d0807c28bfb3a6b962bf.zip"
cmd = f"wget {url} -O {output_archive_name}"
if os.system(cmd):
raise RuntimeError(f"ERROR: `{cmd}` failed.")
cmd = f"unzip {output_archive_name} -d {deletable_dir_name}"
if os.system(cmd):
raise RuntimeError(f"ERROR: `{cmd}` failed.")
habitat_path = glob.glob(os.path.join(deletable_dir_name, "habitat-lab*"))[0]
cmd = f"rsync --ignore-existing -raz {habitat_path}/configs/ {HABITAT_CONFIGS_DIR}/"
if os.system(cmd):
raise RuntimeError(f"ERROR: `{cmd}` failed.")
os.remove(output_archive_name)
shutil.rmtree(deletable_dir_name)
if not os.path.exists(path):
raise RuntimeError(
f"Config at path {path} does not exist even after downloading habitat configs to {HABITAT_CONFIGS_DIR}."
)
else:
get_logger().info(f"Config downloaded successfully.")
return habitat.get_config(path)
| ask4help-main | allenact_plugins/habitat_plugin/habitat_utils.py |
ask4help-main | allenact_plugins/habitat_plugin/configs/__init__.py |
|
import os
import cv2
import habitat
from pyquaternion import Quaternion
from allenact_plugins.habitat_plugin.habitat_constants import (
HABITAT_CONFIGS_DIR,
HABITAT_DATASETS_DIR,
HABITAT_SCENE_DATASETS_DIR,
)
from allenact_plugins.habitat_plugin.habitat_utils import get_habitat_config
FORWARD_KEY = "w"
LEFT_KEY = "a"
RIGHT_KEY = "d"
FINISH = "f"
def transform_rgb_bgr(image):
return image[:, :, [2, 1, 0]]
def agent_demo():
config = get_habitat_config(
os.path.join(HABITAT_CONFIGS_DIR, "tasks/pointnav.yaml")
)
config.defrost()
config.DATASET.DATA_PATH = os.path.join(
HABITAT_DATASETS_DIR, "pointnav/gibson/v1/train/train.json.gz"
)
config.DATASET.SCENES_DIR = HABITAT_SCENE_DATASETS_DIR
config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = 0
config.SIMULATOR.TURN_ANGLE = 45
config.freeze()
env = habitat.Env(config=config)
print("Environment creation successful")
observations = env.reset()
cv2.imshow("RGB", transform_rgb_bgr(observations["rgb"]))
print("Agent stepping around inside environment.")
count_steps = 0
action = None
while not env.episode_over:
keystroke = cv2.waitKey(0)
if keystroke == ord(FORWARD_KEY):
action = 1
print("action: FORWARD")
elif keystroke == ord(LEFT_KEY):
action = 2
print("action: LEFT")
elif keystroke == ord(RIGHT_KEY):
action = 3
print("action: RIGHT")
elif keystroke == ord(FINISH):
action = 0
print("action: FINISH")
else:
print("INVALID KEY")
continue
observations = env.step(action)
count_steps += 1
print("Position:", env.sim.get_agent_state().position)
print("Quaternions:", env.sim.get_agent_state().rotation)
quat = Quaternion(env.sim.get_agent_state().rotation.components)
print(quat.radians)
cv2.imshow("RGB", transform_rgb_bgr(observations["rgb"]))
print("Episode finished after {} steps.".format(count_steps))
if action == habitat.SimulatorActions.STOP and observations["pointgoal"][0] < 0.2:
print("you successfully navigated to destination point")
else:
print("your navigation was unsuccessful")
if __name__ == "__main__":
agent_demo()
| ask4help-main | allenact_plugins/habitat_plugin/scripts/agent_demo.py |
ask4help-main | allenact_plugins/habitat_plugin/scripts/__init__.py |
|
import os
import habitat
import numpy as np
from tqdm import tqdm
from allenact_plugins.habitat_plugin.habitat_constants import (
HABITAT_CONFIGS_DIR,
HABITAT_DATA_BASE,
HABITAT_SCENE_DATASETS_DIR,
HABITAT_DATASETS_DIR,
)
from allenact_plugins.habitat_plugin.habitat_utils import get_habitat_config
map_resolution = 0.05
map_size = 960
def make_map(env, scene):
vacancy_map = np.zeros([map_size, map_size], dtype=bool)
for i in tqdm(range(map_size)):
for j in range(map_size):
x = (i - map_size // 2) * map_resolution
z = (j - map_size // 2) * map_resolution
vacancy_map[j, i] = env.sim.is_navigable([x, 0.0, z])
np.save(
os.path.join(HABITAT_DATA_BASE, "map_data/pointnav/v1/gibson/data/" + scene),
vacancy_map,
)
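# Coordinate arithmetic (illustrative): with map_size=960 and map_resolution=0.05,
# grid index 0 maps to (0 - 480) * 0.05 = -24.0m and index 959 maps to 23.95m, so
# the saved navigability map covers a square of roughly 48m x 48m centered on the
# scene origin.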
def generate_maps():
config = get_habitat_config(
os.path.join(HABITAT_CONFIGS_DIR, "tasks/pointnav.yaml")
)
config.defrost()
config.DATASET.DATA_PATH = os.path.join(
HABITAT_DATASETS_DIR, "pointnav/gibson/v1/train/train.json.gz"
)
config.DATASET.SCENES_DIR = HABITAT_SCENE_DATASETS_DIR
config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = 0
config.freeze()
dataset = habitat.make_dataset(config.DATASET.TYPE)
scenes = dataset.get_scenes_to_load(config.DATASET)
for scene in scenes:
print("Making environment for:", scene)
config.defrost()
config.DATASET.CONTENT_SCENES = [scene]
config.freeze()
env = habitat.Env(config=config)
make_map(env, scene)
env.close()
if __name__ == "__main__":
generate_maps()
| ask4help-main | allenact_plugins/habitat_plugin/scripts/make_map.py |
ask4help-main | allenact_plugins/habitat_plugin/data/__init__.py |
|
from typing import Optional, Tuple, cast
import gym
import torch
import torch.nn as nn
from gym.spaces.dict import Dict as SpaceDict
from allenact.algorithms.onpolicy_sync.policy import (
ActorCriticModel,
Memory,
ObservationType,
)
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import ActorCriticOutput, DistributionType
class LinearAdvisorActorCritic(ActorCriticModel[CategoricalDistr]):
def __init__(
self,
input_uuid: str,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
ensure_same_init_aux_weights: bool = True,
):
super().__init__(action_space=action_space, observation_space=observation_space)
assert (
input_uuid in observation_space.spaces
), "LinearActorCritic expects only a single observational input."
self.input_uuid = input_uuid
box_space: gym.spaces.Box = observation_space[self.input_uuid]
        assert isinstance(box_space, gym.spaces.Box), (
            "LinearAdvisorActorCritic requires that the"
            " observation space corresponding to the input key is a Box space."
        )
assert len(box_space.shape) == 1
self.in_dim = box_space.shape[0]
self.num_actions = action_space.n
self.linear = nn.Linear(self.in_dim, 2 * self.num_actions + 1)
nn.init.orthogonal_(self.linear.weight)
if ensure_same_init_aux_weights:
# Ensure main actor / auxiliary actor start with the same weights
self.linear.weight.data[self.num_actions : -1, :] = self.linear.weight[
: self.num_actions, :
]
nn.init.constant_(self.linear.bias, 0)
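    # The single linear layer above packs all heads into one output of size
    # 2 * num_actions + 1: the first num_actions entries are the main policy logits,
    # the next num_actions are the auxiliary (advisor) policy logits, and the final
    # entry is the value estimate; `forward` slices them apart below.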
# noinspection PyMethodMayBeStatic
def _recurrent_memory_specification(self):
return None
def forward( # type:ignore
self,
observations: ObservationType,
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
out = self.linear(cast(torch.Tensor, observations[self.input_uuid]))
main_logits = out[..., : self.num_actions]
aux_logits = out[..., self.num_actions : -1]
values = out[..., -1:]
# noinspection PyArgumentList
return (
ActorCriticOutput(
distributions=cast(
DistributionType, CategoricalDistr(logits=main_logits)
), # step x sampler x ...
values=cast(
torch.FloatTensor, values.view(values.shape[:2] + (-1,))
), # step x sampler x flattened
extras={"auxiliary_distributions": CategoricalDistr(logits=aux_logits)},
),
None,
)
| ask4help-main | allenact_plugins/lighthouse_plugin/lighthouse_models.py |
import copy
import curses
import itertools
import time
from functools import lru_cache
from typing import Optional, Tuple, Any, List, Union, cast
import numpy as np
from gym.utils import seeding
from gym_minigrid import minigrid
EMPTY = 0
GOAL = 1
WRONG_CORNER = 2
WALL = 3
@lru_cache(1000)
def _get_world_corners(world_dim: int, world_radius: int):
if world_radius == 0:
return ((0,) * world_dim,)
def combination_to_vec(comb) -> Tuple[int, ...]:
vec = [world_radius] * world_dim
for k in comb:
vec[k] *= -1
return tuple(vec)
return tuple(
sorted(
combination_to_vec(comb)
for i in range(world_dim + 1)
for comb in itertools.combinations(list(range(world_dim)), i)
)
)
@lru_cache(1000)
def _base_world_tensor(world_dim: int, world_radius: int):
tensor = np.full((2 * world_radius + 1,) * world_dim, fill_value=EMPTY)
slices: List[Union[slice, int]] = [slice(0, 2 * world_radius + 1)] * world_dim
for i in range(world_dim):
tmp_slices = [*slices]
tmp_slices[i] = 0
tensor[tuple(tmp_slices)] = WALL
tmp_slices[i] = 2 * world_radius
tensor[tuple(tmp_slices)] = WALL
for corner in _get_world_corners(world_dim=world_dim, world_radius=world_radius):
tensor[tuple([loc + world_radius for loc in corner])] = WRONG_CORNER
return tensor
class LightHouseEnvironment(object):
EMPTY = 0
GOAL = 1
WRONG_CORNER = 2
WALL = 3
SPACE_LEVELS = [EMPTY, GOAL, WRONG_CORNER, WALL]
def __init__(self, world_dim: int, world_radius: int, **kwargs):
self.world_dim = world_dim
self.world_radius = world_radius
self.world_corners = np.array(
_get_world_corners(world_dim=world_dim, world_radius=world_radius),
dtype=int,
)
self.curses_screen: Optional[Any] = None
self.world_tensor: np.ndarray = copy.deepcopy(
_base_world_tensor(world_radius=world_radius, world_dim=world_dim)
)
self.current_position = np.zeros(world_dim, dtype=int)
self.closest_distance_to_corners = np.full(
2 ** world_dim, fill_value=world_radius, dtype=int
)
self.positions: List[Tuple[int, ...]] = [tuple(self.current_position)]
self.goal_position: Optional[np.ndarray] = None
self.last_action: Optional[int] = None
self.seed: Optional[int] = None
self.np_seeded_random_gen: Optional[np.random.RandomState] = None
self.set_seed(seed=int(kwargs.get("seed", np.random.randint(0, 2 ** 31 - 1))))
self.random_reset()
def set_seed(self, seed: int):
# More information about why `np_seeded_random_gen` is used rather than just `np.random.seed`
# can be found at gym/utils/seeding.py
# There's literature indicating that having linear correlations between seeds of multiple
# PRNG's can correlate the outputs
self.seed = seed
self.np_seeded_random_gen, _ = cast(
Tuple[np.random.RandomState, Any], seeding.np_random(self.seed)
)
    def random_reset(self, goal_position: Optional[np.ndarray] = None):
self.last_action = None
self.world_tensor = copy.deepcopy(
_base_world_tensor(world_radius=self.world_radius, world_dim=self.world_dim)
)
        if goal_position is None:
            self.goal_position = self.world_corners[
                self.np_seeded_random_gen.randint(low=0, high=len(self.world_corners))
            ]
        else:
            # Use the explicitly provided goal position instead of sampling a corner.
            self.goal_position = np.array(goal_position, dtype=int)
self.world_tensor[
tuple(cast(np.ndarray, self.world_radius + self.goal_position))
] = GOAL
if self.curses_screen is not None:
curses.nocbreak()
self.curses_screen.keypad(False)
curses.echo()
curses.endwin()
self.curses_screen = None
self.current_position = np.zeros(self.world_dim, dtype=int)
self.closest_distance_to_corners = np.abs(
(self.world_corners - self.current_position.reshape(1, -1))
).max(1)
self.positions = [tuple(self.current_position)]
def step(self, action: int) -> bool:
assert 0 <= action < 2 * self.world_dim
self.last_action = action
delta = -1 if action >= self.world_dim else 1
ind = action % self.world_dim
old = self.current_position[ind]
new = min(max(delta + old, -self.world_radius), self.world_radius)
if new == old:
self.positions.append(self.positions[-1])
return False
else:
self.current_position[ind] = new
self.closest_distance_to_corners = np.minimum(
np.abs((self.world_corners - self.current_position.reshape(1, -1))).max(
1
),
self.closest_distance_to_corners,
)
self.positions.append(tuple(self.current_position))
return True
def render(self, mode="array", **kwargs):
if mode == "array":
arr = copy.deepcopy(self.world_tensor)
arr[tuple(self.world_radius + self.current_position)] = 9
return arr
elif mode == "curses":
if self.world_dim == 1:
space_list = ["_"] * (1 + 2 * self.world_radius)
goal_ind = self.goal_position[0] + self.world_radius
space_list[goal_ind] = "G"
space_list[2 * self.world_radius - goal_ind] = "W"
space_list[self.current_position[0] + self.world_radius] = "X"
to_print = " ".join(space_list)
if self.curses_screen is None:
self.curses_screen = curses.initscr()
self.curses_screen.addstr(0, 0, to_print)
if "extra_text" in kwargs:
self.curses_screen.addstr(1, 0, kwargs["extra_text"])
self.curses_screen.refresh()
elif self.world_dim == 2:
space_list = [
["_"] * (1 + 2 * self.world_radius)
for _ in range(1 + 2 * self.world_radius)
]
for row_ind in range(1 + 2 * self.world_radius):
for col_ind in range(1 + 2 * self.world_radius):
if self.world_tensor[row_ind][col_ind] == self.GOAL:
space_list[row_ind][col_ind] = "G"
if self.world_tensor[row_ind][col_ind] == self.WRONG_CORNER:
space_list[row_ind][col_ind] = "C"
if self.world_tensor[row_ind][col_ind] == self.WALL:
space_list[row_ind][col_ind] = "W"
if (
(row_ind, col_ind)
== self.world_radius + self.current_position
).all():
space_list[row_ind][col_ind] = "X"
if self.curses_screen is None:
self.curses_screen = curses.initscr()
for i, sl in enumerate(space_list):
self.curses_screen.addstr(i, 0, " ".join(sl))
self.curses_screen.addstr(len(space_list), 0, str(self.state()))
if "extra_text" in kwargs:
self.curses_screen.addstr(
len(space_list) + 1, 0, kwargs["extra_text"]
)
self.curses_screen.refresh()
else:
raise NotImplementedError("Cannot render worlds of > 2 dimensions.")
elif mode == "minigrid":
height = width = 2 * self.world_radius + 2
grid = minigrid.Grid(width, height)
# Generate the surrounding walls
grid.horz_wall(0, 0)
grid.horz_wall(0, height - 1)
grid.vert_wall(0, 0)
grid.vert_wall(width - 1, 0)
# Place fake agent at the center
agent_pos = np.array(self.positions[-1]) + 1 + self.world_radius
# grid.set(*agent_pos, None)
agent = minigrid.Goal()
agent.color = "red"
grid.set(agent_pos[0], agent_pos[1], agent)
agent.init_pos = tuple(agent_pos)
agent.cur_pos = tuple(agent_pos)
goal_pos = self.goal_position + self.world_radius
goal = minigrid.Goal()
grid.set(goal_pos[0], goal_pos[1], goal)
goal.init_pos = tuple(goal_pos)
goal.cur_pos = tuple(goal_pos)
highlight_mask = np.zeros((height, width), dtype=bool)
minx, maxx = max(1, agent_pos[0] - 5), min(height - 1, agent_pos[0] + 5)
miny, maxy = max(1, agent_pos[1] - 5), min(height - 1, agent_pos[1] + 5)
highlight_mask[minx : (maxx + 1), miny : (maxy + 1)] = True
img = grid.render(
minigrid.TILE_PIXELS, agent_pos, None, highlight_mask=highlight_mask
)
return img
else:
raise NotImplementedError("Unknown render mode {}.".format(mode))
time.sleep(0.0 if "sleep_time" not in kwargs else kwargs["sleep_time"])
def close(self):
if self.curses_screen is not None:
curses.nocbreak()
self.curses_screen.keypad(False)
curses.echo()
curses.endwin()
@staticmethod
def optimal_ave_ep_length(world_dim: int, world_radius: int, view_radius: int):
if world_dim == 1:
max_steps_wrong_dir = max(world_radius - view_radius, 0)
return max_steps_wrong_dir + world_radius
elif world_dim == 2:
tau = 2 * (world_radius - view_radius)
average_steps_needed = 0.25 * (4 * 2 * view_radius + 10 * tau)
return average_steps_needed
else:
raise NotImplementedError(
"`optimal_average_ep_length` is only implemented"
" for when the `world_dim` is 1 or 2 ({} given).".format(world_dim)
)
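    # Worked example (illustrative): with world_dim=1, world_radius=10 and
    # view_radius=1 the optimum is max(10 - 1, 0) + 10 = 19 steps; with world_dim=2
    # and the same radii, tau = 2 * (10 - 1) = 18 and the average optimal episode
    # length is 0.25 * (4 * 2 * 1 + 10 * 18) = 47 steps.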
| ask4help-main | allenact_plugins/lighthouse_plugin/lighthouse_environment.py |
import abc
import string
from typing import List, Dict, Any, Optional, Tuple, Union, Sequence, cast
import gym
import numpy as np
from gym.utils import seeding
from allenact.base_abstractions.misc import RLStepResult
from allenact.base_abstractions.sensor import Sensor, SensorSuite
from allenact.base_abstractions.task import Task, TaskSampler
from allenact.utils.experiment_utils import set_seed
from allenact.utils.system import get_logger
from allenact_plugins.lighthouse_plugin.lighthouse_environment import (
LightHouseEnvironment,
)
from allenact_plugins.lighthouse_plugin.lighthouse_sensors import get_corner_observation
DISCOUNT_FACTOR = 0.99
STEP_PENALTY = -0.01
FOUND_TARGET_REWARD = 1.0
class LightHouseTask(Task[LightHouseEnvironment], abc.ABC):
"""Defines an abstract embodied task in the light house gridworld.
# Attributes
env : The light house environment.
sensor_suite: Collection of sensors formed from the `sensors` argument in the initializer.
task_info : Dictionary of (k, v) pairs defining task goals and other task information.
    max_steps : The maximum number of steps an agent can take in the task before it is considered failed.
observation_space: The observation space returned on each step from the sensors.
"""
def __init__(
self,
env: LightHouseEnvironment,
sensors: Union[SensorSuite, List[Sensor]],
task_info: Dict[str, Any],
max_steps: int,
**kwargs,
) -> None:
"""Initializer.
See class documentation for parameter definitions.
"""
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self._last_action: Optional[int] = None
@property
def last_action(self) -> int:
return self._last_action
@last_action.setter
def last_action(self, value: int):
self._last_action = value
def step(self, action: Union[int, Sequence[int]]) -> RLStepResult:
assert isinstance(action, int)
action = cast(int, action)
self.last_action = action
return super(LightHouseTask, self).step(action=action)
def render(self, mode: str = "array", *args, **kwargs) -> np.ndarray:
if mode == "array":
return self.env.render(mode, **kwargs)
elif mode in ["rgb", "rgb_array", "human"]:
arr = self.env.render("array", **kwargs)
colors = np.array(
[
(31, 119, 180),
(255, 127, 14),
(44, 160, 44),
(214, 39, 40),
(148, 103, 189),
(140, 86, 75),
(227, 119, 194),
(127, 127, 127),
(188, 189, 34),
(23, 190, 207),
],
dtype=np.uint8,
)
return colors[arr]
else:
raise NotImplementedError("Render mode '{}' is not supported.".format(mode))
class FindGoalLightHouseTask(LightHouseTask):
_CACHED_ACTION_NAMES: Dict[int, Tuple[str, ...]] = {}
def __init__(
self,
env: LightHouseEnvironment,
sensors: Union[SensorSuite, List[Sensor]],
task_info: Dict[str, Any],
max_steps: int,
**kwargs,
):
super().__init__(env, sensors, task_info, max_steps, **kwargs)
self._found_target = False
@property
def action_space(self) -> gym.spaces.Discrete:
return gym.spaces.Discrete(2 * self.env.world_dim)
def _step(self, action: Union[int, Sequence[int]]) -> RLStepResult:
assert isinstance(action, int)
action = cast(int, action)
self.env.step(action)
reward = STEP_PENALTY
if np.all(self.env.current_position == self.env.goal_position):
self._found_target = True
reward += FOUND_TARGET_REWARD
elif self.num_steps_taken() == self.max_steps - 1:
reward = STEP_PENALTY / (1 - DISCOUNT_FACTOR)
return RLStepResult(
observation=self.get_observations(),
reward=reward,
done=self.is_done(),
info=None,
)
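    # Note on the timeout penalty above (illustrative): STEP_PENALTY / (1 - DISCOUNT_FACTOR)
    # equals -0.01 / (1 - 0.99) = -1.0, i.e. the discounted return of paying the step
    # penalty forever, which is what an agent that never finds the goal would accrue.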
def reached_terminal_state(self) -> bool:
return self._found_target
@classmethod
def class_action_names(cls, world_dim: int = 2, **kwargs) -> Tuple[str, ...]:
assert 1 <= world_dim <= 26, "Too many dimensions."
if world_dim not in cls._CACHED_ACTION_NAMES:
            action_names = [
                "{}(+1)".format(string.ascii_lowercase[i]) for i in range(world_dim)
            ]
            action_names.extend(
                "{}(-1)".format(string.ascii_lowercase[i]) for i in range(world_dim)
            )
cls._CACHED_ACTION_NAMES[world_dim] = tuple(action_names)
return cls._CACHED_ACTION_NAMES[world_dim]
def action_names(self) -> Tuple[str, ...]:
return self.class_action_names(world_dim=self.env.world_dim)
def close(self) -> None:
pass
def query_expert(
self,
expert_view_radius: int,
return_policy: bool = False,
deterministic: bool = False,
**kwargs,
) -> Tuple[Any, bool]:
view_tuple = get_corner_observation(
env=self.env, view_radius=expert_view_radius, view_corner_offsets=None,
)
goal = self.env.GOAL
wrong = self.env.WRONG_CORNER
if self.env.world_dim == 1:
left_view, right_view, hitting, last_action = view_tuple
left = 1
right = 0
expert_action: Optional[int] = None
policy: Optional[np.ndarray] = None
if left_view == goal:
expert_action = left
elif right_view == goal:
expert_action = right
elif hitting != 2 * self.env.world_dim:
expert_action = left if last_action == right else right
elif left_view == wrong:
expert_action = right
elif right_view == wrong:
expert_action = left
elif last_action == 2 * self.env.world_dim:
policy = np.array([0.5, 0.5])
else:
expert_action = last_action
if policy is None:
policy = np.array([expert_action == right, expert_action == left])
elif self.env.world_dim == 2:
tl, tr, bl, br, hitting, last_action = view_tuple
wall = self.env.WALL
d, r, u, l, none = 0, 1, 2, 3, 4
if tr == goal:
if hitting != r:
expert_action = r
else:
expert_action = u
elif br == goal:
if hitting != d:
expert_action = d
else:
expert_action = r
elif bl == goal:
if hitting != l:
expert_action = l
else:
expert_action = d
elif tl == goal:
if hitting != u:
expert_action = u
else:
expert_action = l
elif tr == wrong and not any(x == wrong for x in [br, bl, tl]):
expert_action = l
elif br == wrong and not any(x == wrong for x in [bl, tl, tr]):
expert_action = u
elif bl == wrong and not any(x == wrong for x in [tl, tr, br]):
expert_action = r
elif tl == wrong and not any(x == wrong for x in [tr, br, bl]):
expert_action = d
elif all(x == wrong for x in [tr, br]) and not any(
x == wrong for x in [bl, tl]
):
expert_action = l
elif all(x == wrong for x in [br, bl]) and not any(
x == wrong for x in [tl, tr]
):
expert_action = u
elif all(x == wrong for x in [bl, tl]) and not any(
x == wrong for x in [tr, br]
):
expert_action = r
elif all(x == wrong for x in [tl, tr]) and not any(
x == wrong for x in [br, bl]
):
expert_action = d
elif hitting != none and tr == br == bl == tl:
# Only possible if in 0 vis setting
if tr == self.env.WRONG_CORNER or last_action == hitting:
if last_action == r:
expert_action = u
elif last_action == u:
expert_action = l
elif last_action == l:
expert_action = d
elif last_action == d:
expert_action = r
else:
raise NotImplementedError()
else:
expert_action = last_action
elif last_action == r and tr == wall:
expert_action = u
elif last_action == u and tl == wall:
expert_action = l
elif last_action == l and bl == wall:
expert_action = d
elif last_action == d and br == wall:
expert_action = r
elif last_action == none:
expert_action = r
else:
expert_action = last_action
policy = np.array(
[
expert_action == d,
expert_action == r,
expert_action == u,
expert_action == l,
]
)
else:
raise NotImplementedError("Can only query expert for world dims of 1 or 2.")
if return_policy:
return policy, True
elif deterministic:
return int(np.argmax(policy)), True
else:
return (
int(np.argmax(np.random.multinomial(1, policy / (1.0 * policy.sum())))),
True,
)
class FindGoalLightHouseTaskSampler(TaskSampler):
def __init__(
self,
world_dim: int,
world_radius: int,
sensors: Union[SensorSuite, List[Sensor]],
max_steps: int,
max_tasks: Optional[int] = None,
num_unique_seeds: Optional[int] = None,
task_seeds_list: Optional[List[int]] = None,
deterministic_sampling: bool = False,
seed: Optional[int] = None,
**kwargs,
):
self.env = LightHouseEnvironment(world_dim=world_dim, world_radius=world_radius)
self._last_sampled_task: Optional[FindGoalLightHouseTask] = None
self.sensors = (
SensorSuite(sensors) if not isinstance(sensors, SensorSuite) else sensors
)
self.max_steps = max_steps
self.max_tasks = max_tasks
self.num_tasks_generated = 0
self.deterministic_sampling = deterministic_sampling
self.num_unique_seeds = num_unique_seeds
self.task_seeds_list = task_seeds_list
assert (self.num_unique_seeds is None) or (
0 < self.num_unique_seeds
), "`num_unique_seeds` must be a positive integer."
self.num_unique_seeds = num_unique_seeds
self.task_seeds_list = task_seeds_list
if self.task_seeds_list is not None:
if self.num_unique_seeds is not None:
assert self.num_unique_seeds == len(
self.task_seeds_list
), "`num_unique_seeds` must equal the length of `task_seeds_list` if both specified."
self.num_unique_seeds = len(self.task_seeds_list)
elif self.num_unique_seeds is not None:
self.task_seeds_list = list(range(self.num_unique_seeds))
assert (not deterministic_sampling) or (
self.num_unique_seeds is not None
), "Cannot use deterministic sampling when `num_unique_seeds` is `None`."
if (not deterministic_sampling) and self.max_tasks:
get_logger().warning(
"`deterministic_sampling` is `False` but you have specified `max_tasks < inf`,"
" this might be a mistake when running testing."
)
self.seed: int = int(
seed if seed is not None else np.random.randint(0, 2 ** 31 - 1)
)
self.np_seeded_random_gen: Optional[np.random.RandomState] = None
self.set_seed(self.seed)
@property
def world_dim(self):
return self.env.world_dim
@property
def world_radius(self):
return self.env.world_radius
@property
def length(self) -> Union[int, float]:
return (
float("inf")
if self.max_tasks is None
else self.max_tasks - self.num_tasks_generated
)
@property
def total_unique(self) -> Optional[Union[int, float]]:
n = 2 ** self.world_dim
return n if self.num_unique_seeds is None else min(n, self.num_unique_seeds)
@property
def last_sampled_task(self) -> Optional[Task]:
return self._last_sampled_task
def next_task(self, force_advance_scene: bool = False) -> Optional[Task]:
if self.length <= 0:
return None
if self.num_unique_seeds is not None:
if self.deterministic_sampling:
seed = self.task_seeds_list[
self.num_tasks_generated % len(self.task_seeds_list)
]
else:
seed = self.np_seeded_random_gen.choice(self.task_seeds_list)
else:
seed = self.np_seeded_random_gen.randint(0, 2 ** 31 - 1)
self.num_tasks_generated += 1
self.env.set_seed(seed)
self.env.random_reset()
return FindGoalLightHouseTask(
env=self.env, sensors=self.sensors, task_info={}, max_steps=self.max_steps
)
def close(self) -> None:
pass
@property
def all_observation_spaces_equal(self) -> bool:
return True
def reset(self) -> None:
self.num_tasks_generated = 0
self.set_seed(seed=self.seed)
def set_seed(self, seed: int) -> None:
set_seed(seed)
self.np_seeded_random_gen, _ = seeding.np_random(seed)
self.seed = seed
| ask4help-main | allenact_plugins/lighthouse_plugin/lighthouse_tasks.py |
ask4help-main | allenact_plugins/lighthouse_plugin/__init__.py |
|
import itertools
from typing import Any, Dict, Optional, Tuple, Sequence
import gym
import numpy as np
import pandas as pd
import patsy
from allenact.base_abstractions.sensor import Sensor, prepare_locals_for_super
from allenact.base_abstractions.task import Task
from allenact_plugins.lighthouse_plugin.lighthouse_environment import (
LightHouseEnvironment,
)
def get_corner_observation(
env: LightHouseEnvironment,
view_radius: int,
    view_corner_offsets: Optional[np.ndarray],
):
if view_corner_offsets is None:
view_corner_offsets = view_radius * (2 * (env.world_corners > 0) - 1)
world_corners_offset = env.world_corners + env.world_radius
multidim_view_corner_indices = np.clip(
np.reshape(env.current_position, (1, -1))
+ view_corner_offsets
+ env.world_radius,
a_min=0,
a_max=2 * env.world_radius,
)
flat_view_corner_indices = np.ravel_multi_index(
np.transpose(multidim_view_corner_indices), env.world_tensor.shape
)
view_values = env.world_tensor.reshape(-1)[flat_view_corner_indices]
last_action = 2 * env.world_dim if env.last_action is None else env.last_action
on_border_bools = np.concatenate(
(
env.current_position == env.world_radius,
env.current_position == -env.world_radius,
),
axis=0,
)
if last_action == 2 * env.world_dim or on_border_bools[last_action]:
on_border_value = last_action
elif on_border_bools.any():
on_border_value = np.argwhere(on_border_bools).reshape(-1)[0]
else:
on_border_value = 2 * env.world_dim
seen_mask = np.array(env.closest_distance_to_corners <= view_radius, dtype=int)
seen_corner_values = (
env.world_tensor.reshape(-1)[
np.ravel_multi_index(
np.transpose(world_corners_offset), env.world_tensor.shape
)
]
* seen_mask
)
return np.concatenate(
(
seen_corner_values + view_values * (1 - seen_mask),
[on_border_value, last_action],
),
axis=0,
out=np.zeros((seen_corner_values.shape[0] + 2,), dtype=np.float32,),
)
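# The vector returned above (length 2 ** world_dim + 2, matching the CornerSensor
# observation space below) packs, in order: one entry per world corner (the corner's
# true value once it has ever been within `view_radius`, otherwise the value
# currently visible at the corresponding offset), followed by the on-border
# indicator and the last action taken.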
class CornerSensor(Sensor[LightHouseEnvironment, Any]):
def __init__(
self,
view_radius: int,
world_dim: int,
uuid: str = "corner_fixed_radius",
**kwargs: Any
):
self.view_radius = view_radius
self.world_dim = world_dim
self.view_corner_offsets: Optional[np.ndarray] = None
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self):
return gym.spaces.Box(
low=min(LightHouseEnvironment.SPACE_LEVELS),
high=max(LightHouseEnvironment.SPACE_LEVELS),
shape=(2 ** self.world_dim + 2,),
dtype=int,
)
def get_observation(
self,
env: LightHouseEnvironment,
task: Optional[Task],
*args: Any,
**kwargs: Any
) -> Any:
if self.view_corner_offsets is None:
self.view_corner_offsets = self.view_radius * (
2 * (env.world_corners > 0) - 1
)
return get_corner_observation(
env=env,
view_radius=self.view_radius,
view_corner_offsets=self.view_corner_offsets,
)
class FactorialDesignCornerSensor(Sensor[LightHouseEnvironment, Any]):
_DESIGN_MAT_CACHE: Dict[Tuple, Any] = {}
def __init__(
self,
view_radius: int,
world_dim: int,
degree: int,
uuid: str = "corner_fixed_radius_categorical",
**kwargs: Any
):
self.view_radius = view_radius
self.world_dim = world_dim
self.degree = degree
if self.world_dim > 2:
raise NotImplementedError(
"When using the `FactorialDesignCornerSensor`,"
"`world_dim` must be <= 2 due to memory constraints."
"In the current implementation, creating the design"
"matrix in the `world_dim == 3` case would require"
"instantiating a matrix of size ~ 3Mx3M (9 trillion entries)."
)
self.view_corner_offsets: Optional[np.ndarray] = None
# self.world_corners_offset: Optional[List[typing.Tuple[int, ...]]] = None
self.corner_sensor = CornerSensor(self.view_radius, self.world_dim)
self.variables_and_levels = self._get_variables_and_levels(
world_dim=self.world_dim
)
self._design_mat_formula = self._create_formula(
variables_and_levels=self._get_variables_and_levels(
world_dim=self.world_dim
),
degree=self.degree,
)
self.single_row_df = pd.DataFrame(
data=[[0] * len(self.variables_and_levels)],
columns=[x[0] for x in self.variables_and_levels],
)
self._view_tuple_to_design_array: Dict[Tuple[int, ...], np.ndarray] = {}
(
design_matrix,
tuple_to_ind,
) = self._create_full_design_matrix_and_tuple_to_ind_dict(
variables_and_levels=tuple(self.variables_and_levels), degree=self.degree
)
self.design_matrix = design_matrix
self.tuple_to_ind = tuple_to_ind
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self):
return gym.spaces.Box(
low=min(LightHouseEnvironment.SPACE_LEVELS),
high=max(LightHouseEnvironment.SPACE_LEVELS),
shape=(
len(
self.view_tuple_to_design_array(
(0,) * len(self.variables_and_levels)
)
),
),
dtype=int,
)
def view_tuple_to_design_array(self, view_tuple: Tuple):
return np.array(
self.design_matrix[self.tuple_to_ind[view_tuple], :], dtype=np.float32
)
@classmethod
def output_dim(cls, world_dim: int):
return ((3 if world_dim == 1 else 4) ** (2 ** world_dim)) * (
2 * world_dim + 1
) ** 2
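    # Worked example (illustrative): for world_dim=2 the full factorial design
    # enumerates 4 ** 4 = 256 corner-value combinations times (2 * 2 + 1) ** 2 = 25
    # border/last-action combinations, i.e. 256 * 25 = 6400 distinct view tuples;
    # for world_dim=1 this is 3 ** 2 * 3 ** 2 = 81.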
@classmethod
def _create_full_design_matrix_and_tuple_to_ind_dict(
cls, variables_and_levels: Sequence[Tuple[str, Sequence[int]]], degree: int
):
variables_and_levels = tuple((x, tuple(y)) for x, y in variables_and_levels)
key = (variables_and_levels, degree)
if key not in cls._DESIGN_MAT_CACHE:
all_tuples = [
tuple(x)
for x in itertools.product(
*[levels for _, levels in variables_and_levels]
)
]
tuple_to_ind = {}
for i, t in enumerate(all_tuples):
tuple_to_ind[t] = i
df = pd.DataFrame(
data=all_tuples,
columns=[var_name for var_name, _ in variables_and_levels],
)
cls._DESIGN_MAT_CACHE[key] = (
np.array(
1.0
* patsy.dmatrix(
cls._create_formula(
variables_and_levels=variables_and_levels, degree=degree
),
data=df,
),
dtype=bool,
),
tuple_to_ind,
)
return cls._DESIGN_MAT_CACHE[key]
@staticmethod
def _get_variables_and_levels(world_dim: int):
return (
[
("s{}".format(i), list(range(3 if world_dim == 1 else 4)))
for i in range(2 ** world_dim)
]
+ [("b{}".format(i), list(range(2 * world_dim + 1))) for i in range(1)]
+ [("a{}".format(i), list(range(2 * world_dim + 1))) for i in range(1)]
)
@classmethod
def _create_formula(
cls, variables_and_levels: Sequence[Tuple[str, Sequence[int]]], degree: int
):
def make_categorial(var_name, levels):
return "C({}, levels={})".format(var_name, levels)
if degree == -1:
return ":".join(
make_categorial(var_name, levels)
for var_name, levels in variables_and_levels
)
else:
return "({})**{}".format(
"+".join(
make_categorial(var_name, levels)
for var_name, levels in variables_and_levels
),
degree,
)
def get_observation(
self,
env: LightHouseEnvironment,
task: Optional[Task],
*args: Any,
**kwargs: Any
) -> Any:
kwargs["as_tuple"] = True
view_array = self.corner_sensor.get_observation(env, task, *args, **kwargs)
return self.view_tuple_to_design_array(tuple(view_array))
| ask4help-main | allenact_plugins/lighthouse_plugin/lighthouse_sensors.py |
import numpy as np
from allenact.utils.experiment_utils import EarlyStoppingCriterion, ScalarMeanTracker
class StopIfNearOptimal(EarlyStoppingCriterion):
def __init__(self, optimal: float, deviation: float, min_memory_size: int = 100):
self.optimal = optimal
self.deviation = deviation
self.current_pos = 0
self.has_filled = False
self.memory: np.ndarray = np.zeros(min_memory_size)
def __call__(
self, stage_steps: int, total_steps: int, training_metrics: ScalarMeanTracker,
) -> bool:
sums = training_metrics.sums()
counts = training_metrics.counts()
k = "ep_length"
if k in sums:
count = counts[k]
ep_length_ave = sums[k] / count
n = self.memory.shape[0]
if count >= n:
if count > n:
# Increase memory size to fit all of the new values
self.memory = np.full(count, fill_value=ep_length_ave)
else:
# We have exactly as many values as the memory size,
# simply set the whole memory to be equal to the new
# average ep length.
self.memory[:] = ep_length_ave
self.current_pos = 0
self.has_filled = True
else:
self.memory[
self.current_pos : (self.current_pos + count)
] = ep_length_ave
                if self.current_pos + count >= n:
                    # The new values reach (or wrap past) the end of the circular buffer.
                    self.has_filled = True
                    self.current_pos = (self.current_pos + count) % n
                    self.memory[: self.current_pos] = ep_length_ave
                else:
                    self.current_pos += count
if not self.has_filled:
return False
return self.memory.mean() < self.optimal + self.deviation
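# Illustrative usage sketch (not part of the original module); assumes the lighthouse
# environment's `optimal_ave_ep_length` helper is used to supply the target value:
#
#   criterion = StopIfNearOptimal(
#       optimal=LightHouseEnvironment.optimal_ave_ep_length(
#           world_dim=2, world_radius=15, view_radius=1
#       ),
#       deviation=2.0,
#   )
#   # `criterion(...)` then returns True once the rolling mean of "ep_length"
#   # falls below `optimal + deviation`.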
| ask4help-main | allenact_plugins/lighthouse_plugin/lighthouse_util.py |
ask4help-main | allenact_plugins/lighthouse_plugin/configs/__init__.py |
|
ask4help-main | allenact_plugins/lighthouse_plugin/scripts/__init__.py |
|
ask4help-main | allenact_plugins/lighthouse_plugin/data/__init__.py |
|
import os
from pathlib import Path
BABYAI_EXPERT_TRAJECTORIES_DIR = os.path.abspath(
os.path.join(os.path.dirname(Path(__file__)), "data", "demos")
)
| ask4help-main | allenact_plugins/babyai_plugin/babyai_constants.py |
from allenact.utils.system import ImportChecker
with ImportChecker(
"\n\nPlease install babyai with:\n\n"
"pip install -e git+https://github.com/Lucaweihs/babyai.git@0b450eeb3a2dc7116c67900d51391986bdbb84cd#egg=babyai\n",
):
import babyai
| ask4help-main | allenact_plugins/babyai_plugin/__init__.py |
from typing import Dict, Optional, List, cast, Tuple, Any
import babyai.model
import babyai.rl
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from gym.spaces.dict import Dict as SpaceDict
from allenact.algorithms.onpolicy_sync.policy import (
ActorCriticModel,
ObservationType,
Memory,
DistributionType,
)
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import ActorCriticOutput
class BabyAIACModelWrapped(babyai.model.ACModel):
def __init__(
self,
obs_space: Dict[str, int],
action_space: gym.spaces.Discrete,
image_dim=128,
memory_dim=128,
instr_dim=128,
use_instr=False,
lang_model="gru",
use_memory=False,
arch="cnn1",
aux_info=None,
include_auxiliary_head: bool = False,
):
self.use_cnn2 = arch == "cnn2"
super().__init__(
obs_space=obs_space,
action_space=action_space,
image_dim=image_dim,
memory_dim=memory_dim,
instr_dim=instr_dim,
use_instr=use_instr,
lang_model=lang_model,
use_memory=use_memory,
arch="cnn1" if self.use_cnn2 else arch,
aux_info=aux_info,
)
self.semantic_embedding = None
if self.use_cnn2:
self.semantic_embedding = nn.Embedding(33, embedding_dim=8)
self.image_conv = nn.Sequential(
nn.Conv2d(in_channels=24, out_channels=16, kernel_size=(2, 2)),
*self.image_conv[1:] # type:ignore
)
self.image_conv[0].apply(babyai.model.initialize_parameters)
self.include_auxiliary_head = include_auxiliary_head
if self.use_memory and self.lang_model == "gru":
self.memory_rnn = nn.LSTM(self.image_dim, self.memory_dim)
if self.include_auxiliary_head:
self.aux = nn.Sequential(
nn.Linear(self.memory_dim, 64),
nn.Tanh(),
nn.Linear(64, action_space.n),
)
self.aux.apply(babyai.model.initialize_parameters)
self.train()
def forward_once(self, obs, memory, instr_embedding=None):
"""Copied (with minor modifications) from
`babyai.model.ACModel.forward(...)`."""
if self.use_instr and instr_embedding is None:
instr_embedding = self._get_instr_embedding(obs.instr)
if self.use_instr and self.lang_model == "attgru":
# outputs: B x L x D
# memory: B x M
mask = (obs.instr != 0).float()
# The mask tensor has the same length as obs.instr, and
# thus can be both shorter and longer than instr_embedding.
# It can be longer if instr_embedding is computed
# for a subbatch of obs.instr.
# It can be shorter if obs.instr is a subbatch of
# the batch that instr_embeddings was computed for.
# Here, we make sure that mask and instr_embeddings
# have equal length along dimension 1.
mask = mask[:, : instr_embedding.shape[1]]
instr_embedding = instr_embedding[:, : mask.shape[1]]
keys = self.memory2key(memory)
pre_softmax = (keys[:, None, :] * instr_embedding).sum(2) + 1000 * mask
attention = F.softmax(pre_softmax, dim=1)
instr_embedding = (instr_embedding * attention[:, :, None]).sum(1)
x = torch.transpose(torch.transpose(obs.image, 1, 3), 2, 3)
if self.arch.startswith("expert_filmcnn"):
x = self.image_conv(x)
for controler in self.controllers:
x = controler(x, instr_embedding)
x = F.relu(self.film_pool(x))
else:
x = self.image_conv(x.contiguous())
x = x.reshape(x.shape[0], -1)
if self.use_memory:
hidden = (
memory[:, : self.semi_memory_size],
memory[:, self.semi_memory_size :],
)
hidden = self.memory_rnn(x, hidden)
embedding = hidden[0]
memory = torch.cat(hidden, dim=1) # type: ignore
else:
embedding = x
if self.use_instr and not "filmcnn" in self.arch:
embedding = torch.cat((embedding, instr_embedding), dim=1)
if hasattr(self, "aux_info") and self.aux_info:
extra_predictions = {
info: self.extra_heads[info](embedding) for info in self.extra_heads
}
else:
extra_predictions = dict()
return {
"embedding": embedding,
"memory": memory,
"extra_predictions": extra_predictions,
}
def forward_loop(
self,
observations: ObservationType,
recurrent_hidden_states: torch.FloatTensor,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
):
results = []
images = cast(torch.FloatTensor, observations["minigrid_ego_image"]).float()
instrs: Optional[torch.Tensor] = None
if "minigrid_mission" in observations:
instrs = cast(torch.Tensor, observations["minigrid_mission"])
_, nsamplers, _ = recurrent_hidden_states.shape
rollouts_len = images.shape[0] // nsamplers
obs = babyai.rl.DictList()
images = images.view(rollouts_len, nsamplers, *images.shape[1:])
masks = masks.view(rollouts_len, nsamplers, *masks.shape[1:]) # type:ignore
# needs_reset = (masks != 1.0).view(nrollouts, -1).any(-1)
if instrs is not None:
instrs = instrs.view(rollouts_len, nsamplers, instrs.shape[-1])
needs_instr_reset_mask = masks != 1.0
needs_instr_reset_mask[0] = 1
needs_instr_reset_mask = needs_instr_reset_mask.squeeze(-1)
instr_embeddings: Optional[torch.Tensor] = None
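        # Sketch of the idea implemented below: instructions only change when an episode
        # resets (mask != 1, or at the first time step), so each instruction is embedded once
        # at its reset step and that embedding is then reused for every later step of the same
        # sampler, instead of re-embedding the identical instruction at each step.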
if self.use_instr:
instr_reset_multi_inds = list(
(int(a), int(b))
for a, b in zip(*np.where(needs_instr_reset_mask.cpu().numpy()))
)
time_ind_to_which_need_instr_reset: List[List] = [
[] for _ in range(rollouts_len)
]
reset_multi_ind_to_index = {
mi: i for i, mi in enumerate(instr_reset_multi_inds)
}
for a, b in instr_reset_multi_inds:
time_ind_to_which_need_instr_reset[a].append(b)
unique_instr_embeddings = self._get_instr_embedding(
instrs[needs_instr_reset_mask]
)
instr_embeddings_list = [unique_instr_embeddings[:nsamplers]]
current_instr_embeddings_list = list(instr_embeddings_list[-1])
for time_ind in range(1, rollouts_len):
if len(time_ind_to_which_need_instr_reset[time_ind]) == 0:
instr_embeddings_list.append(instr_embeddings_list[-1])
else:
for sampler_needing_reset_ind in time_ind_to_which_need_instr_reset[
time_ind
]:
current_instr_embeddings_list[
sampler_needing_reset_ind
] = unique_instr_embeddings[
reset_multi_ind_to_index[
(time_ind, sampler_needing_reset_ind)
]
]
instr_embeddings_list.append(
torch.stack(current_instr_embeddings_list, dim=0)
)
instr_embeddings = torch.stack(instr_embeddings_list, dim=0)
assert recurrent_hidden_states.shape[0] == 1
memory = recurrent_hidden_states[0]
# instr_embedding: Optional[torch.Tensor] = None
for i in range(rollouts_len):
obs.image = images[i]
if "minigrid_mission" in observations:
obs.instr = instrs[i]
# reset = needs_reset[i].item()
# if self.baby_ai_model.use_instr and (reset or i == 0):
# instr_embedding = self.baby_ai_model._get_instr_embedding(obs.instr)
results.append(
self.forward_once(
obs, memory=memory * masks[i], instr_embedding=instr_embeddings[i]
)
)
memory = results[-1]["memory"]
embedding = torch.cat([r["embedding"] for r in results], dim=0)
extra_predictions_list = [r["extra_predictions"] for r in results]
extra_predictions = {
key: torch.cat([ep[key] for ep in extra_predictions_list], dim=0)
for key in extra_predictions_list[0]
}
return (
ActorCriticOutput(
distributions=CategoricalDistr(logits=self.actor(embedding),),
values=self.critic(embedding),
extras=extra_predictions
if not self.include_auxiliary_head
else {
**extra_predictions,
"auxiliary_distributions": cast(
Any, CategoricalDistr(logits=self.aux(embedding))
),
},
),
torch.stack([r["memory"] for r in results], dim=0),
)
# noinspection PyMethodOverriding
def forward(
self,
observations: ObservationType,
recurrent_hidden_states: torch.FloatTensor,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
):
(
observations,
recurrent_hidden_states,
prev_actions,
masks,
num_steps,
num_samplers,
num_agents,
num_layers,
) = self.adapt_inputs(
observations, recurrent_hidden_states, prev_actions, masks
)
if self.lang_model != "gru":
ac_output, hidden_states = self.forward_loop(
observations=observations,
recurrent_hidden_states=recurrent_hidden_states,
prev_actions=prev_actions,
masks=masks, # type: ignore
)
return self.adapt_result(
ac_output,
hidden_states[-1:],
num_steps,
num_samplers,
num_agents,
num_layers,
observations,
)
assert recurrent_hidden_states.shape[0] == 1
images = cast(torch.FloatTensor, observations["minigrid_ego_image"])
if self.use_cnn2:
images_shape = images.shape
# noinspection PyArgumentList
images = images + torch.LongTensor([0, 11, 22]).view( # type:ignore
1, 1, 1, 3
).to(images.device)
images = self.semantic_embedding(images).view( # type:ignore
*images_shape[:3], 24
)
images = images.permute(0, 3, 1, 2).float() # type:ignore
_, nsamplers, _ = recurrent_hidden_states.shape
rollouts_len = images.shape[0] // nsamplers
masks = cast(
torch.FloatTensor, masks.view(rollouts_len, nsamplers, *masks.shape[1:])
)
instrs: Optional[torch.Tensor] = None
if "minigrid_mission" in observations and self.use_instr:
instrs = cast(torch.FloatTensor, observations["minigrid_mission"])
instrs = instrs.view(rollouts_len, nsamplers, instrs.shape[-1])
needs_instr_reset_mask = masks != 1.0
needs_instr_reset_mask[0] = 1
needs_instr_reset_mask = needs_instr_reset_mask.squeeze(-1)
blocking_inds: List[int] = np.where(
needs_instr_reset_mask.view(rollouts_len, -1).any(-1).cpu().numpy()
)[0].tolist()
blocking_inds.append(rollouts_len)
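        # `blocking_inds` holds the time steps at which at least one sampler resets (plus the
        # final index); these are used below to run the LSTM over contiguous chunks between
        # resets, masking the hidden state at each chunk boundary.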
instr_embeddings: Optional[torch.Tensor] = None
if self.use_instr:
instr_reset_multi_inds = list(
(int(a), int(b))
for a, b in zip(*np.where(needs_instr_reset_mask.cpu().numpy()))
)
time_ind_to_which_need_instr_reset: List[List] = [
[] for _ in range(rollouts_len)
]
reset_multi_ind_to_index = {
mi: i for i, mi in enumerate(instr_reset_multi_inds)
}
for a, b in instr_reset_multi_inds:
time_ind_to_which_need_instr_reset[a].append(b)
unique_instr_embeddings = self._get_instr_embedding(
instrs[needs_instr_reset_mask]
)
instr_embeddings_list = [unique_instr_embeddings[:nsamplers]]
current_instr_embeddings_list = list(instr_embeddings_list[-1])
for time_ind in range(1, rollouts_len):
if len(time_ind_to_which_need_instr_reset[time_ind]) == 0:
instr_embeddings_list.append(instr_embeddings_list[-1])
else:
for sampler_needing_reset_ind in time_ind_to_which_need_instr_reset[
time_ind
]:
current_instr_embeddings_list[
sampler_needing_reset_ind
] = unique_instr_embeddings[
reset_multi_ind_to_index[
(time_ind, sampler_needing_reset_ind)
]
]
instr_embeddings_list.append(
torch.stack(current_instr_embeddings_list, dim=0)
)
instr_embeddings = torch.stack(instr_embeddings_list, dim=0)
# The following code can be used to compute the instr_embeddings in another way
# and thus verify that the above logic is (more likely to be) correct
# needs_instr_reset_mask = (masks != 1.0)
# needs_instr_reset_mask[0] *= 0
# needs_instr_reset_inds = needs_instr_reset_mask.view(nrollouts, -1).any(-1).cpu().numpy()
#
# # Get inds where a new task has started
# blocking_inds: List[int] = np.where(needs_instr_reset_inds)[0].tolist()
# blocking_inds.append(needs_instr_reset_inds.shape[0])
# if nrollouts != 1:
# pdb.set_trace()
# if blocking_inds[0] != 0:
# blocking_inds.insert(0, 0)
# if self.use_instr:
# instr_embeddings_list = []
# for ind0, ind1 in zip(blocking_inds[:-1], blocking_inds[1:]):
# instr_embeddings_list.append(
# self._get_instr_embedding(instrs[ind0])
# .unsqueeze(0)
# .repeat(ind1 - ind0, 1, 1)
# )
# tmp_instr_embeddings = torch.cat(instr_embeddings_list, dim=0)
# assert (instr_embeddings - tmp_instr_embeddings).abs().max().item() < 1e-6
# Embed images
# images = images.view(nrollouts, nsamplers, *images.shape[1:])
image_embeddings = self.image_conv(images)
if self.arch.startswith("expert_filmcnn"):
instr_embeddings_flatter = instr_embeddings.view(
-1, *instr_embeddings.shape[2:]
)
for controller in self.controllers:
image_embeddings = controller(
image_embeddings, instr_embeddings_flatter
)
image_embeddings = F.relu(self.film_pool(image_embeddings))
image_embeddings = image_embeddings.view(rollouts_len, nsamplers, -1)
if self.use_instr and self.lang_model == "attgru":
raise NotImplementedError("Currently attgru is not implemented.")
memory = None
if self.use_memory:
assert recurrent_hidden_states.shape[0] == 1
hidden = (
recurrent_hidden_states[:, :, : self.semi_memory_size],
recurrent_hidden_states[:, :, self.semi_memory_size :],
)
embeddings_list = []
for ind0, ind1 in zip(blocking_inds[:-1], blocking_inds[1:]):
hidden = (hidden[0] * masks[ind0], hidden[1] * masks[ind0])
rnn_out, hidden = self.memory_rnn(image_embeddings[ind0:ind1], hidden)
embeddings_list.append(rnn_out)
# embedding = hidden[0]
embedding = torch.cat(embeddings_list, dim=0)
memory = torch.cat(hidden, dim=-1)
else:
embedding = image_embeddings
        if self.use_instr and "filmcnn" not in self.arch:
embedding = torch.cat((embedding, instr_embeddings), dim=-1)
if hasattr(self, "aux_info") and self.aux_info:
extra_predictions = {
info: self.extra_heads[info](embedding) for info in self.extra_heads
}
else:
extra_predictions = dict()
embedding = embedding.view(rollouts_len * nsamplers, -1)
ac_output = ActorCriticOutput(
distributions=CategoricalDistr(logits=self.actor(embedding),),
values=self.critic(embedding),
extras=extra_predictions
if not self.include_auxiliary_head
else {
**extra_predictions,
"auxiliary_distributions": CategoricalDistr(logits=self.aux(embedding)),
},
)
hidden_states = memory
return self.adapt_result(
ac_output,
hidden_states,
num_steps,
num_samplers,
num_agents,
num_layers,
observations,
)
@staticmethod
def adapt_inputs( # type: ignore
observations: ObservationType,
recurrent_hidden_states: torch.FloatTensor,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
):
# INPUTS
# observations are of shape [num_steps, num_samplers, ...]
# recurrent_hidden_states are of shape [num_layers, num_samplers, (num_agents,) num_dims]
# prev_actions are of shape [num_steps, num_samplers, ...]
# masks are of shape [num_steps, num_samplers, 1]
# num_agents is assumed to be 1
num_steps, num_samplers = masks.shape[:2]
num_layers = recurrent_hidden_states.shape[0]
num_agents = 1
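        # Worked example (an illustrative sketch): with masks of shape [num_steps=4, num_samplers=2, 1]
        # and hidden states of shape [1, 2, hidden_dim], the observations are flattened below into a
        # batch of 4 * 2 = 8 "old-style" babyai inputs before being passed to the wrapped model.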
# Flatten all observation batch dims
def recursively_adapt_observations(obs):
for entry in obs:
if isinstance(obs[entry], Dict):
recursively_adapt_observations(obs[entry])
else:
assert isinstance(obs[entry], torch.Tensor)
if entry in ["minigrid_ego_image", "minigrid_mission"]:
final_dims = obs[entry].shape[2:]
obs[entry] = obs[entry].view(
num_steps * num_samplers, *final_dims
)
# Old-style inputs need to be
# observations [num_steps * num_samplers, ...]
# recurrent_hidden_states [num_layers, num_samplers (* num_agents), num_dims]
# prev_actions [num_steps * num_samplers, -1]
# masks [num_steps * num_samplers, 1]
recursively_adapt_observations(observations)
recurrent_hidden_states = cast(
torch.FloatTensor,
recurrent_hidden_states.view(num_layers, num_samplers * num_agents, -1),
)
if prev_actions is not None:
prev_actions = prev_actions.view( # type:ignore
num_steps * num_samplers, -1
)
masks = masks.view(num_steps * num_samplers, 1) # type:ignore
return (
observations,
recurrent_hidden_states,
prev_actions,
masks,
num_steps,
num_samplers,
num_agents,
num_layers,
)
@staticmethod
def adapt_result(ac_output, hidden_states, num_steps, num_samplers, num_agents, num_layers, observations): # type: ignore
distributions = CategoricalDistr(
logits=ac_output.distributions.logits.view(num_steps, num_samplers, -1),
)
values = ac_output.values.view(num_steps, num_samplers, num_agents)
extras = ac_output.extras # ignore shape
# TODO confirm the shape of the auxiliary distribution is the same as the actor's
if "auxiliary_distributions" in extras:
extras["auxiliary_distributions"] = CategoricalDistr(
logits=extras["auxiliary_distributions"].logits.view(
num_steps, num_samplers, -1 # assume single-agent
),
)
hidden_states = hidden_states.view(num_layers, num_samplers * num_agents, -1)
# Unflatten all observation batch dims
def recursively_adapt_observations(obs):
for entry in obs:
if isinstance(obs[entry], Dict):
recursively_adapt_observations(obs[entry])
else:
assert isinstance(obs[entry], torch.Tensor)
if entry in ["minigrid_ego_image", "minigrid_mission"]:
final_dims = obs[entry].shape[
1:
] # assumes no agents dim in observations!
obs[entry] = obs[entry].view(
num_steps, num_samplers * num_agents, *final_dims
)
recursively_adapt_observations(observations)
return (
ActorCriticOutput(
distributions=distributions, values=values, extras=extras
),
hidden_states,
)
class BabyAIRecurrentACModel(ActorCriticModel[CategoricalDistr]):
def __init__(
self,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
image_dim=128,
memory_dim=128,
instr_dim=128,
use_instr=False,
lang_model="gru",
use_memory=False,
arch="cnn1",
aux_info=None,
include_auxiliary_head: bool = False,
):
super().__init__(action_space=action_space, observation_space=observation_space)
assert "minigrid_ego_image" in observation_space.spaces
assert not use_instr or "minigrid_mission" in observation_space.spaces
self.memory_dim = memory_dim
self.include_auxiliary_head = include_auxiliary_head
self.baby_ai_model = BabyAIACModelWrapped(
obs_space={"image": 7 * 7 * 3, "instr": 100,},
action_space=action_space,
image_dim=image_dim,
memory_dim=memory_dim,
instr_dim=instr_dim,
use_instr=use_instr,
lang_model=lang_model,
use_memory=use_memory,
arch=arch,
aux_info=aux_info,
include_auxiliary_head=self.include_auxiliary_head,
)
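        # The `obs_space` values above follow babyai's conventions: a flattened 7 x 7 x 3
        # egocentric image and (presumably) a maximum instruction length of 100 tokens.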
self.memory_key = "rnn"
@property
def recurrent_hidden_state_size(self) -> int:
return 2 * self.memory_dim
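    # Note: the recurrent hidden state is 2 * memory_dim wide because the wrapped model stores
    # the LSTM hidden and cell states concatenated along the last dimension.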
@property
def num_recurrent_layers(self):
return 1
def _recurrent_memory_specification(self):
return {
self.memory_key: (
(
("layer", self.num_recurrent_layers),
("sampler", None),
("hidden", self.recurrent_hidden_state_size),
),
torch.float32,
)
}
def forward( # type:ignore
self,
observations: ObservationType,
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
out, recurrent_hidden_states = self.baby_ai_model.forward(
observations=observations,
recurrent_hidden_states=cast(
torch.FloatTensor, memory.tensor(self.memory_key)
),
prev_actions=prev_actions,
masks=masks,
)
return out, memory.set_tensor(self.memory_key, recurrent_hidden_states)
| ask4help-main | allenact_plugins/babyai_plugin/babyai_models.py |
import random
import signal
from typing import Tuple, Any, List, Dict, Optional, Union, Callable
import babyai
import babyai.bot
import gym
import numpy as np
from gym.utils import seeding
from gym_minigrid.minigrid import MiniGridEnv
from allenact.base_abstractions.misc import RLStepResult
from allenact.base_abstractions.sensor import Sensor, SensorSuite
from allenact.base_abstractions.task import Task, TaskSampler
from allenact.utils.system import get_logger
class BabyAITask(Task[MiniGridEnv]):
def __init__(
self,
env: MiniGridEnv,
sensors: Union[SensorSuite, List[Sensor]],
task_info: Dict[str, Any],
expert_view_size: int = 7,
expert_can_see_through_walls: bool = False,
**kwargs,
):
super().__init__(
env=env,
sensors=sensors,
task_info=task_info,
max_steps=env.max_steps,
**kwargs,
)
self._was_successful: bool = False
self.bot: Optional[babyai.bot.Bot] = None
self._bot_died = False
self.expert_view_size = expert_view_size
self.expert_can_see_through_walls = expert_can_see_through_walls
self._last_action: Optional[int] = None
env.max_steps = env.max_steps + 1
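        # Note: `max_steps` for this Task was set from the original `env.max_steps` above;
        # bumping the env's own limit by one (presumably) ensures the task's step limit is hit
        # before the underlying BabyAI environment terminates the episode by itself.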
@property
def action_space(self) -> gym.spaces.Discrete:
return self.env.action_space
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
return self.env.render(mode=mode)
def _step(self, action: int) -> RLStepResult:
assert isinstance(action, int)
minigrid_obs, reward, done, info = self.env.step(action=action)
self._last_action = action
self._was_successful = done and reward > 0
return RLStepResult(
observation=self.get_observations(minigrid_output_obs=minigrid_obs),
reward=reward,
done=self.is_done(),
info=info,
)
def get_observations(
self, *args, minigrid_output_obs: Optional[Dict[str, Any]] = None, **kwargs
) -> Any:
return self.sensor_suite.get_observations(
env=self.env, task=self, minigrid_output_obs=minigrid_output_obs
)
def reached_terminal_state(self) -> bool:
return self._was_successful
@classmethod
def class_action_names(cls, **kwargs) -> Tuple[str, ...]:
return tuple(
x
for x, _ in sorted(
[(str(a), a.value) for a in MiniGridEnv.Actions], key=lambda x: x[1]
)
)
def close(self) -> None:
pass
    def _expert_timeout_handler(self, signum, frame):
        raise TimeoutError
def query_expert(self, **kwargs) -> Tuple[Any, bool]:
see_through_walls = self.env.see_through_walls
agent_view_size = self.env.agent_view_size
if self._bot_died:
return 0, False
try:
self.env.agent_view_size = self.expert_view_size
self.env.expert_can_see_through_walls = self.expert_can_see_through_walls
if self.bot is None:
self.bot = babyai.bot.Bot(self.env)
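            # SIGALRM-based timeout: if the expert bot takes longer than the allotted number of
            # seconds to replan, the alarm fires, `TimeoutError` is raised, the bot is marked as
            # dead, and (0, False) is returned for this and all subsequent expert queries.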
            signal.signal(signal.SIGALRM, self._expert_timeout_handler)
signal.alarm(kwargs.get("timeout", 4 if self.num_steps_taken() == 0 else 2))
return self.bot.replan(self._last_action), True
except TimeoutError as _:
self._bot_died = True
return 0, False
finally:
signal.alarm(0)
self.env.see_through_walls = see_through_walls
self.env.agent_view_size = agent_view_size
def metrics(self) -> Dict[str, Any]:
metrics = {
**super(BabyAITask, self).metrics(),
"success": 1.0 * (self.reached_terminal_state()),
}
return metrics
class BabyAITaskSampler(TaskSampler):
def __init__(
self,
env_builder: Union[str, Callable[..., MiniGridEnv]],
sensors: Union[SensorSuite, List[Sensor]],
max_tasks: Optional[int] = None,
num_unique_seeds: Optional[int] = None,
task_seeds_list: Optional[List[int]] = None,
deterministic_sampling: bool = False,
extra_task_kwargs: Optional[Dict] = None,
**kwargs,
):
super(BabyAITaskSampler, self).__init__()
self.sensors = (
SensorSuite(sensors) if not isinstance(sensors, SensorSuite) else sensors
)
self.max_tasks = max_tasks
self.num_unique_seeds = num_unique_seeds
self.deterministic_sampling = deterministic_sampling
self.extra_task_kwargs = (
extra_task_kwargs if extra_task_kwargs is not None else {}
)
self._last_env_seed: Optional[int] = None
self._last_task: Optional[BabyAITask] = None
assert (self.num_unique_seeds is None) or (
0 < self.num_unique_seeds
), "`num_unique_seeds` must be a positive integer."
self.num_unique_seeds = num_unique_seeds
self.task_seeds_list = task_seeds_list
if self.task_seeds_list is not None:
if self.num_unique_seeds is not None:
assert self.num_unique_seeds == len(
self.task_seeds_list
), "`num_unique_seeds` must equal the length of `task_seeds_list` if both specified."
self.num_unique_seeds = len(self.task_seeds_list)
elif self.num_unique_seeds is not None:
self.task_seeds_list = list(range(self.num_unique_seeds))
if (not deterministic_sampling) and self.max_tasks:
get_logger().warning(
"`deterministic_sampling` is `False` but you have specified `max_tasks < inf`,"
" this might be a mistake when running testing."
)
if isinstance(env_builder, str):
self.env = gym.make(env_builder)
else:
self.env = env_builder()
self.np_seeded_random_gen, _ = seeding.np_random(random.randint(0, 2 ** 31 - 1))
self.num_tasks_generated = 0
@property
def length(self) -> Union[int, float]:
return (
float("inf")
if self.max_tasks is None
else self.max_tasks - self.num_tasks_generated
)
@property
def total_unique(self) -> Optional[Union[int, float]]:
return None if self.num_unique_seeds is None else self.num_unique_seeds
@property
def last_sampled_task(self) -> Optional[Task]:
raise NotImplementedError
def next_task(self, force_advance_scene: bool = False) -> Optional[BabyAITask]:
if self.length <= 0:
return None
if self.num_unique_seeds is not None:
if self.deterministic_sampling:
self._last_env_seed = self.task_seeds_list[
self.num_tasks_generated % len(self.task_seeds_list)
]
else:
self._last_env_seed = self.np_seeded_random_gen.choice(
self.task_seeds_list
)
else:
self._last_env_seed = self.np_seeded_random_gen.randint(0, 2 ** 31 - 1)
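        # The sampled seed fully determines the next BabyAI episode: with `deterministic_sampling`
        # the seeds in `task_seeds_list` are cycled in order, otherwise a seed is drawn from the
        # sampler's own seeded RNG, so task sequences are reproducible given `set_seed`.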
self.env.seed(self._last_env_seed)
self.env.saved_seed = self._last_env_seed
self.env.reset()
self.num_tasks_generated += 1
self._last_task = BabyAITask(env=self.env, sensors=self.sensors, task_info={})
return self._last_task
def close(self) -> None:
self.env.close()
@property
def all_observation_spaces_equal(self) -> bool:
return True
def reset(self) -> None:
self.num_tasks_generated = 0
self.env.reset()
def set_seed(self, seed: int) -> None:
self.np_seeded_random_gen, _ = seeding.np_random(seed)
| ask4help-main | allenact_plugins/babyai_plugin/babyai_tasks.py |
ask4help-main | allenact_plugins/babyai_plugin/configs/__init__.py |
|
import glob
import os
import babyai
from allenact_plugins.babyai_plugin.babyai_constants import (
BABYAI_EXPERT_TRAJECTORIES_DIR,
)
def make_small_demos(dir_path: str):
    for file_path in glob.glob(os.path.join(dir_path, "*.pkl")):
if "valid" not in file_path and "small" not in file_path:
new_file_path = file_path.replace(".pkl", "-small.pkl")
if os.path.exists(new_file_path):
continue
print(
"Saving small version of {} to {}...".format(
os.path.basename(file_path), new_file_path
)
)
babyai.utils.save_demos(
babyai.utils.load_demos(file_path)[:1000], new_file_path
)
print("Done.")
if __name__ == "__main__":
make_small_demos(BABYAI_EXPERT_TRAJECTORIES_DIR)
| ask4help-main | allenact_plugins/babyai_plugin/scripts/truncate_expert_demos.py |
ask4help-main | allenact_plugins/babyai_plugin/scripts/__init__.py |
|
import glob
import os
import babyai
import numpy as np
from allenact_plugins.babyai_plugin.babyai_constants import (
BABYAI_EXPERT_TRAJECTORIES_DIR,
)
# Boss level
# [(50, 11.0), (90, 22.0), (99, 32.0), (99.9, 38.0), (99.99, 43.0)]
if __name__ == "__main__":
# level = "BossLevel"
level = "GoToLocal"
files = glob.glob(
os.path.join(BABYAI_EXPERT_TRAJECTORIES_DIR, "*{}-v0.pkl".format(level))
)
assert len(files) == 1
demos = babyai.utils.load_demos(files[0])
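    # Each demo's first element appears to be the instruction/mission string; below we report
    # percentiles of instruction length (in words) across all expert demos for `level`.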
percentiles = [50, 90, 99, 99.9, 99.99, 100]
print(
list(
zip(
percentiles,
np.percentile([len(d[0].split(" ")) for d in demos], percentiles),
)
)
)
| ask4help-main | allenact_plugins/babyai_plugin/scripts/get_instr_length_percentiles.py |
import argparse
import os
import platform
from allenact_plugins.babyai_plugin.babyai_constants import (
BABYAI_EXPERT_TRAJECTORIES_DIR,
)
LEVEL_TO_TRAIN_VALID_IDS = {
"BossLevel": (
"1DkVVpIEVtpyo1LxOXQL_bVyjFCTO3cHD",
"1ccEFA_n5RT4SWD0Wa_qO65z2HACJBace",
),
"GoToObjMaze": (
"1P1CuMbGDJtZit1f-8hmd-HwweXZMj77T",
"1MVlVsIpJUZ0vjrYGXY6Ku4m4vBxtWjRZ",
),
"GoTo": ("1ABR1q-TClgjSlbhVdVJjzOBpTmTtlTN1", "13DlEx5woi31MIs_dzyLxfi7dPe1g59l2"),
"GoToLocal": (
"1U8YWdd3viN2lxOP5BByNUZRPVDKVvDAN",
"1Esy-J0t8eJUg6_RT8F4kkegHYDWwqmSl",
),
}
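# The ids above are Google Drive file ids for the (train, validation) expert-demo pickles of
# each BabyAI level; they are substituted into the wget download templates below.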
def get_args():
"""Creates the argument parser and parses input arguments."""
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
description="download_babyai_expert_demos",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"dataset",
nargs="?",
default="all",
help="dataset name (one of {}, or all)".format(
", ".join(LEVEL_TO_TRAIN_VALID_IDS.keys())
),
)
return parser.parse_args()
if __name__ == "__main__":
args = get_args()
if platform.system() == "Linux":
download_template = """wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id={}' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id={}" -O {}"""
elif platform.system() == "Darwin":
download_template = """wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id={}' -O- | gsed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id={}" -O {}"""
else:
raise NotImplementedError("{} is not supported".format(platform.system()))
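    # The templates above implement the usual trick for fetching large files from Google Drive
    # with wget: a first request saves cookies and extracts the download-confirmation token,
    # which a second request then passes along with the file id. The only platform difference
    # is which sed is invoked (`sed` on Linux, `gsed` from homebrew on MacOS).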
try:
os.makedirs(BABYAI_EXPERT_TRAJECTORIES_DIR, exist_ok=True)
if args.dataset == "all":
id_items = LEVEL_TO_TRAIN_VALID_IDS
else:
assert (
args.dataset in LEVEL_TO_TRAIN_VALID_IDS
), "Only {} are valid datasets".format(
", ".join(LEVEL_TO_TRAIN_VALID_IDS.keys())
)
id_items = {args.dataset: LEVEL_TO_TRAIN_VALID_IDS[args.dataset]}
for level_name, (train_id, valid_id) in id_items.items():
train_path = os.path.join(
BABYAI_EXPERT_TRAJECTORIES_DIR, "BabyAI-{}-v0.pkl".format(level_name)
)
if os.path.exists(train_path):
print("{} already exists, skipping...".format(train_path))
else:
os.system(download_template.format(train_id, train_id, train_path))
print("Demos saved to {}.".format(train_path))
valid_path = os.path.join(
BABYAI_EXPERT_TRAJECTORIES_DIR,
"BabyAI-{}-v0_valid.pkl".format(level_name),
)
if os.path.exists(valid_path):
print("{} already exists, skipping...".format(valid_path))
else:
os.system(download_template.format(valid_id, valid_id, valid_path))
print("Demos saved to {}.".format(valid_path))
except Exception as _:
raise Exception(
"Failed to download babyai demos. Make sure you have the appropriate command line"
" tools installed for your platform. For MacOS you'll need to install `gsed` and `gwget (the gnu version"
" of sed) using homebrew or some other method."
)
| ask4help-main | allenact_plugins/babyai_plugin/scripts/download_babyai_expert_demos.py |
ask4help-main | allenact_plugins/babyai_plugin/data/__init__.py |
|
import random
from typing import Dict, Tuple, List, Any, Optional, Union, Sequence, cast
import gym
import numpy as np
from allenact.base_abstractions.misc import RLStepResult
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import Task
from allenact.utils.system import get_logger
from allenact_plugins.ithor_plugin.ithor_constants import (
MOVE_AHEAD,
ROTATE_LEFT,
ROTATE_RIGHT,
LOOK_DOWN,
LOOK_UP,
END,
)
from allenact_plugins.ithor_plugin.ithor_environment import IThorEnvironment
from allenact_plugins.ithor_plugin.ithor_util import round_to_factor
class ObjectNaviThorGridTask(Task[IThorEnvironment]):
"""Defines the object navigation task in AI2-THOR.
In object navigation an agent is randomly initialized into an AI2-THOR scene and must
find an object of a given type (e.g. tomato, television, etc). An object is considered
found if the agent takes an `End` action and the object is visible to the agent (see
    [here](https://ai2thor.allenai.org/documentation/concepts) for a definition of visibility
in AI2-THOR).
The actions available to an agent in this task are:
1. Move ahead
* Moves agent ahead by 0.25 meters.
1. Rotate left / rotate right
* Rotates the agent by 90 degrees counter-clockwise / clockwise.
1. Look down / look up
* Changes agent view angle by 30 degrees up or down. An agent cannot look more than 30
          degrees above horizontal or more than 60 degrees below horizontal.
1. End
* Ends the task and the agent receives a positive reward if the object type is visible to the agent,
otherwise it receives a negative reward.
# Attributes
env : The ai2thor environment.
sensor_suite: Collection of sensors formed from the `sensors` argument in the initializer.
task_info : The task info. Must contain a field "object_type" that specifies, as a string,
the goal object type.
    max_steps : The maximum number of steps an agent can take in the task before it is considered failed.
observation_space: The observation space returned on each step from the sensors.
"""
_actions = (MOVE_AHEAD, ROTATE_LEFT, ROTATE_RIGHT, LOOK_DOWN, LOOK_UP, END)
_CACHED_LOCATIONS_FROM_WHICH_OBJECT_IS_VISIBLE: Dict[
Tuple[str, str], List[Tuple[float, float, int, int]]
] = {}
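    # The class-level cache above maps (scene_name, object_type) to agent poses
    # (x, z, rotation, horizon) from which that object is visible; it is shared across task
    # instances so the expert only has to query the simulator once per scene/object pair.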
def __init__(
self,
env: IThorEnvironment,
sensors: List[Sensor],
task_info: Dict[str, Any],
max_steps: int,
**kwargs,
) -> None:
"""Initializer.
See class documentation for parameter definitions.
"""
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self._took_end_action: bool = False
self._success: Optional[bool] = False
self._subsampled_locations_from_which_obj_visible: Optional[
List[Tuple[float, float, int, int]]
] = None
self.task_info["followed_path"] = [self.env.get_agent_location()]
self.task_info["action_names"] = self.class_action_names()
@property
def action_space(self):
return gym.spaces.Discrete(len(self._actions))
def reached_terminal_state(self) -> bool:
return self._took_end_action
@classmethod
def class_action_names(cls, **kwargs) -> Tuple[str, ...]:
return cls._actions
def close(self) -> None:
self.env.stop()
def _step(self, action: Union[int, Sequence[int]]) -> RLStepResult:
assert isinstance(action, int)
action = cast(int, action)
action_str = self.class_action_names()[action]
if action_str == END:
self._took_end_action = True
self._success = self.is_goal_object_visible()
self.last_action_success = self._success
else:
self.env.step({"action": action_str})
self.last_action_success = self.env.last_action_success
if (
not self.last_action_success
) and self._CACHED_LOCATIONS_FROM_WHICH_OBJECT_IS_VISIBLE is not None:
self.env.update_graph_with_failed_action(failed_action=action_str)
self.task_info["followed_path"].append(self.env.get_agent_location())
step_result = RLStepResult(
observation=self.get_observations(),
reward=self.judge(),
done=self.is_done(),
info={"last_action_success": self.last_action_success},
)
return step_result
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
assert mode == "rgb", "only rgb rendering is implemented"
return self.env.current_frame
def is_goal_object_visible(self) -> bool:
"""Is the goal object currently visible?"""
return any(
o["objectType"] == self.task_info["object_type"]
for o in self.env.visible_objects()
)
def judge(self) -> float:
"""Compute the reward after having taken a step."""
reward = -0.01
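        # Reward shaping: a small per-step penalty, an extra penalty for failed actions, and
        # +1 / -1 at episode end depending on whether the goal object is visible.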
if not self.last_action_success:
reward += -0.03
if self._took_end_action:
reward += 1.0 if self._success else -1.0
return float(reward)
def metrics(self) -> Dict[str, Any]:
if not self.is_done():
return {}
else:
return {
"success": self._success,
**super(ObjectNaviThorGridTask, self).metrics(),
}
def query_expert(self, **kwargs) -> Tuple[int, bool]:
target = self.task_info["object_type"]
if self.is_goal_object_visible():
return self.class_action_names().index(END), True
else:
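            # Expert strategy: lazily gather (and cache) poses from which the target object is
            # visible, subsample at most 5 of them, compute shortest paths through the grid
            # graph from the current pose to each, and return the first action along the
            # shortest such path.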
key = (self.env.scene_name, target)
if self._subsampled_locations_from_which_obj_visible is None:
if key not in self._CACHED_LOCATIONS_FROM_WHICH_OBJECT_IS_VISIBLE:
obj_ids: List[str] = []
obj_ids.extend(
o["objectId"]
for o in self.env.last_event.metadata["objects"]
if o["objectType"] == target
)
assert len(obj_ids) != 0, "No objects to get an expert path to."
locations_from_which_object_is_visible: List[
Tuple[float, float, int, int]
] = []
y = self.env.last_event.metadata["agent"]["position"]["y"]
positions_to_check_interactionable_from = [
{"x": x, "y": y, "z": z}
for x, z in set((x, z) for x, z, _, _ in self.env.graph.nodes)
]
for obj_id in set(obj_ids):
self.env.controller.step(
{
"action": "PositionsFromWhichItemIsInteractable",
"objectId": obj_id,
"positions": positions_to_check_interactionable_from,
}
)
assert (
self.env.last_action_success
), "Could not get positions from which item was interactable."
returned = self.env.last_event.metadata["actionReturn"]
locations_from_which_object_is_visible.extend(
(
round(x, 2),
round(z, 2),
round_to_factor(rot, 90) % 360,
round_to_factor(hor, 30) % 360,
)
for x, z, rot, hor, standing in zip(
returned["x"],
returned["z"],
returned["rotation"],
returned["horizon"],
returned["standing"],
)
if standing == 1
)
self._CACHED_LOCATIONS_FROM_WHICH_OBJECT_IS_VISIBLE[
key
] = locations_from_which_object_is_visible
self._subsampled_locations_from_which_obj_visible = self._CACHED_LOCATIONS_FROM_WHICH_OBJECT_IS_VISIBLE[
key
]
if len(self._subsampled_locations_from_which_obj_visible) > 5:
self._subsampled_locations_from_which_obj_visible = random.sample(
self._CACHED_LOCATIONS_FROM_WHICH_OBJECT_IS_VISIBLE[key], 5
)
current_loc_key = self.env.get_key(self.env.last_event.metadata["agent"])
paths = []
for goal_key in self._subsampled_locations_from_which_obj_visible:
path = self.env.shortest_state_path(
source_state_key=current_loc_key, goal_state_key=goal_key
)
if path is not None:
paths.append(path)
if len(paths) == 0:
return 0, False
shortest_path_ind = int(np.argmin([len(p) for p in paths]))
if len(paths[shortest_path_ind]) == 1:
get_logger().warning(
"Shortest path computations suggest we are at the target but episode does not think so."
)
return 0, False
next_key_on_shortest_path = paths[shortest_path_ind][1]
return (
self.class_action_names().index(
self.env.action_transitioning_between_keys(
current_loc_key, next_key_on_shortest_path
)
),
True,
)
| ask4help-main | allenact_plugins/ithor_plugin/ithor_tasks.py |
"""A wrapper for engaging with the THOR environment."""
import copy
import functools
import math
import random
from typing import Tuple, Dict, List, Set, Union, Any, Optional, Mapping, cast
import ai2thor.fifo_server
import ai2thor.server
import networkx as nx
import numpy as np
from ai2thor.controller import Controller
from scipy.spatial.transform import Rotation
from allenact.utils.system import get_logger
from allenact_plugins.ithor_plugin.ithor_constants import VISIBILITY_DISTANCE, FOV
from allenact_plugins.ithor_plugin.ithor_util import round_to_factor
class IThorEnvironment(object):
"""Wrapper for the ai2thor controller providing additional functionality
and bookkeeping.
See [here](https://ai2thor.allenai.org/documentation/installation) for comprehensive
documentation on AI2-THOR.
# Attributes
controller : The ai2thor controller.
"""
def __init__(
self,
x_display: Optional[str] = None,
docker_enabled: bool = False,
local_thor_build: Optional[str] = None,
visibility_distance: float = VISIBILITY_DISTANCE,
fov: float = FOV,
player_screen_width: int = 300,
player_screen_height: int = 300,
quality: str = "Very Low",
restrict_to_initially_reachable_points: bool = False,
make_agents_visible: bool = True,
object_open_speed: float = 1.0,
simplify_physics: bool = False,
) -> None:
"""Initializer.
# Parameters
        x_display : The x display into which to launch ai2thor (possibly necessary if you are running on a server
without an attached display).
docker_enabled : Whether or not to run thor in a docker container (useful on a server without an attached
display so that you don't have to start an x display).
local_thor_build : The path to a local build of ai2thor. This is probably not necessary for your use case
and can be safely ignored.
visibility_distance : The distance (in meters) at which objects, in the viewport of the agent,
are considered visible by ai2thor and will have their "visible" flag be set to `True` in the metadata.
fov : The agent's camera's field of view.
player_screen_width : The width resolution (in pixels) of the images returned by ai2thor.
player_screen_height : The height resolution (in pixels) of the images returned by ai2thor.
quality : The quality at which to render. Possible quality settings can be found in
`ai2thor._quality_settings.QUALITY_SETTINGS`.
restrict_to_initially_reachable_points : Whether or not to restrict the agent to locations in ai2thor
that were found to be (initially) reachable by the agent (i.e. reachable by the agent after resetting
the scene). This can be useful if you want to ensure there are only a fixed set of locations where the
agent can go.
        make_agents_visible : Whether or not the agent should be visible. Most noticeable when there are multiple agents
or when quality settings are high so that the agent casts a shadow.
object_open_speed : How quickly objects should be opened. High speeds mean faster simulation but also mean
that opening objects have a lot of kinetic energy and can, possibly, knock other objects away.
        simplify_physics : Whether or not to simplify physics when applicable. Currently this only simplifies object
interactions when opening drawers (when simplified, objects within a drawer do not slide around on
their own when the drawer is opened or closed, instead they are effectively glued down).
"""
self._start_player_screen_width = player_screen_width
self._start_player_screen_height = player_screen_height
self._local_thor_build = local_thor_build
self.x_display = x_display
self.controller: Optional[Controller] = None
self._started = False
self._quality = quality
self._initially_reachable_points: Optional[List[Dict]] = None
self._initially_reachable_points_set: Optional[Set[Tuple[float, float]]] = None
self._move_mag: Optional[float] = None
self._grid_size: Optional[float] = None
self._visibility_distance = visibility_distance
self._fov = fov
self.restrict_to_initially_reachable_points = (
restrict_to_initially_reachable_points
)
self.make_agents_visible = make_agents_visible
self.object_open_speed = object_open_speed
self._always_return_visible_range = False
self.simplify_physics = simplify_physics
self.start(None)
# noinspection PyTypeHints
self.controller.docker_enabled = docker_enabled # type: ignore
@property
def scene_name(self) -> str:
"""Current ai2thor scene."""
return self.controller.last_event.metadata["sceneName"]
@property
def current_frame(self) -> np.ndarray:
"""Returns rgb image corresponding to the agent's egocentric view."""
return self.controller.last_event.frame
@property
def last_event(self) -> ai2thor.server.Event:
"""Last event returned by the controller."""
return self.controller.last_event
@property
def started(self) -> bool:
"""Has the ai2thor controller been started."""
return self._started
@property
def last_action(self) -> str:
"""Last action, as a string, taken by the agent."""
return self.controller.last_event.metadata["lastAction"]
@last_action.setter
def last_action(self, value: str) -> None:
"""Set the last action taken by the agent.
Doing this is rewriting history, be careful.
"""
self.controller.last_event.metadata["lastAction"] = value
@property
def last_action_success(self) -> bool:
"""Was the last action taken by the agent a success?"""
return self.controller.last_event.metadata["lastActionSuccess"]
@last_action_success.setter
def last_action_success(self, value: bool) -> None:
"""Set whether or not the last action taken by the agent was a success.
Doing this is rewriting history, be careful.
"""
self.controller.last_event.metadata["lastActionSuccess"] = value
@property
def last_action_return(self) -> Any:
"""Get the value returned by the last action (if applicable).
For an example of an action that returns a value, see
`"GetReachablePositions"`.
"""
return self.controller.last_event.metadata["actionReturn"]
@last_action_return.setter
def last_action_return(self, value: Any) -> None:
"""Set the value returned by the last action.
Doing this is rewriting history, be careful.
"""
self.controller.last_event.metadata["actionReturn"] = value
def start(
self, scene_name: Optional[str], move_mag: float = 0.25, **kwargs,
) -> None:
"""Starts the ai2thor controller if it was previously stopped.
After starting, `reset` will be called with the scene name and move magnitude.
# Parameters
scene_name : The scene to load.
move_mag : The amount of distance the agent moves in a single `MoveAhead` step.
kwargs : additional kwargs, passed to reset.
"""
if self._started:
raise RuntimeError(
"Trying to start the environment but it is already started."
)
self.controller = Controller(
x_display=self.x_display,
width=self._start_player_screen_width,
height=self._start_player_screen_height,
local_executable_path=self._local_thor_build,
quality=self._quality,
server_class=ai2thor.fifo_server.FifoServer,
)
if (
self._start_player_screen_height,
self._start_player_screen_width,
) != self.current_frame.shape[:2]:
self.controller.step(
{
"action": "ChangeResolution",
"x": self._start_player_screen_width,
"y": self._start_player_screen_height,
}
)
self._started = True
self.reset(scene_name=scene_name, move_mag=move_mag, **kwargs)
def stop(self) -> None:
"""Stops the ai2thor controller."""
try:
self.controller.stop()
except Exception as e:
get_logger().warning(str(e))
finally:
self._started = False
def reset(
self, scene_name: Optional[str], move_mag: float = 0.25, **kwargs,
):
"""Resets the ai2thor in a new scene.
Resets ai2thor into a new scene and initializes the scene/agents with
prespecified settings (e.g. move magnitude).
# Parameters
scene_name : The scene to load.
move_mag : The amount of distance the agent moves in a single `MoveAhead` step.
kwargs : additional kwargs, passed to the controller "Initialize" action.
"""
self._move_mag = move_mag
self._grid_size = self._move_mag
if scene_name is None:
scene_name = self.controller.last_event.metadata["sceneName"]
self.controller.reset(scene_name)
self.controller.step(
{
"action": "Initialize",
"gridSize": self._grid_size,
"visibilityDistance": self._visibility_distance,
"fieldOfView": self._fov,
"makeAgentsVisible": self.make_agents_visible,
"alwaysReturnVisibleRange": self._always_return_visible_range,
**kwargs,
}
)
if self.object_open_speed != 1.0:
self.controller.step(
{"action": "ChangeOpenSpeed", "x": self.object_open_speed}
)
self._initially_reachable_points = None
self._initially_reachable_points_set = None
self.controller.step({"action": "GetReachablePositions"})
if not self.controller.last_event.metadata["lastActionSuccess"]:
get_logger().warning(
"Error when getting reachable points: {}".format(
self.controller.last_event.metadata["errorMessage"]
)
)
self._initially_reachable_points = self.last_action_return
def teleport_agent_to(
self,
x: float,
y: float,
z: float,
rotation: float,
horizon: float,
standing: Optional[bool] = None,
force_action: bool = False,
only_initially_reachable: Optional[bool] = None,
verbose=True,
ignore_y_diffs=False,
) -> None:
"""Helper function teleporting the agent to a given location."""
if standing is None:
standing = self.last_event.metadata.get(
"isStanding", self.last_event.metadata["agent"].get("isStanding")
)
original_location = self.get_agent_location()
target = {"x": x, "y": y, "z": z}
if only_initially_reachable is None:
only_initially_reachable = self.restrict_to_initially_reachable_points
if only_initially_reachable:
reachable_points = self.initially_reachable_points
reachable = False
for p in reachable_points:
if self.position_dist(target, p, ignore_y=ignore_y_diffs) < 0.01:
reachable = True
break
if not reachable:
self.last_action = "TeleportFull"
self.last_event.metadata[
"errorMessage"
] = "Target position was not initially reachable."
self.last_action_success = False
return
self.controller.step(
dict(
action="TeleportFull",
x=x,
y=y,
z=z,
rotation={"x": 0.0, "y": rotation, "z": 0.0},
horizon=horizon,
standing=standing,
forceAction=force_action,
)
)
if not self.last_action_success:
agent_location = self.get_agent_location()
rot_diff = (
agent_location["rotation"] - original_location["rotation"]
) % 360
new_old_dist = self.position_dist(
original_location, agent_location, ignore_y=ignore_y_diffs
)
if (
self.position_dist(
original_location, agent_location, ignore_y=ignore_y_diffs
)
> 1e-2
or min(rot_diff, 360 - rot_diff) > 1
):
get_logger().warning(
"Teleportation FAILED but agent still moved (position_dist {}, rot diff {})"
" (\nprevious location\n{}\ncurrent_location\n{}\n)".format(
new_old_dist, rot_diff, original_location, agent_location
)
)
return
if force_action:
assert self.last_action_success
return
agent_location = self.get_agent_location()
rot_diff = (agent_location["rotation"] - rotation) % 360
if (
self.position_dist(agent_location, target, ignore_y=ignore_y_diffs) > 1e-2
or min(rot_diff, 360 - rot_diff) > 1
):
if only_initially_reachable:
self._snap_agent_to_initially_reachable(verbose=False)
if verbose:
get_logger().warning(
"Teleportation did not place agent"
" precisely where desired in scene {}"
" (\ndesired\n{}\nactual\n{}\n)"
" perhaps due to grid snapping."
" Action is considered failed but agent may have moved.".format(
self.scene_name,
{
"x": x,
"y": y,
"z": z,
"rotation": rotation,
"standing": standing,
"horizon": horizon,
},
agent_location,
)
)
self.last_action_success = False
return
def random_reachable_state(self, seed: int = None) -> Dict:
"""Returns a random reachable location in the scene."""
if seed is not None:
random.seed(seed)
xyz = random.choice(self.currently_reachable_points)
rotation = random.choice([0, 90, 180, 270])
horizon = random.choice([0, 30, 60, 330])
state = copy.copy(xyz)
state["rotation"] = rotation
state["horizon"] = horizon
return state
def randomize_agent_location(
self, seed: int = None, partial_position: Optional[Dict[str, float]] = None
) -> Dict:
"""Teleports the agent to a random reachable location in the scene."""
if partial_position is None:
partial_position = {}
k = 0
state: Optional[Dict] = None
while k == 0 or (not self.last_action_success and k < 10):
state = self.random_reachable_state(seed=seed)
self.teleport_agent_to(**{**state, **partial_position})
k += 1
if not self.last_action_success:
get_logger().warning(
(
"Randomize agent location in scene {}"
" with seed {} and partial position {} failed in "
"10 attempts. Forcing the action."
).format(self.scene_name, seed, partial_position)
)
self.teleport_agent_to(**{**state, **partial_position}, force_action=True) # type: ignore
assert self.last_action_success
assert state is not None
return state
def object_pixels_in_frame(
self, object_id: str, hide_all: bool = True, hide_transparent: bool = False
) -> np.ndarray:
"""Return an mask for a given object in the agent's current view.
# Parameters
object_id : The id of the object.
hide_all : Whether or not to hide all other objects in the scene before getting the mask.
hide_transparent : Whether or not partially transparent objects are considered to occlude the object.
# Returns
A numpy array of the mask.
"""
        # Emphasizing an object turns it magenta and hides all other objects
        # from view; we can find where the object is on the screen by
        # emphasizing it and then scanning across the image for magenta pixels.
if hide_all:
self.step({"action": "EmphasizeObject", "objectId": object_id})
else:
self.step({"action": "MaskObject", "objectId": object_id})
if hide_transparent:
self.step({"action": "HideTranslucentObjects"})
# noinspection PyShadowingBuiltins
filter = np.array([[[255, 0, 255]]])
object_pixels = 1 * np.all(self.current_frame == filter, axis=2)
if hide_all:
self.step({"action": "UnemphasizeAll"})
else:
self.step({"action": "UnmaskObject", "objectId": object_id})
if hide_transparent:
self.step({"action": "UnhideAllObjects"})
return object_pixels
def object_pixels_on_grid(
self,
object_id: str,
grid_shape: Tuple[int, int],
hide_all: bool = True,
hide_transparent: bool = False,
) -> np.ndarray:
"""Like `object_pixels_in_frame` but counts object pixels in a
partitioning of the image."""
def partition(n, num_parts):
m = n // num_parts
parts = [m] * num_parts
num_extra = n % num_parts
for k in range(num_extra):
parts[k] += 1
return parts
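        # For example, partition(10, 3) -> [4, 3, 3]: near-equal block sizes along one image axis.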
object_pixels = self.object_pixels_in_frame(
object_id=object_id, hide_all=hide_all, hide_transparent=hide_transparent
)
# Divide the current frame into a grid and count the number
# of hand object pixels in each of the grid squares
sums_in_blocks: List[List] = []
frame_shape = self.current_frame.shape[:2]
row_inds = np.cumsum([0] + partition(frame_shape[0], grid_shape[0]))
col_inds = np.cumsum([0] + partition(frame_shape[1], grid_shape[1]))
for i in range(len(row_inds) - 1):
sums_in_blocks.append([])
for j in range(len(col_inds) - 1):
sums_in_blocks[i].append(
np.sum(
object_pixels[
row_inds[i] : row_inds[i + 1], col_inds[j] : col_inds[j + 1]
]
)
)
return np.array(sums_in_blocks, dtype=np.float32)
def object_in_hand(self):
"""Object metadata for the object in the agent's hand."""
inv_objs = self.last_event.metadata["inventoryObjects"]
if len(inv_objs) == 0:
return None
elif len(inv_objs) == 1:
return self.get_object_by_id(
self.last_event.metadata["inventoryObjects"][0]["objectId"]
)
else:
            raise AttributeError("The agent must have at most one object in its inventory.")
@property
def initially_reachable_points(self) -> List[Dict[str, float]]:
"""List of {"x": x, "y": y, "z": z} locations in the scene that were
reachable after initially resetting."""
assert self._initially_reachable_points is not None
return copy.deepcopy(self._initially_reachable_points) # type:ignore
@property
def initially_reachable_points_set(self) -> Set[Tuple[float, float]]:
"""Set of (x,z) locations in the scene that were reachable after
initially resetting."""
if self._initially_reachable_points_set is None:
self._initially_reachable_points_set = set()
for p in self.initially_reachable_points:
self._initially_reachable_points_set.add(
self._agent_location_to_tuple(p)
)
return self._initially_reachable_points_set
@property
def currently_reachable_points(self) -> List[Dict[str, float]]:
"""List of {"x": x, "y": y, "z": z} locations in the scene that are
currently reachable."""
self.step({"action": "GetReachablePositions"})
return self.last_event.metadata["actionReturn"] # type:ignore
def get_agent_location(self) -> Dict[str, Union[float, bool]]:
"""Gets agent's location."""
metadata = self.controller.last_event.metadata
location = {
"x": metadata["agent"]["position"]["x"],
"y": metadata["agent"]["position"]["y"],
"z": metadata["agent"]["position"]["z"],
"rotation": metadata["agent"]["rotation"]["y"],
"horizon": metadata["agent"]["cameraHorizon"],
"standing": metadata.get("isStanding", metadata["agent"].get("isStanding")),
}
return location
@staticmethod
def _agent_location_to_tuple(p: Dict[str, float]) -> Tuple[float, float]:
return round(p["x"], 2), round(p["z"], 2)
def _snap_agent_to_initially_reachable(self, verbose=True):
agent_location = self.get_agent_location()
end_location_tuple = self._agent_location_to_tuple(agent_location)
if end_location_tuple in self.initially_reachable_points_set:
return
agent_x = agent_location["x"]
agent_z = agent_location["z"]
closest_reachable_points = list(self.initially_reachable_points_set)
closest_reachable_points = sorted(
closest_reachable_points,
key=lambda xz: abs(xz[0] - agent_x) + abs(xz[1] - agent_z),
)
        # In rare cases `end_location_tuple` might not be considered to be in
        # `self.initially_reachable_points_set` even when it actually is; here we check for such cases.
if (
math.sqrt(
(
(
np.array(closest_reachable_points[0])
- np.array(end_location_tuple)
)
** 2
).sum()
)
< 1e-6
):
return
saved_last_action = self.last_action
saved_last_action_success = self.last_action_success
saved_last_action_return = self.last_action_return
saved_error_message = self.last_event.metadata["errorMessage"]
# Thor behaves weirdly when the agent gets off of the grid and you
# try to teleport the agent back to the closest grid location. To
# get around this we first teleport the agent to random location
# and then back to where it should be.
for point in self.initially_reachable_points:
if abs(agent_x - point["x"]) > 0.1 or abs(agent_z - point["z"]) > 0.1:
self.teleport_agent_to(
rotation=0,
horizon=30,
**point,
only_initially_reachable=False,
verbose=False,
)
if self.last_action_success:
break
for p in closest_reachable_points:
self.teleport_agent_to(
**{**agent_location, "x": p[0], "z": p[1]},
only_initially_reachable=False,
verbose=False,
)
if self.last_action_success:
break
teleport_forced = False
if not self.last_action_success:
self.teleport_agent_to(
**{
**agent_location,
"x": closest_reachable_points[0][0],
"z": closest_reachable_points[0][1],
},
force_action=True,
only_initially_reachable=False,
verbose=False,
)
teleport_forced = True
self.last_action = saved_last_action
self.last_action_success = saved_last_action_success
self.last_action_return = saved_last_action_return
self.last_event.metadata["errorMessage"] = saved_error_message
new_agent_location = self.get_agent_location()
if verbose:
get_logger().warning(
(
"In {}, at location (x,z)=({},{}) which is not in the set "
"of initially reachable points;"
" attempting to correct this: agent teleported to (x,z)=({},{}).\n"
"Teleportation {} forced."
).format(
self.scene_name,
agent_x,
agent_z,
new_agent_location["x"],
new_agent_location["z"],
"was" if teleport_forced else "wasn't",
)
)
def step(
self,
action_dict: Optional[Dict[str, Union[str, int, float, Dict]]] = None,
**kwargs: Union[str, int, float, Dict],
) -> ai2thor.server.Event:
"""Take a step in the ai2thor environment."""
if action_dict is None:
action_dict = dict()
action_dict.update(kwargs)
action = cast(str, action_dict["action"])
skip_render = "renderImage" in action_dict and not action_dict["renderImage"]
last_frame: Optional[np.ndarray] = None
if skip_render:
last_frame = self.current_frame
if self.simplify_physics:
action_dict["simplifyOPhysics"] = True
if "Move" in action and "Hand" not in action: # type: ignore
action_dict = {
**action_dict,
"moveMagnitude": self._move_mag,
} # type: ignore
start_location = self.get_agent_location()
sr = self.controller.step(action_dict)
if self.restrict_to_initially_reachable_points:
end_location_tuple = self._agent_location_to_tuple(
self.get_agent_location()
)
if end_location_tuple not in self.initially_reachable_points_set:
self.teleport_agent_to(**start_location, force_action=True) # type: ignore
self.last_action = action
self.last_action_success = False
self.last_event.metadata[
"errorMessage"
] = "Moved to location outside of initially reachable points."
elif "RandomizeHideSeekObjects" in action:
last_position = self.get_agent_location()
self.controller.step(action_dict)
metadata = self.last_event.metadata
if self.position_dist(last_position, self.get_agent_location()) > 0.001:
self.teleport_agent_to(**last_position, force_action=True) # type: ignore
get_logger().warning(
"In scene {}, after randomization of hide and seek objects, agent moved.".format(
self.scene_name
)
)
sr = self.controller.step({"action": "GetReachablePositions"})
self._initially_reachable_points = self.controller.last_event.metadata[
"actionReturn"
]
self._initially_reachable_points_set = None
self.last_action = action
self.last_action_success = metadata["lastActionSuccess"]
self.controller.last_event.metadata["actionReturn"] = []
elif "RotateUniverse" in action:
sr = self.controller.step(action_dict)
metadata = self.last_event.metadata
if metadata["lastActionSuccess"]:
sr = self.controller.step({"action": "GetReachablePositions"})
self._initially_reachable_points = self.controller.last_event.metadata[
"actionReturn"
]
self._initially_reachable_points_set = None
self.last_action = action
self.last_action_success = metadata["lastActionSuccess"]
self.controller.last_event.metadata["actionReturn"] = []
else:
sr = self.controller.step(action_dict)
if self.restrict_to_initially_reachable_points:
self._snap_agent_to_initially_reachable()
if skip_render:
assert last_frame is not None
self.last_event.frame = last_frame
return sr
@staticmethod
def position_dist(
p0: Mapping[str, Any],
p1: Mapping[str, Any],
ignore_y: bool = False,
l1_dist: bool = False,
) -> float:
"""Distance between two points of the form {"x": x, "y":y, "z":z"}."""
if l1_dist:
return (
abs(p0["x"] - p1["x"])
+ (0 if ignore_y else abs(p0["y"] - p1["y"]))
+ abs(p0["z"] - p1["z"])
)
else:
return math.sqrt(
(p0["x"] - p1["x"]) ** 2
+ (0 if ignore_y else (p0["y"] - p1["y"]) ** 2)
+ (p0["z"] - p1["z"]) ** 2
)
@staticmethod
def rotation_dist(a: Dict[str, float], b: Dict[str, float]):
"""Distance between rotations."""
def deg_dist(d0: float, d1: float):
dist = (d0 - d1) % 360
return min(dist, 360 - dist)
return sum(deg_dist(a[k], b[k]) for k in ["x", "y", "z"])
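    # For example, deg_dist(350, 10) == 20; `rotation_dist` sums this per-axis distance over
    # x, y, and z, giving a simple (non-geodesic) measure of how different two rotations are.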
@staticmethod
def angle_between_rotations(a: Dict[str, float], b: Dict[str, float]):
return np.abs(
(180 / (2 * math.pi))
* (
Rotation.from_euler("xyz", [a[k] for k in "xyz"], degrees=True)
* Rotation.from_euler("xyz", [b[k] for k in "xyz"], degrees=True).inv()
).as_rotvec()
).sum()
def closest_object_with_properties(
self, properties: Dict[str, Any]
) -> Optional[Dict[str, Any]]:
"""Find the object closest to the agent that has the given
properties."""
agent_pos = self.controller.last_event.metadata["agent"]["position"]
min_dist = float("inf")
closest = None
for o in self.all_objects():
satisfies_all = True
for k, v in properties.items():
if o[k] != v:
satisfies_all = False
break
if satisfies_all:
d = self.position_dist(agent_pos, o["position"])
if d < min_dist:
min_dist = d
closest = o
return closest
def closest_visible_object_of_type(
self, object_type: str
) -> Optional[Dict[str, Any]]:
"""Find the object closest to the agent that is visible and has the
given type."""
properties = {"visible": True, "objectType": object_type}
return self.closest_object_with_properties(properties)
def closest_object_of_type(self, object_type: str) -> Optional[Dict[str, Any]]:
"""Find the object closest to the agent that has the given type."""
properties = {"objectType": object_type}
return self.closest_object_with_properties(properties)
def closest_reachable_point_to_position(
self, position: Dict[str, float]
) -> Tuple[Dict[str, float], float]:
"""Of all reachable positions, find the one that is closest to the
given location."""
target = np.array([position["x"], position["z"]])
min_dist = float("inf")
closest_point = None
for pt in self.initially_reachable_points:
dist = np.linalg.norm(target - np.array([pt["x"], pt["z"]]))
if dist < min_dist:
closest_point = pt
min_dist = dist
if min_dist < 1e-3:
break
assert closest_point is not None
return closest_point, min_dist
@staticmethod
def _angle_from_to(a_from: float, a_to: float) -> float:
a_from = a_from % 360
a_to = a_to % 360
min_rot = min(a_from, a_to)
max_rot = max(a_from, a_to)
rot_across_0 = (360 - max_rot) + min_rot
rot_not_across_0 = max_rot - min_rot
rot_err = min(rot_across_0, rot_not_across_0)
if rot_across_0 == rot_err:
rot_err *= -1 if a_to > a_from else 1
else:
rot_err *= 1 if a_to > a_from else -1
return rot_err
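    # Illustrative example (hypothetical angles): _angle_from_to(350.0, 10.0) returns
    # 20.0 (a +20 degree rotation crossing 0), while _angle_from_to(10.0, 350.0) returns
    # -20.0; the sign encodes the direction of the shortest rotation from `a_from`
    # to `a_to`.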
def agent_xz_to_scene_xz(self, agent_xz: Dict[str, float]) -> Dict[str, float]:
agent_pos = self.get_agent_location()
x_rel_agent = agent_xz["x"]
z_rel_agent = agent_xz["z"]
scene_x = agent_pos["x"]
scene_z = agent_pos["z"]
rotation = agent_pos["rotation"]
if abs(rotation) < 1e-5:
scene_x += x_rel_agent
scene_z += z_rel_agent
elif abs(rotation - 90) < 1e-5:
scene_x += z_rel_agent
scene_z += -x_rel_agent
elif abs(rotation - 180) < 1e-5:
scene_x += -x_rel_agent
scene_z += -z_rel_agent
elif abs(rotation - 270) < 1e-5:
scene_x += -z_rel_agent
scene_z += x_rel_agent
else:
raise Exception("Rotation must be one of 0, 90, 180, or 270.")
return {"x": scene_x, "z": scene_z}
def scene_xz_to_agent_xz(self, scene_xz: Dict[str, float]) -> Dict[str, float]:
agent_pos = self.get_agent_location()
x_err = scene_xz["x"] - agent_pos["x"]
z_err = scene_xz["z"] - agent_pos["z"]
rotation = agent_pos["rotation"]
if abs(rotation) < 1e-5:
agent_x = x_err
agent_z = z_err
elif abs(rotation - 90) < 1e-5:
agent_x = -z_err
agent_z = x_err
elif abs(rotation - 180) < 1e-5:
agent_x = -x_err
agent_z = -z_err
elif abs(rotation - 270) < 1e-5:
agent_x = z_err
agent_z = -x_err
else:
raise Exception("Rotation must be one of 0, 90, 180, or 270.")
return {"x": agent_x, "z": agent_z}
def all_objects(self) -> List[Dict[str, Any]]:
"""Return all object metadata."""
return self.controller.last_event.metadata["objects"]
def all_objects_with_properties(
self, properties: Dict[str, Any]
) -> List[Dict[str, Any]]:
"""Find all objects with the given properties."""
objects = []
for o in self.all_objects():
satisfies_all = True
for k, v in properties.items():
if o[k] != v:
satisfies_all = False
break
if satisfies_all:
objects.append(o)
return objects
def visible_objects(self) -> List[Dict[str, Any]]:
"""Return all visible objects."""
return self.all_objects_with_properties({"visible": True})
def get_object_by_id(self, object_id: str) -> Optional[Dict[str, Any]]:
for o in self.last_event.metadata["objects"]:
if o["objectId"] == object_id:
return o
return None
###
# Following is used for computing shortest paths between states
###
_CACHED_GRAPHS: Dict[str, nx.DiGraph] = {}
GRAPH_ACTIONS_SET = {"LookUp", "LookDown", "RotateLeft", "RotateRight", "MoveAhead"}
def reachable_points_with_rotations_and_horizons(self):
self.controller.step({"action": "GetReachablePositions"})
assert self.last_action_success
points_slim = self.last_event.metadata["actionReturn"]
points = []
for r in [0, 90, 180, 270]:
for horizon in [-30, 0, 30, 60]:
for p in points_slim:
p = copy.copy(p)
p["rotation"] = r
p["horizon"] = horizon
points.append(p)
return points
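    # Each reachable (x, z) position is expanded over 4 rotations x 4 horizons, i.e.
    # 16 discrete agent states per position; a scene with, say, 100 reachable positions
    # (an illustrative count) therefore produces a list of 1600 candidate graph nodes.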
@staticmethod
def location_for_key(key, y_value=0.0):
x, z, rot, hor = key
loc = dict(x=x, y=y_value, z=z, rotation=rot, horizon=hor)
return loc
@staticmethod
def get_key(input_dict: Dict[str, Any]) -> Tuple[float, float, int, int]:
if "x" in input_dict:
x = input_dict["x"]
z = input_dict["z"]
rot = input_dict["rotation"]
hor = input_dict["horizon"]
else:
x = input_dict["position"]["x"]
z = input_dict["position"]["z"]
rot = input_dict["rotation"]["y"]
hor = input_dict["cameraHorizon"]
return (
round(x, 2),
round(z, 2),
round_to_factor(rot, 90) % 360,
round_to_factor(hor, 30) % 360,
)
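    # Illustrative example (hypothetical agent metadata):
    # get_key({"x": 0.5, "z": 0.75, "rotation": 89.0, "horizon": 31.0}) returns
    # (0.5, 0.75, 90, 30): positions keep two decimals while rotation and horizon are
    # snapped to the 90- and 30-degree grid used by the navigation graph.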
def update_graph_with_failed_action(self, failed_action: str):
if (
self.scene_name not in self._CACHED_GRAPHS
or failed_action not in self.GRAPH_ACTIONS_SET
):
return
source_key = self.get_key(self.last_event.metadata["agent"])
self._check_contains_key(source_key)
edge_dict = self.graph[source_key]
to_remove_key = None
for target_key in self.graph[source_key]:
if edge_dict[target_key]["action"] == failed_action:
to_remove_key = target_key
break
if to_remove_key is not None:
self.graph.remove_edge(source_key, to_remove_key)
def _add_from_to_edge(
self,
g: nx.DiGraph,
s: Tuple[float, float, int, int],
t: Tuple[float, float, int, int],
):
def ae(x, y):
return abs(x - y) < 0.001
s_x, s_z, s_rot, s_hor = s
t_x, t_z, t_rot, t_hor = t
dist = round(math.sqrt((s_x - t_x) ** 2 + (s_z - t_z) ** 2), 2)
angle_dist = (round_to_factor(t_rot - s_rot, 90) % 360) // 90
horz_dist = (round_to_factor(t_hor - s_hor, 30) % 360) // 30
# If source and target differ by more than one action, continue
if sum(x != 0 for x in [dist, angle_dist, horz_dist]) != 1:
return
grid_size = self._grid_size
action = None
if angle_dist != 0:
if angle_dist == 1:
action = "RotateRight"
elif angle_dist == 3:
action = "RotateLeft"
elif horz_dist != 0:
if horz_dist == 11:
action = "LookUp"
elif horz_dist == 1:
action = "LookDown"
elif ae(dist, grid_size):
if (
(s_rot == 0 and ae(t_z - s_z, grid_size))
or (s_rot == 90 and ae(t_x - s_x, grid_size))
or (s_rot == 180 and ae(t_z - s_z, -grid_size))
or (s_rot == 270 and ae(t_x - s_x, -grid_size))
):
g.add_edge(s, t, action="MoveAhead")
if action is not None:
g.add_edge(s, t, action=action)
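    # Illustrative example (hypothetical keys, assuming a 0.25m grid): for
    # s = (0.0, 0.0, 0, 0) and t = (0.0, 0.25, 0, 0) only the z coordinate changes with
    # s_rot == 0, so a "MoveAhead" edge is added; for t = (0.0, 0.0, 90, 0),
    # angle_dist == 1 and a "RotateRight" edge is added instead.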
@functools.lru_cache(1)
def possible_neighbor_offsets(self) -> Tuple[Tuple[float, float, int, int], ...]:
grid_size = round(self._grid_size, 2)
offsets = []
for rot_diff in [-90, 0, 90]:
for horz_diff in [-30, 0, 30, 60]:
for x_diff in [-grid_size, 0, grid_size]:
for z_diff in [-grid_size, 0, grid_size]:
if (rot_diff != 0) + (horz_diff != 0) + (x_diff != 0) + (
z_diff != 0
) == 1:
offsets.append((x_diff, z_diff, rot_diff, horz_diff))
return tuple(offsets)
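    # Sketch of the expected output (assuming a 0.25m grid): exactly one of the four
    # deltas is non-zero in each offset, so the tuple contains 2 + 3 + 2 + 2 == 9
    # entries, e.g. (0.25, 0, 0, 0), (0, -0.25, 0, 0), (0, 0, 90, 0) and (0, 0, 0, -30).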
def _add_node_to_graph(self, graph: nx.DiGraph, s: Tuple[float, float, int, int]):
if s in graph:
return
existing_nodes = set(graph.nodes())
graph.add_node(s)
for o in self.possible_neighbor_offsets():
t = (s[0] + o[0], s[1] + o[1], s[2] + o[2], s[3] + o[3])
if t in existing_nodes:
self._add_from_to_edge(graph, s, t)
self._add_from_to_edge(graph, t, s)
@property
def graph(self):
if self.scene_name not in self._CACHED_GRAPHS:
g = nx.DiGraph()
points = self.reachable_points_with_rotations_and_horizons()
for p in points:
self._add_node_to_graph(g, self.get_key(p))
self._CACHED_GRAPHS[self.scene_name] = g
return self._CACHED_GRAPHS[self.scene_name]
@graph.setter
def graph(self, g):
self._CACHED_GRAPHS[self.scene_name] = g
def _check_contains_key(self, key: Tuple[float, float, int, int], add_if_not=True):
if key not in self.graph:
get_logger().warning(
"{} was not in the graph for scene {}.".format(key, self.scene_name)
)
if add_if_not:
self._add_node_to_graph(self.graph, key)
def shortest_state_path(self, source_state_key, goal_state_key):
self._check_contains_key(source_state_key)
self._check_contains_key(goal_state_key)
# noinspection PyBroadException
try:
path = nx.shortest_path(self.graph, source_state_key, goal_state_key)
return path
except Exception as _:
return None
def action_transitioning_between_keys(self, s, t):
self._check_contains_key(s)
self._check_contains_key(t)
if self.graph.has_edge(s, t):
return self.graph.get_edge_data(s, t)["action"]
else:
return None
def shortest_path_next_state(self, source_state_key, goal_state_key):
self._check_contains_key(source_state_key)
self._check_contains_key(goal_state_key)
if source_state_key == goal_state_key:
raise RuntimeError("called next state on the same source and goal state")
state_path = self.shortest_state_path(source_state_key, goal_state_key)
return state_path[1]
def shortest_path_next_action(self, source_state_key, goal_state_key):
self._check_contains_key(source_state_key)
self._check_contains_key(goal_state_key)
next_state_key = self.shortest_path_next_state(source_state_key, goal_state_key)
return self.graph.get_edge_data(source_state_key, next_state_key)["action"]
def shortest_path_length(self, source_state_key, goal_state_key):
self._check_contains_key(source_state_key)
self._check_contains_key(goal_state_key)
try:
return nx.shortest_path_length(self.graph, source_state_key, goal_state_key)
except nx.NetworkXNoPath as _:
return float("inf")
| ask4help-main | allenact_plugins/ithor_plugin/ithor_environment.py |
ask4help-main | allenact_plugins/ithor_plugin/__init__.py |
|
"""Common constants used when training agents to complete tasks in iTHOR, the
interactive version of AI2-THOR."""
from collections import OrderedDict
from typing import Set, Dict
MOVE_AHEAD = "MoveAhead"
ROTATE_LEFT = "RotateLeft"
ROTATE_RIGHT = "RotateRight"
LOOK_DOWN = "LookDown"
LOOK_UP = "LookUp"
END = "End"
VISIBILITY_DISTANCE = 1.25
FOV = 90.0
ORDERED_SCENE_TYPES = ("kitchens", "livingrooms", "bedrooms", "bathrooms")
NUM_SCENE_TYPES = len(ORDERED_SCENE_TYPES)
def make_scene_name(type_ind, scene_num):
if type_ind == 1:
return "FloorPlan" + str(scene_num) + "_physics"
elif scene_num < 10:
return "FloorPlan" + str(type_ind) + "0" + str(scene_num) + "_physics"
else:
return "FloorPlan" + str(type_ind) + str(scene_num) + "_physics"
SCENES_TYPE_TO_SCENE_NAMES = OrderedDict(
[
(
ORDERED_SCENE_TYPES[type_ind - 1],
tuple(
make_scene_name(type_ind=type_ind, scene_num=scene_num)
for scene_num in range(1, 31)
),
)
for type_ind in range(1, NUM_SCENE_TYPES + 1)
]
)
SCENES_TYPE_TO_TRAIN_SCENE_NAMES = OrderedDict(
(key, scenes[:20]) for key, scenes in SCENES_TYPE_TO_SCENE_NAMES.items()
)
SCENES_TYPE_TO_VALID_SCENE_NAMES = OrderedDict(
(key, scenes[20:25]) for key, scenes in SCENES_TYPE_TO_SCENE_NAMES.items()
)
SCENES_TYPE_TO_TEST_SCENE_NAMES = OrderedDict(
(key, scenes[25:30]) for key, scenes in SCENES_TYPE_TO_SCENE_NAMES.items()
)
ALL_SCENE_NAMES = sum(SCENES_TYPE_TO_SCENE_NAMES.values(), tuple())
TRAIN_SCENE_NAMES = sum(
(scenes for scenes in SCENES_TYPE_TO_TRAIN_SCENE_NAMES.values()), tuple()
)
VALID_SCENE_NAMES = sum(
(scenes for scenes in SCENES_TYPE_TO_VALID_SCENE_NAMES.values()), tuple()
)
TEST_SCENE_NAMES = sum(
(scenes for scenes in SCENES_TYPE_TO_TEST_SCENE_NAMES.values()), tuple()
)
TRAIN_SCENE_NAMES_SET = set(TRAIN_SCENE_NAMES)
VALID_SCENE_NAMES_SET = set(VALID_SCENE_NAMES)
TEST_SCENE_NAMES_SET = set(TEST_SCENE_NAMES)
_object_type_and_location_tsv = """
AlarmClock bedrooms
Apple kitchens
ArmChair livingrooms,bedrooms
BaseballBat bedrooms
BasketBall bedrooms
Bathtub bathrooms
BathtubBasin bathrooms
Bed bedrooms
Blinds kitchens,bedrooms
Book kitchens,livingrooms,bedrooms
Boots livingrooms,bedrooms
Bottle kitchens
Bowl kitchens,livingrooms,bedrooms
Box livingrooms,bedrooms
Bread kitchens
ButterKnife kitchens
Cabinet kitchens,livingrooms,bedrooms,bathrooms
Candle livingrooms,bathrooms
Cart bathrooms
CD bedrooms
CellPhone kitchens,livingrooms,bedrooms
Chair kitchens,livingrooms,bedrooms
Cloth bedrooms,bathrooms
CoffeeMachine kitchens
CoffeeTable livingrooms,bedrooms
CounterTop kitchens,livingrooms,bedrooms,bathrooms
CreditCard kitchens,livingrooms,bedrooms
Cup kitchens
Curtains kitchens,livingrooms,bedrooms
Desk bedrooms
DeskLamp livingrooms,bedrooms
DiningTable kitchens,livingrooms,bedrooms
DishSponge kitchens,bathrooms
Drawer kitchens,livingrooms,bedrooms,bathrooms
Dresser livingrooms,bedrooms,bathrooms
Egg kitchens
Faucet kitchens,bathrooms
FloorLamp livingrooms,bedrooms
Footstool bedrooms
Fork kitchens
Fridge kitchens
GarbageCan kitchens,livingrooms,bedrooms,bathrooms
HandTowel bathrooms
HandTowelHolder bathrooms
HousePlant kitchens,livingrooms,bedrooms,bathrooms
Kettle kitchens
KeyChain livingrooms,bedrooms
Knife kitchens
Ladle kitchens
Laptop kitchens,livingrooms,bedrooms
LaundryHamper bedrooms
LaundryHamperLid bedrooms
Lettuce kitchens
LightSwitch kitchens,livingrooms,bedrooms,bathrooms
Microwave kitchens
Mirror kitchens,livingrooms,bedrooms,bathrooms
Mug kitchens,bedrooms
Newspaper livingrooms
Ottoman livingrooms,bedrooms
Painting kitchens,livingrooms,bedrooms,bathrooms
Pan kitchens
PaperTowel kitchens,bathrooms
Pen kitchens,livingrooms,bedrooms
Pencil kitchens,livingrooms,bedrooms
PepperShaker kitchens
Pillow livingrooms,bedrooms
Plate kitchens,livingrooms
Plunger bathrooms
Poster bedrooms
Pot kitchens
Potato kitchens
RemoteControl livingrooms,bedrooms
Safe kitchens,livingrooms,bedrooms
SaltShaker kitchens
ScrubBrush bathrooms
Shelf kitchens,livingrooms,bedrooms,bathrooms
ShowerCurtain bathrooms
ShowerDoor bathrooms
ShowerGlass bathrooms
ShowerHead bathrooms
SideTable livingrooms,bedrooms
Sink kitchens,bathrooms
SinkBasin kitchens,bathrooms
SoapBar bathrooms
SoapBottle kitchens,bathrooms
Sofa livingrooms,bedrooms
Spatula kitchens
Spoon kitchens
SprayBottle bathrooms
Statue kitchens,livingrooms,bedrooms
StoveBurner kitchens
StoveKnob kitchens
TeddyBear bedrooms
Television livingrooms,bedrooms
TennisRacket bedrooms
TissueBox livingrooms,bedrooms,bathrooms
Toaster kitchens
Toilet bathrooms
ToiletPaper bathrooms
ToiletPaperHanger bathrooms
Tomato kitchens
Towel bathrooms
TowelHolder bathrooms
TVStand livingrooms
Vase kitchens,livingrooms,bedrooms
Watch livingrooms,bedrooms
WateringCan livingrooms
Window kitchens,livingrooms,bedrooms,bathrooms
WineBottle kitchens
"""
OBJECT_TYPE_TO_SCENE_TYPES = OrderedDict()
for ot_tab_scene_types in _object_type_and_location_tsv.split("\n"):
if ot_tab_scene_types != "":
ot, scene_types_csv = ot_tab_scene_types.split("\t")
OBJECT_TYPE_TO_SCENE_TYPES[ot] = tuple(sorted(scene_types_csv.split(",")))
SCENE_TYPE_TO_OBJECT_TYPES: Dict[str, Set[str]] = OrderedDict(
((k, set()) for k in ORDERED_SCENE_TYPES)
)
for ot_tab_scene_types in _object_type_and_location_tsv.split("\n"):
if ot_tab_scene_types != "":
ot, scene_types_csv = ot_tab_scene_types.split("\t")
for scene_type in scene_types_csv.split(","):
SCENE_TYPE_TO_OBJECT_TYPES[scene_type].add(ot)
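# Illustrative lookups into the mappings built above (assuming the table keeps its
# original tab-separated layout): OBJECT_TYPE_TO_SCENE_TYPES["Apple"] == ("kitchens",)
# and "Towel" in SCENE_TYPE_TO_OBJECT_TYPES["bathrooms"], so samplers can query which
# scene types may contain a given object type and vice versa.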
| ask4help-main | allenact_plugins/ithor_plugin/ithor_constants.py |
import glob
import math
import os
import platform
from contextlib import contextmanager
from typing import Sequence
import Xlib
import Xlib.display
import ai2thor.controller
@contextmanager
def include_object_data(controller: ai2thor.controller.Controller):
needs_reset = len(controller.last_event.metadata["objects"]) == 0
try:
if needs_reset:
controller.step("ResetObjectFilter")
assert controller.last_event.metadata["lastActionSuccess"]
yield None
finally:
if needs_reset:
controller.step("SetObjectFilter", objectIds=[])
assert controller.last_event.metadata["lastActionSuccess"]
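# Minimal usage sketch for the context manager above (hypothetical snippet; the same
# pattern is used, for example, by SemanticMapTHORSensor in this plugin):
#
#     with include_object_data(controller):
#         objects = controller.last_event.metadata["objects"]  # populated inside the block
#
# On exit the previous (possibly empty) object filter is restored.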
def vertical_to_horizontal_fov(
vertical_fov_in_degrees: float, height: float, width: float
):
assert 0 < vertical_fov_in_degrees < 180
aspect_ratio = width / height
vertical_fov_in_rads = (math.pi / 180) * vertical_fov_in_degrees
return (
(180 / math.pi)
* math.atan(math.tan(vertical_fov_in_rads * 0.5) * aspect_ratio)
* 2
)
def horizontal_to_vertical_fov(
horizontal_fov_in_degrees: float, height: float, width: float
):
return vertical_to_horizontal_fov(
vertical_fov_in_degrees=horizontal_fov_in_degrees, height=width, width=height,
)
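# Worked example (hypothetical camera): for a square frame, a 90 degree vertical FOV
# maps to a 90 degree horizontal FOV, while for a frame twice as wide as it is tall,
# vertical_to_horizontal_fov(90.0, height=1.0, width=2.0) equals
# 2 * atan(tan(45 deg) * 2) in degrees, roughly 126.87; horizontal_to_vertical_fov
# applies the same formula with height and width swapped.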
def round_to_factor(num: float, base: int) -> int:
"""Rounds floating point number to the nearest integer multiple of the
given base. E.g., for floating number 90.1 and integer base 45, the result
is 90.
# Attributes
num : floating point number to be rounded.
base: integer base
"""
return round(num / base) * base
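# Doctest-style examples consistent with the docstring above:
#   round_to_factor(90.1, 45) -> 90
#   round_to_factor(100.0, 45) -> 90
#   round_to_factor(31.0, 30) -> 30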
def get_open_x_displays(throw_error_if_empty: bool = False) -> Sequence[str]:
assert platform.system() == "Linux", "Can only get X-displays for Linux systems."
displays = []
open_display_strs = [
os.path.basename(s)[1:] for s in glob.glob("/tmp/.X11-unix/X*")
]
for open_display_str in sorted(open_display_strs):
try:
open_display_str = str(int(open_display_str))
except Exception:
continue
display = Xlib.display.Display(":{}".format(open_display_str))
displays.extend(
[f"{open_display_str}.{i}" for i in range(display.screen_count())]
)
if throw_error_if_empty and len(displays) == 0:
raise IOError(
"Could not find any open X-displays on which to run AI2-THOR processes. "
" Please see the AI2-THOR installation instructions at"
" https://allenact.org/installation/installation-framework/#installation-of-ithor-ithor-plugin"
" for information as to how to start such displays."
)
return displays
| ask4help-main | allenact_plugins/ithor_plugin/ithor_util.py |
import copy
from typing import Any, Dict, Optional, Union, Sequence
import ai2thor.controller
import gym
import gym.spaces
import numpy as np
import torch
from allenact.base_abstractions.sensor import Sensor
from allenact.embodiedai.sensors.vision_sensors import RGBSensor
from allenact.base_abstractions.task import Task
from allenact.embodiedai.mapping.mapping_utils.map_builders import (
BinnedPointCloudMapBuilder,
SemanticMapBuilder,
ObjectHull2d,
)
from allenact.utils.misc_utils import prepare_locals_for_super
from allenact_plugins.ithor_plugin.ithor_environment import IThorEnvironment
from allenact_plugins.ithor_plugin.ithor_tasks import ObjectNaviThorGridTask
from allenact_plugins.ithor_plugin.ithor_util import include_object_data
from allenact_plugins.robothor_plugin.robothor_environment import RoboThorEnvironment
from allenact_plugins.robothor_plugin.robothor_tasks import PointNavTask, ObjectNavTask
class RGBSensorThor(
RGBSensor[
Union[IThorEnvironment, RoboThorEnvironment],
Union[Task[IThorEnvironment], Task[RoboThorEnvironment]],
]
):
"""Sensor for RGB images in THOR.
Returns from a running IThorEnvironment instance, the current RGB
frame corresponding to the agent's egocentric view.
"""
def frame_from_env(
self, env: IThorEnvironment, task: Task[IThorEnvironment]
) -> np.ndarray: # type:ignore
return env.current_frame.copy()
class GoalObjectTypeThorSensor(Sensor):
def __init__(
self,
object_types: Sequence[str],
target_to_detector_map: Optional[Dict[str, str]] = None,
detector_types: Optional[Sequence[str]] = None,
uuid: str = "goal_object_type_ind",
**kwargs: Any,
):
self.ordered_object_types = list(object_types)
assert self.ordered_object_types == sorted(
self.ordered_object_types
), "object types input to goal object type sensor must be ordered"
self.target_to_detector_map = target_to_detector_map
if target_to_detector_map is None:
self.object_type_to_ind = {
ot: i for i, ot in enumerate(self.ordered_object_types)
}
else:
assert (
detector_types is not None
), "Missing detector_types for map {}".format(target_to_detector_map)
self.target_to_detector = target_to_detector_map
self.detector_types = detector_types
detector_index = {ot: i for i, ot in enumerate(self.detector_types)}
self.object_type_to_ind = {
ot: detector_index[self.target_to_detector[ot]]
for ot in self.ordered_object_types
}
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self):
if self.target_to_detector_map is None:
return gym.spaces.Discrete(len(self.ordered_object_types))
else:
return gym.spaces.Discrete(len(self.detector_types))
def get_observation(
self,
env: IThorEnvironment,
task: Optional[ObjectNaviThorGridTask],
*args: Any,
**kwargs: Any,
) -> Any:
return self.object_type_to_ind[task.task_info["object_type"]]
class TakeEndActionThorNavSensor(
Sensor[
Union[RoboThorEnvironment, IThorEnvironment],
Union[ObjectNaviThorGridTask, ObjectNavTask, PointNavTask],
]
):
def __init__(self, nactions: int, uuid: str, **kwargs: Any) -> None:
self.nactions = nactions
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self) -> gym.spaces.Discrete:
"""The observation space.
Equals `gym.spaces.Discrete(2)` where a 0 indicates that the agent
**should not** take the `End` action and a 1 indicates that the agent
**should** take the end action.
"""
return gym.spaces.Discrete(2)
def get_observation( # type:ignore
self,
env: IThorEnvironment,
task: Union[ObjectNaviThorGridTask, ObjectNavTask, PointNavTask],
*args,
**kwargs,
) -> np.ndarray:
if isinstance(task, ObjectNaviThorGridTask):
should_end = task.is_goal_object_visible()
elif isinstance(task, ObjectNavTask):
should_end = task._is_goal_in_range()
elif isinstance(task, PointNavTask):
should_end = task._is_goal_in_range()
else:
raise NotImplementedError
if should_end is None:
should_end = False
return np.array([1 * should_end], dtype=np.int64)
class RelativePositionChangeTHORSensor(
Sensor[RoboThorEnvironment, Task[RoboThorEnvironment]]
):
def __init__(self, uuid: str = "rel_position_change", **kwargs: Any):
observation_space = gym.spaces.Dict(
{
"last_allocentric_position": gym.spaces.Box(
low=np.array([-np.inf, -np.inf, 0], dtype=np.float32),
high=np.array([np.inf, np.inf, 360], dtype=np.float32),
shape=(3,),
dtype=np.float32,
),
"dx_dz_dr": gym.spaces.Box(
low=np.array([-np.inf, -np.inf, -360], dtype=np.float32),
                    high=np.array([np.inf, np.inf, 360], dtype=np.float32),
shape=(3,),
dtype=np.float32,
),
}
)
super().__init__(**prepare_locals_for_super(locals()))
self.last_xzr: Optional[np.ndarray] = None
@staticmethod
def get_relative_position_change(from_xzr: np.ndarray, to_xzr: np.ndarray):
dx_dz_dr = to_xzr - from_xzr
# Transform dx, dz (in global coordinates) into the relative coordinates
        # given by rotation r0=from_xzr[-1]. This requires rotating everything so that
        # r0 is facing in the positive z direction. Since THOR rotations are the negative
        # of the usual (counter-clockwise) rotation direction, this means we want to
        # rotate by r0 degrees.
theta = np.pi * from_xzr[-1] / 180
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
dx_dz_dr = (
np.array(
[
[cos_theta, -sin_theta, 0],
[sin_theta, cos_theta, 0],
[0, 0, 1], # Don't change dr
]
)
@ dx_dz_dr.reshape(-1, 1)
).reshape(-1)
dx_dz_dr[-1] = dx_dz_dr[-1] % 360
return dx_dz_dr
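    # Illustrative example (hypothetical poses): with from_xzr = np.array([0.0, 0.0, 90.0])
    # and to_xzr = np.array([1.0, 0.0, 90.0]), the global displacement (dx, dz) == (1, 0)
    # is rotated into the agent's frame, so dx_dz_dr is approximately [0.0, 1.0, 0.0]:
    # one unit of motion "ahead" along the agent's own z axis with no change in rotation.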
def get_observation(
self,
env: RoboThorEnvironment,
task: Optional[Task[RoboThorEnvironment]],
*args: Any,
**kwargs: Any,
) -> Any:
if task.num_steps_taken() == 0:
p = env.controller.last_event.metadata["agent"]["position"]
r = env.controller.last_event.metadata["agent"]["rotation"]["y"]
self.last_xzr = np.array([p["x"], p["z"], r % 360])
p = env.controller.last_event.metadata["agent"]["position"]
r = env.controller.last_event.metadata["agent"]["rotation"]["y"]
current_xzr = np.array([p["x"], p["z"], r % 360])
dx_dz_dr = self.get_relative_position_change(
from_xzr=self.last_xzr, to_xzr=current_xzr
)
to_return = {"last_allocentric_position": self.last_xzr, "dx_dz_dr": dx_dz_dr}
self.last_xzr = current_xzr
return to_return
class ReachableBoundsTHORSensor(Sensor[RoboThorEnvironment, Task[RoboThorEnvironment]]):
def __init__(self, margin: float, uuid: str = "scene_bounds", **kwargs: Any):
observation_space = gym.spaces.Dict(
{
"x_range": gym.spaces.Box(
low=np.array([-np.inf, -np.inf], dtype=np.float32),
high=np.array([np.inf, np.inf], dtype=np.float32),
shape=(2,),
dtype=np.float32,
),
"z_range": gym.spaces.Box(
low=np.array([-np.inf, -np.inf], dtype=np.float32),
high=np.array([np.inf, np.inf], dtype=np.float32),
shape=(2,),
dtype=np.float32,
),
}
)
super().__init__(**prepare_locals_for_super(locals()))
self.margin = margin
self._bounds_cache = {}
@staticmethod
def get_bounds(
controller: ai2thor.controller.Controller, margin: float,
) -> Dict[str, np.ndarray]:
positions = controller.step("GetReachablePositions").metadata["actionReturn"]
min_x = min(p["x"] for p in positions)
max_x = max(p["x"] for p in positions)
min_z = min(p["z"] for p in positions)
max_z = max(p["z"] for p in positions)
return {
"x_range": np.array([min_x - margin, max_x + margin]),
"z_range": np.array([min_z - margin, max_z + margin]),
}
def get_observation(
self,
env: RoboThorEnvironment,
task: Optional[Task[RoboThorEnvironment]],
*args: Any,
**kwargs: Any,
) -> Any:
scene_name = env.controller.last_event.metadata["sceneName"]
if scene_name not in self._bounds_cache:
self._bounds_cache[scene_name] = self.get_bounds(
controller=env.controller, margin=self.margin
)
return copy.deepcopy(self._bounds_cache[scene_name])
class SceneBoundsTHORSensor(Sensor[RoboThorEnvironment, Task[RoboThorEnvironment]]):
def __init__(self, uuid: str = "scene_bounds", **kwargs: Any):
observation_space = gym.spaces.Dict(
{
"x_range": gym.spaces.Box(
low=np.array([-np.inf, -np.inf]),
high=np.array([np.inf, np.inf]),
shape=(2,),
dtype=np.float32,
),
"z_range": gym.spaces.Box(
low=np.array([-np.inf, -np.inf]),
high=np.array([np.inf, np.inf]),
shape=(2,),
dtype=np.float32,
),
}
)
super().__init__(**prepare_locals_for_super(locals()))
def get_observation(
self,
env: RoboThorEnvironment,
task: Optional[Task[RoboThorEnvironment]],
*args: Any,
**kwargs: Any,
) -> Any:
scene_bounds = env.controller.last_event.metadata["sceneBounds"]
center = scene_bounds["center"]
size = scene_bounds["size"]
return {
"x_range": np.array(
[center["x"] - size["x"] / 2, center["x"] + size["x"] / 2]
),
"z_range": np.array(
[center["z"] - size["z"] / 2, center["z"] + size["z"] / 2]
),
}
class BinnedPointCloudMapTHORSensor(
Sensor[RoboThorEnvironment, Task[RoboThorEnvironment]]
):
def __init__(
self,
fov: float,
vision_range_in_cm: int,
map_size_in_cm: int,
resolution_in_cm: int,
map_range_sensor: Sensor,
height_bins: Sequence[float] = (0.02, 2),
ego_only: bool = True,
uuid: str = "binned_pc_map",
**kwargs: Any,
):
self.fov = fov
self.vision_range_in_cm = vision_range_in_cm
self.map_size_in_cm = map_size_in_cm
self.resolution_in_cm = resolution_in_cm
self.height_bins = height_bins
self.ego_only = ego_only
self.binned_pc_map_builder = BinnedPointCloudMapBuilder(
fov=fov,
vision_range_in_cm=vision_range_in_cm,
map_size_in_cm=map_size_in_cm,
resolution_in_cm=resolution_in_cm,
height_bins=height_bins,
)
map_space = gym.spaces.Box(
low=0,
high=np.inf,
shape=self.binned_pc_map_builder.binned_point_cloud_map.shape,
dtype=np.float32,
)
space_dict = {
"egocentric_update": map_space,
}
if not ego_only:
space_dict["allocentric_update"] = copy.deepcopy(map_space)
space_dict["map"] = copy.deepcopy(map_space)
observation_space = gym.spaces.Dict(space_dict)
super().__init__(**prepare_locals_for_super(locals()))
self.map_range_sensor = map_range_sensor
@property
def device(self):
return self.binned_pc_map_builder.device
@device.setter
def device(self, val: torch.device):
self.binned_pc_map_builder.device = torch.device(val)
def get_observation(
self,
env: RoboThorEnvironment,
task: Optional[Task[RoboThorEnvironment]],
*args: Any,
**kwargs: Any,
) -> Any:
e = env.controller.last_event
metadata = e.metadata
if task.num_steps_taken() == 0:
xz_ranges_dict = self.map_range_sensor.get_observation(env=env, task=task)
self.binned_pc_map_builder.reset(
min_xyz=np.array(
[
xz_ranges_dict["x_range"][0],
0, # TODO: Should y be different per scene?
xz_ranges_dict["z_range"][0],
]
)
)
map_dict = self.binned_pc_map_builder.update(
depth_frame=e.depth_frame,
camera_xyz=np.array(
[metadata["cameraPosition"][k] for k in ["x", "y", "z"]]
),
camera_rotation=metadata["agent"]["rotation"]["y"],
camera_horizon=metadata["agent"]["cameraHorizon"],
)
return {k: map_dict[k] for k in self.observation_space.spaces.keys()}
class SemanticMapTHORSensor(Sensor[RoboThorEnvironment, Task[RoboThorEnvironment]]):
def __init__(
self,
fov: float,
vision_range_in_cm: int,
map_size_in_cm: int,
resolution_in_cm: int,
ordered_object_types: Sequence[str],
map_range_sensor: Sensor,
ego_only: bool = True,
uuid: str = "semantic_map",
device: torch.device = torch.device("cpu"),
**kwargs: Any,
):
self.fov = fov
self.vision_range_in_cm = vision_range_in_cm
self.map_size_in_cm = map_size_in_cm
self.resolution_in_cm = resolution_in_cm
self.ordered_object_types = ordered_object_types
self.map_range_sensor = map_range_sensor
self.ego_only = ego_only
self.semantic_map_builder = SemanticMapBuilder(
fov=fov,
vision_range_in_cm=vision_range_in_cm,
map_size_in_cm=map_size_in_cm,
resolution_in_cm=resolution_in_cm,
ordered_object_types=ordered_object_types,
device=device,
)
def get_map_space(nchannels: int, size: int):
return gym.spaces.Box(
low=0, high=1, shape=(size, size, nchannels), dtype=np.bool,
)
n = len(self.ordered_object_types)
small = self.vision_range_in_cm // self.resolution_in_cm
big = self.semantic_map_builder.ground_truth_semantic_map.shape[0]
space_dict = {
"egocentric_update": get_map_space(nchannels=n, size=small,),
"egocentric_mask": get_map_space(nchannels=1, size=small,),
}
if not ego_only:
space_dict["explored_mask"] = get_map_space(nchannels=1, size=big,)
space_dict["map"] = get_map_space(nchannels=n, size=big,)
observation_space = gym.spaces.Dict(space_dict)
super().__init__(**prepare_locals_for_super(locals()))
@property
def device(self):
return self.semantic_map_builder.device
@device.setter
def device(self, val: torch.device):
self.semantic_map_builder.device = torch.device(val)
def get_observation(
self,
env: RoboThorEnvironment,
task: Optional[Task[RoboThorEnvironment]],
*args: Any,
**kwargs: Any,
) -> Any:
with include_object_data(env.controller):
last_event = env.controller.last_event
metadata = last_event.metadata
if task.num_steps_taken() == 0:
env.controller.step(
"Get2DSemanticHulls", objectTypes=self.ordered_object_types
)
assert env.last_event.metadata[
"lastActionSuccess"
], f"Get2DSemanticHulls failed with error '{env.last_event.metadata['lastActionSuccess']}'"
object_id_to_hull = env.controller.last_event.metadata["actionReturn"]
xz_ranges_dict = self.map_range_sensor.get_observation(
env=env, task=task
)
self.semantic_map_builder.reset(
min_xyz=np.array(
[
xz_ranges_dict["x_range"][0],
0, # TODO: Should y be different per scene?
xz_ranges_dict["z_range"][0],
]
),
object_hulls=[
ObjectHull2d(
object_id=o["objectId"],
object_type=o["objectType"],
hull_points=object_id_to_hull[o["objectId"]],
)
for o in metadata["objects"]
if o["objectId"] in object_id_to_hull
],
)
map_dict = self.semantic_map_builder.update(
depth_frame=last_event.depth_frame,
camera_xyz=np.array(
[metadata["cameraPosition"][k] for k in ["x", "y", "z"]]
),
camera_rotation=metadata["agent"]["rotation"]["y"],
camera_horizon=metadata["agent"]["cameraHorizon"],
)
return {
k: map_dict[k] > 0.001 if map_dict[k].dtype != np.bool else map_dict[k]
for k in self.observation_space.spaces.keys()
}
| ask4help-main | allenact_plugins/ithor_plugin/ithor_sensors.py |
import copy
import json
import math
import os
from typing import Tuple, Sequence, Union, Dict, Optional, Any, cast, Generator, List
import cv2
import numpy as np
from PIL import Image, ImageDraw
from ai2thor.controller import Controller
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
import colour as col
from allenact.utils.system import get_logger
from allenact.utils.viz_utils import TrajectoryViz
ITHOR_VIZ_CACHED_TOPDOWN_VIEWS_DIR = os.path.join(
os.path.expanduser("~"), ".allenact", "ithor", "top_down_viz_cache"
)
class ThorPositionTo2DFrameTranslator(object):
def __init__(
self,
frame_shape_rows_cols: Tuple[int, int],
cam_position: Sequence[float],
orth_size: float,
):
self.frame_shape = frame_shape_rows_cols
self.lower_left = np.array((cam_position[0], cam_position[2])) - orth_size
self.span = 2 * orth_size
def __call__(self, position: Sequence[float]):
if len(position) == 3:
x, _, z = position
else:
x, z = position
camera_position = (np.array((x, z)) - self.lower_left) / self.span
return np.array(
(
round(self.frame_shape[0] * (1.0 - camera_position[1])),
round(self.frame_shape[1] * camera_position[0]),
),
dtype=int,
)
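    # Illustrative example (hypothetical top-down camera): with frame_shape_rows_cols
    # == (448, 448), cam_position == (0.0, 10.0, 0.0) and orth_size == 5.0, the scene
    # origin maps to the frame centre (224, 224), while a point at x == 5, z == 5 maps
    # to (0, 448), i.e. the top edge and right boundary in (row, col) terms.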
class ThorViz(TrajectoryViz):
def __init__(
self,
path_to_trajectory: Sequence[str] = ("task_info", "followed_path"),
label: str = "thor_trajectory",
figsize: Tuple[float, float] = (8, 8), # width, height
fontsize: float = 10,
scenes: Union[Tuple[str, int, int], Sequence[Tuple[str, int, int]]] = (
("FloorPlan{}_physics", 1, 30),
("FloorPlan{}_physics", 201, 230),
("FloorPlan{}_physics", 301, 330),
("FloorPlan{}_physics", 401, 430),
),
viz_rows_cols: Tuple[int, int] = (448, 448),
single_color: bool = False,
view_triangle_only_on_last: bool = True,
disable_view_triangle: bool = False,
line_opacity: float = 1.0,
path_to_rot_degrees: Sequence[str] = ("rotation",),
**kwargs,
):
super().__init__(
path_to_trajectory=path_to_trajectory,
label=label,
figsize=figsize,
fontsize=fontsize,
path_to_rot_degrees=path_to_rot_degrees,
**kwargs,
)
if isinstance(scenes[0], str):
scenes = [cast(Tuple[str, int, int], scenes)] # make it list of tuples
self.scenes = cast(List[Tuple[str, int, int]], scenes)
self.room_path = ITHOR_VIZ_CACHED_TOPDOWN_VIEWS_DIR
os.makedirs(self.room_path, exist_ok=True)
self.viz_rows_cols = viz_rows_cols
self.single_color = single_color
self.view_triangle_only_on_last = view_triangle_only_on_last
self.disable_view_triangle = disable_view_triangle
self.line_opacity = line_opacity
# Only needed for rendering
self.map_data: Optional[Dict[str, Any]] = None
self.thor_top_downs: Optional[Dict[str, np.ndarray]] = None
self.controller: Optional[Controller] = None
def init_top_down_render(self):
self.map_data = self.get_translator()
self.thor_top_downs = self.make_top_down_views()
# No controller needed after this point
if self.controller is not None:
self.controller.stop()
self.controller = None
@staticmethod
def iterate_scenes(
all_scenes: Sequence[Tuple[str, int, int]]
) -> Generator[str, None, None]:
for scenes in all_scenes:
for wall in range(scenes[1], scenes[2] + 1):
roomname = scenes[0].format(wall)
yield roomname
def cached_map_data_path(self, roomname: str) -> str:
return os.path.join(self.room_path, "map_data__{}.json".format(roomname))
def get_translator(self) -> Dict[str, Any]:
# roomname = list(ThorViz.iterate_scenes(self.scenes))[0]
all_map_data = {}
for roomname in ThorViz.iterate_scenes(self.scenes):
json_file = self.cached_map_data_path(roomname)
if not os.path.exists(json_file):
self.make_controller()
self.controller.reset(roomname)
map_data = self.get_agent_map_data()
get_logger().info("Dumping {}".format(json_file))
with open(json_file, "w") as f:
json.dump(map_data, f, indent=4, sort_keys=True)
else:
with open(json_file, "r") as f:
map_data = json.load(f)
pos_translator = ThorPositionTo2DFrameTranslator(
self.viz_rows_cols,
self.position_to_tuple(map_data["cam_position"]),
map_data["cam_orth_size"],
)
map_data["pos_translator"] = pos_translator
all_map_data[roomname] = map_data
get_logger().debug("Using map_data {}".format(all_map_data))
return all_map_data
def cached_image_path(self, roomname: str) -> str:
return os.path.join(
self.room_path, "{}__r{}_c{}.png".format(roomname, *self.viz_rows_cols)
)
def make_top_down_views(self) -> Dict[str, np.ndarray]:
top_downs = {}
for roomname in self.iterate_scenes(self.scenes):
fname = self.cached_image_path(roomname)
if not os.path.exists(fname):
self.make_controller()
self.dump_top_down_view(roomname, fname)
top_downs[roomname] = cv2.imread(fname)
return top_downs
def crop_viz_image(self, viz_image: np.ndarray) -> np.ndarray:
y_min = int(self.viz_rows_cols[0] * 0)
y_max = int(self.viz_rows_cols[0] * 1)
# But it covers approximately the entire width:
x_min = 0
x_max = self.viz_rows_cols[1]
cropped_viz_image = viz_image[y_min:y_max, x_min:x_max, :]
return cropped_viz_image
def make_controller(self):
if self.controller is None:
self.controller = Controller()
self.controller.step({"action": "ChangeQuality", "quality": "Very High"})
self.controller.step(
{
"action": "ChangeResolution",
"x": self.viz_rows_cols[1],
"y": self.viz_rows_cols[0],
}
)
def get_agent_map_data(self):
self.controller.step({"action": "ToggleMapView"})
cam_position = self.controller.last_event.metadata["cameraPosition"]
cam_orth_size = self.controller.last_event.metadata["cameraOrthSize"]
to_return = {
"cam_position": cam_position,
"cam_orth_size": cam_orth_size,
}
self.controller.step({"action": "ToggleMapView"})
return to_return
@staticmethod
def position_to_tuple(position: Dict[str, float]) -> Tuple[float, float, float]:
return position["x"], position["y"], position["z"]
@staticmethod
def add_lines_to_map(
ps: Sequence[Any],
frame: np.ndarray,
pos_translator: ThorPositionTo2DFrameTranslator,
opacity: float,
color: Optional[Tuple[int, ...]] = None,
) -> np.ndarray:
if len(ps) <= 1:
return frame
if color is None:
color = (255, 0, 0)
img1 = Image.fromarray(frame.astype("uint8"), "RGB").convert("RGBA")
img2 = Image.new("RGBA", frame.shape[:-1]) # Use RGBA
opacity = int(round(255 * opacity)) # Define transparency for the triangle.
draw = ImageDraw.Draw(img2)
for i in range(len(ps) - 1):
draw.line(
tuple(reversed(pos_translator(ps[i])))
+ tuple(reversed(pos_translator(ps[i + 1]))),
fill=color + (opacity,),
width=int(frame.shape[0] / 100),
)
img = Image.alpha_composite(img1, img2)
return np.array(img.convert("RGB"))
@staticmethod
def add_line_to_map(
p0: Any,
p1: Any,
frame: np.ndarray,
pos_translator: ThorPositionTo2DFrameTranslator,
opacity: float,
color: Optional[Tuple[int, ...]] = None,
) -> np.ndarray:
if p0 == p1:
return frame
if color is None:
color = (255, 0, 0)
img1 = Image.fromarray(frame.astype("uint8"), "RGB").convert("RGBA")
img2 = Image.new("RGBA", frame.shape[:-1]) # Use RGBA
opacity = int(round(255 * opacity)) # Define transparency for the triangle.
draw = ImageDraw.Draw(img2)
draw.line(
tuple(reversed(pos_translator(p0))) + tuple(reversed(pos_translator(p1))),
fill=color + (opacity,),
width=int(frame.shape[0] / 100),
)
img = Image.alpha_composite(img1, img2)
return np.array(img.convert("RGB"))
@staticmethod
def add_agent_view_triangle(
position: Any,
rotation: float,
frame: np.ndarray,
pos_translator: ThorPositionTo2DFrameTranslator,
scale: float = 1.0,
opacity: float = 0.1,
) -> np.ndarray:
p0 = np.array((position[0], position[2]))
p1 = copy.copy(p0)
p2 = copy.copy(p0)
theta = -2 * math.pi * (rotation / 360.0)
rotation_mat = np.array(
[[math.cos(theta), -math.sin(theta)], [math.sin(theta), math.cos(theta)]]
)
offset1 = scale * np.array([-1 / 2.0, 1])
offset2 = scale * np.array([1 / 2.0, 1])
p1 += np.matmul(rotation_mat, offset1)
p2 += np.matmul(rotation_mat, offset2)
img1 = Image.fromarray(frame.astype("uint8"), "RGB").convert("RGBA")
img2 = Image.new("RGBA", frame.shape[:-1]) # Use RGBA
opacity = int(round(255 * opacity)) # Define transparency for the triangle.
points = [tuple(reversed(pos_translator(p))) for p in [p0, p1, p2]]
draw = ImageDraw.Draw(img2)
draw.polygon(points, fill=(255, 255, 255, opacity))
img = Image.alpha_composite(img1, img2)
return np.array(img.convert("RGB"))
@staticmethod
def visualize_agent_path(
positions: Sequence[Any],
frame: np.ndarray,
pos_translator: ThorPositionTo2DFrameTranslator,
single_color: bool = False,
view_triangle_only_on_last: bool = False,
disable_view_triangle: bool = False,
line_opacity: float = 1.0,
trajectory_start_end_color_str: Tuple[str, str] = ("red", "green"),
) -> np.ndarray:
if single_color:
frame = ThorViz.add_lines_to_map(
list(map(ThorViz.position_to_tuple, positions)),
frame,
pos_translator,
line_opacity,
tuple(
map(
lambda x: int(round(255 * x)),
col.Color(trajectory_start_end_color_str[0]).rgb,
)
),
)
else:
if len(positions) > 1:
colors = list(
col.Color(trajectory_start_end_color_str[0]).range_to(
col.Color(trajectory_start_end_color_str[1]), len(positions) - 1
)
)
for i in range(len(positions) - 1):
frame = ThorViz.add_line_to_map(
ThorViz.position_to_tuple(positions[i]),
ThorViz.position_to_tuple(positions[i + 1]),
frame,
pos_translator,
opacity=line_opacity,
color=tuple(map(lambda x: int(round(255 * x)), colors[i].rgb)),
)
if view_triangle_only_on_last:
positions = [positions[-1]]
if disable_view_triangle:
positions = []
for position in positions:
frame = ThorViz.add_agent_view_triangle(
ThorViz.position_to_tuple(position),
rotation=position["rotation"],
frame=frame,
pos_translator=pos_translator,
opacity=0.05 + view_triangle_only_on_last * 0.2,
)
return frame
def dump_top_down_view(self, room_name: str, image_path: str):
get_logger().debug("Dumping {}".format(image_path))
self.controller.reset(room_name)
self.controller.step(
{"action": "Initialize", "gridSize": 0.1, "makeAgentsVisible": False}
)
self.controller.step({"action": "ToggleMapView"})
top_down_view = self.controller.last_event.cv2img
cv2.imwrite(image_path, top_down_view)
def make_fig(self, episode: Any, episode_id: str) -> Figure:
trajectory: Sequence[Dict[str, Any]] = self._access(
episode, self.path_to_trajectory
)
if self.thor_top_downs is None:
self.init_top_down_render()
roomname = "_".join(episode_id.split("_")[:2])
im = self.visualize_agent_path(
trajectory,
self.thor_top_downs[roomname],
self.map_data[roomname]["pos_translator"],
single_color=self.single_color,
view_triangle_only_on_last=self.view_triangle_only_on_last,
disable_view_triangle=self.disable_view_triangle,
line_opacity=self.line_opacity,
)
fig, ax = plt.subplots(figsize=self.figsize)
ax.set_title(episode_id, fontsize=self.fontsize)
ax.imshow(self.crop_viz_image(im)[:, :, ::-1])
ax.axis("off")
return fig
class ThorMultiViz(ThorViz):
def __init__(
self,
path_to_trajectory_prefix: Sequence[str] = ("task_info", "followed_path"),
agent_suffixes: Sequence[str] = ("1", "2"),
label: str = "thor_trajectories",
trajectory_start_end_color_strs: Sequence[Tuple[str, str]] = (
("red", "green"),
("cyan", "purple"),
),
**kwargs,
):
super().__init__(label=label, **kwargs)
self.path_to_trajectory_prefix = list(path_to_trajectory_prefix)
self.agent_suffixes = list(agent_suffixes)
self.trajectory_start_end_color_strs = list(trajectory_start_end_color_strs)
def make_fig(self, episode: Any, episode_id: str) -> Figure:
if self.thor_top_downs is None:
self.init_top_down_render()
roomname = "_".join(episode_id.split("_")[:2])
im = self.thor_top_downs[roomname]
for agent, start_end_color in zip(
self.agent_suffixes, self.trajectory_start_end_color_strs
):
path = self.path_to_trajectory_prefix[:]
path[-1] = path[-1] + agent
trajectory = self._access(episode, path)
im = self.visualize_agent_path(
trajectory,
im,
self.map_data[roomname]["pos_translator"],
single_color=self.single_color,
view_triangle_only_on_last=self.view_triangle_only_on_last,
disable_view_triangle=self.disable_view_triangle,
line_opacity=self.line_opacity,
trajectory_start_end_color_str=start_end_color,
)
fig, ax = plt.subplots(figsize=self.figsize)
ax.set_title(episode_id, fontsize=self.fontsize)
ax.imshow(self.crop_viz_image(im)[:, :, ::-1])
ax.axis("off")
return fig
| ask4help-main | allenact_plugins/ithor_plugin/ithor_viz.py |
import copy
import random
from typing import List, Dict, Optional, Any, Union, cast
import gym
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import TaskSampler
from allenact.utils.experiment_utils import set_deterministic_cudnn, set_seed
from allenact.utils.system import get_logger
from allenact_plugins.ithor_plugin.ithor_environment import IThorEnvironment
from allenact_plugins.ithor_plugin.ithor_tasks import ObjectNaviThorGridTask
class ObjectNavTaskSampler(TaskSampler):
def __init__(
self,
scenes: List[str],
object_types: str,
sensors: List[Sensor],
max_steps: int,
env_args: Dict[str, Any],
action_space: gym.Space,
scene_period: Optional[Union[int, str]] = None,
max_tasks: Optional[int] = None,
seed: Optional[int] = None,
deterministic_cudnn: bool = False,
**kwargs,
) -> None:
self.env_args = env_args
self.scenes = scenes
self.object_types = object_types
self.grid_size = 0.25
self.env: Optional[IThorEnvironment] = None
self.sensors = sensors
self.max_steps = max_steps
self._action_space = action_space
self.scene_counter: Optional[int] = None
self.scene_order: Optional[List[str]] = None
self.scene_id: Optional[int] = None
self.scene_period: Optional[
Union[str, int]
] = scene_period # default makes a random choice
self.max_tasks: Optional[int] = None
self.reset_tasks = max_tasks
self._last_sampled_task: Optional[ObjectNaviThorGridTask] = None
self.seed: Optional[int] = None
self.set_seed(seed)
if deterministic_cudnn:
set_deterministic_cudnn()
self.reset()
def _create_environment(self) -> IThorEnvironment:
env = IThorEnvironment(
make_agents_visible=False,
object_open_speed=0.05,
restrict_to_initially_reachable_points=True,
**self.env_args,
)
return env
@property
def length(self) -> Union[int, float]:
"""Length.
# Returns
Number of total tasks remaining that can be sampled. Can be float('inf').
"""
return float("inf") if self.max_tasks is None else self.max_tasks
@property
def total_unique(self) -> Optional[Union[int, float]]:
return None
@property
def last_sampled_task(self) -> Optional[ObjectNaviThorGridTask]:
return self._last_sampled_task
def close(self) -> None:
if self.env is not None:
self.env.stop()
@property
def all_observation_spaces_equal(self) -> bool:
"""Check if observation spaces equal.
# Returns
True if all Tasks that can be sampled by this sampler have the
same observation space. Otherwise False.
"""
return True
def sample_scene(self, force_advance_scene: bool):
if force_advance_scene:
if self.scene_period != "manual":
get_logger().warning(
"When sampling scene, have `force_advance_scene == True`"
"but `self.scene_period` is not equal to 'manual',"
"this may cause unexpected behavior."
)
self.scene_id = (1 + self.scene_id) % len(self.scenes)
if self.scene_id == 0:
random.shuffle(self.scene_order)
if self.scene_period is None:
# Random scene
self.scene_id = random.randint(0, len(self.scenes) - 1)
elif self.scene_period == "manual":
pass
elif self.scene_counter >= cast(int, self.scene_period):
if self.scene_id == len(self.scene_order) - 1:
# Randomize scene order for next iteration
random.shuffle(self.scene_order)
# Move to next scene
self.scene_id = 0
else:
# Move to next scene
self.scene_id += 1
# Reset scene counter
self.scene_counter = 1
elif isinstance(self.scene_period, int):
# Stay in current scene
self.scene_counter += 1
else:
raise NotImplementedError(
"Invalid scene_period {}".format(self.scene_period)
)
if self.max_tasks is not None:
self.max_tasks -= 1
return self.scenes[int(self.scene_order[self.scene_id])]
def next_task(
self, force_advance_scene: bool = False
) -> Optional[ObjectNaviThorGridTask]:
if self.max_tasks is not None and self.max_tasks <= 0:
return None
scene = self.sample_scene(force_advance_scene)
if self.env is not None:
if scene.replace("_physics", "") != self.env.scene_name.replace(
"_physics", ""
):
self.env.reset(scene)
else:
self.env = self._create_environment()
self.env.reset(scene_name=scene)
pose = self.env.randomize_agent_location()
object_types_in_scene = set(
[o["objectType"] for o in self.env.last_event.metadata["objects"]]
)
task_info: Dict[str, Any] = {}
for ot in random.sample(self.object_types, len(self.object_types)):
if ot in object_types_in_scene:
task_info["object_type"] = ot
break
if len(task_info) == 0:
get_logger().warning(
"Scene {} does not contain any"
" objects of any of the types {}.".format(scene, self.object_types)
)
task_info["start_pose"] = copy.copy(pose)
task_info[
"id"
] = f"{scene}__{'_'.join(list(map(str, self.env.get_key(pose))))}__{task_info['object_type']}"
self._last_sampled_task = ObjectNaviThorGridTask(
env=self.env,
sensors=self.sensors,
task_info=task_info,
max_steps=self.max_steps,
action_space=self._action_space,
)
return self._last_sampled_task
def reset(self):
self.scene_counter = 0
self.scene_order = list(range(len(self.scenes)))
random.shuffle(self.scene_order)
self.scene_id = 0
self.max_tasks = self.reset_tasks
def set_seed(self, seed: int):
self.seed = seed
if seed is not None:
set_seed(seed)
| ask4help-main | allenact_plugins/ithor_plugin/ithor_task_samplers.py |
import os
from allenact_plugins.robothor_plugin.scripts.make_objectnav_debug_dataset import (
create_debug_dataset_from_train_dataset,
)
if __name__ == "__main__":
CURRENT_PATH = os.getcwd()
SCENE = "FloorPlan1"
TARGET = "Apple"
EPISODES = [0, 7, 11, 12]
BASE_OUT = os.path.join(CURRENT_PATH, "datasets", "ithor-objectnav", "debug")
create_debug_dataset_from_train_dataset(
scene=SCENE,
target_object_type=TARGET,
episodes_subset=EPISODES,
train_dataset_path=os.path.join(
CURRENT_PATH, "datasets", "ithor-objectnav", "train"
),
base_debug_output_path=BASE_OUT,
)
| ask4help-main | allenact_plugins/ithor_plugin/scripts/make_objectnav_debug_dataset.py |
ask4help-main | allenact_plugins/ithor_plugin/scripts/__init__.py |
|
import os
from allenact_plugins.robothor_plugin.scripts.make_objectnav_debug_dataset import (
create_debug_dataset_from_train_dataset,
)
if __name__ == "__main__":
CURRENT_PATH = os.getcwd()
SCENE = "FloorPlan1"
EPISODES = [0, 7, 11, 12]
BASE_OUT = os.path.join(CURRENT_PATH, "datasets", "ithor-pointnav", "debug")
create_debug_dataset_from_train_dataset(
scene=SCENE,
target_object_type=None,
episodes_subset=EPISODES,
train_dataset_path=os.path.join(
CURRENT_PATH, "datasets", "ithor-pointnav", "train"
),
base_debug_output_path=BASE_OUT,
)
| ask4help-main | allenact_plugins/ithor_plugin/scripts/make_pointnav_debug_dataset.py |
from collections import OrderedDict
from typing import Dict, Any, Optional, List, cast
import gym
import numpy as np
import torch
from gym.spaces.dict import Dict as SpaceDict
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.utils.cacheless_frcnn import fasterrcnn_resnet50_fpn
from allenact.utils.misc_utils import prepare_locals_for_super
class BatchedFasterRCNN(torch.nn.Module):
# fmt: off
COCO_INSTANCE_CATEGORY_NAMES = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
# fmt: on
def __init__(self, thres=0.12, maxdets=3, res=7):
super().__init__()
self.model = fasterrcnn_resnet50_fpn(pretrained=True)
self.eval()
self.min_score = thres
self.maxdets = maxdets
self.res = res
def detector_tensor(self, boxes, classes, scores, aspect_ratio=1.0):
res, maxdets = self.res, self.maxdets
bins = np.array(list(range(res + 1)))[1:-1] / res
res_classes = torch.zeros(
res, res, maxdets, dtype=torch.int64
) # 0 is background
res_boxes = -1 * torch.ones(
res, res, maxdets, 5
) # regular range is [0, 1] (vert) or [0, aspect_ratio] (horiz)
temp = [[[] for _ in range(res)] for _ in range(res)] # grid of arrays
# # TODO Debug
# print('NEW IMAGE')
for it in range(classes.shape[0]):
cx = (boxes[it, 0].item() + boxes[it, 2].item()) / 2
cy = (boxes[it, 1].item() + boxes[it, 3].item()) / 2
px = np.digitize(cx, bins=aspect_ratio * bins).item()
py = np.digitize(cy, bins=bins).item()
temp[py][px].append(
(
scores[it][classes[it]].item(), # prob
(boxes[it, 2] - boxes[it, 0]).item() / aspect_ratio, # width
(boxes[it, 3] - boxes[it, 1]).item(), # height
boxes[it, 0].item() / aspect_ratio, # x
boxes[it, 1].item(), # y
classes[it].item(), # class
)
)
# # TODO Debug:
# print(self.COCO_INSTANCE_CATEGORY_NAMES[classes[it].item()])
for py in range(res):
for px in range(res):
order = sorted(temp[py][px], reverse=True)[:maxdets]
for it, data in enumerate(order):
res_classes[py, px, it] = data[-1]
res_boxes[py, px, it, :] = torch.tensor(
list(data[:-1])
) # prob, size, top left
res_classes = res_classes.permute(2, 0, 1).unsqueeze(0).contiguous()
res_boxes = (
res_boxes.view(res, res, -1).permute(2, 0, 1).unsqueeze(0).contiguous()
)
return res_classes, res_boxes
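    # Sketch of the spatial binning above (assuming the default res=7, maxdets=3):
    # bins == np.arange(1, 7) / 7, so np.digitize maps each box centre to one of 7 cells
    # per axis (the x bins are scaled by the aspect ratio); each cell keeps at most the
    # 3 highest-scoring detections, and the final permute/reshape yields res_classes of
    # shape (1, 3, 7, 7) and res_boxes of shape (1, 15, 7, 7).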
def forward(self, imbatch):
with torch.no_grad():
imglist = [im_in.squeeze(0) for im_in in imbatch.split(split_size=1, dim=0)]
# # TODO Debug
# import cv2
# for it, im_in in enumerate(imglist):
# cvim = 255.0 * im_in.to('cpu').permute(1, 2, 0).numpy()[:, :, ::-1]
# cv2.imwrite('test_highres{}.png'.format(it), cvim)
preds = self.model(imglist)
keeps = [
pred["scores"] > self.min_score for pred in preds
] # already after nms
# [0, 1] for rows, [0, aspect_ratio] for cols (im_in is C x H x W), with all images of same size (batch)
all_boxes = [
pred["boxes"][keep] / imbatch.shape[-2]
for pred, keep in zip(preds, keeps)
]
all_classes = [pred["labels"][keep] for pred, keep in zip(preds, keeps)]
all_pred_scores = [pred["scores"][keep] for pred, keep in zip(preds, keeps)]
# hack: fill in a full prob score (all classes, 0 score if undetected) for each box, for backwards compatibility
all_scores = [
torch.zeros(pred_scores.shape[0], 91, device=pred_scores.device)
for pred_scores in all_pred_scores
]
all_scores = [
torch.where(
torch.arange(91, device=pred_scores.device).unsqueeze(0)
== merged_classes.unsqueeze(1),
pred_scores.unsqueeze(1),
scores,
)
for merged_classes, pred_scores, scores in zip(
all_classes, all_pred_scores, all_scores
)
]
all_classes_boxes = [
self.detector_tensor(
boxes,
classes,
scores,
aspect_ratio=imbatch.shape[-1] / imbatch.shape[-2],
)
for boxes, classes, scores in zip(all_boxes, all_classes, all_scores)
]
classes = torch.cat(
[classes_boxes[0] for classes_boxes in all_classes_boxes], dim=0
).to(imbatch.device)
boxes = torch.cat(
[classes_boxes[1] for classes_boxes in all_classes_boxes], dim=0
).to(imbatch.device)
return classes, boxes
class FasterRCNNPreProcessorRoboThor(Preprocessor):
"""Preprocess RGB image using a ResNet model."""
COCO_INSTANCE_CATEGORY_NAMES = BatchedFasterRCNN.COCO_INSTANCE_CATEGORY_NAMES
def __init__(
self,
input_uuids: List[str],
output_uuid: str,
input_height: int,
input_width: int,
max_dets: int,
detector_spatial_res: int,
detector_thres: float,
device: Optional[torch.device] = None,
device_ids: Optional[List[torch.device]] = None,
**kwargs: Any,
):
self.input_height = input_height
self.input_width = input_width
self.max_dets = max_dets
self.detector_spatial_res = detector_spatial_res
self.detector_thres = detector_thres
self.device = torch.device("cpu") if device is None else device
self.device_ids = device_ids or cast(
List[torch.device], list(range(torch.cuda.device_count()))
)
self.frcnn: BatchedFasterRCNN = BatchedFasterRCNN(
thres=self.detector_thres,
maxdets=self.max_dets,
res=self.detector_spatial_res,
)
spaces: OrderedDict[str, gym.Space] = OrderedDict()
shape = (self.max_dets, self.detector_spatial_res, self.detector_spatial_res)
spaces["frcnn_classes"] = gym.spaces.Box(
low=0, # 0 is bg
high=len(self.COCO_INSTANCE_CATEGORY_NAMES) - 1,
shape=shape,
dtype=np.int64,
)
shape = (
self.max_dets * 5,
self.detector_spatial_res,
self.detector_spatial_res,
)
spaces["frcnn_boxes"] = gym.spaces.Box(low=-np.inf, high=np.inf, shape=shape)
assert (
len(input_uuids) == 1
), "fasterrcnn preprocessor can only consume one observation type"
observation_space = SpaceDict(spaces=spaces)
super().__init__(**prepare_locals_for_super(locals()))
def to(self, device: torch.device) -> "FasterRCNNPreProcessorRoboThor":
self.frcnn = self.frcnn.to(device)
self.device = device
return self
def process(self, obs: Dict[str, Any], *args: Any, **kwargs: Any) -> Any:
frames_tensor = (
obs[self.input_uuids[0]].to(self.device).permute(0, 3, 1, 2)
) # bhwc -> bchw (unnormalized)
classes, boxes = self.frcnn(frames_tensor)
return {"frcnn_classes": classes, "frcnn_boxes": boxes}
| ask4help-main | allenact_plugins/robothor_plugin/robothor_preprocessors.py |
import copy
import gzip
import json
import random
import itertools
from typing import List, Optional, Union, Dict, Any, cast, Tuple
import gym
import numpy as np
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import TaskSampler
from allenact.utils.cache_utils import str_to_pos_for_cache
from allenact.utils.experiment_utils import set_seed, set_deterministic_cudnn
from allenact.utils.system import get_logger
from allenact_plugins.robothor_plugin.robothor_environment import RoboThorEnvironment
from allenact_plugins.robothor_plugin.robothor_tasks import (
ObjectNavTask,
PointNavTask,
NavToPartnerTask,
)
class ObjectNavTaskSampler(TaskSampler):
def __init__(
self,
scenes: Union[List[str], str],
object_types: List[str],
sensors: List[Sensor],
max_steps: int,
env_args: Dict[str, Any],
action_space: gym.Space,
rewards_config: Dict,
scene_period: Optional[Union[int, str]] = None,
max_tasks: Optional[int] = None,
seed: Optional[int] = None,
deterministic_cudnn: bool = False,
allow_flipping: bool = False,
dataset_first: int = -1,
dataset_last: int = -1,
**kwargs,
) -> None:
self.rewards_config = rewards_config
self.env_args = env_args
self.scenes = scenes
self.object_types = object_types
self.env: Optional[RoboThorEnvironment] = None
self.sensors = sensors
self.max_steps = max_steps
self._action_space = action_space
self.allow_flipping = allow_flipping
self.scenes_is_dataset = (dataset_first >= 0) or (dataset_last >= 0)
if not self.scenes_is_dataset:
assert isinstance(
self.scenes, List
), "When not using a dataset, scenes ({}) must be a list".format(
self.scenes
)
self.scene_counter: Optional[int] = None
self.scene_order: Optional[List[str]] = None
self.scene_id: Optional[int] = None
self.scene_period: Optional[
Union[str, int]
] = scene_period # default makes a random choice
self.max_tasks: Optional[int] = None
self.reset_tasks = max_tasks
else:
assert isinstance(
self.scenes, str
), "When using a dataset, scenes ({}) must be a json file name string".format(
self.scenes
)
with open(self.scenes, "r") as f:
self.dataset_episodes = json.load(f)
# get_logger().debug("Loaded {} object nav episodes".format(len(self.dataset_episodes)))
self.dataset_first = dataset_first if dataset_first >= 0 else 0
self.dataset_last = (
dataset_last if dataset_last >= 0 else len(self.dataset_episodes) - 1
)
assert (
0 <= self.dataset_first <= self.dataset_last
), "dataset_last {} must be >= dataset_first {} >= 0".format(
dataset_last, dataset_first
)
self.reset_tasks = self.dataset_last - self.dataset_first + 1
# get_logger().debug("{} tasks ({}, {}) in sampler".format(self.reset_tasks, self.dataset_first, self.dataset_last))
self._last_sampled_task: Optional[ObjectNavTask] = None
self.seed: Optional[int] = None
self.set_seed(seed)
if deterministic_cudnn:
set_deterministic_cudnn()
self.reset()
def _create_environment(self) -> RoboThorEnvironment:
env = RoboThorEnvironment(**self.env_args)
return env
@property
def length(self) -> Union[int, float]:
"""Length.
# Returns
Number of total tasks remaining that can be sampled. Can be float('inf').
"""
return float("inf") if self.max_tasks is None else self.max_tasks
@property
def total_unique(self) -> Optional[Union[int, float]]:
return self.reset_tasks
@property
def last_sampled_task(self) -> Optional[ObjectNavTask]:
return self._last_sampled_task
def close(self) -> None:
if self.env is not None:
self.env.stop()
@property
def all_observation_spaces_equal(self) -> bool:
"""Check if observation spaces equal.
# Returns
True if all Tasks that can be sampled by this sampler have the
same observation space. Otherwise False.
"""
return True
def sample_scene(self, force_advance_scene: bool):
if force_advance_scene:
if self.scene_period != "manual":
get_logger().warning(
"When sampling scene, have `force_advance_scene == True`"
"but `self.scene_period` is not equal to 'manual',"
"this may cause unexpected behavior."
)
self.scene_id = (1 + self.scene_id) % len(self.scenes)
if self.scene_id == 0:
random.shuffle(self.scene_order)
if self.scene_period is None:
# Random scene
self.scene_id = random.randint(0, len(self.scenes) - 1)
elif self.scene_period == "manual":
pass
elif self.scene_counter >= cast(int, self.scene_period):
if self.scene_id == len(self.scene_order) - 1:
# Randomize scene order for next iteration
random.shuffle(self.scene_order)
# Move to next scene
self.scene_id = 0
else:
# Move to next scene
self.scene_id += 1
# Reset scene counter
self.scene_counter = 1
elif isinstance(self.scene_period, int):
# Stay in current scene
self.scene_counter += 1
else:
raise NotImplementedError(
"Invalid scene_period {}".format(self.scene_period)
)
if self.max_tasks is not None:
self.max_tasks -= 1
return self.scenes[int(self.scene_order[self.scene_id])]
# def sample_episode(self, scene):
# self.scene_counters[scene] = (self.scene_counters[scene] + 1) % len(self.scene_to_episodes[scene])
# if self.scene_counters[scene] == 0:
# random.shuffle(self.scene_to_episodes[scene])
# return self.scene_to_episodes[scene][self.scene_counters[scene]]
def next_task(self, force_advance_scene: bool = False) -> Optional[ObjectNavTask]:
if self.max_tasks is not None and self.max_tasks <= 0:
# get_logger().debug("max_tasks {}".format(self.max_tasks))
return None
if not self.scenes_is_dataset:
scene = self.sample_scene(force_advance_scene)
if self.env is not None:
if scene.replace("_physics", "") != self.env.scene_name.replace(
"_physics", ""
):
self.env.reset(scene)
else:
self.env = self._create_environment()
self.env.reset(scene_name=scene)
pose = self.env.randomize_agent_location()
object_types_in_scene = set(
[o["objectType"] for o in self.env.last_event.metadata["objects"]]
)
task_info = {"scene": scene}
for ot in random.sample(self.object_types, len(self.object_types)):
if ot in object_types_in_scene:
task_info["object_type"] = ot
break
            if "object_type" not in task_info:
get_logger().warning(
"Scene {} does not contain any"
" objects of any of the types {}.".format(scene, self.object_types)
)
task_info["initial_position"] = {k: pose[k] for k in ["x", "y", "z"]}
task_info["initial_orientation"] = cast(Dict[str, float], pose["rotation"])[
"y"
]
else:
assert self.max_tasks is not None
next_task_id = self.dataset_first + self.max_tasks - 1
# get_logger().debug("task {}".format(next_task_id))
assert (
self.dataset_first <= next_task_id <= self.dataset_last
), "wrong task_id {} for min {} max {}".format(
next_task_id, self.dataset_first, self.dataset_last
)
task_info = copy.deepcopy(self.dataset_episodes[next_task_id])
scene = task_info["scene"]
if self.env is not None:
if scene.replace("_physics", "") != self.env.scene_name.replace(
"_physics", ""
):
self.env.reset(scene_name=scene)
else:
self.env = self._create_environment()
self.env.reset(scene_name=scene)
self.env.step(
{
"action": "TeleportFull",
**{k: float(v) for k, v in task_info["initial_position"].items()},
"rotation": {
"x": 0.0,
"y": float(task_info["initial_orientation"]),
"z": 0.0,
},
"horizon": 0.0,
"standing": True,
}
)
assert self.env.last_action_success, "Failed to reset agent for {}".format(
task_info
)
self.max_tasks -= 1
# task_info["actions"] = [] # TODO populated by Task(Generic[EnvType]).step(...) but unused
if self.allow_flipping and random.random() > 0.5:
task_info["mirrored"] = True
else:
task_info["mirrored"] = False
self._last_sampled_task = ObjectNavTask(
env=self.env,
sensors=self.sensors,
task_info=task_info,
max_steps=self.max_steps,
action_space=self._action_space,
reward_configs=self.rewards_config,
)
return self._last_sampled_task
def reset(self):
if not self.scenes_is_dataset:
self.scene_counter = 0
self.scene_order = list(range(len(self.scenes)))
random.shuffle(self.scene_order)
self.scene_id = 0
self.max_tasks = self.reset_tasks
def set_seed(self, seed: int):
self.seed = seed
if seed is not None:
set_seed(seed)
class ObjectNavDatasetTaskSampler(TaskSampler):
def __init__(
self,
scenes: List[str],
scene_directory: str,
sensors: List[Sensor],
max_steps: int,
env_args: Dict[str, Any],
action_space: gym.Space,
rewards_config: Dict,
adaptive_reward: bool = False,
seed: Optional[int] = None,
deterministic_cudnn: bool = False,
loop_dataset: bool = True,
task_mode: str = 'Train',
allow_flipping=False,
env_class=RoboThorEnvironment,
randomize_materials_in_training: bool = False,
**kwargs,
) -> None:
self.rewards_config = rewards_config
self.env_args = env_args
self.scenes = scenes
self.task_mode = task_mode
self.episodes = {
scene: ObjectNavDatasetTaskSampler.load_dataset(
scene, scene_directory + "/episodes"
)
for scene in scenes
}
# Only keep episodes containing desired objects
        if "object_types" in kwargs:
            self.episodes = {
                scene: [
                    ep
                    for ep in episodes
                    if ep["object_type"] in kwargs["object_types"]
                ]
                for scene, episodes in self.episodes.items()
            }
            self.episodes = {
                scene: episodes
                for scene, episodes in self.episodes.items()
                if len(episodes) > 0
            }
            self.scenes = [scene for scene in self.scenes if scene in self.episodes]
self.env_class = env_class
self.object_types = [
ep["object_type"] for scene in self.episodes for ep in self.episodes[scene]
]
self.env: Optional[RoboThorEnvironment] = None
self.sensors = sensors
self.max_steps = max_steps
self._action_space = action_space
self.adaptive_reward = adaptive_reward
self.allow_flipping = allow_flipping
self.scene_counter: Optional[int] = None
self.scene_order: Optional[List[str]] = None
self.scene_id: Optional[int] = None
# get the total number of tasks assigned to this process
if loop_dataset:
self.max_tasks = None
else:
self.max_tasks = sum(len(self.episodes[scene]) for scene in self.episodes)
self.reset_tasks = self.max_tasks
self.scene_index = 0
self.episode_index = 0
self.randomize_materials_in_training = randomize_materials_in_training
self._last_sampled_task: Optional[ObjectNavTask] = None
self.seed: Optional[int] = None
self.set_seed(seed)
if deterministic_cudnn:
set_deterministic_cudnn()
self.reset()
def _create_environment(self) -> RoboThorEnvironment:
env = self.env_class(**self.env_args)
return env
@staticmethod
def load_dataset(scene: str, base_directory: str) -> List[Dict]:
filename = (
"/".join([base_directory, scene])
if base_directory[-1] != "/"
else "".join([base_directory, scene])
)
filename += ".json.gz"
fin = gzip.GzipFile(filename, "r")
json_bytes = fin.read()
fin.close()
json_str = json_bytes.decode("utf-8")
data = json.loads(json_str)
random.shuffle(data)
return data
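    # Illustrative episode record (a hedged sketch of the fields this sampler reads
    # downstream in `next_task`; concrete datasets may carry additional keys and the
    # example values below are invented):
    # {
    #     "id": "FloorPlan_Train1_1_Television_0",
    #     "object_type": "Television",
    #     "initial_position": {"x": 3.0, "y": 0.9, "z": -1.5},
    #     "initial_orientation": 180,
    #     "initial_horizon": 0,
    #     "shortest_path": [{"x": ..., "y": ..., "z": ...}, ...],
    #     "shortest_path_length": 4.2,
    # }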
@staticmethod
def load_distance_cache_from_file(scene: str, base_directory: str) -> Dict:
filename = (
"/".join([base_directory, scene])
if base_directory[-1] != "/"
else "".join([base_directory, scene])
)
filename += ".json.gz"
fin = gzip.GzipFile(filename, "r")
json_bytes = fin.read()
fin.close()
json_str = json_bytes.decode("utf-8")
data = json.loads(json_str)
return data
@property
def __len__(self) -> Union[int, float]:
"""Length.
# Returns
Number of total tasks remaining that can be sampled. Can be float('inf').
"""
return float("inf") if self.max_tasks is None else self.max_tasks
@property
def total_unique(self) -> Optional[Union[int, float]]:
return self.reset_tasks
@property
def last_sampled_task(self) -> Optional[ObjectNavTask]:
return self._last_sampled_task
def close(self) -> None:
if self.env is not None:
self.env.stop()
@property
def all_observation_spaces_equal(self) -> bool:
"""Check if observation spaces equal.
# Returns
True if all Tasks that can be sampled by this sampler have the
same observation space. Otherwise False.
"""
return True
@property
def length(self) -> Union[int, float]:
"""Length.
# Returns
Number of total tasks remaining that can be sampled. Can be float('inf').
"""
return float("inf") if self.max_tasks is None else self.max_tasks
def next_task(self, force_advance_scene: bool = False) -> Optional[ObjectNavTask]:
if self.max_tasks is not None and self.max_tasks <= 0:
return None
if self.episode_index >= len(self.episodes[self.scenes[self.scene_index]]):
self.scene_index = (self.scene_index + 1) % len(self.scenes)
# shuffle the new list of episodes to train on
random.shuffle(self.episodes[self.scenes[self.scene_index]])
self.episode_index = 0
scene = self.scenes[self.scene_index]
episode = self.episodes[scene][self.episode_index]
if self.env is None:
self.env = self._create_environment()
if scene.replace("_physics", "") != self.env.scene_name.replace("_physics", ""):
self.env.reset(scene_name=scene)
else:
self.env.reset_object_filter()
self.env.set_object_filter(
object_ids=[
o["objectId"]
for o in self.env.last_event.metadata["objects"]
if o["objectType"] == episode["object_type"]
]
)
# only randomize materials in train scenes
were_materials_randomized = False
if self.randomize_materials_in_training:
if (
"Train" in scene
or int(scene.replace("FloorPlan", "").replace("_physics", "")) % 100
< 21
):
were_materials_randomized = True
self.env.controller.step(action="RandomizeMaterials")
task_info = {
"scene": scene,
"object_type": episode["object_type"],
"materials_randomized": were_materials_randomized,
}
        if "object_type" not in task_info:
get_logger().warning(
"Scene {} does not contain any"
" objects of any of the types {}.".format(scene, self.object_types)
)
task_info["initial_position"] = episode["initial_position"]
task_info["initial_orientation"] = episode["initial_orientation"]
task_info["initial_horizon"] = episode.get("initial_horizon", 0)
task_info["distance_to_target"] = episode.get("shortest_path_length")
task_info["path_to_target"] = episode.get("shortest_path")
task_info["object_type"] = episode["object_type"]
task_info["id"] = episode["id"]
if self.allow_flipping and random.random() > 0.5:
task_info["mirrored"] = True
else:
task_info["mirrored"] = False
self.episode_index += 1
if self.max_tasks is not None:
self.max_tasks -= 1
if not self.env.teleport(
pose=episode["initial_position"],
rotation=episode["initial_orientation"],
horizon=episode.get("initial_horizon", 0),
):
return self.next_task()
if self.adaptive_reward:
rewards_config = {
"step_penalty": -0.0,
"goal_success_reward": 0.00,
"failed_stop_reward": -15.00,
"shaping_weight": 0.00,
"penalty_for_init_ask": -1.00,
"penalty_for_step_ask": -0.01,
}
# init_asked_configs = list(np.linspace(0,5,num=6,endpoint=True))
            failed_stop_configs = list(
                np.linspace(1, 30, num=30, endpoint=True)
            )  # 30 candidate failed-stop penalty magnitudes
            # NOTE: if the number of reward configs changes, update the adaptive
            # reward embedding size in visual_nav_models.py accordingly.
#all_configs = [init_asked_configs,failed_stop_configs]
#combined_configs = list(itertools.product(*all_configs))
# probs = [1/len(combined_configs)]*len(combined_configs)
probs = [1/len(failed_stop_configs)]*len(failed_stop_configs)
if self.task_mode == 'Train':
config_idx = np.random.choice(np.arange(len(failed_stop_configs)),1,p=probs)[0]
reward = failed_stop_configs[config_idx]
# init_ask,failed_stop = -1*reward[0],-1*reward[1]
failed_stop = -1*reward
else:
# config_idx = 15.0
config_idx = failed_stop_configs.index(13.0)
failed_stop = -13.0
# init_ask = -1.0
            rewards_config["failed_stop_reward"] = failed_stop  # the -1 factor above makes this a penalty
# rewards_config['penalty_for_init_ask'] = init_ask
'''
config_idx = np.random.choice(4,1,p=[0.25,0.25,0.25,0.25])[0]
rewards_config = adaptive_configs_dict[config_idx]
'''
task_info['reward_config_idx'] = config_idx #failed_stop_configs.index(config_idx)
self._last_sampled_task = ObjectNavTask(
env=self.env,
sensors=self.sensors,
task_info=task_info,
max_steps=self.max_steps,
action_space=self._action_space,
reward_configs=rewards_config,
)
else:
self._last_sampled_task = ObjectNavTask(
env=self.env,
sensors=self.sensors,
task_info=task_info,
max_steps=self.max_steps,
action_space=self._action_space,
reward_configs=self.rewards_config,
)
return self._last_sampled_task
def reset(self):
self.episode_index = 0
self.scene_index = 0
self.max_tasks = self.reset_tasks
def set_seed(self, seed: int):
self.seed = seed
if seed is not None:
set_seed(seed)
class PointNavTaskSampler(TaskSampler):
def __init__(
self,
scenes: List[str],
# object_types: List[str],
# scene_to_episodes: List[Dict[str, Any]],
sensors: List[Sensor],
max_steps: int,
env_args: Dict[str, Any],
action_space: gym.Space,
rewards_config: Dict,
scene_period: Optional[Union[int, str]] = None,
max_tasks: Optional[int] = None,
seed: Optional[int] = None,
deterministic_cudnn: bool = False,
**kwargs,
) -> None:
self.rewards_config = rewards_config
self.env_args = env_args
self.scenes = scenes
# self.object_types = object_types
# self.scene_to_episodes = scene_to_episodes
# self.scene_counters = {scene: -1 for scene in self.scene_to_episodes}
# self.scenes = list(self.scene_to_episodes.keys())
self.env: Optional[RoboThorEnvironment] = None
self.sensors = sensors
self.max_steps = max_steps
self._action_space = action_space
self.scene_counter: Optional[int] = None
self.scene_order: Optional[List[str]] = None
self.scene_id: Optional[int] = None
self.scene_period: Optional[
Union[str, int]
] = scene_period # default makes a random choice
self.max_tasks: Optional[int] = None
self.reset_tasks = max_tasks
self._last_sampled_task: Optional[PointNavTask] = None
self.seed: Optional[int] = None
self.set_seed(seed)
if deterministic_cudnn:
set_deterministic_cudnn()
self.reset()
def _create_environment(self) -> RoboThorEnvironment:
env = RoboThorEnvironment(**self.env_args)
return env
@property
def length(self) -> Union[int, float]:
"""Length.
# Returns
Number of total tasks remaining that can be sampled.
Can be float('inf').
"""
return float("inf") if self.max_tasks is None else self.max_tasks
@property
def total_unique(self) -> Optional[Union[int, float]]:
# total = 0
# for scene in self.scene_to_episodes:
# total += len(self.scene_to_episodes[scene])
# return total
return self.reset_tasks
@property
def last_sampled_task(self) -> Optional[PointNavTask]:
return self._last_sampled_task
def close(self) -> None:
if self.env is not None:
self.env.stop()
@property
def all_observation_spaces_equal(self) -> bool:
"""Check if observation spaces equal.
# Returns
True if all Tasks that can be sampled by this sampler
have the same observation space. Otherwise False.
"""
return True
def sample_scene(self, force_advance_scene: bool):
if force_advance_scene:
if self.scene_period != "manual":
get_logger().warning(
"When sampling scene, have `force_advance_scene == True`"
"but `self.scene_period` is not equal to 'manual',"
"this may cause unexpected behavior."
)
self.scene_id = (1 + self.scene_id) % len(self.scenes)
if self.scene_id == 0:
random.shuffle(self.scene_order)
if self.scene_period is None:
# Random scene
self.scene_id = random.randint(0, len(self.scenes) - 1)
elif self.scene_period == "manual":
pass
elif self.scene_counter >= cast(int, self.scene_period):
if self.scene_id == len(self.scene_order) - 1:
# Randomize scene order for next iteration
random.shuffle(self.scene_order)
# Move to next scene
self.scene_id = 0
else:
# Move to next scene
self.scene_id += 1
# Reset scene counter
self.scene_counter = 1
elif isinstance(self.scene_period, int):
# Stay in current scene
self.scene_counter += 1
else:
raise NotImplementedError(
"Invalid scene_period {}".format(self.scene_period)
)
if self.max_tasks is not None:
self.max_tasks -= 1
return self.scenes[int(self.scene_order[self.scene_id])]
# def sample_episode(self, scene):
# self.scene_counters[scene] = (self.scene_counters[scene] + 1) % len(self.scene_to_episodes[scene])
# if self.scene_counters[scene] == 0:
# random.shuffle(self.scene_to_episodes[scene])
# return self.scene_to_episodes[scene][self.scene_counters[scene]]
def next_task(self, force_advance_scene: bool = False) -> Optional[PointNavTask]:
if self.max_tasks is not None and self.max_tasks <= 0:
return None
scene = self.sample_scene(force_advance_scene)
if self.env is not None:
if scene.replace("_physics", "") != self.env.scene_name.replace(
"_physics", ""
):
self.env.reset(scene_name=scene)
else:
self.env = self._create_environment()
self.env.reset(scene_name=scene)
# task_info = copy.deepcopy(self.sample_episode(scene))
# task_info['target'] = task_info['target_position']
# task_info['actions'] = []
locs = self.env.known_good_locations_list()
# get_logger().debug("locs[0] {} locs[-1] {}".format(locs[0], locs[-1]))
ys = [loc["y"] for loc in locs]
miny = min(ys)
maxy = max(ys)
assert maxy - miny < 1e-6, "miny {} maxy {} for scene {}".format(
miny, maxy, scene
)
too_close_to_target = True
target: Optional[Dict[str, float]] = None
for _ in range(10):
self.env.randomize_agent_location()
target = copy.copy(random.choice(locs))
too_close_to_target = self.env.distance_to_point(target) <= 0
if not too_close_to_target:
break
pose = self.env.agent_state()
task_info = {
"scene": scene,
"initial_position": {k: pose[k] for k in ["x", "y", "z"]},
"initial_orientation": pose["rotation"]["y"],
"target": target,
"actions": [],
}
if too_close_to_target:
get_logger().warning("No path for sampled episode {}".format(task_info))
# else:
# get_logger().debug("Path found for sampled episode {}".format(task_info))
# pose = {**task_info['initial_position'], 'rotation': {'x': 0.0, 'y': task_info['initial_orientation'], 'z': 0.0}, 'horizon': 0.0}
# self.env.step({"action": "TeleportFull", **pose})
        # assert self.env.last_action_success, "Failed to initialize agent to {} in {} for episode {}".format(pose, scene, task_info)
self._last_sampled_task = PointNavTask(
env=self.env,
sensors=self.sensors,
task_info=task_info,
max_steps=self.max_steps,
action_space=self._action_space,
reward_configs=self.rewards_config,
)
return self._last_sampled_task
def reset(self):
self.scene_counter = 0
self.scene_order = list(range(len(self.scenes)))
random.shuffle(self.scene_order)
self.scene_id = 0
self.max_tasks = self.reset_tasks
# for scene in self.scene_to_episodes:
# random.shuffle(self.scene_to_episodes[scene])
# for scene in self.scene_counters:
# self.scene_counters[scene] = -1
def set_seed(self, seed: int):
self.seed = seed
if seed is not None:
set_seed(seed)
class PointNavDatasetTaskSampler(TaskSampler):
def __init__(
self,
scenes: List[str],
scene_directory: str,
sensors: List[Sensor],
max_steps: int,
env_args: Dict[str, Any],
action_space: gym.Space,
rewards_config: Dict,
seed: Optional[int] = None,
deterministic_cudnn: bool = False,
loop_dataset: bool = True,
shuffle_dataset: bool = True,
allow_flipping=False,
env_class=RoboThorEnvironment,
**kwargs,
) -> None:
self.rewards_config = rewards_config
self.env_args = env_args
self.scenes = scenes
self.shuffle_dataset: bool = shuffle_dataset
self.episodes = {
scene: ObjectNavDatasetTaskSampler.load_dataset(
scene, scene_directory + "/episodes"
)
for scene in scenes
}
self.env_class = env_class
self.env: Optional[RoboThorEnvironment] = None
self.sensors = sensors
self.max_steps = max_steps
self._action_space = action_space
self.allow_flipping = allow_flipping
self.scene_counter: Optional[int] = None
self.scene_order: Optional[List[str]] = None
self.scene_id: Optional[int] = None
# get the total number of tasks assigned to this process
if loop_dataset:
self.max_tasks = None
else:
self.max_tasks = sum(len(self.episodes[scene]) for scene in self.episodes)
self.reset_tasks = self.max_tasks
self.scene_index = 0
self.episode_index = 0
self._last_sampled_task: Optional[PointNavTask] = None
self.seed: Optional[int] = None
self.set_seed(seed)
if deterministic_cudnn:
set_deterministic_cudnn()
self.reset()
def _create_environment(self) -> RoboThorEnvironment:
env = self.env_class(**self.env_args)
return env
@property
def __len__(self) -> Union[int, float]:
"""Length.
# Returns
Number of total tasks remaining that can be sampled. Can be float('inf').
"""
return float("inf") if self.max_tasks is None else self.max_tasks
@property
def total_unique(self) -> Optional[Union[int, float]]:
return self.reset_tasks
@property
def last_sampled_task(self) -> Optional[PointNavTask]:
return self._last_sampled_task
def close(self) -> None:
if self.env is not None:
self.env.stop()
@property
def all_observation_spaces_equal(self) -> bool:
"""Check if observation spaces equal.
# Returns
True if all Tasks that can be sampled by this sampler have the
same observation space. Otherwise False.
"""
return True
def next_task(self, force_advance_scene: bool = False) -> Optional[PointNavTask]:
if self.max_tasks is not None and self.max_tasks <= 0:
return None
if self.episode_index >= len(self.episodes[self.scenes[self.scene_index]]):
self.scene_index = (self.scene_index + 1) % len(self.scenes)
# shuffle the new list of episodes to train on
if self.shuffle_dataset:
random.shuffle(self.episodes[self.scenes[self.scene_index]])
self.episode_index = 0
scene = self.scenes[self.scene_index]
episode = self.episodes[scene][self.episode_index]
if self.env is not None:
if scene.replace("_physics", "") != self.env.scene_name.replace(
"_physics", ""
):
self.env.reset(scene_name=scene, filtered_objects=[])
else:
self.env = self._create_environment()
self.env.reset(scene_name=scene, filtered_objects=[])
def to_pos(s):
if isinstance(s, (Dict, Tuple)):
return s
if isinstance(s, float):
return {"x": 0, "y": s, "z": 0}
return str_to_pos_for_cache(s)
for k in ["initial_position", "initial_orientation", "target_position"]:
episode[k] = to_pos(episode[k])
task_info = {
"scene": scene,
"initial_position": episode["initial_position"],
"initial_orientation": episode["initial_orientation"],
"target": episode["target_position"],
"shortest_path": episode["shortest_path"],
"distance_to_target": episode["shortest_path_length"],
"id": episode["id"],
}
if self.allow_flipping and random.random() > 0.5:
task_info["mirrored"] = True
else:
task_info["mirrored"] = False
self.episode_index += 1
if self.max_tasks is not None:
self.max_tasks -= 1
if not self.env.teleport(
pose=episode["initial_position"], rotation=episode["initial_orientation"]
):
return self.next_task()
self._last_sampled_task = PointNavTask(
env=self.env,
sensors=self.sensors,
task_info=task_info,
max_steps=self.max_steps,
action_space=self._action_space,
reward_configs=self.rewards_config,
)
return self._last_sampled_task
def reset(self):
self.episode_index = 0
self.scene_index = 0
self.max_tasks = self.reset_tasks
def set_seed(self, seed: int):
self.seed = seed
if seed is not None:
set_seed(seed)
@property
def length(self) -> Union[int, float]:
"""Length.
# Returns
Number of total tasks remaining that can be sampled.
Can be float('inf').
"""
return float("inf") if self.max_tasks is None else self.max_tasks
class NavToPartnerTaskSampler(TaskSampler):
def __init__(
self,
scenes: List[str],
sensors: List[Sensor],
max_steps: int,
env_args: Dict[str, Any],
action_space: gym.Space,
rewards_config: Dict,
scene_period: Optional[Union[int, str]] = None,
max_tasks: Optional[int] = None,
seed: Optional[int] = None,
deterministic_cudnn: bool = False,
**kwargs,
) -> None:
self.rewards_config = rewards_config
self.env_args = env_args
self.scenes = scenes
self.env: Optional[RoboThorEnvironment] = None
self.sensors = sensors
self.max_steps = max_steps
self._action_space = action_space
self.scene_counter: Optional[int] = None
self.scene_order: Optional[List[str]] = None
self.scene_id: Optional[int] = None
self.scene_period: Optional[
Union[str, int]
] = scene_period # default makes a random choice
self.max_tasks: Optional[int] = None
self.reset_tasks = max_tasks
self._last_sampled_task: Optional[NavToPartnerTask] = None
self.seed: Optional[int] = None
self.set_seed(seed)
if deterministic_cudnn:
set_deterministic_cudnn()
self.reset()
def _create_environment(self) -> RoboThorEnvironment:
assert (
self.env_args["agentCount"] == 2
), "NavToPartner is only defined for 2 agents!"
env = RoboThorEnvironment(**self.env_args)
return env
@property
def length(self) -> Union[int, float]:
"""Length.
# Returns
Number of total tasks remaining that can be sampled.
Can be float('inf').
"""
return float("inf") if self.max_tasks is None else self.max_tasks
@property
def total_unique(self) -> Optional[Union[int, float]]:
return self.reset_tasks
@property
def last_sampled_task(self) -> Optional[NavToPartnerTask]:
return self._last_sampled_task
def close(self) -> None:
if self.env is not None:
self.env.stop()
@property
def all_observation_spaces_equal(self) -> bool:
"""Check if observation spaces equal.
# Returns
True if all Tasks that can be sampled by this sampler
have the same observation space. Otherwise False.
"""
return True
def sample_scene(self, force_advance_scene: bool):
if force_advance_scene:
if self.scene_period != "manual":
get_logger().warning(
"When sampling scene, have `force_advance_scene == True`"
"but `self.scene_period` is not equal to 'manual',"
"this may cause unexpected behavior."
)
self.scene_id = (1 + self.scene_id) % len(self.scenes)
if self.scene_id == 0:
random.shuffle(self.scene_order)
if self.scene_period is None:
# Random scene
self.scene_id = random.randint(0, len(self.scenes) - 1)
elif self.scene_period == "manual":
pass
elif self.scene_counter >= cast(int, self.scene_period):
if self.scene_id == len(self.scene_order) - 1:
# Randomize scene order for next iteration
random.shuffle(self.scene_order)
# Move to next scene
self.scene_id = 0
else:
# Move to next scene
self.scene_id += 1
# Reset scene counter
self.scene_counter = 1
elif isinstance(self.scene_period, int):
# Stay in current scene
self.scene_counter += 1
else:
raise NotImplementedError(
"Invalid scene_period {}".format(self.scene_period)
)
if self.max_tasks is not None:
self.max_tasks -= 1
return self.scenes[int(self.scene_order[self.scene_id])]
def next_task(
self, force_advance_scene: bool = False
) -> Optional[NavToPartnerTask]:
if self.max_tasks is not None and self.max_tasks <= 0:
return None
scene = self.sample_scene(force_advance_scene)
if self.env is not None:
if scene.replace("_physics", "") != self.env.scene_name.replace(
"_physics", ""
):
self.env.reset(scene_name=scene)
else:
self.env = self._create_environment()
self.env.reset(scene_name=scene)
too_close_to_target = True
for _ in range(10):
self.env.randomize_agent_location(agent_id=0)
self.env.randomize_agent_location(agent_id=1)
pose1 = self.env.agent_state(0)
pose2 = self.env.agent_state(1)
dist = self.env.distance_cache.find_distance(
self.env.scene_name,
{k: pose1[k] for k in ["x", "y", "z"]},
{k: pose2[k] for k in ["x", "y", "z"]},
self.env.distance_from_point_to_point,
)
too_close_to_target = (
dist <= 1.25 * self.rewards_config["max_success_distance"]
)
if not too_close_to_target:
break
task_info = {
"scene": scene,
"initial_position1": {k: pose1[k] for k in ["x", "y", "z"]},
"initial_position2": {k: pose2[k] for k in ["x", "y", "z"]},
"initial_orientation1": pose1["rotation"]["y"],
"initial_orientation2": pose2["rotation"]["y"],
"id": "_".join(
[scene]
# + ["%4.2f" % pose1[k] for k in ["x", "y", "z"]]
# + ["%4.2f" % pose1["rotation"]["y"]]
# + ["%4.2f" % pose2[k] for k in ["x", "y", "z"]]
# + ["%4.2f" % pose2["rotation"]["y"]]
+ ["%d" % random.randint(0, 2 ** 63 - 1)]
),
}
if too_close_to_target:
get_logger().warning("Bad sampled episode {}".format(task_info))
self._last_sampled_task = NavToPartnerTask(
env=self.env,
sensors=self.sensors,
task_info=task_info,
max_steps=self.max_steps,
action_space=self._action_space,
reward_configs=self.rewards_config,
)
return self._last_sampled_task
def reset(self):
self.scene_counter = 0
self.scene_order = list(range(len(self.scenes)))
random.shuffle(self.scene_order)
self.scene_id = 0
self.max_tasks = self.reset_tasks
def set_seed(self, seed: int):
self.seed = seed
if seed is not None:
set_seed(seed)
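# Sketch of how these samplers are typically driven (the scene names, sensors,
# env_args, and reward values below are placeholders assumed for illustration;
# in practice an ExperimentConfig supplies them):
#
#     sampler = ObjectNavDatasetTaskSampler(
#         scenes=["FloorPlan_Train1_1"],
#         scene_directory="datasets/robothor-objectnav/train",
#         sensors=[...],
#         max_steps=500,
#         env_args={...},
#         action_space=gym.spaces.Discrete(6),
#         rewards_config={...},
#         loop_dataset=False,
#     )
#     task = sampler.next_task()
#     while task is not None:
#         ...  # roll the task out with an agent, then ask for the next one
#         task = sampler.next_task()
#     sampler.close()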
| ask4help-main | allenact_plugins/robothor_plugin/robothor_task_samplers.py |
import copy
import glob
import math
import pickle
import random
import warnings
from typing import Any, Optional, Dict, List, Union, Tuple, Collection
from ai2thor.fifo_server import FifoServer
import ai2thor.server
import numpy as np
from ai2thor.controller import Controller
from ai2thor.util import metrics
from allenact.utils.cache_utils import (
DynamicDistanceCache,
pos_to_str_for_cache,
str_to_pos_for_cache,
)
from allenact.utils.experiment_utils import recursive_update
from allenact.utils.system import get_logger
class RoboThorEnvironment:
"""Wrapper for the robo2thor controller providing additional functionality
and bookkeeping.
See [here](https://ai2thor.allenai.org/robothor/documentation) for comprehensive
documentation on RoboTHOR.
# Attributes
controller : The AI2-THOR controller.
config : The AI2-THOR controller configuration
"""
def __init__(self, all_metadata_available: bool = True, **kwargs):
self.config = dict(
rotateStepDegrees=30.0,
visibilityDistance=1.0,
gridSize=0.25,
continuousMode=True,
snapToGrid=False,
agentMode="locobot",
width=640,
height=480,
agentCount=1,
server_class=FifoServer,
)
if "agentCount" in kwargs:
assert kwargs["agentCount"] > 0
kwargs["agentMode"] = kwargs.get("agentMode", "locobot")
if kwargs["agentMode"] not in ["bot", "locobot"]:
warnings.warn(
f"The RoboTHOR environment has not been tested using"
f" an agent of mode '{kwargs['agentMode']}'."
)
recursive_update(self.config, kwargs)
self.controller = Controller(**self.config,)
self.all_metadata_available = all_metadata_available
self.scene_to_reachable_positions: Optional[Dict[str, Any]] = None
self.distance_cache: Optional[DynamicDistanceCache] = None
if self.all_metadata_available:
self.scene_to_reachable_positions = {
self.scene_name: copy.deepcopy(self.currently_reachable_points)
}
assert len(self.scene_to_reachable_positions[self.scene_name]) > 10
self.distance_cache = DynamicDistanceCache(rounding=1)
self.agent_count = self.config["agentCount"]
self._extra_teleport_kwargs: Dict[
str, Any
] = {} # Used for backwards compatability with the teleport action
def initialize_grid_dimensions(
self, reachable_points: Collection[Dict[str, float]]
) -> Tuple[int, int, int, int]:
"""Computes bounding box for reachable points quantized with the
current gridSize."""
points = {
(
round(p["x"] / self.config["gridSize"]),
round(p["z"] / self.config["gridSize"]),
): p
for p in reachable_points
}
assert len(reachable_points) == len(points)
xmin, xmax = min([p[0] for p in points]), max([p[0] for p in points])
zmin, zmax = min([p[1] for p in points]), max([p[1] for p in points])
return xmin, xmax, zmin, zmax
def set_object_filter(self, object_ids: List[str]):
self.controller.step("SetObjectFilter", objectIds=object_ids, renderImage=False)
def reset_object_filter(self):
self.controller.step("ResetObjectFilter", renderImage=False)
def path_from_point_to_object_type(
self, point: Dict[str, float], object_type: str, allowed_error: float
) -> Optional[List[Dict[str, float]]]:
event = self.controller.step(
action="GetShortestPath",
objectType=object_type,
position=point,
allowedError=allowed_error,
)
if event.metadata["lastActionSuccess"]:
return event.metadata["actionReturn"]["corners"]
else:
get_logger().debug(
"Failed to find path for {} in {}. Start point {}, agent state {}.".format(
object_type,
self.controller.last_event.metadata["sceneName"],
point,
self.agent_state(),
)
)
return None
def distance_from_point_to_object_type(
self, point: Dict[str, float], object_type: str, allowed_error: float
) -> float:
"""Minimal geodesic distance from a point to an object of the given
type.
It might return -1.0 for unreachable targets.
"""
path = self.path_from_point_to_object_type(point, object_type, allowed_error)
if path:
# Because `allowed_error != 0` means that the path returned above might not start
# at `point`, we explicitly add any offset there is.
s_dist = math.sqrt(
(point["x"] - path[0]["x"]) ** 2 + (point["z"] - path[0]["z"]) ** 2
)
return metrics.path_distance(path) + s_dist
return -1.0
def distance_to_object_type(self, object_type: str, agent_id: int = 0) -> float:
"""Minimal geodesic distance to object of given type from agent's
current location.
It might return -1.0 for unreachable targets.
"""
assert 0 <= agent_id < self.agent_count
assert (
self.all_metadata_available
), "`distance_to_object_type` cannot be called when `self.all_metadata_available` is `False`."
def retry_dist(position: Dict[str, float], object_type: str):
allowed_error = 0.05
debug_log = ""
d = -1.0
while allowed_error < 2.5:
d = self.distance_from_point_to_object_type(
position, object_type, allowed_error
)
if d < 0:
debug_log = (
f"In scene {self.scene_name}, could not find a path from {position} to {object_type} with"
f" {allowed_error} error tolerance. Increasing this tolerance to"
f" {2 * allowed_error} any trying again."
)
allowed_error *= 2
else:
break
if d < 0:
get_logger().warning(
f"In scene {self.scene_name}, could not find a path from {position} to {object_type}"
f" with {allowed_error} error tolerance. Returning a distance of -1."
)
elif debug_log != "":
get_logger().debug(debug_log)
return d
return self.distance_cache.find_distance(
self.scene_name,
self.controller.last_event.events[agent_id].metadata["agent"]["position"],
object_type,
retry_dist,
)
def path_from_point_to_point(
self, position: Dict[str, float], target: Dict[str, float], allowedError: float
) -> Optional[List[Dict[str, float]]]:
try:
return self.controller.step(
action="GetShortestPathToPoint",
position=position,
x=target["x"],
y=target["y"],
z=target["z"],
allowedError=allowedError,
).metadata["actionReturn"]["corners"]
except Exception:
get_logger().debug(
"Failed to find path for {} in {}. Start point {}, agent state {}.".format(
target,
self.controller.last_event.metadata["sceneName"],
position,
self.agent_state(),
)
)
return None
def distance_from_point_to_point(
self, position: Dict[str, float], target: Dict[str, float], allowed_error: float
) -> float:
path = self.path_from_point_to_point(position, target, allowed_error)
if path:
# Because `allowed_error != 0` means that the path returned above might not start
            # or end exactly at the position/target points, we explicitly add any offset there is.
s_dist = math.sqrt(
(position["x"] - path[0]["x"]) ** 2
+ (position["z"] - path[0]["z"]) ** 2
)
t_dist = math.sqrt(
(target["x"] - path[-1]["x"]) ** 2 + (target["z"] - path[-1]["z"]) ** 2
)
return metrics.path_distance(path) + s_dist + t_dist
return -1.0
def distance_to_point(self, target: Dict[str, float], agent_id: int = 0) -> float:
"""Minimal geodesic distance to end point from agent's current
location.
It might return -1.0 for unreachable targets.
"""
assert 0 <= agent_id < self.agent_count
assert (
self.all_metadata_available
), "`distance_to_object_type` cannot be called when `self.all_metadata_available` is `False`."
def retry_dist(position: Dict[str, float], target: Dict[str, float]):
allowed_error = 0.05
debug_log = ""
d = -1.0
while allowed_error < 2.5:
d = self.distance_from_point_to_point(position, target, allowed_error)
if d < 0:
debug_log = (
f"In scene {self.scene_name}, could not find a path from {position} to {target} with"
f" {allowed_error} error tolerance. Increasing this tolerance to"
f" {2 * allowed_error} any trying again."
)
allowed_error *= 2
else:
break
if d < 0:
get_logger().warning(
f"In scene {self.scene_name}, could not find a path from {position} to {target}"
f" with {allowed_error} error tolerance. Returning a distance of -1."
)
elif debug_log != "":
get_logger().debug(debug_log)
return d
return self.distance_cache.find_distance(
self.scene_name,
self.controller.last_event.events[agent_id].metadata["agent"]["position"],
target,
retry_dist,
)
def agent_state(self, agent_id: int = 0) -> Dict:
"""Return agent position, rotation and horizon."""
assert 0 <= agent_id < self.agent_count
agent_meta = self.last_event.events[agent_id].metadata["agent"]
return {
**{k: float(v) for k, v in agent_meta["position"].items()},
"rotation": {k: float(v) for k, v in agent_meta["rotation"].items()},
"horizon": round(float(agent_meta["cameraHorizon"]), 1),
}
def teleport(
self,
pose: Dict[str, float],
rotation: Dict[str, float],
horizon: float = 0.0,
agent_id: int = 0,
):
assert 0 <= agent_id < self.agent_count
try:
e = self.controller.step(
action="TeleportFull",
x=pose["x"],
y=pose["y"],
z=pose["z"],
rotation=rotation,
horizon=horizon,
agentId=agent_id,
**self._extra_teleport_kwargs,
)
except ValueError as e:
if len(self._extra_teleport_kwargs) == 0:
self._extra_teleport_kwargs["standing"] = True
else:
raise e
return self.teleport(
pose=pose, rotation=rotation, horizon=horizon, agent_id=agent_id
)
return e.metadata["lastActionSuccess"]
def reset(
self, scene_name: str = None, filtered_objects: Optional[List[str]] = None
) -> None:
"""Resets scene to a known initial state."""
if scene_name is not None and scene_name != self.scene_name:
self.controller.reset(scene_name)
assert self.last_action_success, "Could not reset to new scene"
if (
self.all_metadata_available
and scene_name not in self.scene_to_reachable_positions
):
self.scene_to_reachable_positions[scene_name] = copy.deepcopy(
self.currently_reachable_points
)
assert len(self.scene_to_reachable_positions[scene_name]) > 10
if filtered_objects:
self.set_object_filter(filtered_objects)
else:
self.reset_object_filter()
def random_reachable_state(
self, seed: Optional[int] = None
) -> Dict[str, Union[Dict[str, float], float]]:
"""Returns a random reachable location in the scene."""
assert (
self.all_metadata_available
), "`random_reachable_state` cannot be called when `self.all_metadata_available` is `False`."
if seed is not None:
random.seed(seed)
# xyz = random.choice(self.currently_reachable_points)
assert len(self.scene_to_reachable_positions[self.scene_name]) > 10
xyz = copy.deepcopy(
random.choice(self.scene_to_reachable_positions[self.scene_name])
)
rotation = random.choice(
np.arange(0.0, 360.0, self.config["rotateStepDegrees"])
)
horizon = 0.0 # random.choice([0.0, 30.0, 330.0])
return {
**{k: float(v) for k, v in xyz.items()},
"rotation": {"x": 0.0, "y": float(rotation), "z": 0.0},
"horizon": float(horizon),
}
def randomize_agent_location(
self,
seed: int = None,
partial_position: Optional[Dict[str, float]] = None,
agent_id: int = 0,
) -> Dict[str, Union[Dict[str, float], float]]:
"""Teleports the agent to a random reachable location in the scene."""
assert 0 <= agent_id < self.agent_count
if partial_position is None:
partial_position = {}
k = 0
state: Optional[Dict] = None
while k == 0 or (not self.last_action_success and k < 10):
# self.reset()
state = {**self.random_reachable_state(seed=seed), **partial_position}
# get_logger().debug("picked target location {}".format(state))
self.controller.step("TeleportFull", **state, agentId=agent_id)
k += 1
if not self.last_action_success:
get_logger().warning(
(
"Randomize agent location in scene {} and current random state {}"
" with seed {} and partial position {} failed in "
"10 attempts. Forcing the action."
).format(self.scene_name, state, seed, partial_position)
)
self.controller.step("TeleportFull", **state, force_action=True, agentId=agent_id) # type: ignore
assert self.last_action_success, "Force action failed with {}".format(state)
# get_logger().debug("location after teleport full {}".format(self.agent_state()))
# self.controller.step("TeleportFull", **self.agent_state()) # TODO only for debug
# get_logger().debug("location after re-teleport full {}".format(self.agent_state()))
return self.agent_state(agent_id=agent_id)
def known_good_locations_list(self):
assert (
self.all_metadata_available
), "`known_good_locations_list` cannot be called when `self.all_metadata_available` is `False`."
return self.scene_to_reachable_positions[self.scene_name]
@property
def currently_reachable_points(self) -> List[Dict[str, float]]:
"""List of {"x": x, "y": y, "z": z} locations in the scene that are
currently reachable."""
self.controller.step(action="GetReachablePositions")
assert (
self.last_action_success
), f"Could not get reachable positions for reason {self.last_event.metadata['errorMessage']}."
return self.last_action_return
@property
def scene_name(self) -> str:
"""Current ai2thor scene."""
return self.controller.last_event.metadata["sceneName"].replace("_physics", "")
@property
def current_frame(self) -> np.ndarray:
"""Returns rgb image corresponding to the agent's egocentric view."""
return self.controller.last_event.frame
@property
def current_depth(self) -> np.ndarray:
"""Returns depth image corresponding to the agent's egocentric view."""
return self.controller.last_event.depth_frame
@property
def current_frames(self) -> List[np.ndarray]:
"""Returns rgb images corresponding to the agents' egocentric views."""
return [
self.controller.last_event.events[agent_id].frame
for agent_id in range(self.agent_count)
]
@property
def current_depths(self) -> List[np.ndarray]:
"""Returns depth images corresponding to the agents' egocentric
views."""
return [
self.controller.last_event.events[agent_id].depth_frame
for agent_id in range(self.agent_count)
]
@property
def last_event(self) -> ai2thor.server.Event:
"""Last event returned by the controller."""
return self.controller.last_event
@property
def last_action(self) -> str:
"""Last action, as a string, taken by the agent."""
return self.controller.last_event.metadata["lastAction"]
@property
def last_action_success(self) -> bool:
"""Was the last action taken by the agent a success?"""
return self.controller.last_event.metadata["lastActionSuccess"]
@property
def last_action_return(self) -> Any:
"""Get the value returned by the last action (if applicable).
For an example of an action that returns a value, see
`"GetReachablePositions"`.
"""
return self.controller.last_event.metadata["actionReturn"]
def step(
self,
action_dict: Optional[Dict[str, Union[str, int, float, Dict]]] = None,
**kwargs: Union[str, int, float, Dict],
) -> ai2thor.server.Event:
"""Take a step in the ai2thor environment."""
if action_dict is None:
action_dict = dict()
action_dict.update(kwargs)
return self.controller.step(**action_dict)
def stop(self):
"""Stops the ai2thor controller."""
try:
self.controller.stop()
except Exception as e:
get_logger().warning(str(e))
def all_objects(self) -> List[Dict[str, Any]]:
"""Return all object metadata."""
return self.controller.last_event.metadata["objects"]
def all_objects_with_properties(
self, properties: Dict[str, Any]
) -> List[Dict[str, Any]]:
"""Find all objects with the given properties."""
objects = []
for o in self.all_objects():
satisfies_all = True
for k, v in properties.items():
if o[k] != v:
satisfies_all = False
break
if satisfies_all:
objects.append(o)
return objects
def visible_objects(self) -> List[Dict[str, Any]]:
"""Return all visible objects."""
return self.all_objects_with_properties({"visible": True})
class RoboThorCachedEnvironment:
"""Wrapper for the robo2thor controller providing additional functionality
and bookkeeping.
See [here](https://ai2thor.allenai.org/robothor/documentation) for comprehensive
documentation on RoboTHOR.
# Attributes
    controller : The AI2-THOR controller.
    config : The AI2-THOR controller configuration.
"""
def __init__(self, **kwargs):
self.config = dict(
rotateStepDegrees=30.0,
visibilityDistance=1.0,
gridSize=0.25,
continuousMode=True,
snapToGrid=False,
agentMode="locobot",
width=640,
height=480,
)
self.env_root_dir = kwargs["env_root_dir"]
random_scene = random.choice(list(glob.glob(self.env_root_dir + "/*.pkl")))
handle = open(random_scene, "rb")
self.view_cache = pickle.load(handle)
handle.close()
self.agent_position = list(self.view_cache.keys())[0]
self.agent_rotation = list(self.view_cache[self.agent_position].keys())[0]
self.known_good_locations: Dict[str, Any] = {
self.scene_name: copy.deepcopy(self.currently_reachable_points)
}
self._last_action = "None"
assert len(self.known_good_locations[self.scene_name]) > 10
def agent_state(self) -> Dict[str, Union[Dict[str, float], float]]:
"""Return agent position, rotation and horizon."""
return {
**str_to_pos_for_cache(self.agent_position),
"rotation": {"x": 0.0, "y": self.agent_rotation, "z": 0.0},
"horizon": 1.0,
}
def teleport(
self, pose: Dict[str, float], rotation: Dict[str, float], horizon: float = 0.0
):
self.agent_position = pos_to_str_for_cache(pose)
self.agent_rotation = (
math.floor(rotation["y"] / 90.0) * 90
        )  # snap down to the nearest lower multiple of 90 degrees
return True
def reset(self, scene_name: str = None) -> None:
"""Resets scene to a known initial state."""
try:
handle = open(self.env_root_dir + "/" + scene_name + ".pkl", "rb")
self.view_cache = pickle.load(handle)
handle.close()
self.agent_position = list(self.view_cache.keys())[0]
self.agent_rotation = list(self.view_cache[self.agent_position].keys())[0]
self.known_good_locations[self.scene_name] = copy.deepcopy(
self.currently_reachable_points
)
self._last_action = "None"
assert len(self.known_good_locations[self.scene_name]) > 10
        except Exception:
            raise RuntimeError("Could not load scene: {}".format(scene_name))
def known_good_locations_list(self):
return self.known_good_locations[self.scene_name]
@property
def currently_reachable_points(self) -> List[Dict[str, float]]:
"""List of {"x": x, "y": y, "z": z} locations in the scene that are
currently reachable."""
return [str_to_pos_for_cache(pos) for pos in self.view_cache]
@property
def scene_name(self) -> str:
"""Current ai2thor scene."""
return self.view_cache[self.agent_position][self.agent_rotation].metadata[
"sceneName"
]
@property
def current_frame(self) -> np.ndarray:
"""Returns rgb image corresponding to the agent's egocentric view."""
return self.view_cache[self.agent_position][self.agent_rotation].frame
@property
def current_depth(self) -> np.ndarray:
"""Returns depth image corresponding to the agent's egocentric view."""
return self.view_cache[self.agent_position][self.agent_rotation].depth_frame
@property
def last_event(self) -> ai2thor.server.Event:
"""Last event returned by the controller."""
return self.view_cache[self.agent_position][self.agent_rotation]
@property
def last_action(self) -> str:
"""Last action, as a string, taken by the agent."""
return self._last_action
@property
def last_action_success(self) -> bool:
"""In the cached environment, all actions succeed."""
return True
@property
def last_action_return(self) -> Any:
"""Get the value returned by the last action (if applicable).
For an example of an action that returns a value, see
`"GetReachablePositions"`.
"""
return self.view_cache[self.agent_position][self.agent_rotation].metadata[
"actionReturn"
]
def step(
self, action_dict: Dict[str, Union[str, int, float]]
) -> ai2thor.server.Event:
"""Take a step in the ai2thor environment."""
self._last_action = action_dict["action"]
if action_dict["action"] == "RotateLeft":
self.agent_rotation = (self.agent_rotation - 90.0) % 360.0
elif action_dict["action"] == "RotateRight":
self.agent_rotation = (self.agent_rotation + 90.0) % 360.0
elif action_dict["action"] == "MoveAhead":
pos = str_to_pos_for_cache(self.agent_position)
if self.agent_rotation == 0.0:
pos["x"] += 0.25
elif self.agent_rotation == 90.0:
pos["z"] += 0.25
elif self.agent_rotation == 180.0:
pos["x"] -= 0.25
elif self.agent_rotation == 270.0:
pos["z"] -= 0.25
pos_string = pos_to_str_for_cache(pos)
if pos_string in self.view_cache:
self.agent_position = pos_to_str_for_cache(pos)
return self.last_event
# noinspection PyMethodMayBeStatic
def stop(self):
"""Stops the ai2thor controller."""
print("No need to stop cached environment")
def all_objects(self) -> List[Dict[str, Any]]:
"""Return all object metadata."""
return self.view_cache[self.agent_position][self.agent_rotation].metadata[
"objects"
]
def all_objects_with_properties(
self, properties: Dict[str, Any]
) -> List[Dict[str, Any]]:
"""Find all objects with the given properties."""
objects = []
for o in self.all_objects():
satisfies_all = True
for k, v in properties.items():
if o[k] != v:
satisfies_all = False
break
if satisfies_all:
objects.append(o)
return objects
def visible_objects(self) -> List[Dict[str, Any]]:
"""Return all visible objects."""
return self.all_objects_with_properties({"visible": True})
| ask4help-main | allenact_plugins/robothor_plugin/robothor_environment.py |
MOVE_AHEAD = "MoveAhead"
ROTATE_LEFT = "RotateLeft"
ROTATE_RIGHT = "RotateRight"
LOOK_DOWN = "LookDown"
LOOK_UP = "LookUp"
END = "End"
PASS = "Pass"
| ask4help-main | allenact_plugins/robothor_plugin/robothor_constants.py |
from typing import Tuple
import torch
from allenact.base_abstractions.distributions import CategoricalDistr, Distr
class TupleCategoricalDistr(Distr):
def __init__(self, probs=None, logits=None, validate_args=None):
self.dists = CategoricalDistr(
probs=probs, logits=logits, validate_args=validate_args
)
def log_prob(self, actions: Tuple[torch.LongTensor, ...]) -> torch.FloatTensor:
# flattened output [steps, samplers, num_agents]
return self.dists.log_prob(torch.stack(actions, dim=-1))
def entropy(self) -> torch.FloatTensor:
# flattened output [steps, samplers, num_agents]
return self.dists.entropy()
def sample(self, sample_shape=torch.Size()) -> Tuple[torch.LongTensor, ...]:
# split and remove trailing singleton dim
res = self.dists.sample(sample_shape).split(1, dim=-1)
return tuple([r.view(r.shape[:2]) for r in res])
def mode(self) -> Tuple[torch.LongTensor, ...]:
# split and remove trailing singleton dim
res = self.dists.mode().split(1, dim=-1)
return tuple([r.view(r.shape[:2]) for r in res])
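if __name__ == "__main__":
    # Self-contained sketch: a joint distribution over the discrete actions of two
    # agents, following the assumed [steps, samplers, agents, actions] layout.
    logits = torch.randn(1, 4, 2, 6)  # 1 step, 4 samplers, 2 agents, 6 actions
    distr = TupleCategoricalDistr(logits=logits)
    actions = distr.sample()  # tuple of 2 tensors, each of shape [1, 4]
    log_probs = distr.log_prob(actions)  # shape [1, 4, 2]
    print([a.shape for a in actions], log_probs.shape, distr.entropy().shape)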
| ask4help-main | allenact_plugins/robothor_plugin/robothor_distributions.py |
from typing import Tuple, Dict, Union, Sequence, Optional, cast
import gym
import torch
import torch.nn as nn
from gym.spaces import Dict as SpaceDict
from allenact.algorithms.onpolicy_sync.policy import (
ActorCriticModel,
LinearActorCriticHead,
DistributionType,
Memory,
ObservationType,
)
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import ActorCriticOutput
from allenact.embodiedai.models.basic_models import RNNStateEncoder, SimpleCNN
from allenact_plugins.robothor_plugin.robothor_distributions import (
TupleCategoricalDistr,
)
class ResnetTensorGoalEncoder(nn.Module):
def __init__(
self,
observation_spaces: SpaceDict,
goal_sensor_uuid: str,
resnet_preprocessor_uuid: str,
class_dims: int = 32,
resnet_compressor_hidden_out_dims: Tuple[int, int] = (128, 32),
combiner_hidden_out_dims: Tuple[int, int] = (128, 32),
) -> None:
super().__init__()
self.goal_uuid = goal_sensor_uuid
self.resnet_uuid = resnet_preprocessor_uuid
self.class_dims = class_dims
self.resnet_hid_out_dims = resnet_compressor_hidden_out_dims
self.combine_hid_out_dims = combiner_hidden_out_dims
self.embed_class = nn.Embedding(
num_embeddings=observation_spaces.spaces[self.goal_uuid].n,
embedding_dim=self.class_dims,
)
self.blind = self.resnet_uuid not in observation_spaces.spaces
if not self.blind:
self.resnet_tensor_shape = observation_spaces.spaces[self.resnet_uuid].shape
self.resnet_compressor = nn.Sequential(
nn.Conv2d(self.resnet_tensor_shape[0], self.resnet_hid_out_dims[0], 1),
nn.ReLU(),
nn.Conv2d(*self.resnet_hid_out_dims[0:2], 1),
nn.ReLU(),
)
self.target_obs_combiner = nn.Sequential(
nn.Conv2d(
self.resnet_hid_out_dims[1] + self.class_dims,
self.combine_hid_out_dims[0],
1,
),
nn.ReLU(),
nn.Conv2d(*self.combine_hid_out_dims[0:2], 1),
)
@property
def is_blind(self):
return self.blind
@property
def output_dims(self):
if self.blind:
return self.class_dims
else:
return (
self.combine_hid_out_dims[-1]
* self.resnet_tensor_shape[1]
* self.resnet_tensor_shape[2]
)
def get_object_type_encoding(
self, observations: Dict[str, torch.FloatTensor]
) -> torch.FloatTensor:
"""Get the object type encoding from input batched observations."""
return cast(
torch.FloatTensor,
self.embed_class(observations[self.goal_uuid].to(torch.int64)),
)
def compress_resnet(self, observations):
return self.resnet_compressor(observations[self.resnet_uuid])
def distribute_target(self, observations):
target_emb = self.embed_class(observations[self.goal_uuid])
return target_emb.view(-1, self.class_dims, 1, 1).expand(
-1, -1, self.resnet_tensor_shape[-2], self.resnet_tensor_shape[-1]
)
def adapt_input(self, observations):
resnet = observations[self.resnet_uuid]
use_agent = False
nagent = 1
if len(resnet.shape) == 6:
use_agent = True
nstep, nsampler, nagent = resnet.shape[:3]
else:
nstep, nsampler = resnet.shape[:2]
observations[self.resnet_uuid] = resnet.view(-1, *resnet.shape[-3:])
observations[self.goal_uuid] = observations[self.goal_uuid].view(-1, 1)
return observations, use_agent, nstep, nsampler, nagent
@staticmethod
def adapt_output(x, use_agent, nstep, nsampler, nagent):
if use_agent:
return x.view(nstep, nsampler, nagent, -1)
return x.view(nstep, nsampler * nagent, -1)
def forward(self, observations):
observations, use_agent, nstep, nsampler, nagent = self.adapt_input(
observations
)
if self.blind:
return self.embed_class(observations[self.goal_uuid])
embs = [
self.compress_resnet(observations),
self.distribute_target(observations),
]
x = self.target_obs_combiner(torch.cat(embs, dim=-3,))
x = x.reshape(x.size(0), -1) # flatten
return self.adapt_output(x, use_agent, nstep, nsampler, nagent)
class ResnetTensorObjectNavActorCritic(ActorCriticModel[CategoricalDistr]):
def __init__(
self,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
goal_sensor_uuid: str,
resnet_preprocessor_uuid: str,
rnn_hidden_size: int = 512,
goal_dims: int = 32,
resnet_compressor_hidden_out_dims: Tuple[int, int] = (128, 32),
combiner_hidden_out_dims: Tuple[int, int] = (128, 32),
):
super().__init__(
action_space=action_space, observation_space=observation_space,
)
self.hidden_size = rnn_hidden_size
self.goal_visual_encoder = ResnetTensorGoalEncoder(
self.observation_space,
goal_sensor_uuid,
resnet_preprocessor_uuid,
goal_dims,
resnet_compressor_hidden_out_dims,
combiner_hidden_out_dims,
)
self.state_encoder = RNNStateEncoder(
self.goal_visual_encoder.output_dims, rnn_hidden_size,
)
self.actor_critic = LinearActorCriticHead(self.hidden_size, action_space.n)
self.train()
@property
def recurrent_hidden_state_size(
self,
) -> Union[int, Dict[str, Tuple[Sequence[Tuple[str, Optional[int]]], torch.dtype]]]:
"""The recurrent hidden state size of the model."""
return self.hidden_size
@property
def is_blind(self) -> bool:
"""True if the model is blind (e.g. neither 'depth' or 'rgb' is an
input observation type)."""
return self.goal_visual_encoder.is_blind
@property
def num_recurrent_layers(self) -> int:
"""Number of recurrent hidden layers."""
return self.state_encoder.num_recurrent_layers
def _recurrent_memory_specification(self):
return {
"rnn_hidden": (
(
("layer", self.state_encoder.num_recurrent_layers),
("sampler", None),
("hidden", self.hidden_size),
),
torch.float32,
)
}
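# Note (added for clarity, not part of the original source): the memory
# specification maps each memory key to a tuple of named dimensions plus a
# dtype; a size of `None` (here for "sampler") is expected to be filled in at
# runtime based on the number of active task samplers. For instance, with a
# single-layer GRU and rnn_hidden_size=512 this spec describes an
# "rnn_hidden" tensor of shape [1, num_samplers, 512].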
def get_object_type_encoding(
self, observations: Dict[str, torch.FloatTensor]
) -> torch.FloatTensor:
"""Get the object type encoding from input batched observations."""
return self.goal_visual_encoder.get_object_type_encoding(observations)
def forward( # type:ignore
self,
observations: ObservationType,
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
x = self.goal_visual_encoder(observations)
x, rnn_hidden_states = self.state_encoder(x, memory.tensor("rnn_hidden"), masks)
dists, vals = self.actor_critic(x)
return (
ActorCriticOutput(distributions=dists, values=vals, extras={},),
memory.set_tensor("rnn_hidden", rnn_hidden_states),
)
class ResnetFasterRCNNTensorsGoalEncoder(nn.Module):
def __init__(
self,
observation_spaces: SpaceDict,
goal_sensor_uuid: str,
resnet_preprocessor_uuid: str,
detector_preprocessor_uuid: str,
class_dims: int = 32,
max_dets: int = 3,
resnet_compressor_hidden_out_dims: Tuple[int, int] = (128, 32),
box_embedder_hidden_out_dims: Tuple[int, int] = (128, 32),
class_embedder_hidden_out_dims: Tuple[int, int] = (128, 32),
combiner_hidden_out_dims: Tuple[int, int] = (128, 32),
) -> None:
super().__init__()
self.goal_uuid = goal_sensor_uuid
self.resnet_uuid = resnet_preprocessor_uuid
self.detector_uuid = detector_preprocessor_uuid
self.class_dims = class_dims
self.max_dets = max_dets
self.resnet_hid_out_dims = resnet_compressor_hidden_out_dims
self.box_hid_out_dims = box_embedder_hidden_out_dims
self.class_hid_out_dims = class_embedder_hidden_out_dims
self.combine_hid_out_dims = combiner_hidden_out_dims
self.embed_class = nn.Embedding(
num_embeddings=observation_spaces.spaces[self.goal_uuid].n,
embedding_dim=self.class_dims,
)
self.blind = (self.resnet_uuid not in observation_spaces.spaces) and (
self.detector_uuid not in observation_spaces.spaces
)
if not self.blind:
self.resnet_tensor_shape = observation_spaces.spaces[self.resnet_uuid].shape
self.resnet_compressor = nn.Sequential(
nn.Conv2d(self.resnet_tensor_shape[0], self.resnet_hid_out_dims[0], 1),
nn.ReLU(),
nn.Conv2d(*self.resnet_hid_out_dims[0:2], 1),
nn.ReLU(),
)
self.box_tensor_shape = (
observation_spaces.spaces[self.detector_uuid]
.spaces["frcnn_boxes"]
.shape
)
assert (
self.box_tensor_shape[1:] == self.resnet_tensor_shape[1:]
), "Spatial dimensions of object detector and resnet tensor do not match: {} vs {}".format(
self.box_tensor_shape, self.resnet_tensor_shape
)
self.box_embedder = nn.Sequential(
nn.Conv2d(self.box_tensor_shape[0], self.box_hid_out_dims[0], 1),
nn.ReLU(),
nn.Conv2d(*self.box_hid_out_dims[0:2], 1),
nn.ReLU(),
)
self.class_combiner = nn.Sequential(
nn.Conv2d(
self.max_dets * self.class_dims, self.class_hid_out_dims[0], 1
),
nn.ReLU(),
nn.Conv2d(*self.class_hid_out_dims[0:2], 1),
nn.ReLU(),
)
self.target_obs_combiner = nn.Sequential(
nn.Conv2d(
self.resnet_hid_out_dims[1]
+ self.box_hid_out_dims[1]
+ self.class_hid_out_dims[1]
+ self.class_dims,
self.combine_hid_out_dims[0],
1,
),
nn.ReLU(),
nn.Conv2d(*self.combine_hid_out_dims[0:2], 1),
)
@property
def is_blind(self):
return self.blind
@property
def output_dims(self):
if self.blind:
return self.class_dims
else:
return (
self.combine_hid_out_dims[-1]
* self.resnet_tensor_shape[1]
* self.resnet_tensor_shape[2]
)
def get_object_type_encoding(
self, observations: Dict[str, torch.FloatTensor]
) -> torch.FloatTensor:
"""Get the object type encoding from input batched observations."""
return cast(
torch.FloatTensor,
self.embed_class(observations[self.goal_uuid].to(torch.int64)),
)
def compress_resnet(self, observations):
return self.resnet_compressor(observations[self.resnet_uuid])
def distribute_target(self, observations):
target_emb = self.embed_class(observations[self.goal_uuid])
return target_emb.view(-1, self.class_dims, 1, 1).expand(
-1, -1, self.resnet_tensor_shape[-2], self.resnet_tensor_shape[-1]
)
def embed_classes(self, observations):
classes = observations[self.detector_uuid]["frcnn_classes"]
classes = classes.permute(0, 2, 3, 1).contiguous() # move classes to last dim
classes_shape = classes.shape
class_emb = self.embed_class(classes.view(-1)) # (flattened)
class_emb = class_emb.view(
classes_shape[:-1] + (self.max_dets * class_emb.shape[-1],)
) # align embedding along last dimension
class_emb = class_emb.permute(
0, 3, 1, 2
).contiguous() # convert into image tensor
return self.class_combiner(class_emb)
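# Note (added for clarity, not part of the original source): `embed_classes`
# takes the detector's class ids of shape (batch, max_dets, H, W), moves the
# detection dimension last, embeds every id to `class_dims` features,
# concatenates the `max_dets` embeddings along the last axis, and permutes
# back to an image-like tensor of shape (batch, max_dets * class_dims, H, W)
# so that `class_combiner`'s 1x1 convolutions can mix the per-detection
# embeddings at every spatial location. Shapes here are illustrative.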
def embed_boxes(self, observations):
return self.box_embedder(observations[self.detector_uuid]["frcnn_boxes"])
def adapt_input(self, observations):
boxes = observations[self.detector_uuid]["frcnn_boxes"]
classes = observations[self.detector_uuid]["frcnn_classes"]
use_agent = False
nagent = 1
if len(boxes.shape) == 6:
use_agent = True
nstep, nsampler, nagent = boxes.shape[:3]
else:
nstep, nsampler = boxes.shape[:2]
observations[self.detector_uuid]["frcnn_boxes"] = boxes.view(
-1, *boxes.shape[-3:]
)
observations[self.detector_uuid]["frcnn_classes"] = classes.view(
-1, *classes.shape[-3:]
)
observations[self.goal_uuid] = observations[self.goal_uuid].view(-1, 1)
return observations, use_agent, nstep, nsampler, nagent
@staticmethod
def adapt_output(x, use_agent, nstep, nsampler, nagent):
if use_agent:
return x.view(nstep, nsampler, nagent, -1)
return x.view(nstep, nsampler * nagent, -1)
def forward(self, observations):
observations, use_agent, nstep, nsampler, nagent = self.adapt_input(
observations
)
if self.blind:
return self.embed_class(observations[self.goal_uuid])
embs = [
self.compress_resnet(observations),
self.embed_boxes(observations),
self.embed_classes(observations),
self.distribute_target(observations),
]
x = self.target_obs_combiner(torch.cat(embs, dim=-3,))
x = x.reshape(x.size(0), -1) # flatten
return self.adapt_output(x, use_agent, nstep, nsampler, nagent)
class ResnetFasterRCNNTensorsObjectNavActorCritic(ActorCriticModel[CategoricalDistr]):
def __init__(
self,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
goal_sensor_uuid: str,
resnet_preprocessor_uuid: str,
detector_preprocessor_uuid: str,
rnn_hidden_size=512,
goal_dims: int = 32,
max_dets: int = 3,
resnet_compressor_hidden_out_dims: Tuple[int, int] = (128, 32),
box_embedder_hidden_out_dims: Tuple[int, int] = (128, 32),
class_embedder_hidden_out_dims: Tuple[int, int] = (128, 32),
combiner_hidden_out_dims: Tuple[int, int] = (128, 32),
):
super().__init__(
action_space=action_space, observation_space=observation_space,
)
self.hidden_size = rnn_hidden_size
self.goal_visual_encoder = ResnetFasterRCNNTensorsGoalEncoder(
self.observation_space,
goal_sensor_uuid,
resnet_preprocessor_uuid,
detector_preprocessor_uuid,
goal_dims,
max_dets,
resnet_compressor_hidden_out_dims,
box_embedder_hidden_out_dims,
class_embedder_hidden_out_dims,
combiner_hidden_out_dims,
)
self.state_encoder = RNNStateEncoder(
self.goal_visual_encoder.output_dims, rnn_hidden_size,
)
self.actor_critic = LinearActorCriticHead(self.hidden_size, action_space.n)
self.train()
@property
def recurrent_hidden_state_size(self) -> int:
"""The recurrent hidden state size of the model."""
return self.hidden_size
@property
def is_blind(self) -> bool:
"""True if the model is blind (e.g. neither 'depth' or 'rgb' is an
input observation type)."""
return self.goal_visual_encoder.is_blind
@property
def num_recurrent_layers(self) -> int:
"""Number of recurrent hidden layers."""
return self.state_encoder.num_recurrent_layers
def _recurrent_memory_specification(self):
return {
"rnn_hidden": (
(
("layer", self.state_encoder.num_recurrent_layers),
("sampler", None),
("hidden", self.hidden_size),
),
torch.float32,
)
}
def get_object_type_encoding(
self, observations: Dict[str, torch.FloatTensor]
) -> torch.FloatTensor:
"""Get the object type encoding from input batched observations."""
return self.goal_visual_encoder.get_object_type_encoding(observations)
def forward( # type:ignore
self,
observations: ObservationType,
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
x = self.goal_visual_encoder(observations)
x, rnn_hidden_states = self.state_encoder(x, memory.tensor("rnn_hidden"), masks)
dists, vals = self.actor_critic(x)
return (
ActorCriticOutput(distributions=dists, values=vals, extras={},),
memory.set_tensor("rnn_hidden", rnn_hidden_states),
)
class TupleLinearActorCriticHead(LinearActorCriticHead):
def forward(self, x):
out = self.actor_and_critic(x)
logits = out[..., :-1]
values = out[..., -1:]
# noinspection PyArgumentList
return (
TupleCategoricalDistr(logits=logits), # [steps, samplers, ...]
values.view(*values.shape[:2], -1), # [steps, samplers, flattened]
)
class NavToPartnerActorCriticSimpleConvRNN(ActorCriticModel[TupleCategoricalDistr]):
def __init__(
self,
action_space: gym.spaces.Tuple,
observation_space: SpaceDict,
rgb_uuid: Optional[str] = "rgb",
hidden_size=512,
num_rnn_layers=1,
rnn_type="GRU",
):
super().__init__(action_space=action_space, observation_space=observation_space)
self._hidden_size = hidden_size
self.rgb_uuid = rgb_uuid
self.visual_encoder = SimpleCNN(
observation_space=observation_space,
output_size=hidden_size,
rgb_uuid=self.rgb_uuid,
depth_uuid=None,
)
self.state_encoder = RNNStateEncoder(
0 if self.is_blind else self.recurrent_hidden_state_size,
self._hidden_size,
num_layers=num_rnn_layers,
rnn_type=rnn_type,
)
self.actor_critic = TupleLinearActorCriticHead(
self._hidden_size, action_space[0].n
)
self.train()
@property
def output_size(self):
return self._hidden_size
@property
def is_blind(self):
return self.visual_encoder.is_blind
@property
def num_recurrent_layers(self):
return self.state_encoder.num_recurrent_layers
@property
def recurrent_hidden_state_size(self):
return self._hidden_size
@property
def num_agents(self):
return len(self.action_space)
def _recurrent_memory_specification(self):
return dict(
rnn=(
(
("layer", self.num_recurrent_layers),
("sampler", None),
("agent", self.num_agents),
("hidden", self.recurrent_hidden_state_size),
),
torch.float32,
)
)
def forward( # type:ignore
self,
observations: ObservationType,
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
if not self.is_blind:
perception_embed = self.visual_encoder(observations)
else:
# TODO manage blindness for all agents simultaneously or separate?
raise NotImplementedError()
# TODO alternative where all agents consume all observations
x, rnn_hidden_states = self.state_encoder(
perception_embed, memory.tensor("rnn"), masks
)
dists, vals = self.actor_critic(x)
return (
ActorCriticOutput(distributions=dists, values=vals, extras={},),
memory.set_tensor("rnn", rnn_hidden_states),
)
| ask4help-main | allenact_plugins/robothor_plugin/robothor_models.py |
ask4help-main | allenact_plugins/robothor_plugin/__init__.py |
|
import math
from typing import Tuple, List, Dict, Any, Optional, Union, Sequence, cast
import gym
import numpy as np
from allenact.base_abstractions.misc import RLStepResult
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import Task
from allenact.utils.system import get_logger
from allenact.utils.tensor_utils import tile_images
from allenact_plugins.ithor_plugin.ithor_environment import IThorEnvironment
from allenact_plugins.robothor_plugin.robothor_constants import (
MOVE_AHEAD,
ROTATE_LEFT,
ROTATE_RIGHT,
END,
LOOK_UP,
LOOK_DOWN,
)
from allenact_plugins.robothor_plugin.robothor_environment import RoboThorEnvironment
def spl_metric(
success: bool, optimal_distance: float, travelled_distance: float
) -> Optional[float]:
if not success:
return 0.0
elif optimal_distance < 0:
return None
elif optimal_distance == 0:
if travelled_distance == 0:
return 1.0
else:
return 0.0
else:
travelled_distance = max(travelled_distance, optimal_distance)
return optimal_distance / travelled_distance
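# Illustrative examples (not part of the original source) of the SPL metric
# computed above: a successful episode with optimal_distance=2.0 and
# travelled_distance=4.0 yields 2.0 / 4.0 = 0.5; any unsuccessful episode
# yields 0.0; a negative optimal_distance (no shortest path available)
# yields None so callers can decide how to treat it.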
class PointNavTask(Task[RoboThorEnvironment]):
_actions = (MOVE_AHEAD, ROTATE_LEFT, ROTATE_RIGHT, END)
def __init__(
self,
env: RoboThorEnvironment,
sensors: List[Sensor],
task_info: Dict[str, Any],
max_steps: int,
reward_configs: Dict[str, Any],
**kwargs,
) -> None:
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self.reward_configs = reward_configs
self._took_end_action: bool = False
self._success: Optional[bool] = False
self.last_geodesic_distance = self.env.distance_to_point(
self.task_info["target"]
)
self.optimal_distance = self.last_geodesic_distance
self._rewards: List[float] = []
self._distance_to_goal: List[float] = []
self._metrics = None
self.path: List[
Any
] = [] # the initial coordinate will be directly taken from the optimal path
self.travelled_distance = 0.0
self.task_info["followed_path"] = [self.env.agent_state()]
self.task_info["action_names"] = self.action_names()
@property
def action_space(self):
return gym.spaces.Discrete(len(self._actions))
def reached_terminal_state(self) -> bool:
return self._took_end_action
@classmethod
def class_action_names(cls, **kwargs) -> Tuple[str, ...]:
return cls._actions
def close(self) -> None:
self.env.stop()
def _step(self, action: Union[int, Sequence[int]]) -> RLStepResult:
assert isinstance(action, int)
action = cast(int, action)
action_str = self.class_action_names()[action]
if action_str == END:
self._took_end_action = True
self._success = self._is_goal_in_range()
self.last_action_success = self._success
else:
self.env.step({"action": action_str})
self.last_action_success = self.env.last_action_success
pose = self.env.agent_state()
self.path.append({k: pose[k] for k in ["x", "y", "z"]})
self.task_info["followed_path"].append(pose)
if len(self.path) > 1:
self.travelled_distance += IThorEnvironment.position_dist(
p0=self.path[-1], p1=self.path[-2], ignore_y=True
)
step_result = RLStepResult(
observation=self.get_observations(),
reward=self.judge(),
done=self.is_done(),
info={"last_action_success": self.last_action_success, "action": action},
)
return step_result
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
assert mode in ["rgb", "depth"], "only rgb and depth rendering is implemented"
if mode == "rgb":
return self.env.current_frame
elif mode == "depth":
return self.env.current_depth
def _is_goal_in_range(self) -> Optional[bool]:
tget = self.task_info["target"]
dist = self.dist_to_target()
if -0.5 < dist <= 0.2:
return True
elif dist > 0.2:
return False
else:
get_logger().debug(
"No path for {} from {} to {}".format(
self.env.scene_name, self.env.agent_state(), tget
)
)
return None
def shaping(self) -> float:
rew = 0.0
if self.reward_configs["shaping_weight"] == 0.0:
return rew
geodesic_distance = self.dist_to_target()
if geodesic_distance == -1.0:
geodesic_distance = self.last_geodesic_distance
if (
self.last_geodesic_distance > -0.5 and geodesic_distance > -0.5
): # (robothor limits)
rew += self.last_geodesic_distance - geodesic_distance
self.last_geodesic_distance = geodesic_distance
return rew * self.reward_configs["shaping_weight"]
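# Note (added for clarity, not part of the original source): the shaping term
# above rewards reductions in geodesic distance to the target. With a
# hypothetical shaping_weight of 1.0, moving from 3.00m to 2.75m away yields
# a shaping reward of 0.25; a distance of -1.0 (distance computation failed)
# falls back to the last valid distance, and the -0.5 guards skip RoboTHOR's
# "no path" sentinel values.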
def judge(self) -> float:
"""Judge the last event."""
reward = self.reward_configs["step_penalty"]
reward += self.shaping()
if self._took_end_action:
if self._success is not None:
reward += (
self.reward_configs["goal_success_reward"]
if self._success
else self.reward_configs["failed_stop_reward"]
)
elif self.num_steps_taken() + 1 >= self.max_steps:
reward += self.reward_configs.get("reached_max_steps_reward", 0.0)
self._rewards.append(float(reward))
return float(reward)
def dist_to_target(self):
return self.env.distance_to_point(self.task_info["target"])
def metrics(self) -> Dict[str, Any]:
if not self.is_done():
return {}
total_reward = float(np.sum(self._rewards))
self._rewards = []
if self._success is None:
return {}
dist2tget = self.dist_to_target()
spl = spl_metric(
success=self._success,
optimal_distance=self.optimal_distance,
travelled_distance=self.travelled_distance,
)
metrics = {
**super(PointNavTask, self).metrics(),
"success": self._success, # False also if no path to target
"total_reward": total_reward,
"dist_to_target": dist2tget,
"spl": 0 if spl is None else spl,
}
return metrics
class ObjectNavTask(Task[RoboThorEnvironment]):
_actions = (MOVE_AHEAD, ROTATE_LEFT, ROTATE_RIGHT, END, LOOK_UP, LOOK_DOWN)
def __init__(
self,
env: RoboThorEnvironment,
sensors: List[Sensor],
task_info: Dict[str, Any],
max_steps: int,
reward_configs: Dict[str, Any],
**kwargs,
) -> None:
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self.reward_configs = reward_configs
self._took_end_action: bool = False
self._success: Optional[bool] = False
self.mirror = task_info["mirrored"]
self._all_metadata_available = env.all_metadata_available
self._rewards: List[float] = []
self._distance_to_goal: List[float] = []
self._metrics = None
self.path: List = (
[]
) # the initial coordinate will be directly taken from the optimal path
self.travelled_distance = 0.0
self.task_info["followed_path"] = [self.env.agent_state()]
self.task_info["taken_actions"] = []
self.task_info["action_names"] = self.class_action_names()
self.task_info["taken_ask_actions"] = []
if self._all_metadata_available:
self.last_geodesic_distance = self.env.distance_to_object_type(
self.task_info["object_type"]
)
self.optimal_distance = self.last_geodesic_distance
self.closest_geo_distance = self.last_geodesic_distance
self.last_expert_action: Optional[int] = None
self.agent_asked_for_help = False
self.num_steps_expert = 0
self.help_asked_at_all = False
self.false_stop = 0
self.asked_init_help_flag = False
self.expert_action_span = 0
self.max_expert_span = 0
self.expert_ends_traj = False
self.expert_took_step = False
self.penalty_given_once = False
@property
def action_space(self):
return gym.spaces.Discrete(len(self._actions))
def reached_terminal_state(self) -> bool:
return self._took_end_action
@classmethod
def class_action_names(cls, **kwargs) -> Tuple[str, ...]:
return cls._actions
def close(self) -> None:
self.env.stop()
def _step(self, action: Union[int, Sequence[int]]) -> RLStepResult:
ask_action = action["ask_action"]
ask_action = cast(int, ask_action)
if ask_action == 0:
# print ('expert takes step')
ask_action_str = "start_asking"
self.agent_asked_for_help = True
self.help_asked_at_all = True
self.expert_action_span += 1
self.max_expert_span = max(self.expert_action_span, self.max_expert_span)
if ask_action == 1:
# print ('agent takes step')
ask_action_str = "stop_asking"
self.agent_asked_for_help = False
# self.max_expert_span = max(self.expert_action_span, self.max_expert_span)
self.expert_action_span = 0  # reset counter
'''
if ask_action==1:
# print ('start asking for help')
self.agent_asked_for_help = True
self.help_asked_at_all = True
self.expert_action_span+=1
self.asked_init_help_flag = False
# self.max_steps = 5e5
if ask_action==2:
# print ('stop asking')
self.agent_asked_for_help = False
self.max_expert_span = max(self.expert_action_span,self.max_expert_span)
self.expert_action_span = 0 ##reset counter
if ask_action==0:
# print ('do nothing')
self.asked_init_help_flag = True
if ask_action==3:
# print ('ask policy called END')
# self._took_end_action = True
# self._success = self._is_goal_in_range()
# if not self._success:
# self.false_stop = 1
# self.last_action_success = self._success
self.agent_asked_for_help = False
action_str = END
'''
action = action["nav_action"]
assert isinstance(action, int)
action = cast(int, action)
if self.agent_asked_for_help:
self.num_steps_expert += 1
action_str = self.class_action_names()[action]
if self.mirror:
if action_str == ROTATE_RIGHT:
action_str = ROTATE_LEFT
elif action_str == ROTATE_LEFT:
action_str = ROTATE_RIGHT
self.task_info["taken_actions"].append(action_str)
self.task_info["taken_ask_actions"].append(ask_action_str)
if action_str == END:
if self.expert_took_step:
self.expert_ends_traj = True
# if ask_action==3:
# print ('logic error in ask action END')
# exit()
self._took_end_action = True
self._success = self._is_goal_in_range()
if not self._success:
self.false_stop = 1
self.last_action_success = self._success
else:
self.env.step({"action": action_str})
self.last_action_success = self.env.last_action_success
pose = self.env.agent_state()
self.path.append({k: pose[k] for k in ["x", "y", "z"]})
self.task_info["followed_path"].append(pose)
if ask_action == 0:
self.expert_took_step = True
else:
self.expert_took_step = False
if len(self.path) > 1:
self.travelled_distance += IThorEnvironment.position_dist(
p0=self.path[-1], p1=self.path[-2], ignore_y=True
)
step_result = RLStepResult(
observation=self.get_observations(),
reward=self.judge(),
done=self.is_done(),
info={"last_action_success": self.last_action_success, "action": action},
)
return step_result
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
assert mode in ["rgb", "depth"], "only rgb and depth rendering is implemented"
if mode == "rgb":
frame = self.env.current_frame.copy()
elif mode == "depth":
frame = self.env.current_depth.copy()
else:
raise NotImplementedError(f"Mode '{mode}' is not supported.")
if self.mirror:
frame = frame[:, ::-1, :].copy() # horizontal flip
# print("mirrored render")
return frame
def _is_goal_in_range(self) -> bool:
return any(
o["objectType"] == self.task_info["object_type"]
for o in self.env.visible_objects()
)
def shaping(self) -> float:
rew = 0.0
if self.reward_configs["shaping_weight"] == 0.0:
return rew
geodesic_distance = self.env.distance_to_object_type(
self.task_info["object_type"]
)
# Ensuring the reward magnitude is not greater than the total distance moved
max_reward_mag = 0.0
if len(self.path) >= 2:
p0, p1 = self.path[-2:]
max_reward_mag = math.sqrt(
(p0["x"] - p1["x"]) ** 2 + (p0["z"] - p1["z"]) ** 2
)
if self.reward_configs.get("positive_only_reward", False):
if geodesic_distance > 0.5:
rew = max(self.closest_geo_distance - geodesic_distance, 0)
else:
if (
self.last_geodesic_distance > -0.5 and geodesic_distance > -0.5
): # (robothor limits)
rew += self.last_geodesic_distance - geodesic_distance
self.last_geodesic_distance = geodesic_distance
self.closest_geo_distance = min(self.closest_geo_distance, geodesic_distance)
return (
max(min(rew, max_reward_mag), -max_reward_mag,)
* self.reward_configs["shaping_weight"]
)
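# Note (added for clarity, not part of the original source): unlike the
# PointNav shaping above, this version clamps the shaping term to the
# Euclidean distance actually moved in the last step (max_reward_mag), so a
# noisy geodesic-distance estimate can never produce a reward larger in
# magnitude than the agent's own motion. With positive_only_reward enabled,
# only progress toward the target (while still more than 0.5m away) is
# rewarded and regressions are ignored.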
def judge(self) -> float:
"""Judge the last event."""
reward = self.reward_configs["step_penalty"]
reward += self.shaping()
'''
if self.help_asked_at_all and (self.asked_init_help_flag is False):
# print ('give initial ask penalty')
if not self.penalty_given_once:
# print ('given initial ask')
reward += self.reward_configs['penalty_for_init_ask']
self.penalty_given_once = True
else:
# print ('given recurring')
reward += self.reward_configs['penalty_for_ask_recurring']
self.asked_init_help_flag = True
'''
# for 2 actions
if self.help_asked_at_all:
if not self.penalty_given_once:
reward += self.reward_configs["penalty_for_init_ask"]
self.penalty_given_once = True
if self.agent_asked_for_help:
# print ('step ask penalty')
reward += self.reward_configs["penalty_for_step_ask"]
if self._took_end_action:
if self._success:
reward += self.reward_configs["goal_success_reward"]
else:
reward += self.reward_configs["failed_stop_reward"]
elif self.num_steps_taken() + 1 >= self.max_steps:
self.false_stop = 1
reward += self.reward_configs["failed_stop_reward"]
# reward += self.reward_configs.get("reached_max_steps_reward", 0.0)
self._rewards.append(float(reward))
return float(reward)
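# Note (added for clarity, not part of the original source): on top of the
# usual step penalty, shaping, and terminal success/failure rewards, the
# ask-for-help variant of judge() applies a one-time penalty_for_init_ask the
# first time help is requested and a per-step penalty_for_step_ask on every
# step taken while the expert is in control. As hypothetical numbers, with
# step_penalty=-0.01, penalty_for_init_ask=-0.3, and penalty_for_step_ask=-0.01,
# the first step after asking for help costs roughly -0.32 before shaping.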
def get_observations(self, **kwargs) -> Any:
obs = self.sensor_suite.get_observations(env=self.env, task=self)
if self.mirror:
for o in obs:
if ("rgb" in o or "depth" in o) and isinstance(obs[o], np.ndarray):
if (
len(obs[o].shape) == 3
): # heuristic to determine this is a visual sensor
obs[o] = obs[o][:, ::-1, :].copy() # horizontal flip
elif len(obs[o].shape) == 2: # perhaps only two axes for depth?
obs[o] = obs[o][:, ::-1].copy() # horizontal flip
return obs
def metrics(self) -> Dict[str, Any]:
if not self.is_done():
return {}
metrics = super(ObjectNavTask, self).metrics()
if self._all_metadata_available:
dist2tget = self.env.distance_to_object_type(self.task_info["object_type"])
spl = spl_metric(
success=self._success,
optimal_distance=self.optimal_distance,
travelled_distance=self.travelled_distance,
)
expert_action_ratio = self.num_steps_expert / self.num_steps_taken()
metrics = {
**metrics,
"success": self._success,
"total_reward": np.sum(self._rewards),
"dist_to_target": dist2tget,
"part_taken_over_by_expert":expert_action_ratio,
"false_done_actions":self.false_stop,
"helped_asked_at_all":self.help_asked_at_all,
"longest_span_of_expert":self.max_expert_span,
"expert_ends_traj":self.expert_ends_traj,
"spl": 0 if spl is None else spl,
}
return metrics
def query_expert(self, end_action_only: bool = False, **kwargs) -> Tuple[int, bool]:
if not self.agent_asked_for_help:
return 0, False
'''
noise_control = np.random.choice([0,1],p=[0.8,0.2])
if noise_control==0:
action_idx = np.random.choice([0,1,2,4,5],p=[1/5]*5)
#return self.class_action_names().index(action_idx), True
return action_idx, True
'''
if self._is_goal_in_range():
return self.class_action_names().index(END), True
if end_action_only:
return 0, False
else:
try:
self.env.step(
{
"action": "ObjectNavExpertAction",
"objectType": self.task_info["object_type"],
}
)
except ValueError:
raise RuntimeError(
"Attempting to use the action `ObjectNavExpertAction` which is not supported by your version of"
" AI2-THOR. The action `ObjectNavExpertAction` is experimental. In order"
" to enable this action, please install the (in development) version of AI2-THOR. Through pip"
" this can be done with the command"
" `pip install -e git+https://github.com/allenai/ai2thor.git@7d914cec13aae62298f5a6a816adb8ac6946c61f#egg=ai2thor`."
)
if self.env.last_action_success:
expert_action: Optional[str] = self.env.last_event.metadata[
"actionReturn"
]
if isinstance(expert_action, str):
if self.mirror:
if expert_action == "RotateLeft":
expert_action = "RotateRight"
elif expert_action == "RotateRight":
expert_action = "RotateLeft"
return self.class_action_names().index(expert_action), True
else:
# This should have been caught by self._is_goal_in_range()...
return 0, False
else:
return 0, False
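# Note (added for clarity, not part of the original source): the expert is
# only consulted while the agent has asked for help; otherwise (0, False) is
# returned, meaning "no expert action available". If the goal is already
# visible the expert immediately returns END, and otherwise the (experimental)
# AI2-THOR "ObjectNavExpertAction" is queried for the next shortest-path
# action, mirrored if the episode is mirrored.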
class NavToPartnerTask(Task[RoboThorEnvironment]):
_actions = (MOVE_AHEAD, ROTATE_LEFT, ROTATE_RIGHT)
def __init__(
self,
env: RoboThorEnvironment,
sensors: List[Sensor],
task_info: Dict[str, Any],
max_steps: int,
reward_configs: Dict[str, Any],
**kwargs,
) -> None:
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self.reward_configs = reward_configs
assert self.env.agent_count == 2, "NavToPartnerTask only defined for 2 agents!"
pose1 = self.env.agent_state(0)
pose2 = self.env.agent_state(1)
self.last_geodesic_distance = self.env.distance_cache.find_distance(
self.env.scene_name,
{k: pose1[k] for k in ["x", "y", "z"]},
{k: pose2[k] for k in ["x", "y", "z"]},
self.env.distance_from_point_to_point,
)
self.task_info["followed_path1"] = [pose1]
self.task_info["followed_path2"] = [pose2]
self.task_info["action_names"] = self.class_action_names()
@property
def action_space(self):
return gym.spaces.Tuple(
[
gym.spaces.Discrete(len(self._actions)),
gym.spaces.Discrete(len(self._actions)),
]
)
def reached_terminal_state(self) -> bool:
return (
self.last_geodesic_distance <= self.reward_configs["max_success_distance"]
)
@classmethod
def class_action_names(cls, **kwargs) -> Tuple[str, ...]:
return cls._actions
def close(self) -> None:
self.env.stop()
def _step(self, action: Tuple[int, int]) -> RLStepResult:
assert isinstance(action, tuple)
action_str1 = self.class_action_names()[action[0]]
action_str2 = self.class_action_names()[action[1]]
self.env.step({"action": action_str1, "agentId": 0})
self.last_action_success1 = self.env.last_action_success
self.env.step({"action": action_str2, "agentId": 1})
self.last_action_success2 = self.env.last_action_success
pose1 = self.env.agent_state(0)
self.task_info["followed_path1"].append(pose1)
pose2 = self.env.agent_state(1)
self.task_info["followed_path2"].append(pose2)
self.last_geodesic_distance = self.env.distance_cache.find_distance(
self.env.scene_name,
{k: pose1[k] for k in ["x", "y", "z"]},
{k: pose2[k] for k in ["x", "y", "z"]},
self.env.distance_from_point_to_point,
)
step_result = RLStepResult(
observation=self.get_observations(),
reward=self.judge(),
done=self.is_done(),
info={
"last_action_success": [
self.last_action_success1,
self.last_action_success2,
],
"action": action,
},
)
return step_result
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
assert mode in ["rgb", "depth"], "only rgb and depth rendering is implemented"
if mode == "rgb":
return tile_images(self.env.current_frames)
elif mode == "depth":
return tile_images(self.env.current_depths)
def judge(self) -> float:
"""Judge the last event."""
reward = self.reward_configs["step_penalty"]
if self.reached_terminal_state():
reward += self.reward_configs["success_reward"]
return reward # reward shared by both agents (no shaping)
def metrics(self) -> Dict[str, Any]:
if not self.is_done():
return {}
return {
**super().metrics(),
"success": self.reached_terminal_state(),
}
| ask4help-main | allenact_plugins/robothor_plugin/robothor_tasks.py |
from typing import Any, Tuple, Optional, Union
import gym
import numpy as np
import quaternion # noqa # pylint: disable=unused-import
from allenact.base_abstractions.sensor import Sensor
from allenact.embodiedai.sensors.vision_sensors import RGBSensor, DepthSensor
from allenact.base_abstractions.task import Task
from allenact.utils.misc_utils import prepare_locals_for_super
from allenact.utils.system import get_logger
from allenact_plugins.ithor_plugin.ithor_environment import IThorEnvironment
from allenact_plugins.ithor_plugin.ithor_sensors import RGBSensorThor
from allenact_plugins.robothor_plugin.robothor_environment import RoboThorEnvironment
from allenact_plugins.robothor_plugin.robothor_tasks import PointNavTask
class RGBSensorRoboThor(RGBSensorThor):
"""Sensor for RGB images in RoboTHOR.
Returns the current RGB frame corresponding to the agent's egocentric
view from a running RoboThorEnvironment instance.
"""
def __init__(self, *args: Any, **kwargs: Any):
get_logger().warning(
"`RGBSensorRoboThor` is deprecated, use `RGBSensorThor` instead."
)
super().__init__(*args, **kwargs)
class RGBSensorMultiRoboThor(RGBSensor[RoboThorEnvironment, Task[RoboThorEnvironment]]):
"""Sensor for RGB images in RoboTHOR.
Returns the current RGB frame corresponding to the agent's egocentric
view from a running RoboThorEnvironment instance.
"""
def __init__(self, agent_count: int = 2, **kwargs):
# TODO take all named args from superclass and pass with super().__init__(**prepare_locals_for_super(locals()))
super().__init__(**kwargs)
self.agent_count = agent_count
self.agent_id = 0
def frame_from_env(
self, env: RoboThorEnvironment, task: Optional[Task[RoboThorEnvironment]]
) -> np.ndarray:
return env.current_frames[self.agent_id].copy()
def get_observation(
self,
env: RoboThorEnvironment,
task: Task[RoboThorEnvironment],
*args: Any,
**kwargs: Any
) -> Any:
obs = []
for self.agent_id in range(self.agent_count):
obs.append(super().get_observation(env, task, *args, **kwargs))
return np.stack(obs, axis=0) # agents x width x height x channels
class GPSCompassSensorRoboThor(Sensor[RoboThorEnvironment, PointNavTask]):
def __init__(self, uuid: str = "target_coordinates_ind", **kwargs: Any):
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self):
return gym.spaces.Box(
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
shape=(2,),
dtype=np.float32,
)
@staticmethod
def _compute_pointgoal(
source_position: np.ndarray,
source_rotation: np.quaternion,
goal_position: np.ndarray,
):
direction_vector = goal_position - source_position
direction_vector_agent = GPSCompassSensorRoboThor.quaternion_rotate_vector(
source_rotation.inverse(), direction_vector
)
rho, phi = GPSCompassSensorRoboThor.cartesian_to_polar(
direction_vector_agent[2], -direction_vector_agent[0]
)
return np.array([rho, phi], dtype=np.float32)
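# Note (added for clarity, not part of the original source): the point-goal
# observation is expressed in agent-centric polar coordinates: the world-frame
# offset to the goal is rotated into the agent's frame using the inverse of
# its rotation quaternion, then converted to (rho, phi) where rho is the
# planar distance and phi the heading relative to the agent's facing
# direction. For example, a goal lying directly ahead at 2m maps to roughly
# (2.0, 0.0); the sign of phi for left/right goals depends on the coordinate
# convention assumed here.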
@staticmethod
def quaternion_from_y_angle(angle: float) -> np.quaternion:
r"""Creates a quaternion from rotation angle around y axis"""
return GPSCompassSensorRoboThor.quaternion_from_coeff(
np.array(
[0.0, np.sin(np.pi * angle / 360.0), 0.0, np.cos(np.pi * angle / 360.0)]
)
)
@staticmethod
def quaternion_from_coeff(coeffs: np.ndarray) -> np.quaternion:
r"""Creates a quaternions from coeffs in [x, y, z, w] format"""
quat = np.quaternion(0, 0, 0, 0)
quat.real = coeffs[3]
quat.imag = coeffs[0:3]
return quat
@staticmethod
def cartesian_to_polar(x, y):
rho = np.sqrt(x ** 2 + y ** 2)
phi = np.arctan2(y, x)
return rho, phi
@staticmethod
def quaternion_rotate_vector(quat: np.quaternion, v: np.array) -> np.array:
r"""Rotates a vector by a quaternion
Args:
quat: The quaternion to rotate by
v: The vector to rotate
Returns:
np.array: The rotated vector
"""
vq = np.quaternion(0, 0, 0, 0)
vq.imag = v
return (quat * vq * quat.inverse()).imag
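# Note (added for clarity, not part of the original source): the rotation
# above implements v' = q * (0, v) * q^{-1}, embedding the vector as a pure
# quaternion (zero real part) and reading the rotated vector back from the
# imaginary part of the product.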
def get_observation(
self,
env: RoboThorEnvironment,
task: Optional[PointNavTask],
*args: Any,
**kwargs: Any
) -> Any:
agent_state = env.agent_state()
agent_position = np.array([agent_state[k] for k in ["x", "y", "z"]])
rotation_world_agent = self.quaternion_from_y_angle(
agent_state["rotation"]["y"]
)
goal_position = np.array([task.task_info["target"][k] for k in ["x", "y", "z"]])
return self._compute_pointgoal(
agent_position, rotation_world_agent, goal_position
)
class DepthSensorThor(
DepthSensor[
Union[IThorEnvironment, RoboThorEnvironment],
Union[Task[IThorEnvironment], Task[RoboThorEnvironment]],
],
):
def __init__(
self,
use_resnet_normalization: Optional[bool] = None,
use_normalization: Optional[bool] = None,
mean: Optional[np.ndarray] = np.array([[0.5]], dtype=np.float32),
stdev: Optional[np.ndarray] = np.array([[0.25]], dtype=np.float32),
height: Optional[int] = None,
width: Optional[int] = None,
uuid: str = "depth",
output_shape: Optional[Tuple[int, ...]] = None,
output_channels: int = 1,
unnormalized_infimum: float = 0.0,
unnormalized_supremum: float = 5.0,
scale_first: bool = False,
**kwargs: Any
):
# Give priority to use_normalization, but use_resnet_normalization for backward compat. if not set
if use_resnet_normalization is not None and use_normalization is None:
use_normalization = use_resnet_normalization
elif use_normalization is None:
use_normalization = False
super().__init__(**prepare_locals_for_super(locals()))
def frame_from_env(
self, env: RoboThorEnvironment, task: Optional[Task[RoboThorEnvironment]]
) -> np.ndarray:
return env.controller.last_event.depth_frame
class DepthSensorRoboThor(DepthSensorThor):
# For backwards compatibility
def __init__(self, *args: Any, **kwargs: Any):
get_logger().warning(
"`DepthSensorRoboThor` is deprecated, use `DepthSensorThor` instead."
)
super().__init__(*args, **kwargs)
class RewardConfigSensor(Sensor):
def __init__(self, uuid: str = "reward_config_sensor", **kwargs: Any):
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self):
return gym.spaces.Discrete(1)
def get_observation(self, env, task, *args: Any, **kwargs: Any) -> Any:
config_idx = task.task_info["reward_config_idx"]
return np.array(config_idx)
| ask4help-main | allenact_plugins/robothor_plugin/robothor_sensors.py |
import copy
import json
import math
import os
from typing import Tuple, Sequence, Union, Dict, Optional, Any, cast, Generator, List
import cv2
import numpy as np
from PIL import Image, ImageDraw
from ai2thor.controller import Controller
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
import colour as col
from allenact.utils.system import get_logger
from allenact.utils.viz_utils import TrajectoryViz
ROBOTHOR_VIZ_CACHED_TOPDOWN_VIEWS_DIR = os.path.join(
os.path.expanduser("~"), ".allenact", "robothor", "top_down_viz_cache"
)
class ThorPositionTo2DFrameTranslator(object):
def __init__(
self,
frame_shape_rows_cols: Tuple[int, int],
cam_position: Sequence[float],
orth_size: float,
):
self.frame_shape = frame_shape_rows_cols
self.lower_left = np.array((cam_position[0], cam_position[2])) - orth_size
self.span = 2 * orth_size
def __call__(self, position: Sequence[float]):
if len(position) == 3:
x, _, z = position
else:
x, z = position
camera_position = (np.array((x, z)) - self.lower_left) / self.span
return np.array(
(
round(self.frame_shape[0] * (1.0 - camera_position[1])),
round(self.frame_shape[1] * camera_position[0]),
),
dtype=int,
)
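# Note (added for clarity, not part of the original source): the translator
# maps a world-space (x, z) position into (row, col) pixel coordinates of the
# cached top-down frame: positions are offset by the camera's lower-left
# corner, normalized by the orthographic span (2 * orth_size), and the
# vertical axis is flipped because image row indices increase downward. A
# point at the centre of the camera's view therefore lands near the centre of
# the frame.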
class ThorViz(TrajectoryViz):
def __init__(
self,
path_to_trajectory: Sequence[str] = ("task_info", "followed_path"),
label: str = "thor_trajectory",
figsize: Tuple[float, float] = (8, 4), # width, height
fontsize: float = 10,
scenes: Union[
Tuple[str, int, int, int, int], Sequence[Tuple[str, int, int, int, int]]
] = ("FloorPlan_Val{}_{}", 1, 3, 1, 5),
viz_rows_cols: Tuple[int, int] = (448, 448),
single_color: bool = False,
view_triangle_only_on_last: bool = True,
disable_view_triangle: bool = False,
line_opacity: float = 1.0,
**kwargs
):
super().__init__(
path_to_trajectory=path_to_trajectory,
label=label,
figsize=figsize,
fontsize=fontsize,
**kwargs
)
if isinstance(scenes[0], str):
scenes = [
cast(Tuple[str, int, int, int, int], scenes)
] # make it list of tuples
self.scenes = cast(List[Tuple[str, int, int, int, int]], scenes)
self.room_path = ROBOTHOR_VIZ_CACHED_TOPDOWN_VIEWS_DIR
os.makedirs(self.room_path, exist_ok=True)
self.viz_rows_cols = viz_rows_cols
self.single_color = single_color
self.view_triangle_only_on_last = view_triangle_only_on_last
self.disable_view_triangle = disable_view_triangle
self.line_opacity = line_opacity
# Only needed for rendering
self.map_data: Optional[Dict[str, Any]] = None
self.thor_top_downs: Optional[Dict[str, np.ndarray]] = None
self.controller: Optional[Controller] = None
def init_top_down_render(self):
self.map_data = self.get_translator()
self.thor_top_downs = self.make_top_down_views()
# No controller needed after this point
if self.controller is not None:
self.controller.stop()
self.controller = None
@staticmethod
def iterate_scenes(
all_scenes: Sequence[Tuple[str, int, int, int, int]]
) -> Generator[str, None, None]:
for scenes in all_scenes:
for wall in range(scenes[1], scenes[2] + 1):
for furniture in range(scenes[3], scenes[4] + 1):
roomname = scenes[0].format(wall, furniture)
yield roomname
def cached_map_data_path(self, roomname: str) -> str:
return os.path.join(self.room_path, "map_data__{}.json".format(roomname))
def get_translator(self) -> Dict[str, Any]:
roomname = list(ThorViz.iterate_scenes(self.scenes))[0]
json_file = self.cached_map_data_path(roomname)
if not os.path.exists(json_file):
self.make_controller()
self.controller.reset(roomname)
map_data = self.get_agent_map_data()
get_logger().info("Dumping {}".format(json_file))
with open(json_file, "w") as f:
json.dump(map_data, f, indent=4, sort_keys=True)
else:
with open(json_file, "r") as f:
map_data = json.load(f)
pos_translator = ThorPositionTo2DFrameTranslator(
self.viz_rows_cols,
self.position_to_tuple(map_data["cam_position"]),
map_data["cam_orth_size"],
)
map_data["pos_translator"] = pos_translator
get_logger().debug("Using map_data {}".format(map_data))
return map_data
def cached_image_path(self, roomname: str) -> str:
return os.path.join(
self.room_path, "{}__r{}_c{}.png".format(roomname, *self.viz_rows_cols)
)
def make_top_down_views(self) -> Dict[str, np.ndarray]:
top_downs = {}
for roomname in self.iterate_scenes(self.scenes):
fname = self.cached_image_path(roomname)
if not os.path.exists(fname):
self.make_controller()
self.dump_top_down_view(roomname, fname)
top_downs[roomname] = cv2.imread(fname)
return top_downs
def crop_viz_image(self, viz_image: np.ndarray) -> np.ndarray:
# Top-down view of room spans vertically near the center of the frame in RoboTHOR:
y_min = int(self.viz_rows_cols[0] * 0.3)
y_max = int(self.viz_rows_cols[0] * 0.8)
# But it covers approximately the entire width:
x_min = 0
x_max = self.viz_rows_cols[1]
cropped_viz_image = viz_image[y_min:y_max, x_min:x_max, :]
return cropped_viz_image
def make_controller(self):
if self.controller is None:
self.controller = Controller()
self.controller.step({"action": "ChangeQuality", "quality": "Very High"})
self.controller.step(
{
"action": "ChangeResolution",
"x": self.viz_rows_cols[1],
"y": self.viz_rows_cols[0],
}
)
def get_agent_map_data(self):
self.controller.step({"action": "ToggleMapView"})
cam_position = self.controller.last_event.metadata["cameraPosition"]
cam_orth_size = self.controller.last_event.metadata["cameraOrthSize"]
to_return = {
"cam_position": cam_position,
"cam_orth_size": cam_orth_size,
}
self.controller.step({"action": "ToggleMapView"})
return to_return
@staticmethod
def position_to_tuple(position: Dict[str, float]) -> Tuple[float, float, float]:
return position["x"], position["y"], position["z"]
@staticmethod
def add_lines_to_map(
ps: Sequence[Any],
frame: np.ndarray,
pos_translator: ThorPositionTo2DFrameTranslator,
opacity: float,
color: Optional[Tuple[int, ...]] = None,
) -> np.ndarray:
if len(ps) <= 1:
return frame
if color is None:
color = (255, 0, 0)
img1 = Image.fromarray(frame.astype("uint8"), "RGB").convert("RGBA")
img2 = Image.new("RGBA", frame.shape[:-1]) # Use RGBA
opacity = int(round(255 * opacity)) # Define transparency for the triangle.
draw = ImageDraw.Draw(img2)
for i in range(len(ps) - 1):
draw.line(
tuple(reversed(pos_translator(ps[i])))
+ tuple(reversed(pos_translator(ps[i + 1]))),
fill=color + (opacity,),
width=int(frame.shape[0] / 100),
)
img = Image.alpha_composite(img1, img2)
return np.array(img.convert("RGB"))
@staticmethod
def add_line_to_map(
p0: Any,
p1: Any,
frame: np.ndarray,
pos_translator: ThorPositionTo2DFrameTranslator,
opacity: float,
color: Optional[Tuple[int, ...]] = None,
) -> np.ndarray:
if p0 == p1:
return frame
if color is None:
color = (255, 0, 0)
img1 = Image.fromarray(frame.astype("uint8"), "RGB").convert("RGBA")
img2 = Image.new("RGBA", frame.shape[:-1]) # Use RGBA
opacity = int(round(255 * opacity)) # Define transparency for the triangle.
draw = ImageDraw.Draw(img2)
draw.line(
tuple(reversed(pos_translator(p0))) + tuple(reversed(pos_translator(p1))),
fill=color + (opacity,),
width=int(frame.shape[0] / 100),
)
img = Image.alpha_composite(img1, img2)
return np.array(img.convert("RGB"))
@staticmethod
def add_agent_view_triangle(
position: Any,
rotation: Dict[str, float],
frame: np.ndarray,
pos_translator: ThorPositionTo2DFrameTranslator,
scale: float = 1.0,
opacity: float = 0.1,
) -> np.ndarray:
p0 = np.array((position[0], position[2]))
p1 = copy.copy(p0)
p2 = copy.copy(p0)
theta = -2 * math.pi * (rotation["y"] / 360.0)
rotation_mat = np.array(
[[math.cos(theta), -math.sin(theta)], [math.sin(theta), math.cos(theta)]]
)
offset1 = scale * np.array([-1 / 2.0, 1])
offset2 = scale * np.array([1 / 2.0, 1])
p1 += np.matmul(rotation_mat, offset1)
p2 += np.matmul(rotation_mat, offset2)
img1 = Image.fromarray(frame.astype("uint8"), "RGB").convert("RGBA")
img2 = Image.new("RGBA", frame.shape[:-1]) # Use RGBA
opacity = int(round(255 * opacity)) # Define transparency for the triangle.
points = [tuple(reversed(pos_translator(p))) for p in [p0, p1, p2]]
draw = ImageDraw.Draw(img2)
draw.polygon(points, fill=(255, 255, 255, opacity))
img = Image.alpha_composite(img1, img2)
return np.array(img.convert("RGB"))
@staticmethod
def visualize_agent_path(
positions: Sequence[Any],
frame: np.ndarray,
pos_translator: ThorPositionTo2DFrameTranslator,
single_color: bool = False,
view_triangle_only_on_last: bool = False,
disable_view_triangle: bool = False,
line_opacity: float = 1.0,
trajectory_start_end_color_str: Tuple[str, str] = ("red", "green"),
) -> np.ndarray:
if single_color:
frame = ThorViz.add_lines_to_map(
list(map(ThorViz.position_to_tuple, positions)),
frame,
pos_translator,
line_opacity,
tuple(
map(
lambda x: int(round(255 * x)),
col.Color(trajectory_start_end_color_str[0]).rgb,
)
),
)
else:
if len(positions) > 1:
colors = list(
col.Color(trajectory_start_end_color_str[0]).range_to(
col.Color(trajectory_start_end_color_str[1]), len(positions) - 1
)
)
for i in range(len(positions) - 1):
frame = ThorViz.add_line_to_map(
ThorViz.position_to_tuple(positions[i]),
ThorViz.position_to_tuple(positions[i + 1]),
frame,
pos_translator,
opacity=line_opacity,
color=tuple(map(lambda x: int(round(255 * x)), colors[i].rgb)),
)
if view_triangle_only_on_last:
positions = [positions[-1]]
if disable_view_triangle:
positions = []
for position in positions:
frame = ThorViz.add_agent_view_triangle(
ThorViz.position_to_tuple(position),
rotation=position["rotation"],
frame=frame,
pos_translator=pos_translator,
opacity=0.05 + view_triangle_only_on_last * 0.2,
)
return frame
def dump_top_down_view(self, room_name: str, image_path: str):
get_logger().debug("Dumping {}".format(image_path))
self.controller.reset(room_name)
self.controller.step(
{"action": "Initialize", "gridSize": 0.1, "makeAgentsVisible": False}
)
self.controller.step({"action": "ToggleMapView"})
top_down_view = self.controller.last_event.cv2img
cv2.imwrite(image_path, top_down_view)
def make_fig(self, episode: Any, episode_id: str) -> Figure:
trajectory: Sequence[Dict[str, Any]] = self._access(
episode, self.path_to_trajectory
)
if self.thor_top_downs is None:
self.init_top_down_render()
roomname = "_".join(episode_id.split("_")[:3])
im = self.visualize_agent_path(
trajectory,
self.thor_top_downs[roomname],
self.map_data["pos_translator"],
single_color=self.single_color,
view_triangle_only_on_last=self.view_triangle_only_on_last,
disable_view_triangle=self.disable_view_triangle,
line_opacity=self.line_opacity,
)
fig, ax = plt.subplots(figsize=self.figsize)
ax.set_title(episode_id, fontsize=self.fontsize)
ax.imshow(self.crop_viz_image(im)[:, :, ::-1])
ax.axis("off")
return fig
class ThorMultiViz(ThorViz):
def __init__(
self,
path_to_trajectory_prefix: Sequence[str] = ("task_info", "followed_path"),
agent_suffixes: Sequence[str] = ("1", "2"),
label: str = "thor_trajectories",
trajectory_start_end_color_strs: Sequence[Tuple[str, str]] = (
("red", "green"),
("cyan", "purple"),
),
**kwargs
):
super().__init__(label=label, **kwargs)
self.path_to_trajectory_prefix = list(path_to_trajectory_prefix)
self.agent_suffixes = list(agent_suffixes)
self.trajectory_start_end_color_strs = list(trajectory_start_end_color_strs)
def make_fig(self, episode: Any, episode_id: str) -> Figure:
if self.thor_top_downs is None:
self.init_top_down_render()
roomname = "_".join(episode_id.split("_")[:3])
im = self.thor_top_downs[roomname]
for agent, start_end_color in zip(
self.agent_suffixes, self.trajectory_start_end_color_strs
):
path = self.path_to_trajectory_prefix[:]
path[-1] = path[-1] + agent
trajectory = self._access(episode, path)
im = self.visualize_agent_path(
trajectory,
im,
self.map_data["pos_translator"],
single_color=self.single_color,
view_triangle_only_on_last=self.view_triangle_only_on_last,
disable_view_triangle=self.disable_view_triangle,
line_opacity=self.line_opacity,
trajectory_start_end_color_str=start_end_color,
)
fig, ax = plt.subplots(figsize=self.figsize)
ax.set_title(episode_id, fontsize=self.fontsize)
ax.imshow(self.crop_viz_image(im)[:, :, ::-1])
ax.axis("off")
return fig
| ask4help-main | allenact_plugins/robothor_plugin/robothor_viz.py |
ask4help-main | allenact_plugins/robothor_plugin/configs/__init__.py |
|
import gzip
import json
import os
from typing import Sequence, Optional
from allenact_plugins.robothor_plugin.robothor_task_samplers import (
ObjectNavDatasetTaskSampler,
)
def create_debug_dataset_from_train_dataset(
scene: str,
target_object_type: Optional[str],
episodes_subset: Sequence[int],
train_dataset_path: str,
base_debug_output_path: str,
):
downloaded_episodes = os.path.join(
train_dataset_path, "episodes", scene + ".json.gz"
)
assert os.path.exists(downloaded_episodes), (
"'{}' doesn't seem to exist or is empty. Make sure you've downloaded to download the appropriate"
" training dataset with"
" datasets/download_navigation_datasets.sh".format(downloaded_episodes)
)
# episodes
episodes = ObjectNavDatasetTaskSampler.load_dataset(
scene=scene, base_directory=os.path.join(train_dataset_path, "episodes")
)
if target_object_type is not None:
ids = {
"{}_{}_{}".format(scene, target_object_type, epit)
for epit in episodes_subset
}
else:
ids = {"{}_{}".format(scene, epit) for epit in episodes_subset}
debug_episodes = [ep for ep in episodes if ep["id"] in ids]
assert len(ids) == len(debug_episodes), (
f"Number of input ids ({len(ids)}) does not equal"
f" number of output debug tasks ({len(debug_episodes)})"
)
# sort by episode_ids
debug_episodes = [
idep[1]
for idep in sorted(
[(int(ep["id"].split("_")[-1]), ep) for ep in debug_episodes],
key=lambda x: x[0],
)
]
assert len(debug_episodes) == len(episodes_subset)
episodes_dir = os.path.join(base_debug_output_path, "episodes")
os.makedirs(episodes_dir, exist_ok=True)
episodes_file = os.path.join(episodes_dir, scene + ".json.gz")
json_str = json.dumps(debug_episodes)
json_bytes = json_str.encode("utf-8")
with gzip.GzipFile(episodes_file, "w") as fout:
fout.write(json_bytes)
assert os.path.exists(episodes_file)
if __name__ == "__main__":
CURRENT_PATH = os.getcwd()
SCENE = "FloorPlan_Train1_1"
TARGET = "Television"
EPISODES = [0, 7, 11, 12]
BASE_OUT = os.path.join(CURRENT_PATH, "datasets", "robothor-objectnav", "debug")
create_debug_dataset_from_train_dataset(
scene=SCENE,
target_object_type=TARGET,
episodes_subset=EPISODES,
train_dataset_path=os.path.join(
CURRENT_PATH, "datasets", "robothor-objectnav", "train"
),
base_debug_output_path=BASE_OUT,
)
| ask4help-main | allenact_plugins/robothor_plugin/scripts/make_objectnav_debug_dataset.py |
ask4help-main | allenact_plugins/robothor_plugin/scripts/__init__.py |
|
import os
from allenact_plugins.robothor_plugin.scripts.make_objectnav_debug_dataset import (
create_debug_dataset_from_train_dataset,
)
if __name__ == "__main__":
CURRENT_PATH = os.getcwd()
SCENE = "FloorPlan_Train1_1"
EPISODES = [3, 4, 5, 6]
BASE_OUT = os.path.join(CURRENT_PATH, "datasets", "robothor-pointnav", "debug")
create_debug_dataset_from_train_dataset(
scene=SCENE,
target_object_type=None,
episodes_subset=EPISODES,
train_dataset_path=os.path.join(
CURRENT_PATH, "datasets", "robothor-pointnav", "train"
),
base_debug_output_path=BASE_OUT,
)
| ask4help-main | allenact_plugins/robothor_plugin/scripts/make_pointnav_debug_dataset.py |
import random
from typing import Tuple, Any, List, Dict, Optional, Union, Callable, Sequence, cast
import gym
import networkx as nx
import numpy as np
from gym.utils import seeding
from gym_minigrid.envs import CrossingEnv
from gym_minigrid.minigrid import (
DIR_TO_VEC,
IDX_TO_OBJECT,
MiniGridEnv,
OBJECT_TO_IDX,
)
from allenact.base_abstractions.misc import RLStepResult
from allenact.base_abstractions.sensor import Sensor, SensorSuite
from allenact.base_abstractions.task import Task, TaskSampler
from allenact.utils.system import get_logger
from allenact_plugins.minigrid_plugin.minigrid_environments import (
AskForHelpSimpleCrossing,
)
class MiniGridTask(Task[CrossingEnv]):
_ACTION_NAMES: Tuple[str, ...] = ("left", "right", "forward")
_ACTION_IND_TO_MINIGRID_IND = tuple(
MiniGridEnv.Actions.__members__[name].value for name in _ACTION_NAMES
)
_CACHED_GRAPHS: Dict[str, nx.DiGraph] = {}
_NEIGHBOR_OFFSETS = tuple(
[(-1, 0, 0), (0, -1, 0), (0, 0, -1), (1, 0, 0), (0, 1, 0), (0, 0, 1),]
)
_XY_DIFF_TO_AGENT_DIR = {
tuple(vec): dir_ind for dir_ind, vec in enumerate(DIR_TO_VEC)
}
""" Task around a MiniGrid Env, allows interfacing allenact with
MiniGrid tasks. (currently focussed towards LavaCrossing)
"""
def __init__(
self,
env: Union[CrossingEnv],
sensors: Union[SensorSuite, List[Sensor]],
task_info: Dict[str, Any],
max_steps: int,
task_cache_uid: Optional[str] = None,
corrupt_expert_within_actions_of_goal: Optional[int] = None,
**kwargs,
):
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self._graph: Optional[nx.DiGraph] = None
self._minigrid_done = False
self._task_cache_uid = task_cache_uid
self.corrupt_expert_within_actions_of_goal = (
corrupt_expert_within_actions_of_goal
)
self.closest_agent_has_been_to_goal: Optional[float] = None
@property
def action_space(self) -> gym.spaces.Discrete:
return gym.spaces.Discrete(len(self._ACTION_NAMES))
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
return self.env.render(mode=mode)
def _step(self, action: int) -> RLStepResult:
assert isinstance(action, int)
action = cast(int, action)
minigrid_obs, reward, self._minigrid_done, info = self.env.step(
action=self._ACTION_IND_TO_MINIGRID_IND[action]
)
# self.env.render()
return RLStepResult(
observation=self.get_observations(minigrid_output_obs=minigrid_obs),
reward=reward,
done=self.is_done(),
info=info,
)
def get_observations(
self, *args, minigrid_output_obs: Optional[Dict[str, Any]] = None, **kwargs
) -> Any:
return self.sensor_suite.get_observations(
env=self.env, task=self, minigrid_output_obs=minigrid_output_obs
)
def reached_terminal_state(self) -> bool:
return self._minigrid_done
@classmethod
def class_action_names(cls, **kwargs) -> Tuple[str, ...]:
return cls._ACTION_NAMES
def close(self) -> None:
pass
def metrics(self) -> Dict[str, Any]:
# noinspection PyUnresolvedReferences,PyCallingNonCallable
env_metrics = self.env.metrics() if hasattr(self.env, "metrics") else {}
return {
**super(MiniGridTask, self).metrics(),
**{k: float(v) for k, v in env_metrics.items()},
"success": int(
self.env.was_successful
if hasattr(self.env, "was_successful")
else self.cumulative_reward > 0
),
}
@property
def graph_created(self):
return self._graph is not None
@property
def graph(self):
if self._graph is None:
if self._task_cache_uid is not None:
if self._task_cache_uid not in self._CACHED_GRAPHS:
self._CACHED_GRAPHS[self._task_cache_uid] = self.generate_graph()
self._graph = self._CACHED_GRAPHS[self._task_cache_uid]
else:
self._graph = self.generate_graph()
return self._graph
@graph.setter
def graph(self, graph: nx.DiGraph):
self._graph = graph
@classmethod
def possible_neighbor_offsets(cls) -> Tuple[Tuple[int, int, int], ...]:
# Tuples of format:
# (X translation, Y translation, rotation by 90 degrees)
# A constant is returned; this function can be changed if anything
# more complex needs to be done.
# offsets_superset = itertools.product(
# [-1, 0, 1], [-1, 0, 1], [-1, 0, 1]
# )
#
# valid_offsets = []
# for off in offsets_superset:
# if (int(off[0] != 0) + int(off[1] != 0) + int(off[2] != 0)) == 1:
# valid_offsets.append(off)
#
# return tuple(valid_offsets)
return cls._NEIGHBOR_OFFSETS
@classmethod
def _add_from_to_edge(
cls, g: nx.DiGraph, s: Tuple[int, int, int], t: Tuple[int, int, int],
):
"""Adds nodes and corresponding edges to existing nodes.
This approach avoids adding the same edge multiple times.
Pre-requisite knowledge about MiniGrid:
DIR_TO_VEC = [
# Pointing right (positive X)
np.array((1, 0)),
# Down (positive Y)
np.array((0, 1)),
# Pointing left (negative X)
np.array((-1, 0)),
# Up (negative Y)
np.array((0, -1)),
]
or
AGENT_DIR_TO_STR = {
0: '>',
1: 'V',
2: '<',
3: '^'
}
This also implies turning right (clockwise) means:
agent_dir += 1
"""
s_x, s_y, s_rot = s
t_x, t_y, t_rot = t
x_diff = t_x - s_x
y_diff = t_y - s_y
angle_diff = (t_rot - s_rot) % 4
# If source and target differ by more than one action, continue
if (x_diff != 0) + (y_diff != 0) + (angle_diff != 0) != 1 or angle_diff == 2:
return
action = None
if angle_diff == 1:
action = "right"
elif angle_diff == 3:
action = "left"
elif cls._XY_DIFF_TO_AGENT_DIR[(x_diff, y_diff)] == s_rot:
# if translation is the same direction as source
# orientation, then it's a valid forward action
action = "forward"
else:
# This is when the source and target aren't one action
# apart, despite having dx=1 or dy=1
pass
if action is not None:
g.add_edge(s, t, action=action)
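# Note (added for clarity, not part of the original source): an edge is added
# only when source and target differ by exactly one primitive action: a +1
# rotation gives "right", a +3 (i.e. -1) rotation gives "left", and a unit
# translation gives "forward" only when it points along the source
# orientation. For example, (2, 3, 0) -> (3, 3, 0) becomes a "forward" edge
# since orientation 0 points in the +x direction, while 180-degree turns and
# diagonal moves never produce an edge.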
def _add_node_to_graph(
self,
graph: nx.DiGraph,
s: Tuple[int, int, int],
valid_node_types: Tuple[str, ...],
attr_dict: Dict[Any, Any] = None,
include_rotation_free_leaves: bool = False,
):
if s in graph:
return
if attr_dict is None:
get_logger().warning("adding a node with neighbor checks and no attributes")
graph.add_node(s, **attr_dict)
if include_rotation_free_leaves:
rot_free_leaf = (*s[:-1], None)
if rot_free_leaf not in graph:
graph.add_node(rot_free_leaf)
graph.add_edge(s, rot_free_leaf, action="NA")
if attr_dict["type"] in valid_node_types:
for o in self.possible_neighbor_offsets():
t = (s[0] + o[0], s[1] + o[1], (s[2] + o[2]) % 4)
if t in graph and graph.nodes[t]["type"] in valid_node_types:
self._add_from_to_edge(graph, s, t)
self._add_from_to_edge(graph, t, s)
def generate_graph(self,) -> nx.DiGraph:
"""The generated graph is based on the fully observable grid (as the
expert sees it all).
env: environment to generate the graph over
"""
image = self.env.grid.encode()
width, height, _ = image.shape
graph = nx.DiGraph()
# In fully observable grid, there shouldn't be any "unseen"
# Currently dealing with "empty", "wall", "goal", "lava"
valid_object_ids = np.sort(
[OBJECT_TO_IDX[o] for o in ["empty", "wall", "lava", "goal"]]
)
assert np.all(np.union1d(image[:, :, 0], valid_object_ids) == valid_object_ids)
# Grid to nodes
for x in range(width):
for y in range(height):
for rotation in range(4):
type, color, state = image[x, y]
self._add_node_to_graph(
graph,
(x, y, rotation),
attr_dict={
"type": IDX_TO_OBJECT[type],
"color": color,
"state": state,
},
valid_node_types=("empty", "goal"),
)
if IDX_TO_OBJECT[type] == "goal":
if not graph.has_node("unified_goal"):
graph.add_node("unified_goal")
graph.add_edge((x, y, rotation), "unified_goal")
return graph
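    # The resulting graph has one node per traversable (x, y, rotation) triple plus a single
    # "unified_goal" node that every goal cell points to; a shortest path to "unified_goal"
    # therefore contains one extra edge past the true goal, which is why `query_expert` below
    # treats a path of length 2 as "already at the goal".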
def query_expert(self, **kwargs) -> Tuple[int, bool]:
if self._minigrid_done:
get_logger().warning("Episode is completed, but expert is still queried.")
return -1, False
paths = []
agent_x, agent_y = self.env.agent_pos
agent_rot = self.env.agent_dir
source_state_key = (agent_x, agent_y, agent_rot)
assert source_state_key in self.graph
paths.append(nx.shortest_path(self.graph, source_state_key, "unified_goal"))
if len(paths) == 0:
return -1, False
shortest_path_ind = int(np.argmin([len(p) for p in paths]))
if self.closest_agent_has_been_to_goal is None:
self.closest_agent_has_been_to_goal = len(paths[shortest_path_ind]) - 1
else:
self.closest_agent_has_been_to_goal = min(
len(paths[shortest_path_ind]) - 1, self.closest_agent_has_been_to_goal
)
if (
self.corrupt_expert_within_actions_of_goal is not None
and self.corrupt_expert_within_actions_of_goal
>= self.closest_agent_has_been_to_goal
):
return (
int(self.env.np_random.randint(0, len(self.class_action_names()))),
True,
)
if len(paths[shortest_path_ind]) == 2:
# Since "unified_goal" is 1 step away from actual goals
# if a path like [actual_goal, unified_goal] exists, then
# you are already at a goal.
get_logger().warning(
"Shortest path computations suggest we are at"
" the target but episode does not think so."
)
return -1, False
next_key_on_shortest_path = paths[shortest_path_ind][1]
return (
self.class_action_names().index(
self.graph.get_edge_data(source_state_key, next_key_on_shortest_path)[
"action"
]
),
True,
)
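# Illustrative use of the expert above (hypothetical variable names): an expert-action sensor
# would call, once per step,
#
#     action_ind, expert_found = task.query_expert()
#     if expert_found:
#         action_name = task.class_action_names()[action_ind]
#
# where `expert_found` is False (and `action_ind` is -1) when the episode is already done.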
class AskForHelpSimpleCrossingTask(MiniGridTask):
_ACTION_NAMES = ("left", "right", "forward", "toggle")
_ACTION_IND_TO_MINIGRID_IND = tuple(
MiniGridEnv.Actions.__members__[name].value for name in _ACTION_NAMES
)
_CACHED_GRAPHS: Dict[str, nx.DiGraph] = {}
def __init__(
self,
env: AskForHelpSimpleCrossing,
sensors: Union[SensorSuite, List[Sensor]],
task_info: Dict[str, Any],
max_steps: int,
**kwargs,
):
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self.did_toggle: List[bool] = []
def _step(self, action: Union[int, Sequence[int]]) -> RLStepResult:
assert isinstance(action, int)
action = cast(int, action)
self.did_toggle.append(self._ACTION_NAMES[action] == "toggle")
return super(AskForHelpSimpleCrossingTask, self)._step(action=action)
def metrics(self) -> Dict[str, Any]:
return {
**super(AskForHelpSimpleCrossingTask, self).metrics(),
"toggle_percent": float(
sum(self.did_toggle) / max(len(self.did_toggle), 1)
),
}
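# Example of the metric above (hypothetical numbers): an agent that toggled on 3 of its 12 steps
# would report `toggle_percent == 0.25` alongside the base `MiniGridTask.metrics()` entries
# (e.g. `success`).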
class MiniGridTaskSampler(TaskSampler):
def __init__(
self,
env_class: Callable[..., Union[MiniGridEnv]],
sensors: Union[SensorSuite, List[Sensor]],
env_info: Optional[Dict[str, Any]] = None,
max_tasks: Optional[int] = None,
num_unique_seeds: Optional[int] = None,
task_seeds_list: Optional[List[int]] = None,
deterministic_sampling: bool = False,
cache_graphs: Optional[bool] = False,
task_class: Callable[..., MiniGridTask] = MiniGridTask,
repeat_failed_task_for_min_steps: int = 0,
extra_task_kwargs: Optional[Dict] = None,
**kwargs,
):
super(MiniGridTaskSampler, self).__init__()
self.sensors = (
SensorSuite(sensors) if not isinstance(sensors, SensorSuite) else sensors
)
self.max_tasks = max_tasks
self.num_unique_seeds = num_unique_seeds
self.cache_graphs = cache_graphs
self.deterministic_sampling = deterministic_sampling
self.repeat_failed_task_for_min_steps = repeat_failed_task_for_min_steps
self.extra_task_kwargs = (
extra_task_kwargs if extra_task_kwargs is not None else {}
)
self._last_env_seed: Optional[int] = None
self._last_task: Optional[MiniGridTask] = None
self._number_of_steps_taken_with_task_seed = 0
assert (not deterministic_sampling) or repeat_failed_task_for_min_steps <= 0, (
"If `deterministic_sampling` is True then we require"
" `repeat_failed_task_for_min_steps <= 0`"
)
assert (not self.cache_graphs) or self.num_unique_seeds is not None, (
"When caching graphs you must specify"
" a number of unique tasks to sample from."
)
assert (self.num_unique_seeds is None) or (
0 < self.num_unique_seeds
), "`num_unique_seeds` must be a positive integer."
self.num_unique_seeds = num_unique_seeds
self.task_seeds_list = task_seeds_list
if self.task_seeds_list is not None:
if self.num_unique_seeds is not None:
assert self.num_unique_seeds == len(
self.task_seeds_list
), "`num_unique_seeds` must equal the length of `task_seeds_list` if both specified."
self.num_unique_seeds = len(self.task_seeds_list)
elif self.num_unique_seeds is not None:
self.task_seeds_list = list(range(self.num_unique_seeds))
if num_unique_seeds is not None and repeat_failed_task_for_min_steps > 0:
raise NotImplementedError(
"`repeat_failed_task_for_min_steps` must be <=0 if number"
" of unique seeds is not None."
)
assert (
not self.cache_graphs
) or self.num_unique_seeds <= 1000, "Too many tasks (graphs) to cache"
assert (not deterministic_sampling) or (
self.num_unique_seeds is not None
), "Cannot use deterministic sampling when `num_unique_seeds` is `None`."
        if (not deterministic_sampling) and self.max_tasks:
            get_logger().warning(
                "`deterministic_sampling` is `False` but you have specified `max_tasks < inf`;"
                " this might be a mistake when running testing."
            )
self.env = env_class(**env_info)
self.task_class = task_class
self.np_seeded_random_gen, _ = seeding.np_random(random.randint(0, 2 ** 31 - 1))
self.num_tasks_generated = 0
@property
def length(self) -> Union[int, float]:
return (
float("inf")
if self.max_tasks is None
else self.max_tasks - self.num_tasks_generated
)
@property
def total_unique(self) -> Optional[Union[int, float]]:
return None if self.num_unique_seeds is None else self.num_unique_seeds
@property
def last_sampled_task(self) -> Optional[Task]:
raise NotImplementedError
def next_task(self, force_advance_scene: bool = False) -> Optional[MiniGridTask]:
if self.length <= 0:
return None
task_cache_uid = None
repeating = False
if self.num_unique_seeds is not None:
if self.deterministic_sampling:
self._last_env_seed = self.task_seeds_list[
self.num_tasks_generated % len(self.task_seeds_list)
]
else:
self._last_env_seed = self.np_seeded_random_gen.choice(
self.task_seeds_list
)
else:
if self._last_task is not None:
self._number_of_steps_taken_with_task_seed += (
self._last_task.num_steps_taken()
)
if (
self._last_env_seed is not None
and self._number_of_steps_taken_with_task_seed
< self.repeat_failed_task_for_min_steps
and self._last_task.cumulative_reward == 0
):
repeating = True
else:
self._number_of_steps_taken_with_task_seed = 0
self._last_env_seed = self.np_seeded_random_gen.randint(0, 2 ** 31 - 1)
task_has_same_seed_reset = hasattr(self.env, "same_seed_reset")
if self.cache_graphs:
task_cache_uid = str(self._last_env_seed)
if repeating and task_has_same_seed_reset:
# noinspection PyUnresolvedReferences
self.env.same_seed_reset()
else:
self.env.seed(self._last_env_seed)
self.env.saved_seed = self._last_env_seed
self.env.reset()
self.num_tasks_generated += 1
task = self.task_class(
**dict(
env=self.env,
sensors=self.sensors,
task_info={},
max_steps=self.env.max_steps,
task_cache_uid=task_cache_uid,
),
**self.extra_task_kwargs,
)
if repeating and self._last_task.graph_created:
task.graph = self._last_task.graph
self._last_task = task
return task
def close(self) -> None:
self.env.close()
@property
def all_observation_spaces_equal(self) -> bool:
return True
def reset(self) -> None:
self.num_tasks_generated = 0
self.env.reset()
def set_seed(self, seed: int) -> None:
self.np_seeded_random_gen, _ = seeding.np_random(seed)
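# Minimal usage sketch of the sampler above (the sensor list and env parameters are illustrative
# and would normally come from an experiment config; `FastCrossing` lives in
# minigrid_environments.py):
#
#     sampler = MiniGridTaskSampler(
#         env_class=FastCrossing,                  # any MiniGridEnv subclass
#         sensors=[],                              # e.g. an EgocentricMiniGridSensor in practice
#         env_info=dict(size=9, num_crossings=1),
#         max_tasks=100,
#         num_unique_seeds=100,
#         deterministic_sampling=True,
#     )
#     task = sampler.next_task()                   # a MiniGridTask over a freshly seeded env
#     action_ind, expert_found = task.query_expert()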
| ask4help-main | allenact_plugins/minigrid_plugin/minigrid_tasks.py |
import copy
from typing import Optional, Set
import numpy as np
from gym import register
from gym_minigrid.envs import CrossingEnv
from gym_minigrid.minigrid import Lava, Wall
class FastCrossing(CrossingEnv):
"""Similar to `CrossingEnv`, but to support faster task sampling as per
`repeat_failed_task_for_min_steps` flag in MiniGridTaskSampler."""
def __init__(self, size=9, num_crossings=1, obstacle_type=Lava, seed=None):
self.init_agent_pos: Optional[np.ndarray] = None
self.init_agent_dir: Optional[int] = None
self.step_count: Optional[int] = None
super(FastCrossing, self).__init__(
size=size,
num_crossings=num_crossings,
obstacle_type=obstacle_type,
seed=seed,
)
def same_seed_reset(self):
assert self.init_agent_pos is not None
# Current position and direction of the agent
self.agent_pos = self.init_agent_pos
self.agent_dir = self.init_agent_dir
# Check that the agent doesn't overlap with an object
start_cell = self.grid.get(*self.agent_pos)
assert start_cell is None or start_cell.can_overlap()
assert self.carrying is None
# Step count since episode start
self.step_count = 0
# Return first observation
obs = self.gen_obs()
return obs
def reset(self, partial_reset: bool = False):
super(FastCrossing, self).reset()
self.init_agent_pos = copy.deepcopy(self.agent_pos)
self.init_agent_dir = self.agent_dir
class AskForHelpSimpleCrossing(CrossingEnv):
"""Corresponds to WC FAULTY SWITCH environment."""
def __init__(
self,
size=9,
num_crossings=1,
obstacle_type=Wall,
seed=None,
exploration_reward: Optional[float] = None,
death_penalty: Optional[float] = None,
toggle_is_permenant: bool = False,
):
self.init_agent_pos: Optional[np.ndarray] = None
self.init_agent_dir: Optional[int] = None
self.should_reveal_image: bool = False
self.exploration_reward = exploration_reward
self.death_penalty = death_penalty
self.explored_points: Set = set()
self._was_successful = False
self.toggle_is_permanent = toggle_is_permenant
self.step_count: Optional[int] = None
super(AskForHelpSimpleCrossing, self).__init__(
size=size,
num_crossings=num_crossings,
obstacle_type=obstacle_type,
seed=seed,
)
@property
def was_successful(self) -> bool:
return self._was_successful
def gen_obs(self):
obs = super(AskForHelpSimpleCrossing, self).gen_obs()
if not self.should_reveal_image:
obs["image"] *= 0
return obs
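    # The masking above (illustrative): while `should_reveal_image` is False, the egocentric
    # "image" observation (7x7x3 by default in MiniGrid) is zeroed out, so the agent is
    # effectively blind until it uses the (possibly faulty) toggle switch handled in `step`.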
def metrics(self):
return {
"explored_count": len(self.explored_points),
"final_distance": float(
min(
abs(x - (self.width - 2)) + abs(y - (self.height - 2))
for x, y in self.explored_points
)
),
}
def step(self, action: int):
"""Reveal the observation only if the `toggle` action is executed."""
if action == self.actions.toggle:
self.should_reveal_image = True
else:
self.should_reveal_image = (
self.should_reveal_image and self.toggle_is_permanent
)
minigrid_obs, reward, done, info = super(AskForHelpSimpleCrossing, self).step(
action=action
)
assert not self._was_successful, "Called step after done."
self._was_successful = self._was_successful or (reward > 0)
if (
done
and self.steps_remaining != 0
and (not self._was_successful)
and self.death_penalty is not None
):
reward += self.death_penalty
t = tuple(self.agent_pos)
if self.exploration_reward is not None:
if t not in self.explored_points:
reward += self.exploration_reward
self.explored_points.add(t)
return minigrid_obs, reward, done, info
def same_seed_reset(self):
assert self.init_agent_pos is not None
self._was_successful = False
# Current position and direction of the agent
self.agent_pos = self.init_agent_pos
self.agent_dir = self.init_agent_dir
self.explored_points.clear()
self.explored_points.add(tuple(self.agent_pos))
self.should_reveal_image = False
# Check that the agent doesn't overlap with an object
start_cell = self.grid.get(*self.agent_pos)
assert start_cell is None or start_cell.can_overlap()
assert self.carrying is None
# Step count since episode start
self.step_count = 0
# Return first observation
obs = self.gen_obs()
return obs
def reset(self, partial_reset: bool = False):
super(AskForHelpSimpleCrossing, self).reset()
self.explored_points.clear()
self.explored_points.add(tuple(self.agent_pos))
self.init_agent_pos = copy.deepcopy(self.agent_pos)
self.init_agent_dir = self.agent_dir
self._was_successful = False
self.should_reveal_image = False
class LavaCrossingS25N10(CrossingEnv):
def __init__(self):
super(LavaCrossingS25N10, self).__init__(size=25, num_crossings=10)
class LavaCrossingS15N7(CrossingEnv):
def __init__(self):
super(LavaCrossingS15N7, self).__init__(size=15, num_crossings=7)
class LavaCrossingS11N7(CrossingEnv):
def __init__(self):
        super(LavaCrossingS11N7, self).__init__(size=11, num_crossings=7)
register(
id="MiniGrid-LavaCrossingS25N10-v0",
entry_point="allenact_plugins.minigrid_plugin.minigrid_environments:LavaCrossingS25N10",
)
register(
id="MiniGrid-LavaCrossingS15N7-v0",
entry_point="allenact_plugins.minigrid_plugin.minigrid_environments:LavaCrossingS15N7",
)
register(
id="MiniGrid-LavaCrossingS11N7-v0",
entry_point="allenact_plugins.minigrid_plugin.minigrid_environments:LavaCrossingS11N7",
)
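# Once registered, the environments above can be constructed through gym's registry, e.g.
# (illustrative):
#
#     import gym
#     env = gym.make("MiniGrid-LavaCrossingS15N7-v0")
#     obs = env.reset()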
| ask4help-main | allenact_plugins/minigrid_plugin/minigrid_environments.py |
from allenact.utils.system import ImportChecker
with ImportChecker(
"\n\nPlease install babyai with:\n\n"
"pip install -e git+https://github.com/Lucaweihs/babyai.git@0b450eeb3a2dc7116c67900d51391986bdbb84cd#egg=babyai\n",
):
import babyai
| ask4help-main | allenact_plugins/minigrid_plugin/__init__.py |
import os
import queue
import random
from collections import defaultdict
from typing import Dict, Tuple, Any, cast, Iterator, List, Union, Optional
import babyai
import blosc
import numpy as np
import pickle5 as pickle
import torch
from gym_minigrid.minigrid import MiniGridEnv
from allenact.algorithms.offpolicy_sync.losses.abstract_offpolicy_loss import (
AbstractOffPolicyLoss,
Memory,
)
from allenact.algorithms.onpolicy_sync.policy import ActorCriticModel, ObservationType
from allenact.utils.misc_utils import partition_limits
from allenact.utils.system import get_logger
from allenact_plugins.minigrid_plugin.minigrid_sensors import MiniGridMissionSensor
_DATASET_CACHE: Dict[str, Any] = {}
class MiniGridOffPolicyExpertCELoss(AbstractOffPolicyLoss[ActorCriticModel]):
def __init__(self, total_episodes_in_epoch: Optional[int] = None):
super().__init__()
self.total_episodes_in_epoch = total_episodes_in_epoch
def loss( # type:ignore
self,
model: ActorCriticModel,
batch: ObservationType,
memory: Memory,
*args,
**kwargs
) -> Tuple[torch.FloatTensor, Dict[str, float], Memory, int]:
rollout_len, nrollouts = cast(torch.Tensor, batch["minigrid_ego_image"]).shape[
:2
]
# Initialize Memory if empty
if len(memory) == 0:
spec = model.recurrent_memory_specification
for key in spec:
dims_template, dtype = spec[key]
# get sampler_dim and all_dims from dims_template (and nrollouts)
dim_names = [d[0] for d in dims_template]
sampler_dim = dim_names.index("sampler")
all_dims = [d[1] for d in dims_template]
all_dims[sampler_dim] = nrollouts
memory.check_append(
key=key,
tensor=torch.zeros(
*all_dims,
dtype=dtype,
device=cast(torch.Tensor, batch["minigrid_ego_image"]).device
),
sampler_dim=sampler_dim,
)
# Forward data (through the actor and critic)
ac_out, memory = model.forward(
observations=batch,
memory=memory,
prev_actions=None, # type:ignore
masks=cast(torch.FloatTensor, batch["masks"]),
)
# Compute the loss from the actor's output and expert action
expert_ce_loss = -ac_out.distributions.log_prob(batch["expert_action"]).mean()
info = {"expert_ce": expert_ce_loss.item()}
if self.total_episodes_in_epoch is not None:
if "completed_episode_count" not in memory:
memory["completed_episode_count"] = 0
memory["completed_episode_count"] += (
int(np.prod(batch["masks"].shape)) # type: ignore
- batch["masks"].sum().item() # type: ignore
)
info["epoch_progress"] = (
memory["completed_episode_count"] / self.total_episodes_in_epoch
)
return expert_ce_loss, info, memory, rollout_len * nrollouts
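# The tuple returned by `loss` above follows the `AbstractOffPolicyLoss` interface imported
# above: a scalar cross-entropy loss on the expert actions, a dict of values to log, the
# (possibly updated) recurrent memory, and the number of transitions
# (rollout_len * nrollouts) the loss covers.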
def transform_demos(demos):
# A modified version of babyai.utils.demos.transform_demos
# where we use pickle 5 instead of standard pickle
new_demos = []
for demo in demos:
new_demo = []
mission = demo[0]
all_images = demo[1]
directions = demo[2]
actions = demo[3]
# First decompress the pickle
pickled_array = blosc.blosc_extension.decompress(all_images, False)
# ... and unpickle
all_images = pickle.loads(pickled_array)
n_observations = all_images.shape[0]
assert (
len(directions) == len(actions) == n_observations
), "error transforming demos"
for i in range(n_observations):
obs = {
"image": all_images[i],
"direction": directions[i],
"mission": mission,
}
action = actions[i]
done = i == n_observations - 1
new_demo.append((obs, action, done))
new_demos.append(new_demo)
return new_demos
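# For reference, each element of `demos` above follows babyai's stored-demo layout: a tuple of
# (mission string, blosc-compressed image array, per-step agent directions, per-step actions),
# which `transform_demos` expands into a list of (observation dict, action, done) triples.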
class ExpertTrajectoryIterator(Iterator):
def __init__(
self,
data: List[Tuple[str, bytes, List[int], MiniGridEnv.Actions]],
nrollouts: int,
rollout_len: int,
instr_len: Optional[int],
restrict_max_steps_in_dataset: Optional[int] = None,
num_data_length_clusters: int = 8,
current_worker: Optional[int] = None,
num_workers: Optional[int] = None,
):
super(ExpertTrajectoryIterator, self).__init__()
self.restrict_max_steps_in_dataset = restrict_max_steps_in_dataset
if restrict_max_steps_in_dataset is not None:
restricted_data = []
cur_len = 0
for i, d in enumerate(data):
if cur_len >= restrict_max_steps_in_dataset:
break
restricted_data.append(d)
cur_len += len(d[2])
data = restricted_data
if num_workers is not None:
parts = partition_limits(len(data), num_workers)
new_data = data[parts[current_worker] : parts[current_worker + 1]]
data = new_data
self.num_data_lengths = min(num_data_length_clusters, len(data) // nrollouts)
data_lengths = sorted(
[(len(d), it) for it, d in enumerate(data)], key=lambda x: (x[0], x[1])
)
sorted_inds = [l[1] for l in data_lengths]
data_limits = partition_limits(
num_items=len(data_lengths), num_parts=self.num_data_lengths
)
# get_logger().debug("Using cluster limits {}".format(data_limits))
self.data = data
self.instr_len = instr_len
self.trajectory_inds = [
sorted_inds[data_limits[i] : data_limits[i + 1]]
for i in range(self.num_data_lengths)
]
for i in range(self.num_data_lengths):
random.shuffle(self.trajectory_inds[i])
assert nrollouts <= sum(
len(ti) for ti in self.trajectory_inds
), "Too many rollouts requested."
self.nrollouts = nrollouts
self.rollout_len = rollout_len
self.current_data_length = [
random.randint(0, self.num_data_lengths - 1) for _ in range(nrollouts)
]
self.rollout_queues: List[queue.Queue] = [
queue.Queue() for _ in range(nrollouts)
]
for it, q in enumerate(self.rollout_queues):
self.add_data_to_rollout_queue(q, it)
self.minigrid_mission_sensor: Optional[MiniGridMissionSensor] = None
if instr_len is not None:
self.minigrid_mission_sensor = MiniGridMissionSensor(instr_len)
def add_data_to_rollout_queue(self, q: queue.Queue, sampler: int) -> bool:
assert q.empty()
start = self.current_data_length[sampler]
cond = True
while cond:
self.current_data_length[sampler] = (
self.current_data_length[sampler] + 1
) % self.num_data_lengths
cond = (
len(self.trajectory_inds[self.current_data_length[sampler]]) == 0
and self.current_data_length[sampler] != start
)
if len(self.trajectory_inds[self.current_data_length[sampler]]) == 0:
return False
for i, step in enumerate(
transform_demos(
[
self.data[
self.trajectory_inds[self.current_data_length[sampler]].pop()
]
]
)[0]
):
q.put((*step, i == 0))
return True
def get_data_for_rollout_ind(self, rollout_ind: int) -> Dict[str, np.ndarray]:
masks: List[bool] = []
minigrid_ego_image = []
minigrid_mission = []
expert_actions = []
q = self.rollout_queues[rollout_ind]
while len(masks) != self.rollout_len:
if q.empty():
if not self.add_data_to_rollout_queue(q, rollout_ind):
raise StopIteration()
obs, expert_action, _, is_first_obs = cast(
Tuple[
Dict[str, Union[np.array, int, str]],
MiniGridEnv.Actions,
bool,
bool,
],
q.get_nowait(),
)
masks.append(not is_first_obs)
minigrid_ego_image.append(obs["image"])
if self.minigrid_mission_sensor is not None:
# noinspection PyTypeChecker
minigrid_mission.append(
self.minigrid_mission_sensor.get_observation(
env=None, task=None, minigrid_output_obs=obs
)
)
expert_actions.append([expert_action])
to_return = {
"masks": np.array(masks, dtype=np.float32).reshape(
(self.rollout_len, 1) # steps x mask
),
"minigrid_ego_image": np.stack(
minigrid_ego_image, axis=0
), # steps x height x width x channels
"expert_action": np.array(expert_actions, dtype=np.int64).reshape(
self.rollout_len # steps
),
}
if self.minigrid_mission_sensor is not None:
to_return["minigrid_mission"] = np.stack(
minigrid_mission, axis=0
) # steps x mission_dims
return to_return
def __next__(self) -> Dict[str, torch.Tensor]:
all_data = defaultdict(lambda: [])
for rollout_ind in range(self.nrollouts):
data_for_ind = self.get_data_for_rollout_ind(rollout_ind=rollout_ind)
for key in data_for_ind:
all_data[key].append(data_for_ind[key])
return {
key: torch.from_numpy(np.stack(all_data[key], axis=1)) # new sampler dim
for key in all_data
}
def create_minigrid_offpolicy_data_iterator(
path: str,
nrollouts: int,
rollout_len: int,
instr_len: Optional[int],
restrict_max_steps_in_dataset: Optional[int] = None,
current_worker: Optional[int] = None,
num_workers: Optional[int] = None,
) -> ExpertTrajectoryIterator:
path = os.path.abspath(path)
assert (current_worker is None) == (
num_workers is None
), "both current_worker and num_workers must be simultaneously defined or undefined"
if path not in _DATASET_CACHE:
get_logger().info(
"Loading minigrid dataset from {} for first time...".format(path)
)
_DATASET_CACHE[path] = babyai.utils.load_demos(path)
assert _DATASET_CACHE[path] is not None and len(_DATASET_CACHE[path]) != 0
get_logger().info(
"Loading minigrid dataset complete, it contains {} trajectories".format(
len(_DATASET_CACHE[path])
)
)
return ExpertTrajectoryIterator(
data=_DATASET_CACHE[path],
nrollouts=nrollouts,
rollout_len=rollout_len,
instr_len=instr_len,
restrict_max_steps_in_dataset=restrict_max_steps_in_dataset,
current_worker=current_worker,
num_workers=num_workers,
)
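# Usage sketch (the demo path and batch dimensions are illustrative):
#
#     data_it = create_minigrid_offpolicy_data_iterator(
#         path="path/to/babyai_demos.pkl",
#         nrollouts=32,
#         rollout_len=128,
#         instr_len=None,
#     )
#     batch = next(data_it)
#     # batch["minigrid_ego_image"]: [rollout_len, nrollouts, H, W, C]
#     # batch["masks"]:              [rollout_len, nrollouts, 1]
#     # batch["expert_action"]:      [rollout_len, nrollouts]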
| ask4help-main | allenact_plugins/minigrid_plugin/minigrid_offpolicy.py |