python_code | repo_name | file_path
---|---|---|
allenact-main | tests/vision/__init__.py |
|
import io
import math
import os
import pathlib
from contextlib import redirect_stdout, redirect_stderr
from typing import Optional, List, Dict, Any
import torch
from allenact.algorithms.onpolicy_sync.losses.abstract_loss import (
AbstractActorCriticLoss,
)
from allenact.algorithms.onpolicy_sync.policy import ObservationType
from allenact.algorithms.onpolicy_sync.runner import OnPolicyRunner
from allenact.algorithms.onpolicy_sync.storage import (
StreamingStorageMixin,
ExperienceStorage,
RolloutBlockStorage,
)
from allenact.base_abstractions.experiment_config import MachineParams
from allenact.base_abstractions.misc import (
Memory,
GenericAbstractLoss,
ModelType,
LossOutput,
)
from allenact.utils.experiment_utils import PipelineStage, StageComponent
from allenact.utils.misc_utils import prepare_locals_for_super
from projects.babyai_baselines.experiments.go_to_obj.ppo import (
PPOBabyAIGoToObjExperimentConfig,
)
SILLY_STORAGE_VALUES = [1.0, 2.0, 3.0, 4.0]
SILLY_STORAGE_REPEATS = [1, 2, 3, 4]
class FixedConstantLoss(AbstractActorCriticLoss):
def __init__(self, name: str, value: float):
super().__init__()
self.name = name
self.value = value
def loss( # type: ignore
self, *args, **kwargs,
):
return self.value, {self.name: self.value}
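# Illustrative note (not in the original source): FixedConstantLoss("3_loss", 3.0).loss()
# returns (3.0, {"3_loss": 3.0}) regardless of its inputs, which is what makes the
# "3_loss/3_loss" == 3 validation assertion in TestGoToObjTrains below deterministic.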
class SillyStorage(ExperienceStorage, StreamingStorageMixin):
def __init__(self, values_to_return: List[float], repeats: List[int]):
self.values_to_return = values_to_return
self.repeats = repeats
assert len(self.values_to_return) == len(self.repeats)
self.index = 0
def initialize(self, *, observations: ObservationType, **kwargs):
pass
def add(
self,
observations: ObservationType,
memory: Optional[Memory],
actions: torch.Tensor,
action_log_probs: torch.Tensor,
value_preds: torch.Tensor,
rewards: torch.Tensor,
masks: torch.Tensor,
):
pass
def to(self, device: torch.device):
pass
def set_partition(self, index: int, num_parts: int):
pass
@property
def total_experiences(self) -> int:
return 0
@total_experiences.setter
def total_experiences(self, value: int):
pass
def next_batch(self) -> Dict[str, Any]:
if self.index >= len(self.values_to_return):
raise EOFError
to_return = {
"value": torch.tensor(
[self.values_to_return[self.index]] * self.repeats[self.index]
),
}
self.index += 1
return to_return
def reset_stream(self):
self.index = 0
def empty(self) -> bool:
return len(self.values_to_return) == 0
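# Illustrative note (not in the original source): with SILLY_STORAGE_VALUES and
# SILLY_STORAGE_REPEATS above, successive next_batch() calls yield
# {"value": tensor([1.])}, {"value": tensor([2., 2.])}, {"value": tensor([3., 3., 3.])},
# and {"value": tensor([4., 4., 4., 4.])} before raising EOFError; reset_stream()
# rewinds the stream to the first batch.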
class AverageBatchValueLoss(GenericAbstractLoss):
def loss(
self,
*,
model: ModelType,
batch: ObservationType,
batch_memory: Memory,
stream_memory: Memory,
) -> LossOutput:
v = batch["value"].mean()
return LossOutput(
value=v,
info={"avg_batch_val": v},
per_epoch_info={},
batch_memory=batch_memory,
stream_memory=stream_memory,
bsize=batch["value"].shape[0],
)
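# Illustrative check (not in the original source): averaging avg_batch_val over the four
# SillyStorage batches, weighted by their bsize, gives
# (1*1 + 2*2 + 3*3 + 4*4) / (1 + 2 + 3 + 4) == 3.0, the value asserted against
# "test-debug-losses/avg_value_loss/avg_batch_val" in TestGoToObjTrains below.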
class PPOBabyAIGoToObjTestExperimentConfig(PPOBabyAIGoToObjExperimentConfig):
NUM_CKPTS_TO_SAVE = 2
@classmethod
def tag(cls):
return "BabyAIGoToObjPPO-TESTING"
@classmethod
def machine_params(cls, mode="train", **kwargs):
mp = super().machine_params(mode=mode, **kwargs)
if mode == "valid":
mp = MachineParams(
nprocesses=1,
devices=mp.devices,
sensor_preprocessor_graph=mp.sensor_preprocessor_graph,
sampler_devices=mp.sampler_devices,
visualizer=mp.visualizer,
local_worker_ids=mp.local_worker_ids,
)
return mp
@classmethod
def training_pipeline(cls, **kwargs):
total_train_steps = cls.TOTAL_RL_TRAIN_STEPS
ppo_info = cls.rl_loss_default("ppo", steps=total_train_steps)
tp = cls._training_pipeline(
named_losses={
"ppo_loss": ppo_info["loss"],
"3_loss": FixedConstantLoss("3_loss", 3.0),
"avg_value_loss": AverageBatchValueLoss(),
},
named_storages={
"onpolicy": RolloutBlockStorage(),
"silly_storage": SillyStorage(
values_to_return=SILLY_STORAGE_VALUES, repeats=SILLY_STORAGE_REPEATS
),
},
pipeline_stages=[
PipelineStage(
loss_names=["ppo_loss", "3_loss"],
max_stage_steps=total_train_steps,
stage_components=[
StageComponent(
uuid="onpolicy",
storage_uuid="onpolicy",
loss_names=["ppo_loss", "3_loss"],
)
],
),
],
num_mini_batch=ppo_info["num_mini_batch"],
update_repeats=ppo_info["update_repeats"],
total_train_steps=total_train_steps,
valid_pipeline_stage=PipelineStage(
loss_names=["ppo_loss", "3_loss"],
max_stage_steps=-1,
update_repeats=1,
num_mini_batch=1,
),
test_pipeline_stage=PipelineStage(
loss_names=["avg_value_loss"],
stage_components=[
StageComponent(
uuid="debug",
storage_uuid="silly_storage",
loss_names=["avg_value_loss"],
),
],
max_stage_steps=-1,
update_repeats=1,
num_mini_batch=1,
),
)
tp.training_settings.save_interval = int(
math.ceil(cls.TOTAL_RL_TRAIN_STEPS / cls.NUM_CKPTS_TO_SAVE)
)
return tp
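# Illustrative note (not in the original source): with NUM_CKPTS_TO_SAVE == 2, the
# save_interval above is half of TOTAL_RL_TRAIN_STEPS, so training writes two
# checkpoints; this is why the test below expects len(test_results) == 2.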
def valid_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
# Also run validation
return self.test_task_sampler_args(**prepare_locals_for_super(locals()))
# Wrapper context manager to redirect stdout and stderr to a file when potentially
# using pytest capsys
class RedirectOutput:
def __init__(self, capsys: Optional[Any], capfd: Optional[Any]):
self.capsys = capsys
self.capfd = capfd
self.f = io.StringIO()
self.redirect_stdout = redirect_stdout(self.f)
self.redirect_stderr = redirect_stderr(self.f)
self.capsys_output = ""
self.capfd_output = ""
# self.capsys_disabler = None
def get_output(self):
return self.f.getvalue() + self.capsys_output + self.capfd_output
def __enter__(self):
if self.capsys is not None:
self.capsys.readouterr() # Clear out any existing output
if self.capfd is not None:
self.capfd.readouterr() # Clear out any existing output
# self.capsys_disabler = self.capsys.disabled()
# self.capsys_disabler.__enter__()
self.redirect_stdout.__enter__()
self.redirect_stderr.__enter__()
def __exit__(self, *args):
if self.capsys is not None:
captured = self.capsys.readouterr()
self.capsys_output = captured.out + captured.err
# self.capsys_disabler.__exit__(*args)
if self.capfd is not None:
captured = self.capfd.readouterr()
self.capfd_output = captured.out + captured.err
self.redirect_stdout.__exit__(*args)
self.redirect_stderr.__exit__(*args)
class TestGoToObjTrains:
def test_ppo_trains(self, capfd, tmpdir):
cfg = PPOBabyAIGoToObjTestExperimentConfig()
d = tmpdir / "test_ppo_trains"
if isinstance(d, pathlib.Path):
d.mkdir(parents=True, exist_ok=True)
else:
d.mkdir()
output_dir = str(d)
train_runner = OnPolicyRunner(
config=cfg,
output_dir=output_dir,
loaded_config_src_files=None,
seed=1,
mode="train",
deterministic_cudnn=True,
)
output_redirector = RedirectOutput(capsys=None, capfd=capfd)
with output_redirector:
start_time_str = train_runner.start_train(
max_sampler_processes_per_worker=1
)
s = output_redirector.get_output()
def extract_final_metrics_from_log(s: str, mode: str):
lines = s.splitlines()
lines = [l for l in lines if mode.upper() in l]
try:
metrics_and_losses_list = (
lines[-1].split(")")[-1].split("[")[0].strip().split(" ")
)
except IndexError:
raise RuntimeError(f"Failed to parse log:\n{s}")
def try_float(f):
try:
return float(f)
except ValueError:
return f
metrics_and_losses_dict = {
k: try_float(v)
for k, v in zip(
metrics_and_losses_list[::2], metrics_and_losses_list[1::2]
)
}
return metrics_and_losses_dict
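# Illustrative (assumed) log-line format parsed by the helper above, e.g.
#   "... (onpolicy) ppo_loss 0.12 3_loss 3.0 global_batch_size 256 [...]"
# would be parsed into {"ppo_loss": 0.12, "3_loss": 3.0, "global_batch_size": 256.0}.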
train_metrics = extract_final_metrics_from_log(s, "train")
assert train_metrics["global_batch_size"] == 256
valid_metrics = extract_final_metrics_from_log(s, "valid")
assert valid_metrics["3_loss/3_loss"] == 3, "Incorrect validation loss"
assert (
valid_metrics["new_tasks_completed"] == cfg.NUM_TEST_TASKS
), "Incorrect number of tasks evaluated in validation"
test_runner = OnPolicyRunner(
config=cfg,
output_dir=output_dir,
loaded_config_src_files=None,
seed=1,
mode="test",
deterministic_cudnn=True,
)
test_results = test_runner.start_test(
checkpoint_path_dir_or_pattern=os.path.join(
output_dir, "checkpoints", "**", start_time_str, "*.pt"
),
max_sampler_processes_per_worker=1,
)
assert (
len(test_results) == 2
), f"Too many or too few test results ({test_results})"
tr = test_results[-1]
assert (
tr["training_steps"]
== round(
math.ceil(
cfg.TOTAL_RL_TRAIN_STEPS
/ (cfg.ROLLOUT_STEPS * cfg.NUM_TRAIN_SAMPLERS)
)
)
* cfg.ROLLOUT_STEPS
* cfg.NUM_TRAIN_SAMPLERS
), "Incorrect number of training steps"
assert len(tr["tasks"]) == cfg.NUM_TEST_TASKS, "Incorrect number of test tasks"
assert tr["test-metrics/success"] == sum(
task["success"] for task in tr["tasks"]
) / len(tr["tasks"]), "Success counts don't seem to match"
assert (
tr["test-metrics/success"] > 0.95
), f"PPO did not seem to converge for the go_to_obj task (success {tr['success']})."
assert tr["test-debug-losses/avg_value_loss/avg_batch_val"] == sum(
ssv * ssr for ssv, ssr in zip(SILLY_STORAGE_VALUES, SILLY_STORAGE_REPEATS)
) / sum(SILLY_STORAGE_REPEATS)
assert tr["test-debug-losses/avg_value_loss/avg_batch_val"] == sum(
ssv * ssr for ssv, ssr in zip(SILLY_STORAGE_VALUES, SILLY_STORAGE_REPEATS)
) / sum(SILLY_STORAGE_REPEATS)
assert tr["test-debug-misc/worker_batch_size"] == sum(
SILLY_STORAGE_VALUES
) / len(SILLY_STORAGE_VALUES)
if __name__ == "__main__":
TestGoToObjTrains().test_ppo_trains(
pathlib.Path("experiment_output/testing"), capsys=None, capfd=None
) # type:ignore
| allenact-main | tests/sync_algs_cpu/test_to_to_obj_trains.py |
allenact-main | tests/sync_algs_cpu/__init__.py |
|
from allenact_plugins.manipulathor_plugin.arm_calculation_utils import (
world_coords_to_agent_coords,
)
class TestArmCalculationUtils(object):
def test_translation_functions(self):
agent_coordinate = {
"position": {"x": 1, "y": 0, "z": 2},
"rotation": {"x": 0, "y": -45, "z": 0},
}
obj_coordinate = {
"position": {"x": 0, "y": 1, "z": 0},
"rotation": {"x": 0, "y": 0, "z": 0},
}
rotated = world_coords_to_agent_coords(obj_coordinate, agent_coordinate)
eps = 0.01
assert (
abs(rotated["position"]["x"] - (-2.12)) < eps
and abs(rotated["position"]["y"] - (1.0)) < eps
and abs(rotated["position"]["z"] - (-0.70)) < eps
)
if __name__ == "__main__":
TestArmCalculationUtils().test_translation_functions()
| allenact-main | tests/manipulathor_plugin/test_utils.py |
allenact-main | tests/manipulathor_plugin/__init__.py |
|
import os
from tempfile import mkdtemp
from typing import Dict, Optional, List, Any, cast
import gym
from gym_minigrid.envs import EmptyRandomEnv5x5
from torch import nn
from torch import optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses.imitation import Imitation
from allenact.algorithms.onpolicy_sync.losses.ppo import PPO, PPOConfig
from allenact.algorithms.onpolicy_sync.runner import OnPolicyRunner
from allenact.base_abstractions.experiment_config import ExperimentConfig, TaskSampler
from allenact.base_abstractions.sensor import SensorSuite, ExpertActionSensor
from allenact.utils.experiment_utils import (
TrainingPipeline,
Builder,
PipelineStage,
LinearDecay,
)
from allenact_plugins.minigrid_plugin.minigrid_sensors import EgocentricMiniGridSensor
from allenact_plugins.minigrid_plugin.minigrid_tasks import MiniGridTaskSampler
from projects.tutorials.minigrid_tutorial_conds import (
ConditionedMiniGridSimpleConvRNN,
ConditionedMiniGridTask,
)
class MiniGridCondTestExperimentConfig(ExperimentConfig):
@classmethod
def tag(cls) -> str:
return "MiniGridCondTest"
SENSORS = [
EgocentricMiniGridSensor(agent_view_size=5, view_channels=3),
ExpertActionSensor(
action_space=gym.spaces.Dict(
higher=gym.spaces.Discrete(2), lower=gym.spaces.Discrete(2)
)
),
]
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return ConditionedMiniGridSimpleConvRNN(
action_space=gym.spaces.Dict(
higher=gym.spaces.Discrete(2), lower=gym.spaces.Discrete(2)
),
observation_space=SensorSuite(cls.SENSORS).observation_spaces,
num_objects=cls.SENSORS[0].num_objects,
num_colors=cls.SENSORS[0].num_colors,
num_states=cls.SENSORS[0].num_states,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return MiniGridTaskSampler(**kwargs)
def train_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
return self._get_sampler_args(process_ind=process_ind, mode="train")
def valid_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
return self._get_sampler_args(process_ind=process_ind, mode="valid")
def test_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
return self._get_sampler_args(process_ind=process_ind, mode="test")
def _get_sampler_args(self, process_ind: int, mode: str) -> Dict[str, Any]:
"""Generate initialization arguments for train, valid, and test
TaskSamplers.
# Parameters
process_ind : index of the current task sampler
mode: one of `train`, `valid`, or `test`
"""
if mode == "train":
max_tasks = None # infinite training tasks
task_seeds_list = None # no predefined random seeds for training
deterministic_sampling = False # randomly sample tasks in training
else:
max_tasks = 20 + 20 * (
mode == "test"
) # 20 tasks for valid, 40 for test (per sampler)
# one seed for each task to sample:
# - ensures different seeds for each sampler, and
# - ensures a deterministic set of sampled tasks.
task_seeds_list = list(
range(process_ind * max_tasks, (process_ind + 1) * max_tasks)
)
deterministic_sampling = (
True # deterministically sample task in validation/testing
)
return dict(
max_tasks=max_tasks, # see above
env_class=self.make_env, # builder for third-party environment (defined below)
sensors=self.SENSORS, # sensors used to return observations to the agent
env_info=dict(), # parameters for environment builder (none for now)
task_seeds_list=task_seeds_list, # see above
deterministic_sampling=deterministic_sampling, # see above
task_class=ConditionedMiniGridTask,
)
@staticmethod
def make_env(*args, **kwargs):
return EmptyRandomEnv5x5()
@classmethod
def machine_params(cls, mode="train", **kwargs) -> Dict[str, Any]:
return {
"nprocesses": 4 if mode == "train" else 1,
"devices": [],
}
@classmethod
def training_pipeline(cls, **kwargs) -> TrainingPipeline:
ppo_steps = int(512)
return TrainingPipeline(
named_losses=dict(
imitation_loss=Imitation(
cls.SENSORS[1]
), # 0 is Minigrid, 1 is ExpertActionSensor
ppo_loss=PPO(**PPOConfig, entropy_method_name="conditional_entropy"),
), # type:ignore
pipeline_stages=[
PipelineStage(
teacher_forcing=LinearDecay(
startp=1.0, endp=0.0, steps=ppo_steps // 2,
),
loss_names=["imitation_loss", "ppo_loss"],
max_stage_steps=ppo_steps,
)
],
optimizer_builder=Builder(cast(optim.Optimizer, optim.Adam), dict(lr=1e-4)),
num_mini_batch=4,
update_repeats=3,
max_grad_norm=0.5,
num_steps=16,
gamma=0.99,
use_gae=True,
gae_lambda=0.95,
advance_scene_rollout_period=None,
save_interval=10000,
metric_accumulate_interval=1,
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)} # type:ignore
),
)
class TestMiniGridCond:
def test_train(self, tmpdir):
cfg = MiniGridCondTestExperimentConfig()
train_runner = OnPolicyRunner(
config=cfg,
output_dir=tmpdir,
loaded_config_src_files=None,
seed=12345,
mode="train",
deterministic_cudnn=False,
deterministic_agents=False,
extra_tag="",
disable_tensorboard=True,
disable_config_saving=True,
)
start_time_str, valid_results = train_runner.start_train(
checkpoint=None,
restart_pipeline=False,
max_sampler_processes_per_worker=1,
collect_valid_results=True,
)
assert len(valid_results) > 0
test_runner = OnPolicyRunner(
config=cfg,
output_dir=tmpdir,
loaded_config_src_files=None,
seed=12345,
mode="test",
deterministic_cudnn=False,
deterministic_agents=False,
extra_tag="",
disable_tensorboard=True,
disable_config_saving=True,
)
test_results = test_runner.start_test(
checkpoint_path_dir_or_pattern=os.path.join(
tmpdir, "checkpoints", "**", start_time_str, "*.pt"
),
max_sampler_processes_per_worker=1,
inference_expert=True,
)
assert test_results[-1]["test-metrics/ep_length"] < 4
if __name__ == "__main__":
TestMiniGridCond().test_train(mkdtemp()) # type:ignore
| allenact-main | tests/hierarchical_policies/test_minigrid_conditional.py |
allenact-main | tests/hierarchical_policies/__init__.py |
|
import os
from pathlib import Path
ALLENACT_INSTALL_DIR = os.path.abspath(os.path.dirname(Path(__file__)))
| allenact-main | allenact/_constants.py |
try:
# noinspection PyProtectedMember,PyUnresolvedReferences
from allenact._version import __version__
except ModuleNotFoundError:
__version__ = None
| allenact-main | allenact/__init__.py |
import os
from pathlib import Path
from setuptools import find_packages, setup
def parse_req_file(fname, initial=None):
"""Reads requires.txt file generated by setuptools and outputs a
new/updated dict of extras as keys and corresponding lists of dependencies
as values.
The input file's contents are similar to a `ConfigParser` file, e.g.
pkg_1
pkg_2
pkg_3
[extras1]
pkg_4
pkg_5
[extras2]
pkg_6
pkg_7
"""
reqs = {} if initial is None else initial
cline = None
with open(fname, "r") as f:
for line in f.readlines():
line = line[:-1].strip()
if len(line) == 0:
continue
if line[0] == "[":
# Add new key for current extras (if missing in dict)
cline = line[1:-1].strip()
if cline not in reqs:
reqs[cline] = []
else:
# Only keep dependencies from extras
if cline is not None:
reqs[cline].append(line)
return reqs
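# Illustrative (not in the original source): for the example file shown in the docstring
# above, parse_req_file returns {"extras1": ["pkg_4", "pkg_5"], "extras2": ["pkg_6", "pkg_7"]};
# the top-level pkg_1..pkg_3 lines are skipped because `cline` is still None when they are read.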
def get_version(fname):
"""Reads PKG-INFO file generated by setuptools and extracts the Version
number."""
res = "UNK"
with open(fname, "r") as f:
for line in f.readlines():
line = line[:-1]
if line.startswith("Version:"):
res = line.replace("Version:", "").strip()
break
if res in ["UNK", ""]:
raise ValueError(f"Missing Version number in {fname}")
return res
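# Illustrative (not in the original source): a PKG-INFO file containing the line
# "Version: 0.5.0" makes get_version return "0.5.0"; a missing or empty Version
# line raises ValueError.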
def _do_setup():
base_dir = os.path.abspath(os.path.dirname(Path(__file__)))
if not os.path.exists(
os.path.join(base_dir, "allenact.egg-info/dependency_links.txt")
):
# Build mode for sdist
os.chdir(os.path.join(base_dir, ".."))
with open(".VERSION", "r") as f:
__version__ = f.readline().strip()
# Extra dependencies for development (actually unnecessary)
extras = {
"dev": [
l.strip()
for l in open("dev_requirements.txt", "r").readlines()
if l.strip() != ""
]
}
else:
# Install mode from sdist
__version__ = get_version(os.path.join(base_dir, "allenact.egg-info/PKG-INFO"))
extras = parse_req_file(
os.path.join(base_dir, "allenact.egg-info/requires.txt")
)
setup(
name="allenact",
version=__version__,
description="AllenAct framework",
long_description=(
"AllenAct is a modular and flexible learning framework designed with"
" a focus on the unique requirements of Embodied-AI research."
),
classifiers=[
"Intended Audience :: Science/Research",
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
keywords=["reinforcement learning", "embodied-AI", "AI", "RL", "SLAM"],
url="https://github.com/allenai/allenact",
author="Allen Institute for Artificial Intelligence",
author_email="[email protected]",
license="MIT",
packages=find_packages(include=["allenact", "allenact.*"]),
install_requires=[
"gym==0.17.*", # Newer versions of gym are now broken with updates to setuptools
"torch>=1.6.0,!=1.8.0,<2.0.0",
"tensorboardx>=2.1",
"torchvision>=0.7.0",
"setproctitle",
"moviepy>=1.0.3",
"filelock",
"numpy>=1.19.1",
"Pillow>=8.2.0,<9.0.0",
"matplotlib>=3.3.1",
"networkx",
"opencv-python",
"wheel>=0.36.2",
"attrs>=21.4.0",
"scipy>=1.5.4",
],
setup_requires=["pytest-runner"],
tests_require=["pytest", "pytest-cov", "compress_pickle"],
entry_points={"console_scripts": ["allenact=allenact.main:main"]},
extras_require=extras,
)
if __name__ == "__main__":
_do_setup()
| allenact-main | allenact/setup.py |
"""Entry point to training/validating/testing for a user given experiment
name."""
import os
if "CUDA_DEVICE_ORDER" not in os.environ:
# Necessary to order GPUs correctly in some cases
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
import argparse
import ast
import importlib
import inspect
import json
from typing import Dict, List, Optional, Tuple, Type
from setproctitle import setproctitle as ptitle
from allenact import __version__
from allenact.algorithms.onpolicy_sync.runner import (
CONFIG_KWARGS_STR,
OnPolicyRunner,
SaveDirFormat,
)
from allenact.base_abstractions.experiment_config import ExperimentConfig
from allenact.utils.system import HUMAN_LOG_LEVELS, get_logger, init_logging
def get_argument_parser():
"""Creates the argument parser."""
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
description="allenact", formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"experiment",
type=str,
help="the path to experiment config file relative the 'experiment_base' directory"
" (see the `--experiment_base` flag).",
)
parser.add_argument(
"--eval",
dest="eval",
action="store_true",
required=False,
help="if you pass the `--eval` flag, AllenAct will run inference on your experiment configuration."
" You will need to specify which experiment checkpoints to run evaluation using the `--checkpoint`"
" flag.",
)
parser.set_defaults(eval=False)
parser.add_argument(
"--config_kwargs",
type=str,
default=None,
required=False,
help="sometimes it is useful to be able to pass additional key-word arguments"
" to `__init__` when initializing an experiment configuration. This flag can be used"
" to pass such key-word arugments by specifying them with json, e.g."
'\n\t--config_kwargs \'{"gpu_id": 0, "my_important_variable": [1,2,3]}\''
"\nTo see which arguments are supported for your experiment see the experiment"
" config's `__init__` function. If the value passed to this function is a file path"
" then we will try to load this file path as a json object and use this json object"
" as key-word arguments.",
)
parser.add_argument(
"--extra_tag",
type=str,
default="",
required=False,
help="Add an extra tag to the experiment when trying out new ideas (will be used"
" as a subdirectory of the tensorboard path so you will be able to"
" search tensorboard logs using this extra tag). This can also be used to add an extra"
" organization when running evaluation (e.g. `--extra_tag running_eval_on_great_idea_12`)",
)
parser.add_argument(
"-o",
"--output_dir",
required=False,
type=str,
default="experiment_output",
help="experiment output folder",
)
parser.add_argument(
"--save_dir_fmt",
required=False,
type=lambda s: SaveDirFormat[s.upper()],
default="flat",
help="The file structure to use when saving results from allenact."
" See documentation o f`SaveDirFormat` for more details."
" Allowed values are ('flat' and 'nested'). Default: 'flat'.",
)
parser.add_argument(
"-s", "--seed", required=False, default=None, type=int, help="random seed",
)
parser.add_argument(
"-b",
"--experiment_base",
required=False,
default=os.getcwd(),
type=str,
help="experiment configuration base folder (default: working directory)",
)
parser.add_argument(
"-c",
"--checkpoint",
required=False,
default=None,
type=str,
help="optional checkpoint file name to resume training on or run testing with. When testing (see the `--eval` flag) this"
" argument can be used very flexibly as:"
"\n(1) the path to a particular individual checkpoint file,"
"\n(2) the path to a directory of checkpoint files all of which you'd like to be evaluated"
" (checkpoints are expected to have a `.pt` file extension),"
'\n(3) a "glob" pattern (https://tldp.org/LDP/abs/html/globbingref.html) that will be expanded'
" using python's `glob.glob` function and should return a collection of checkpoint files."
"\nIf you'd like to only evaluate a subset of the checkpoints specified by the above directory/glob"
" (e.g. every checkpoint saved after 5mil steps) you'll likely want to use the `--approx_ckpt_step_interval`"
" flag.",
)
parser.add_argument(
"--infer_output_dir",
dest="infer_output_dir",
action="store_true",
required=False,
help="applied when evaluating checkpoint(s) in nested save_dir_fmt: if specified, the output dir will be inferred from checkpoint path.",
)
parser.add_argument(
"--approx_ckpt_step_interval",
required=False,
default=None,
type=float,
help="if running tests on a collection of checkpoints (see the `--checkpoint` flag) this argument can be"
" used to skip checkpoints. In particular, if this value is specified and equals `n` then we will"
" only evaluate checkpoints whose step count is closest to each of `0*n`, `1*n`, `2*n`, `3*n`, ... "
" n * ceil(max training steps in ckpts / n). Note that 'closest to' is important here as AllenAct does"
" not generally save checkpoints at exact intervals (doing so would result in performance degregation"
" in distributed training).",
)
parser.add_argument(
"-r",
"--restart_pipeline",
dest="restart_pipeline",
action="store_true",
required=False,
help="for training, if checkpoint is specified, DO NOT continue the training pipeline from where"
" training had previously ended. Instead restart the training pipeline from scratch but"
" with the model weights from the checkpoint.",
)
parser.set_defaults(restart_pipeline=False)
parser.add_argument(
"-d",
"--deterministic_cudnn",
dest="deterministic_cudnn",
action="store_true",
required=False,
help="sets CuDNN to deterministic mode",
)
parser.set_defaults(deterministic_cudnn=False)
parser.add_argument(
"-m",
"--max_sampler_processes_per_worker",
required=False,
default=None,
type=int,
help="maximal number of sampler processes to spawn for each worker",
)
parser.add_argument(
"-e",
"--deterministic_agents",
dest="deterministic_agents",
action="store_true",
required=False,
help="enable deterministic agents (i.e. always taking the mode action) during validation/testing",
)
parser.set_defaults(deterministic_agents=False)
parser.add_argument(
"-l",
"--log_level",
default="info",
type=str,
required=False,
help="sets the log_level. it must be one of {}.".format(
", ".join(HUMAN_LOG_LEVELS)
),
)
parser.add_argument(
"-i",
"--disable_tensorboard",
dest="disable_tensorboard",
action="store_true",
required=False,
help="disable tensorboard logging",
)
parser.set_defaults(disable_tensorboard=False)
parser.add_argument(
"-a",
"--disable_config_saving",
dest="disable_config_saving",
action="store_true",
required=False,
help="disable saving the used config in the output directory",
)
parser.set_defaults(disable_config_saving=False)
parser.add_argument(
"--collect_valid_results",
dest="collect_valid_results",
action="store_true",
required=False,
help="enables returning and saving valid results during training",
)
parser.set_defaults(collect_valid_results=False)
parser.add_argument(
"--valid_on_initial_weights",
dest="valid_on_initial_weights",
action="store_true",
required=False,
help="enables running validation on the model with initial weights",
)
parser.set_defaults(valid_on_initial_weights=False)
parser.add_argument(
"--test_expert",
dest="test_expert",
action="store_true",
required=False,
help="use expert during test",
)
parser.set_defaults(test_expert=False)
parser.add_argument(
"--version", action="version", version=f"allenact {__version__}"
)
parser.add_argument(
"--distributed_ip_and_port",
dest="distributed_ip_and_port",
required=False,
type=str,
default="127.0.0.1:0",
help="IP address and port of listener for distributed process with rank 0."
" Port number 0 lets runner choose a free port. For more details, please follow the"
" tutorial https://allenact.org/tutorials/distributed-objectnav-tutorial/.",
)
parser.add_argument(
"--machine_id",
dest="machine_id",
required=False,
type=int,
default=0,
help="ID for machine in distributed runs. For more details, please follow the"
" tutorial https://allenact.org/tutorials/distributed-objectnav-tutorial/",
)
parser.add_argument(
"--callbacks",
dest="callbacks",
required=False,
type=str,
default="",
help="Comma-separated list of files with Callback classes to use.",
)
parser.add_argument(
"--enable_crash_recovery",
dest="enable_crash_recovery",
default=False,
action="store_true",
required=False,
help="Whether or not to try recovering when a task crashes (use at your own risk).",
)
### DEPRECATED FLAGS
parser.add_argument(
"-t",
"--test_date",
default=None,
type=str,
required=False,
help="`--test_date` has been deprecated. Please use `--eval` instead.",
)
parser.add_argument(
"--approx_ckpt_steps_count",
required=False,
default=None,
type=float,
help="`--approx_ckpt_steps_count` has been deprecated."
" Please specify the checkpoint directly using the '--checkpoint' flag.",
)
parser.add_argument(
"-k",
"--skip_checkpoints",
required=False,
default=0,
type=int,
help="`--skip_checkpoints` has been deprecated. Please use `--approx_ckpt_steps_count` instead.",
)
### END DEPRECATED FLAGS
return parser
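# Illustrative invocations (hypothetical experiment paths), using the `allenact`
# console script declared in setup.py:
#   allenact tutorials/foo.py -o experiment_output -s 1
#   allenact tutorials/foo.py --eval -c experiment_output/checkpoints -o experiment_output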
def get_args():
"""Creates the argument parser and parses any input arguments."""
parser = get_argument_parser()
args = parser.parse_args()
# check for deprecated
deprecated_flags = ["test_date", "skip_checkpoints", "approx_ckpt_steps_count"]
for df in deprecated_flags:
df_info = parser._option_string_actions[f"--{df}"]
if getattr(args, df) is not df_info.default:
raise RuntimeError(df_info.help)
return args
def _config_source(config_type: Type) -> Dict[str, str]:
if config_type is ExperimentConfig:
return {}
try:
module_file_path = inspect.getfile(config_type)
module_dot_path = config_type.__module__
sources_dict = {module_file_path: module_dot_path}
for super_type in config_type.__bases__:
sources_dict.update(_config_source(super_type))
return sources_dict
except TypeError as _:
return {}
def find_sub_modules(path: str, module_list: Optional[List] = None):
if module_list is None:
module_list = []
path = os.path.abspath(path)
if path[-3:] == ".py":
module_list.append(path)
elif os.path.isdir(path):
contents = os.listdir(path)
if any(key in contents for key in ["__init__.py", "setup.py"]):
new_paths = [os.path.join(path, f) for f in os.listdir(path)]
for new_path in new_paths:
find_sub_modules(new_path, module_list)
return module_list
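# Illustrative (not in the original source): find_sub_modules(".") walks the working
# directory, recursing only into directories that contain an __init__.py or setup.py,
# and returns the absolute paths of all .py files encountered.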
def load_config(args) -> Tuple[ExperimentConfig, Dict[str, str]]:
assert os.path.exists(
args.experiment_base
), "The path '{}' does not seem to exist (your current working directory is '{}').".format(
args.experiment_base, os.getcwd()
)
rel_base_dir = os.path.relpath( # Normalizing string representation of path
os.path.abspath(args.experiment_base), os.getcwd()
)
rel_base_dot_path = rel_base_dir.replace("/", ".")
if rel_base_dot_path == ".":
rel_base_dot_path = ""
exp_dot_path = args.experiment
if exp_dot_path[-3:] == ".py":
exp_dot_path = exp_dot_path[:-3]
exp_dot_path = exp_dot_path.replace("/", ".")
module_path = (
f"{rel_base_dot_path}.{exp_dot_path}"
if len(rel_base_dot_path) != 0
else exp_dot_path
)
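# Illustrative (not in the original source): with the default `--experiment_base`
# (the working directory) and `--experiment tutorials/foo.py`, the above yields
# module_path == "tutorials.foo".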
try:
importlib.invalidate_caches()
module = importlib.import_module(module_path)
except ModuleNotFoundError as e:
if not any(isinstance(arg, str) and module_path in arg for arg in e.args):
raise e
all_sub_modules = set(find_sub_modules(os.getcwd()))
desired_config_name = module_path.split(".")[-1]
relevant_submodules = [
sm for sm in all_sub_modules if desired_config_name in os.path.basename(sm)
]
raise ModuleNotFoundError(
f"Could not import experiment '{module_path}', are you sure this is the right path?"
f" Possibly relevant files include {relevant_submodules}."
f" Note that the experiment must be reachable along your `PYTHONPATH`, it might"
f" be helpful for you to run `export PYTHONPATH=$PYTHONPATH:$PWD` in your"
f" project's top level directory."
) from e
experiments = [
m[1]
for m in inspect.getmembers(module, inspect.isclass)
if m[1].__module__ == module.__name__ and issubclass(m[1], ExperimentConfig)
]
assert (
len(experiments) == 1
), "Too many or two few experiments defined in {}".format(module_path)
config_kwargs = {}
if args.config_kwargs is not None:
if os.path.exists(args.config_kwargs):
with open(args.config_kwargs, "r") as f:
config_kwargs = json.load(f)
else:
try:
config_kwargs = json.loads(args.config_kwargs)
except json.JSONDecodeError:
get_logger().warning(
f"The input for --config_kwargs ('{args.config_kwargs}')"
f" does not appear to be valid json. Often this is due to"
f" json requiring very specific syntax (e.g. double quoted strings)"
f" we'll try to get around this by evaluating with `ast.literal_eval`"
f" (a safer version of the standard `eval` function)."
)
config_kwargs = ast.literal_eval(args.config_kwargs)
assert isinstance(
config_kwargs, Dict
), "`--config_kwargs` must be a json string (or a path to a .json file) that evaluates to a dictionary."
config = experiments[0](**config_kwargs)
sources = _config_source(config_type=experiments[0])
sources[CONFIG_KWARGS_STR] = json.dumps(config_kwargs)
return config, sources
def main():
args = get_args()
init_logging(args.log_level)
get_logger().info("Running with args {}".format(args))
ptitle("Master: {}".format("Training" if args.eval is None else "Evaluation"))
cfg, srcs = load_config(args)
if not args.eval:
OnPolicyRunner(
config=cfg,
output_dir=args.output_dir,
save_dir_fmt=args.save_dir_fmt,
loaded_config_src_files=srcs,
seed=args.seed,
mode="train",
deterministic_cudnn=args.deterministic_cudnn,
deterministic_agents=args.deterministic_agents,
extra_tag=args.extra_tag,
disable_tensorboard=args.disable_tensorboard,
disable_config_saving=args.disable_config_saving,
distributed_ip_and_port=args.distributed_ip_and_port,
machine_id=args.machine_id,
callbacks_paths=args.callbacks,
).start_train(
checkpoint=args.checkpoint,
restart_pipeline=args.restart_pipeline,
max_sampler_processes_per_worker=args.max_sampler_processes_per_worker,
collect_valid_results=args.collect_valid_results,
valid_on_initial_weights=args.valid_on_initial_weights,
try_restart_after_task_error=args.enable_crash_recovery,
)
else:
OnPolicyRunner(
config=cfg,
output_dir=args.output_dir,
save_dir_fmt=args.save_dir_fmt,
loaded_config_src_files=srcs,
seed=args.seed,
mode="test",
deterministic_cudnn=args.deterministic_cudnn,
deterministic_agents=args.deterministic_agents,
extra_tag=args.extra_tag,
disable_tensorboard=args.disable_tensorboard,
disable_config_saving=args.disable_config_saving,
distributed_ip_and_port=args.distributed_ip_and_port,
machine_id=args.machine_id,
callbacks_paths=args.callbacks,
).start_test(
checkpoint_path_dir_or_pattern=args.checkpoint,
infer_output_dir=args.infer_output_dir,
approx_ckpt_step_interval=args.approx_ckpt_step_interval,
max_sampler_processes_per_worker=args.max_sampler_processes_per_worker,
inference_expert=args.test_expert,
)
if __name__ == "__main__":
main()
| allenact-main | allenact/main.py |
allenact-main | allenact/embodiedai/__init__.py |
|
allenact-main | allenact/embodiedai/mapping/__init__.py |
|
import torch
from torch.nn import functional as F
from allenact.algorithms.onpolicy_sync.losses.abstract_loss import (
AbstractActorCriticLoss,
)
from allenact.algorithms.onpolicy_sync.policy import ObservationType
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import ActorCriticOutput
class BinnedPointCloudMapLoss(AbstractActorCriticLoss):
"""A (binary cross entropy) loss for training metric maps for free space
prediction."""
def __init__(
self, binned_pc_uuid: str, map_logits_uuid: str,
):
"""Initializer.
# Parameters
binned_pc_uuid : The uuid of a sensor returning
a dictionary with an "egocentric_update"
key with the same format as returned by
`allenact.embodiedai.mapping.mapping_utils.map_builders.BinnedPointCloudMapBuilder`. Such a sensor
can be found in the `allenact_plugins` library: see
`allenact_plugins.ithor_plugin.ithor_sensors.BinnedPointCloudMapTHORSensor`.
map_logits_uuid : key used to index into `actor_critic_output.extras` (returned by the model)
whose value should be a tensor of the same shape as the tensor corresponding to the above
"egocentric_update" key.
"""
super().__init__()
self.binned_pc_uuid = binned_pc_uuid
self.map_logits_uuid = map_logits_uuid
def loss( # type: ignore
self,
step_count: int,
batch: ObservationType,
actor_critic_output: ActorCriticOutput[CategoricalDistr],
*args,
**kwargs,
):
ego_map_gt = batch["observations"][self.binned_pc_uuid][
"egocentric_update"
].float()
*_, h, w, c = ego_map_gt.shape
ego_map_gt = ego_map_gt.view(-1, h, w, c).permute(0, 3, 1, 2).contiguous()
ego_map_logits = actor_critic_output.extras[self.map_logits_uuid]
vision_range = ego_map_logits.shape[-1]
ego_map_logits = ego_map_logits.view(-1, c, vision_range, vision_range)
assert ego_map_gt.shape == ego_map_logits.shape
ego_map_gt_thresholded = (ego_map_gt > 0.5).float()
total_loss = F.binary_cross_entropy_with_logits(
ego_map_logits, ego_map_gt_thresholded
)
return (
total_loss,
{"binned_pc_map_ce": total_loss.item()},
)
# FOR DEBUGGING: Save all the ground-truth & predicted maps side by side
# import numpy as np
# import imageio
# for i in range(ego_map_gt_thresholded.shape[0]):
# a = ego_map_gt_thresholded[i].permute(1, 2, 0).flip(0).detach().numpy()
# b = torch.sigmoid(ego_map_logits)[i].permute(1, 2, 0).flip(0).detach().numpy()
#
# imageio.imwrite(
# f"z_occupancy_maps/{i}.png",
# np.concatenate((a, 1 + 0 * a[:, :10], b), axis=1),
# )
class SemanticMapFocalLoss(AbstractActorCriticLoss):
"""A (focal-loss based) loss for training metric maps for free space
prediction.
As semantic maps tend to be quite sparse this loss uses the focal
loss (https://arxiv.org/abs/1708.02002) rather than binary cross
entropy (BCE). If the `gamma` parameter is 0.0 then this is just the
normal BCE, larger values of `gamma` result less and less emphasis
being paid to examples that are already well classified.
"""
def __init__(
self, semantic_map_uuid: str, map_logits_uuid: str, gamma: float = 2.0
):
"""Initializer.
# Parameters
semantic_map_uuid : The uuid of a sensor returning
a dictionary with an "egocentric_update"
key with the same format as returned by
`allenact.embodiedai.mapping.mapping_utils.map_builders.SemanticMapBuilder`. Such a sensor
can be found in the `allenact_plugins` library: see
`allenact_plugins.ithor_plugin.ithor_sensors.SemanticMapTHORSensor`.
map_logits_uuid : key used to index into `actor_critic_output.extras` (returned by the model)
whose value should be a tensor of the same shape as the tensor corresponding to the above
"egocentric_update" key.
"""
super().__init__()
assert gamma >= 0, f"`gamma` (=={gamma}) must be >= 0"
self.semantic_map_uuid = semantic_map_uuid
self.map_logits_uuid = map_logits_uuid
self.gamma = gamma
def loss( # type: ignore
self,
step_count: int,
batch: ObservationType,
actor_critic_output: ActorCriticOutput[CategoricalDistr],
*args,
**kwargs,
):
ego_map_gt = batch["observations"][self.semantic_map_uuid]["egocentric_update"]
ego_map_gt = (
ego_map_gt.view(-1, *ego_map_gt.shape[-3:]).permute(0, 3, 1, 2).contiguous()
)
ego_map_logits = actor_critic_output.extras[self.map_logits_uuid]
ego_map_logits = ego_map_logits.view(-1, *ego_map_logits.shape[-3:])
assert ego_map_gt.shape == ego_map_logits.shape
p = torch.sigmoid(ego_map_logits)
one_minus_p = torch.sigmoid(-ego_map_logits)
log_p = F.logsigmoid(ego_map_logits)
log_one_minus_p = F.logsigmoid(-ego_map_logits)
ego_map_gt = ego_map_gt.float()
total_loss = -(
ego_map_gt * (log_p * (one_minus_p ** self.gamma))
+ (1 - ego_map_gt) * (log_one_minus_p * (p ** self.gamma))
).mean()
return (
total_loss,
{"sem_map_focal_loss": total_loss.item()},
)
# FOR DEBUGGING: Save all the ground-truth & predicted maps side by side
# import numpy as np
# import imageio
# from allenact.embodiedai.mapping.mapping_utils.map_builders import SemanticMapBuilder
#
# print("\n" * 3)
# for i in range(ego_map_gt.shape[0]):
# pred_sem_map = torch.sigmoid(ego_map_logits)[i].permute(1, 2, 0).flip(0).detach()
# a = SemanticMapBuilder.randomly_color_semantic_map(ego_map_gt[i].permute(1, 2, 0).flip(0).detach())
# b = SemanticMapBuilder.randomly_color_semantic_map(pred_sem_map)
# imageio.imwrite(
# f"z_semantic_maps/{i}.png",
# np.concatenate((a, 255 + a[:, :10] * 0, b), axis=1),
# )
#
| allenact-main | allenact/embodiedai/mapping/mapping_losses.py |
allenact-main | allenact/embodiedai/mapping/mapping_utils/__init__.py |
|
# MIT License
#
# Original Copyright (c) 2020 Devendra Chaplot
#
# Modified work Copyright (c) 2021 Allen Institute for Artificial Intelligence
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import random
from typing import Optional, Sequence, Union, Dict
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from allenact.embodiedai.mapping.mapping_utils.point_cloud_utils import (
depth_frame_to_world_space_xyz,
project_point_cloud_to_map,
)
class BinnedPointCloudMapBuilder(object):
"""Class used to iteratively construct a map of "free space" based on input
depth maps (i.e. pointclouds).
Adapted from https://github.com/devendrachaplot/Neural-SLAM
This class can be used to (iteratively) construct a metric map of free space in an environment as
an agent moves around. After every step the agent takes, you should call the `update` function and
pass the agent's egocentric depth image along with the agent's new position. This depth map will
be converted into a pointcloud, binned along the up/down axis, and then projected
onto a 3-dimensional tensor of shape (HxWxC) where HxW represents the ground plane
and C equals the number of bins the up-down coordinate was binned into. This 3d map counts the
number of points in each bin. Thus a lack of points within a region can be used to infer that
that region is free space.
# Attributes
fov : FOV of the camera used to produce the depth images given when calling `update`.
vision_range_in_map_units : The maximum distance (in number of rows/columns) that will
be updated when calling `update`; points outside of this map vision range are ignored.
map_size_in_cm : Total map size in cm.
resolution_in_cm : Number of cm per row/column in the map.
height_bins : The bins used to bin the up-down coordinate (for us the y-coordinate). For example,
if `height_bins = [0.1, 1]` then
all y-values < 0.1 will be mapped to 0, all y values in [0.1, 1) will be mapped to 1, and
all y-values >= 1 will be mapped to 2.
**Importantly:** these y-values will first be recentered by the `min_xyz` value passed when
calling `reset(...)`.
device : A `torch.device` on which to run computations. If this device is a GPU you can potentially
obtain significant speed-ups.
"""
def __init__(
self,
fov: float,
vision_range_in_cm: int,
map_size_in_cm: int,
resolution_in_cm: int,
height_bins: Sequence[float],
return_egocentric_local_context: bool = False,
device: torch.device = torch.device("cpu"),
):
assert vision_range_in_cm % resolution_in_cm == 0
self.fov = fov
self.vision_range_in_map_units = vision_range_in_cm // resolution_in_cm
self.map_size_in_cm = map_size_in_cm
self.resolution_in_cm = resolution_in_cm
self.height_bins = height_bins
self.device = device
self.return_egocentric_local_context = return_egocentric_local_context
self.binned_point_cloud_map = np.zeros(
(
self.map_size_in_cm // self.resolution_in_cm,
self.map_size_in_cm // self.resolution_in_cm,
len(self.height_bins) + 1,
),
dtype=np.float32,
)
self.min_xyz: Optional[np.ndarray] = None
def update(
self,
depth_frame: np.ndarray,
camera_xyz: np.ndarray,
camera_rotation: float,
camera_horizon: float,
) -> Dict[str, np.ndarray]:
"""Updates the map with the input depth frame from the agent.
See the `allenact.embodiedai.mapping.mapping_utils.point_cloud_utils.project_point_cloud_to_map`
function for more information on input parameter definitions. **We assume that the input
`depth_frame` has depths recorded in meters**.
# Returns
Let `map_size = self.map_size_in_cm // self.resolution_in_cm`. Returns a dictionary with keys-values:
* `"egocentric_update"` - A tensor of shape
`(vision_range_in_map_units)x(vision_range_in_map_units)x(len(self.height_bins) + 1)` corresponding
to the binned pointcloud after having been centered on the agent and rotated so that
points ahead of the agent correspond to larger row indices and points further to the right of the agent
correspond to larger column indices. Note that by "centered" we mean that one can picture
the agent as being positioned at (0, vision_range_in_map_units/2) and facing downward. Each entry in this tensor
is a count equaling the number of points in the pointcloud that, once binned, fell into this
entry. This is likely the output you want to use if you want to build a model to predict free space from an image.
* `"allocentric_update"` - A `(map_size)x(map_size)x(len(self.height_bins) + 1)` corresponding
to `"egocentric_update"` but rotated to the world-space coordinates. This `allocentric_update`
is what is used to update the internally stored representation of the map.
* `"map"` - A `(map_size)x(map_size)x(len(self.height_bins) + 1)` tensor corresponding
to the sum of all `"allocentric_update"` values since the last `reset()`.
"""
with torch.no_grad():
assert self.min_xyz is not None, "Please call `reset` before `update`."
camera_xyz = (
torch.from_numpy(camera_xyz - self.min_xyz).float().to(self.device)
)
try:
depth_frame = torch.from_numpy(depth_frame).to(self.device)
except ValueError:
depth_frame = torch.from_numpy(depth_frame.copy()).to(self.device)
depth_frame[
depth_frame
> self.vision_range_in_map_units * self.resolution_in_cm / 100
] = np.NaN
world_space_point_cloud = depth_frame_to_world_space_xyz(
depth_frame=depth_frame,
camera_world_xyz=camera_xyz,
rotation=camera_rotation,
horizon=camera_horizon,
fov=self.fov,
)
world_binned_map_update = project_point_cloud_to_map(
xyz_points=world_space_point_cloud,
bin_axis="y",
bins=self.height_bins,
map_size=self.binned_point_cloud_map.shape[0],
resolution_in_cm=self.resolution_in_cm,
flip_row_col=True,
)
# Center the cloud on the agent
recentered_point_cloud = world_space_point_cloud - (
torch.FloatTensor([1.0, 0.0, 1.0]).to(self.device) * camera_xyz
).reshape((1, 1, 3))
# Rotate the cloud so that positive-z is the direction the agent is looking
theta = (
np.pi * camera_rotation / 180
) # No negative since THOR rotations are already backwards
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
rotation_transform = torch.FloatTensor(
[
[cos_theta, 0, -sin_theta],
[0, 1, 0], # unchanged
[sin_theta, 0, cos_theta],
]
).to(self.device)
rotated_point_cloud = recentered_point_cloud @ rotation_transform.T
xoffset = (self.map_size_in_cm / 100) / 2
agent_centric_point_cloud = rotated_point_cloud + torch.FloatTensor(
[xoffset, 0, 0]
).to(self.device)
allocentric_update_numpy = world_binned_map_update.cpu().numpy()
self.binned_point_cloud_map = (
self.binned_point_cloud_map + allocentric_update_numpy
)
agent_centric_binned_map = project_point_cloud_to_map(
xyz_points=agent_centric_point_cloud,
bin_axis="y",
bins=self.height_bins,
map_size=self.binned_point_cloud_map.shape[0],
resolution_in_cm=self.resolution_in_cm,
flip_row_col=True,
)
vr = self.vision_range_in_map_units
vr_div_2 = self.vision_range_in_map_units // 2
width_div_2 = agent_centric_binned_map.shape[1] // 2
agent_centric_binned_map = agent_centric_binned_map[
:vr, (width_div_2 - vr_div_2) : (width_div_2 + vr_div_2), :
]
to_return = {
"egocentric_update": agent_centric_binned_map.cpu().numpy(),
"allocentric_update": allocentric_update_numpy,
"map": self.binned_point_cloud_map,
}
if self.return_egocentric_local_context:
# See the update function of the semantic map sensor for in-depth comments regarding the below
# Essentially we are simply rotating the full map into the orientation of the agent and then
# selecting a smaller region around the agent.
theta = -np.pi * camera_rotation / 180
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
rot_mat = torch.FloatTensor(
[[cos_theta, -sin_theta], [sin_theta, cos_theta]]
).to(self.device)
move_back_offset = (
-0.5
* (self.vision_range_in_map_units * self.resolution_in_cm / 100)
) * (
rot_mat
@ torch.tensor(
[0, 1], dtype=torch.float, device=self.device
).unsqueeze(-1)
)
map_size = self.binned_point_cloud_map.shape[0]
scaler = 2 * (100 / (self.resolution_in_cm * map_size))
offset_to_center_the_agent = (
scaler
* (
torch.tensor(
[camera_xyz[0], camera_xyz[2],],
dtype=torch.float,
device=self.device,
).unsqueeze(-1)
+ move_back_offset
)
- 1
)
offset_to_top_of_image = rot_mat @ torch.FloatTensor(
[0, 1.0]
).unsqueeze(1).to(self.device)
rotation_and_translate_mat = torch.cat(
(rot_mat, offset_to_top_of_image + offset_to_center_the_agent,),
dim=1,
)
full_map_tensor = (
torch.tensor(
self.binned_point_cloud_map,
dtype=torch.float,
device=self.device,
)
.unsqueeze(0)
.permute(0, 3, 1, 2)
)
full_ego_map = (
F.grid_sample(
full_map_tensor,
F.affine_grid(
rotation_and_translate_mat.to(self.device).unsqueeze(0),
full_map_tensor.shape,
align_corners=False,
),
align_corners=False,
)
.squeeze(0)
.permute(1, 2, 0)
)
egocentric_local_context = full_ego_map[
:vr, (width_div_2 - vr_div_2) : (width_div_2 + vr_div_2), :
]
to_return[
"egocentric_local_context"
] = egocentric_local_context.cpu().numpy()
return to_return
def reset(self, min_xyz: np.ndarray):
"""Reset the map.
Resets the internally stored map.
# Parameters
min_xyz : An array of size (3,) corresponding to the minimum possible x, y, and z values that will be observed
as a point in a pointcloud when calling `.update(...)`. The (world-space) maps returned by calls to `update`
will have been normalized so the (0,0,:) entry corresponds to these minimum values.
"""
self.min_xyz = min_xyz
self.binned_point_cloud_map = np.zeros_like(self.binned_point_cloud_map)
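# Minimal usage sketch (hypothetical values; `depth_frame` is an HxW numpy array of
# depths in meters and `camera_xyz` an array of shape (3,)):
# builder = BinnedPointCloudMapBuilder(
#     fov=90.0, vision_range_in_cm=500, map_size_in_cm=1000,
#     resolution_in_cm=5, height_bins=[0.1, 1.0],
# )
# builder.reset(min_xyz=np.array([0.0, 0.0, 0.0]))
# out = builder.update(depth_frame, camera_xyz, camera_rotation=90.0, camera_horizon=0.0)
# out["egocentric_update"].shape  # (100, 100, 3): 500cm/5cm map units, len(height_bins)+1 bins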
class ObjectHull2d:
def __init__(
self,
object_id: str,
object_type: str,
hull_points: Union[np.ndarray, Sequence[Sequence[float]]],
):
"""A class used to represent 2d convex hulls of objects when projected
to the ground plane.
# Parameters
object_id : A unique id for the object.
object_type : The type of the object.
hull_points : A Nx2 matrix with `hull_points[:, 0]` being the x coordinates and `hull_points[:, 1]` being
the `z` coordinates (this is using the Unity game engine conventions where the `y` axis is up/down).
"""
self.object_id = object_id
self.object_type = object_type
self.hull_points = (
hull_points
if isinstance(hull_points, np.ndarray)
else np.array(hull_points)
)
class SemanticMapBuilder(object):
"""Class used to iteratively construct a semantic map based on input depth
maps (i.e. pointclouds).
Adapted from https://github.com/devendrachaplot/Neural-SLAM
This class can be used to (iteratively) construct a semantic map of objects in the environment.
This map is similar to that generated by `BinnedPointCloudMapBuilder` (see its documentation for
more information) but the various channels correspond to different object types. Thus
if the `(i,j,k)` entry of a map generated by this function is `True`, this means that an
object of type `k` is present in position `i,j` in the map. In particular, by "present" we mean that,
after projecting the object to the ground plane and taking the convex hull of the resulting
2d object, a non-trivial portion of this convex hull overlaps the `i,j` position.
For attribute information, see the documentation of the `BinnedPointCloudMapBuilder` class. The
only attribute present in this class that is not present in `BinnedPointCloudMapBuilder` is
`ordered_object_types` which corresponds to a list of unique object types where
object type `ordered_object_types[i]` will correspond to the `i`th channel of the map
generated by this class.
"""
def __init__(
self,
fov: float,
vision_range_in_cm: int,
map_size_in_cm: int,
resolution_in_cm: int,
ordered_object_types: Sequence[str],
device: torch.device = torch.device("cpu"),
):
self.fov = fov
self.vision_range_in_map_units = vision_range_in_cm // resolution_in_cm
self.map_size_in_cm = map_size_in_cm
self.resolution_in_cm = resolution_in_cm
self.ordered_object_types = tuple(ordered_object_types)
self.device = device
self.object_type_to_index = {
ot: i for i, ot in enumerate(self.ordered_object_types)
}
self.ground_truth_semantic_map = np.zeros(
(
self.map_size_in_cm // self.resolution_in_cm,
self.map_size_in_cm // self.resolution_in_cm,
len(self.ordered_object_types),
),
dtype=np.uint8,
)
self.explored_mask = np.zeros(
(
self.map_size_in_cm // self.resolution_in_cm,
self.map_size_in_cm // self.resolution_in_cm,
1,
),
dtype=bool,
)
self.min_xyz: Optional[np.ndarray] = None
@staticmethod
def randomly_color_semantic_map(
map: Union[np.ndarray, torch.Tensor], threshold: float = 0.5, seed: int = 1
) -> np.ndarray:
if not isinstance(map, np.ndarray):
map = np.array(map)
rnd = random.Random(seed)
semantic_int_mat = (
(map >= threshold)
* np.array(list(range(1, map.shape[-1] + 1))).reshape((1, 1, -1))
).max(-1)
# noinspection PyTypeChecker
return np.uint8(
np.array(
[(0, 0, 0)]
+ [
tuple(rnd.randint(0, 256) for _ in range(3))
for _ in range(map.shape[-1])
]
)[semantic_int_mat]
)
def _xzs_to_colrows(self, xzs: np.ndarray):
height, width, _ = self.ground_truth_semantic_map.shape
return np.clip(
np.int32(
(
(100 / self.resolution_in_cm)
* (xzs - np.array([[self.min_xyz[0], self.min_xyz[2]]]))
)
),
a_min=0,
a_max=np.array(
[width - 1, height - 1]
), # width then height since we return cols then rows
)
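# Illustrative (not in the original source): with resolution_in_cm == 5 and
# min_xyz == [0, 0, 0], a hull point at (x=1.0m, z=2.0m) maps to (col=20, row=40)
# since there are 100/5 == 20 map units per meter.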
def build_ground_truth_map(self, object_hulls: Sequence[ObjectHull2d]):
self.ground_truth_semantic_map.fill(0)
height, width, _ = self.ground_truth_semantic_map.shape
for object_hull in object_hulls:
ot = object_hull.object_type
if ot in self.object_type_to_index:
ind = self.object_type_to_index[ot]
self.ground_truth_semantic_map[
:, :, ind : (ind + 1)
] = cv2.fillConvexPoly(
img=np.array(
self.ground_truth_semantic_map[:, :, ind : (ind + 1)],
dtype=np.uint8,
),
points=self._xzs_to_colrows(np.array(object_hull.hull_points)),
color=255,
)
def update(
self,
depth_frame: np.ndarray,
camera_xyz: np.ndarray,
camera_rotation: float,
camera_horizon: float,
) -> Dict[str, np.ndarray]:
"""Updates the map with the input depth frame from the agent.
See the documentation for `BinnedPointCloudMapBuilder.update`,
the inputs and outputs are similar except that channels are used
to represent the presence/absence of objects of given types.
Unlike `BinnedPointCloudMapBuilder.update`, this function also
returns two masks with keys `"egocentric_mask"` and `"mask"`
that can be used to determine what portions of the map have been
observed by the agent so far in the egocentric and world-space
reference frames respectively.
"""
with torch.no_grad():
assert self.min_xyz is not None
camera_xyz = torch.from_numpy(camera_xyz - self.min_xyz).to(self.device)
map_size = self.ground_truth_semantic_map.shape[0]
depth_frame = torch.from_numpy(depth_frame).to(self.device)
depth_frame[
depth_frame
> self.vision_range_in_map_units * self.resolution_in_cm / 100
] = np.NaN
world_space_point_cloud = depth_frame_to_world_space_xyz(
depth_frame=depth_frame,
camera_world_xyz=camera_xyz,
rotation=camera_rotation,
horizon=camera_horizon,
fov=self.fov,
)
world_newly_explored = (
project_point_cloud_to_map(
xyz_points=world_space_point_cloud,
bin_axis="y",
bins=[],
map_size=map_size,
resolution_in_cm=self.resolution_in_cm,
flip_row_col=True,
)
> 0.001
)
world_update_and_mask = torch.cat(
(
torch.logical_and(
torch.from_numpy(self.ground_truth_semantic_map).to(
self.device
),
world_newly_explored,
),
world_newly_explored,
),
dim=-1,
).float()
world_update_and_mask_for_sample = world_update_and_mask.unsqueeze(
0
).permute(0, 3, 1, 2)
# We now use grid sampling to rotate world_update_for_sample into the egocentric coordinate
# frame of the agent so that the agent's forward direction is downwards in the tensor
# (and its right side is to the right in the image, i.e. right/left are taken
# from the perspective of the agent). This convention aligns with
# what's expected by grid_sample where +x corresponds to +cols and +z corresponds to +rows.
# Here also the rows/cols have been normalized so that the center of the image is at (0,0)
# and the bottom right is at (1,1).
# Mentally you can think of the output from the F.affine_grid function as you wanting
# rotating/translating an axis-aligned square on the image-to-be-sampled and then
# copying whatever is in this square to a new image. Note that the translation always
# happens in the global reference frame after the rotation. We'll start by rotating
# the square so that the the agent's z direction is downwards in the image.
# Since the global axis of the map and the grid sampling are aligned, this requires
# rotating the square by the rotation of the agent. As rotation is negative the usual
# standard in THOR, we need to negate the rotation of the agent.
theta = -np.pi * camera_rotation / 180
# Here form the rotation matrix
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
rot_mat = torch.FloatTensor(
[[cos_theta, -sin_theta], [sin_theta, cos_theta]]
).to(self.device)
# Now we need to figure out the translation. For an intuitive understanding, we break this
# translation into two different "offsets". The first offset centers the square on the
# agent's current location:
scaler = 2 * (100 / (self.resolution_in_cm * map_size))
offset_to_center_the_agent = (
scaler
* torch.FloatTensor([camera_xyz[0], camera_xyz[2]])
.unsqueeze(-1)
.to(self.device)
- 1
)
# The second offset moves the square in the direction of the agent's z direction
# so that the output image will have the agent's view starting directly at the
# top of the image.
offset_to_top_of_image = rot_mat @ torch.FloatTensor([0, 1.0]).unsqueeze(
1
).to(self.device)
rotation_and_translate_mat = torch.cat(
(rot_mat, offset_to_top_of_image + offset_to_center_the_agent,), dim=1,
)
ego_update_and_mask = F.grid_sample(
world_update_and_mask_for_sample.to(self.device),
F.affine_grid(
rotation_and_translate_mat.to(self.device).unsqueeze(0),
world_update_and_mask_for_sample.shape,
align_corners=False,
),
align_corners=False,
)
# All that's left now is to crop out the portion of the transformed tensor that we actually
            # care about (i.e. the portion corresponding to the agent's `self.vision_range_in_map_units`).
vr = self.vision_range_in_map_units
half_vr = vr // 2
center = self.map_size_in_cm // (2 * self.resolution_in_cm)
cropped = ego_update_and_mask[
:, :, :vr, (center - half_vr) : (center + half_vr)
]
np.logical_or(
self.explored_mask,
world_newly_explored.cpu().numpy(),
out=self.explored_mask,
)
return {
"egocentric_update": cropped[0, :-1].permute(1, 2, 0).cpu().numpy(),
"egocentric_mask": (cropped[0, -1:].view(vr, vr, 1) > 0.001)
.cpu()
.numpy(),
"explored_mask": np.array(self.explored_mask),
"map": np.logical_and(
self.explored_mask, (self.ground_truth_semantic_map > 0)
),
}
def reset(self, min_xyz: np.ndarray, object_hulls: Sequence[ObjectHull2d]):
"""Reset the map.
Resets the internally stored map.
# Parameters
min_xyz : An array of size (3,) corresponding to the minimum possible x, y, and z values that will be observed
as a point in a pointcloud when calling `.update(...)`. The (world-space) maps returned by calls to `update`
will have been normalized so the (0,0,:) entry corresponds to these minimum values.
object_hulls : The object hulls corresponding to objects in the scene. These will be used to
construct the map.
"""
self.min_xyz = min_xyz
self.build_ground_truth_map(object_hulls=object_hulls)
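# A hedged usage sketch (an illustrative addition, not part of the original file):
# the expected reset/update call order for the map builder defined above. The
# `builder` argument is assumed to be an instance of that class; `episode_frames`
# is a hypothetical iterable of per-step (depth, pose) inputs.
def _example_semantic_map_builder_loop(builder, object_hulls, episode_frames):
    builder.reset(min_xyz=np.zeros(3), object_hulls=object_hulls)
    last_out = None
    for depth_frame, camera_xyz, rotation, horizon in episode_frames:
        last_out = builder.update(
            depth_frame=depth_frame,
            camera_xyz=camera_xyz,
            camera_rotation=rotation,
            camera_horizon=horizon,
        )
        # last_out["egocentric_update"] is the per-step agent-centric view;
        # last_out["map"] is the explored portion of the ground-truth map.
    return last_out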
| allenact-main | allenact/embodiedai/mapping/mapping_utils/map_builders.py |
# MIT License
#
# Original Copyright (c) 2020 Devendra Chaplot
#
# Modified work Copyright (c) 2021 Allen Institute for Artificial Intelligence
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
from typing import Optional, Sequence, cast
import numpy as np
import torch
from allenact_plugins.ithor_plugin.ithor_util import vertical_to_horizontal_fov
def camera_space_xyz_to_world_xyz(
camera_space_xyzs: torch.Tensor,
camera_world_xyz: torch.Tensor,
rotation: float,
horizon: float,
) -> torch.Tensor:
"""Transforms xyz coordinates in the camera's coordinate frame to world-
space (global) xyz frame.
This code has been adapted from https://github.com/devendrachaplot/Neural-SLAM.
**IMPORTANT:** We use the conventions from the Unity game engine. In particular:
* A rotation of 0 corresponds to facing north.
* Positive rotations correspond to CLOCKWISE rotations. That is a rotation of 90 degrees corresponds
to facing east. **THIS IS THE OPPOSITE CONVENTION OF THE ONE GENERALLY USED IN MATHEMATICS.**
    * When facing NORTH (rotation==0) moving ahead by 1 meter results in the z coordinate
increasing by 1. Moving to the right by 1 meter corresponds to increasing the x coordinate by 1.
Finally moving upwards by 1 meter corresponds to increasing the y coordinate by 1.
**Having x,z as the ground plane in this way is common in computer graphics but is different than
the usual mathematical convention of having z be "up".**
* The horizon corresponds to how far below the horizontal the camera is facing. I.e. a horizon
of 30 corresponds to the camera being angled downwards at an angle of 30 degrees.
# Parameters
camera_space_xyzs : A 3xN matrix of xyz coordinates in the camera's reference frame.
Here `x, y, z = camera_space_xyzs[:, i]` should equal the xyz coordinates for the ith point.
camera_world_xyz : The camera's xyz position in the world reference frame.
rotation : The world-space rotation (in degrees) of the camera.
horizon : The horizon (in degrees) of the camera.
# Returns
    3xN tensor whose entry [:, i] is the xyz world-space coordinate corresponding to the camera-space
    coordinate camera_space_xyzs[:, i].
"""
# Adapted from https://github.com/devendrachaplot/Neural-SLAM.
# First compute the transformation that points undergo
# due to the camera's horizon
psi = -horizon * np.pi / 180
cos_psi = np.cos(psi)
sin_psi = np.sin(psi)
# fmt: off
horizon_transform = camera_space_xyzs.new(
[
[1, 0, 0], # unchanged
[0, cos_psi, sin_psi],
[0, -sin_psi, cos_psi,],
],
)
# fmt: on
# Next compute the transformation that points undergo
# due to the agent's rotation about the y-axis
phi = -rotation * np.pi / 180
cos_phi = np.cos(phi)
sin_phi = np.sin(phi)
# fmt: off
rotation_transform = camera_space_xyzs.new(
[
[cos_phi, 0, -sin_phi],
[0, 1, 0], # unchanged
[sin_phi, 0, cos_phi],],
)
# fmt: on
# Apply the above transformations
view_points = (rotation_transform @ horizon_transform) @ camera_space_xyzs
# Translate the points w.r.t. the camera's position in world space.
world_points = view_points + camera_world_xyz[:, None]
return world_points
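# Illustrative addition (not part of the original file): a small sanity check of
# the clockwise-rotation convention documented above. With rotation == 90 the
# camera faces east, so the camera-space "forward" vector (0, 0, 1) should land
# on the world-space +x axis.
def _example_check_rotation_convention():
    forward = torch.tensor([[0.0], [0.0], [1.0]])  # one 3x1 camera-space point
    world = camera_space_xyz_to_world_xyz(
        camera_space_xyzs=forward,
        camera_world_xyz=torch.zeros(3),
        rotation=90,
        horizon=0,
    )
    assert torch.allclose(world, torch.tensor([[1.0], [0.0], [0.0]]), atol=1e-6)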
def depth_frame_to_camera_space_xyz(
depth_frame: torch.Tensor, mask: Optional[torch.Tensor], fov: float = 90
) -> torch.Tensor:
"""Transforms a input depth map into a collection of xyz points (i.e. a
point cloud) in the camera's coordinate frame.
# Parameters
depth_frame : A square depth map, i.e. an MxM matrix with entry `depth_frame[i, j]` equaling
the distance from the camera to nearest surface at pixel (i,j).
    mask : An optional boolean mask of the same size (MxM) as the input depth. Only values
    where this mask is true will be included in the returned matrix of xyz coordinates. If
    `None` then no pixels will be masked out (so the returned matrix of xyz points will have
    dimension 3x(M*M)).
fov: The field of view of the camera.
# Returns
    A 3xN matrix with entry [:, i] equaling the xyz coordinates (in the camera's coordinate
frame) of a point in the point cloud corresponding to the input depth frame.
"""
h, w = depth_frame.shape[:2]
if mask is None:
mask = torch.ones_like(depth_frame, dtype=torch.bool)
# pixel centers
camera_space_yx_offsets = (
torch.stack(torch.where(mask))
+ 0.5 # Offset by 0.5 so that we are in the middle of the pixel
)
# Subtract center
camera_space_yx_offsets[:1] -= h / 2.0
camera_space_yx_offsets[1:] -= w / 2.0
# Make "up" in y be positive
camera_space_yx_offsets[0, :] *= -1
# Put points on the clipping plane
camera_space_yx_offsets[:1] *= (2.0 / h) * math.tan((fov / 2) / 180 * math.pi)
camera_space_yx_offsets[1:] *= (2.0 / w) * math.tan(
(vertical_to_horizontal_fov(fov, height=h, width=w) / 2) / 180 * math.pi
)
# noinspection PyArgumentList
camera_space_xyz = torch.cat(
[
camera_space_yx_offsets[1:, :], # This is x
camera_space_yx_offsets[:1, :], # This is y
torch.ones_like(camera_space_yx_offsets[:1, :]),
],
axis=0,
)
return camera_space_xyz * depth_frame[mask][None, :]
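# Illustrative addition: a shape/convention check for the function above. A
# constant-depth 4x4 frame with a 90 degree fov yields a 3x16 point cloud whose
# z coordinates (the third row) all equal the input depth.
def _example_depth_to_camera_space_shapes():
    depth = 2.0 * torch.ones(4, 4)
    pts = depth_frame_to_camera_space_xyz(depth_frame=depth, mask=None, fov=90)
    assert pts.shape == (3, 16)
    assert torch.allclose(pts[2], torch.full((16,), 2.0))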
def depth_frame_to_world_space_xyz(
depth_frame: torch.Tensor,
camera_world_xyz: torch.Tensor,
rotation: float,
horizon: float,
fov: float,
):
"""Transforms a input depth map into a collection of xyz points (i.e. a
point cloud) in the world-space coordinate frame.
**IMPORTANT:** We use the conventions from the Unity game engine. In particular:
* A rotation of 0 corresponds to facing north.
* Positive rotations correspond to CLOCKWISE rotations. That is a rotation of 90 degrees corresponds
to facing east. **THIS IS THE OPPOSITE CONVENTION OF THE ONE GENERALLY USED IN MATHEMATICS.**
    * When facing NORTH (rotation==0) moving ahead by 1 meter results in the z coordinate
increasing by 1. Moving to the right by 1 meter corresponds to increasing the x coordinate by 1.
Finally moving upwards by 1 meter corresponds to increasing the y coordinate by 1.
**Having x,z as the ground plane in this way is common in computer graphics but is different than
the usual mathematical convention of having z be "up".**
* The horizon corresponds to how far below the horizontal the camera is facing. I.e. a horizon
of 30 corresponds to the camera being angled downwards at an angle of 30 degrees.
# Parameters
depth_frame : A square depth map, i.e. an MxM matrix with entry `depth_frame[i, j]` equaling
the distance from the camera to nearest surface at pixel (i,j).
camera_world_xyz : The camera's xyz position in the world reference frame.
rotation : The world-space rotation (in degrees) of the camera.
horizon : The horizon (in degrees) of the camera.
fov: The field of view of the camera.
# Returns
    A 3xN matrix with entry [:, i] equaling the xyz coordinates (in the world coordinate
frame) of a point in the point cloud corresponding to the input depth frame.
"""
camera_space_xyz = depth_frame_to_camera_space_xyz(
depth_frame=depth_frame, mask=None, fov=fov
)
world_points = camera_space_xyz_to_world_xyz(
camera_space_xyzs=camera_space_xyz,
camera_world_xyz=camera_world_xyz,
rotation=rotation,
horizon=horizon,
)
return world_points.view(3, *depth_frame.shape).permute(1, 2, 0)
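# Illustrative addition: a brief usage sketch going from a depth frame to a
# (height x width x 3) grid of world-space xyz coordinates. All values here are
# arbitrary choices for the example.
def _example_depth_to_world_space():
    depth = torch.ones(4, 4)
    world = depth_frame_to_world_space_xyz(
        depth_frame=depth,
        camera_world_xyz=torch.tensor([1.0, 0.5, 1.0]),
        rotation=0.0,
        horizon=0.0,
        fov=90.0,
    )
    assert world.shape == (4, 4, 3)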
def project_point_cloud_to_map(
xyz_points: torch.Tensor,
bin_axis: str,
bins: Sequence[float],
map_size: int,
resolution_in_cm: int,
flip_row_col: bool,
):
"""Bins an input point cloud into a map tensor with the bins equaling the
channels.
This code has been adapted from https://github.com/devendrachaplot/Neural-SLAM.
# Parameters
xyz_points : (x,y,z) pointcloud(s) as a torch.Tensor of shape (... x height x width x 3).
All operations are vectorized across the `...` dimensions.
bin_axis : Either "x", "y", or "z", the axis which should be binned by the values in `bins`.
If you have generated your point clouds with any of the other functions in the `point_cloud_utils`
module you almost certainly want this to be "y" as this is the default upwards dimension.
bins: The values by which to bin along `bin_axis`, see the `bins` parameter of `np.digitize`
for more info.
    map_size : The axes not specified by `bin_axis` will be divided by `resolution_in_cm / 100`
and then rounded to the nearest integer. They are then expected to have their values
within the interval [0, ..., map_size - 1].
resolution_in_cm: The resolution_in_cm, in cm, of the map output from this function. Every
grid square of the map corresponds to a (`resolution_in_cm`x`resolution_in_cm`) square
in space.
flip_row_col: Should the rows/cols of the map be flipped? See the 'Returns' section below for more
info.
# Returns
A collection of maps of shape (... x map_size x map_size x (len(bins)+1)), note that bin_axis
has been moved to the last index of this returned map, the other two axes stay in their original
    order unless `flip_row_col` is `True`, in which case they are reversed (useful as often
rows should correspond to y or z instead of x).
"""
bin_dim = ["x", "y", "z"].index(bin_axis)
start_shape = xyz_points.shape
xyz_points = xyz_points.reshape([-1, *start_shape[-3:]])
num_clouds, h, w, _ = xyz_points.shape
if not flip_row_col:
new_order = [i for i in [0, 1, 2] if i != bin_dim] + [bin_dim]
else:
new_order = [i for i in [2, 1, 0] if i != bin_dim] + [bin_dim]
uvw_points = cast(
torch.Tensor, torch.stack([xyz_points[..., i] for i in new_order], dim=-1)
)
num_bins = len(bins) + 1
isnotnan = ~torch.isnan(xyz_points[..., 0])
uvw_points_binned: torch.Tensor = torch.cat(
(
torch.round(100 * uvw_points[..., :-1] / resolution_in_cm).long(),
torch.bucketize(
uvw_points[..., -1:].contiguous(), boundaries=uvw_points.new(bins)
),
),
dim=-1,
)
maxes = (
xyz_points.new()
.long()
.new([map_size, map_size, num_bins])
.reshape((1, 1, 1, 3))
)
isvalid = torch.logical_and(
torch.logical_and(
(uvw_points_binned >= 0).all(-1), (uvw_points_binned < maxes).all(-1),
),
isnotnan,
)
uvw_points_binned_with_index_mat = torch.cat(
(
torch.repeat_interleave(
torch.arange(0, num_clouds).to(xyz_points.device), h * w
).reshape(-1, 1),
uvw_points_binned.reshape(-1, 3),
),
dim=1,
)
uvw_points_binned_with_index_mat[~isvalid.reshape(-1), :] = 0
ind = (
uvw_points_binned_with_index_mat[:, 0] * (map_size * map_size * num_bins)
+ uvw_points_binned_with_index_mat[:, 1] * (map_size * num_bins)
+ uvw_points_binned_with_index_mat[:, 2] * num_bins
+ uvw_points_binned_with_index_mat[:, 3]
)
ind[~isvalid.reshape(-1)] = 0
count = torch.bincount(
ind.view(-1),
isvalid.view(-1).long(),
minlength=num_clouds * map_size * map_size * num_bins,
)
return count.view(*start_shape[:-3], map_size, map_size, num_bins)
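# Illustrative addition: a minimal binning example for the function above. Four
# points at known heights are projected onto a tiny map; `bins=[0.5]` splits the
# channel dimension into "below 0.5m" and "at/above 0.5m".
def _example_project_point_cloud():
    # A 1x1x4x3 "point cloud" of (x, y, z) coordinates in meters (y is up).
    xyz = torch.tensor(
        [[[[0.1, 0.0, 0.1], [0.1, 1.0, 0.1], [0.3, 0.0, 0.3], [0.3, 1.0, 0.3]]]]
    )
    maps = project_point_cloud_to_map(
        xyz_points=xyz,
        bin_axis="y",
        bins=[0.5],
        map_size=10,
        resolution_in_cm=10,
        flip_row_col=True,
    )
    assert maps.shape == (1, 10, 10, 2)
    assert maps.sum() == 4  # every input point lands in exactly one cell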
#################
# FOR DEBUGGING #
#################
# The below functions are versions of the above which, because of their reliance on
# numpy functions, cannot use GPU acceleration. These are possibly useful for debugging,
# performance comparisons, or for validating that the above GPU variants work properly.
def _cpu_only_camera_space_xyz_to_world_xyz(
camera_space_xyzs: np.ndarray,
camera_world_xyz: np.ndarray,
rotation: float,
horizon: float,
):
# Adapted from https://github.com/devendrachaplot/Neural-SLAM.
# view_position = 3, world_points = 3 x N
# NOTE: camera_position is not equal to agent_position!!
# First compute the transformation that points undergo
# due to the camera's horizon
psi = -horizon * np.pi / 180
cos_psi = np.cos(psi)
sin_psi = np.sin(psi)
# fmt: off
horizon_transform = np.array(
[
[1, 0, 0], # unchanged
[0, cos_psi, sin_psi],
[0, -sin_psi, cos_psi,],
],
np.float64,
)
# fmt: on
# Next compute the transformation that points undergo
# due to the agent's rotation about the y-axis
phi = -rotation * np.pi / 180
cos_phi = np.cos(phi)
sin_phi = np.sin(phi)
# fmt: off
rotation_transform = np.array(
[
[cos_phi, 0, -sin_phi],
[0, 1, 0], # unchanged
[sin_phi, 0, cos_phi],],
np.float64,
)
# fmt: on
# Apply the above transformations
view_points = (rotation_transform @ horizon_transform) @ camera_space_xyzs
# Translate the points w.r.t. the camera's position in world space.
world_points = view_points + camera_world_xyz[:, None]
return world_points
def _cpu_only_depth_frame_to_camera_space_xyz(
depth_frame: np.ndarray, mask: Optional[np.ndarray], fov: float = 90
):
""""""
assert (
len(depth_frame.shape) == 2 and depth_frame.shape[0] == depth_frame.shape[1]
), f"depth has shape {depth_frame.shape}, we only support (N, N) shapes for now."
resolution = depth_frame.shape[0]
if mask is None:
mask = np.ones(depth_frame.shape, dtype=bool)
# pixel centers
camera_space_yx_offsets = (
np.stack(np.where(mask))
+ 0.5 # Offset by 0.5 so that we are in the middle of the pixel
)
# Subtract center
camera_space_yx_offsets -= resolution / 2.0
# Make "up" in y be positive
camera_space_yx_offsets[0, :] *= -1
# Put points on the clipping plane
camera_space_yx_offsets *= (2.0 / resolution) * math.tan((fov / 2) / 180 * math.pi)
camera_space_xyz = np.concatenate(
[
camera_space_yx_offsets[1:, :], # This is x
camera_space_yx_offsets[:1, :], # This is y
np.ones_like(camera_space_yx_offsets[:1, :]),
],
axis=0,
)
return camera_space_xyz * depth_frame[mask][None, :]
def _cpu_only_depth_frame_to_world_space_xyz(
depth_frame: np.ndarray,
camera_world_xyz: np.ndarray,
rotation: float,
horizon: float,
fov: float,
):
camera_space_xyz = _cpu_only_depth_frame_to_camera_space_xyz(
depth_frame=depth_frame, mask=None, fov=fov
)
world_points = _cpu_only_camera_space_xyz_to_world_xyz(
camera_space_xyzs=camera_space_xyz,
camera_world_xyz=camera_world_xyz,
rotation=rotation,
horizon=horizon,
)
return world_points.reshape((3, *depth_frame.shape)).transpose((1, 2, 0))
def _cpu_only_project_point_cloud_to_map(
xyz_points: np.ndarray,
bin_axis: str,
bins: Sequence[float],
map_size: int,
resolution_in_cm: int,
flip_row_col: bool,
):
"""Bins points into bins.
Adapted from https://github.com/devendrachaplot/Neural-SLAM.
# Parameters
xyz_points : (x,y,z) point clouds as a np.ndarray of shape (... x height x width x 3). (x,y,z)
should be coordinates specified in meters.
bin_axis : Either "x", "y", or "z", the axis which should be binned by the values in `bins`
bins: The values by which to bin along `bin_axis`, see the `bins` parameter of `np.digitize`
for more info.
    map_size : The axes not specified by `bin_axis` will be divided by `resolution_in_cm / 100`
and then rounded to the nearest integer. They are then expected to have their values
within the interval [0, ..., map_size - 1].
resolution_in_cm: The resolution_in_cm, in cm, of the map output from this function. Every
grid square of the map corresponds to a (`resolution_in_cm`x`resolution_in_cm`) square
in space.
flip_row_col: Should the rows/cols of the map be flipped
# Returns
A collection of maps of shape (... x map_size x map_size x (len(bins)+1)), note that bin_axis
has been moved to the last index of this returned map, the other two axes stay in their original
    order unless `flip_row_col` is `True`, in which case they are reversed (useful as often
    rows should correspond to y or z instead of x).
"""
bin_dim = ["x", "y", "z"].index(bin_axis)
start_shape = xyz_points.shape
xyz_points = xyz_points.reshape([-1, *start_shape[-3:]])
num_clouds, h, w, _ = xyz_points.shape
if not flip_row_col:
new_order = [i for i in [0, 1, 2] if i != bin_dim] + [bin_dim]
else:
new_order = [i for i in [2, 1, 0] if i != bin_dim] + [bin_dim]
uvw_points: np.ndarray = np.stack([xyz_points[..., i] for i in new_order], axis=-1)
num_bins = len(bins) + 1
isnotnan = ~np.isnan(xyz_points[..., 0])
uvw_points_binned = np.concatenate(
(
np.round(100 * uvw_points[..., :-1] / resolution_in_cm).astype(np.int32),
np.digitize(uvw_points[..., -1:], bins=bins).astype(np.int32),
),
axis=-1,
)
maxes = np.array([map_size, map_size, num_bins]).reshape((1, 1, 1, 3))
isvalid = np.logical_and.reduce(
(
(uvw_points_binned >= 0).all(-1),
(uvw_points_binned < maxes).all(-1),
isnotnan,
)
)
uvw_points_binned_with_index_mat = np.concatenate(
(
np.repeat(np.arange(0, num_clouds), h * w).reshape(-1, 1),
uvw_points_binned.reshape(-1, 3),
),
axis=1,
)
uvw_points_binned_with_index_mat[~isvalid.reshape(-1), :] = 0
ind = np.ravel_multi_index(
uvw_points_binned_with_index_mat.transpose(),
(num_clouds, map_size, map_size, num_bins),
)
ind[~isvalid.reshape(-1)] = 0
count = np.bincount(
ind.ravel(),
isvalid.ravel().astype(np.int32),
minlength=num_clouds * map_size * map_size * num_bins,
)
return count.reshape([*start_shape[:-3], map_size, map_size, num_bins])
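# The CPU-only variants above exist mainly for validation. A hedged sketch (an
# illustrative addition, not original code) of how one might cross-check the
# torch and numpy implementations on random data:
if __name__ == "__main__":
    _xyz = np.random.uniform(0.0, 0.9, size=(2, 8, 8, 3))
    _kwargs = dict(
        bin_axis="y", bins=[0.5], map_size=10, resolution_in_cm=10, flip_row_col=True
    )
    _torch_maps = project_point_cloud_to_map(
        xyz_points=torch.from_numpy(_xyz), **_kwargs
    )
    _np_maps = _cpu_only_project_point_cloud_to_map(xyz_points=_xyz, **_kwargs)
    assert np.allclose(_torch_maps.numpy(), _np_maps)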
| allenact-main | allenact/embodiedai/mapping/mapping_utils/point_cloud_utils.py |
# MIT License
#
# Original Copyright (c) 2020 Devendra Chaplot
#
# Modified work Copyright (c) 2021 Allen Institute for Artificial Intelligence
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
from typing import Optional, Tuple, Dict, Any, cast
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from allenact.utils.model_utils import simple_conv_and_linear_weights_init
DEGREES_TO_RADIANS = np.pi / 180.0
RADIANS_TO_DEGREES = 180.0 / np.pi
def _inv_sigmoid(x: torch.Tensor):
return torch.log(x) - torch.log1p(-x)
class ActiveNeuralSLAM(nn.Module):
"""Active Neural SLAM module.
This is an implementation of the Active Neural SLAM module
from:
```
Chaplot, D.S., Gandhi, D., Gupta, S., Gupta, A. and Salakhutdinov, R., 2020.
Learning To Explore Using Active Neural SLAM.
In International Conference on Learning Representations (ICLR).
```
Note that this is purely the mapping component and does not include the planning
components from the above paper.
This implementation is adapted from `https://github.com/devendrachaplot/Neural-SLAM`,
we have extended this implementation to allow for an arbitrary number of output map
channels (enabling semantic mapping).
At a high level, this model takes as input RGB egocentric images and outputs metric
map tensors of shape (# channels) x height x width where height/width correspond to the
ground plane of the environment.
"""
def __init__(
self,
frame_height: int,
frame_width: int,
n_map_channels: int,
resolution_in_cm: int = 5,
map_size_in_cm: int = 2400,
vision_range_in_cm: int = 300,
use_pose_estimation: bool = False,
pretrained_resnet: bool = True,
freeze_resnet_batchnorm: bool = True,
use_resnet_layernorm: bool = False,
):
"""Initialize an Active Neural SLAM module.
# Parameters
frame_height : The height of the RGB images given to this module on calls to `forward`.
frame_width : The width of the RGB images given to this module on calls to `forward`.
n_map_channels : The number of output channels in the output maps.
resolution_in_cm : The resolution of the output map, see `map_size_in_cm`.
map_size_in_cm : The height & width of the map in centimeters. The size of the map
tensor returned on calls to forward will be `map_size_in_cm/resolution_in_cm`. Note
        that `map_size_in_cm` must be divisible by `resolution_in_cm`.
vision_range_in_cm : Given an RGB image input, this module will transform this image into
an "egocentric map" with height and width equaling `vision_range_in_cm/resolution_in_cm`.
        This egocentric map corresponds to the area of the world directly in front of the agent.
This "egocentric map" will be rotated/translated into the allocentric reference frame and
used to update the larger, allocentric, map whose
height and width equal `map_size_in_cm/resolution_in_cm`. Thus this parameter controls
how much of the map will be updated on every step.
use_pose_estimation : Whether or not we should estimate the agent's change in position/rotation.
If `False`, you'll need to provide the ground truth changes in position/rotation.
pretrained_resnet : Whether or not to use ImageNet pre-trained model weights for the ResNet18
backbone.
freeze_resnet_batchnorm : Whether or not the batch normalization layers in the ResNet18 backbone
should be frozen and batchnorm updates disabled. You almost certainly want this to be `True`
as using batch normalization during RL training results in all sorts of issues unless you're
very careful.
use_resnet_layernorm : If you've enabled `freeze_resnet_batchnorm` (recommended) you'll likely want
to normalize the output from the ResNet18 model as we've found that these values can otherwise
        grow quite large, harming learning.
"""
super(ActiveNeuralSLAM, self).__init__()
self.frame_height = frame_height
self.frame_width = frame_width
self.n_map_channels = n_map_channels
self.resolution_in_cm = resolution_in_cm
self.map_size_in_cm = map_size_in_cm
self.input_channels = 3
self.vision_range_in_cm = vision_range_in_cm
self.dropout = 0.5
self.use_pose_estimation = use_pose_estimation
self.freeze_resnet_batchnorm = freeze_resnet_batchnorm
self.max_abs_map_logit_value = 20
# Visual Encoding
resnet = models.resnet18(pretrained=pretrained_resnet)
self.resnet_l5 = nn.Sequential(*list(resnet.children())[0:8])
self.conv = nn.Sequential(
*filter(bool, [nn.Conv2d(512, 64, (1, 1), stride=(1, 1)), nn.ReLU()])
)
self.bn_modules = [
module
for module in self.resnet_l5.modules()
if "BatchNorm" in type(module).__name__
]
if freeze_resnet_batchnorm:
for bn in self.bn_modules:
bn.momentum = 0
# Layernorm (if requested)
self.use_resnet_layernorm = use_resnet_layernorm
if self.use_resnet_layernorm:
assert (
self.freeze_resnet_batchnorm
), "When using layernorm, we require that set `freeze_resnet_batchnorm` to True."
self.resnet_normalizer = nn.Sequential(
nn.Conv2d(512, 512, 1),
nn.LayerNorm(normalized_shape=[512, 7, 7], elementwise_affine=True,),
)
self.resnet_normalizer.apply(simple_conv_and_linear_weights_init)
else:
self.resnet_normalizer = nn.Identity()
# convolution output size
input_test = torch.randn(
1, self.input_channels, self.frame_height, self.frame_width
)
# Have to explicitly call .forward to get past LGTM checks as it thinks nn.Sequential isn't callable
conv_output = self.conv.forward(self.resnet_l5.forward(input_test))
self.conv_output_size = conv_output.view(-1).size(0)
# projection layer
self.proj1 = nn.Linear(self.conv_output_size, 1024)
assert self.vision_range % 8 == 0
self.deconv_in_height = self.vision_range // 8
self.deconv_in_width = self.deconv_in_height
self.n_input_channels_for_deconv = 64
proj2_out_size = 64 * self.deconv_in_height * self.deconv_in_width
self.proj2 = nn.Linear(1024, proj2_out_size)
if self.dropout > 0:
self.dropout1 = nn.Dropout(self.dropout)
self.dropout2 = nn.Dropout(self.dropout)
# Deconv layers to predict map
self.deconv = nn.Sequential(
*filter(
bool,
[
nn.ConvTranspose2d(
self.n_input_channels_for_deconv,
32,
(4, 4),
stride=(2, 2),
padding=(1, 1),
),
nn.ReLU(),
nn.ConvTranspose2d(32, 16, (4, 4), stride=(2, 2), padding=(1, 1)),
nn.ReLU(),
nn.ConvTranspose2d(
16, self.n_map_channels, (4, 4), stride=(2, 2), padding=(1, 1)
),
],
)
)
# Pose Estimator
self.pose_conv = nn.Sequential(
nn.Conv2d(2 * self.n_map_channels, 64, (4, 4), stride=(2, 2)),
nn.ReLU(inplace=True),
nn.Conv2d(64, 32, (4, 4), stride=(2, 2)),
nn.ReLU(inplace=True),
nn.Conv2d(32, 16, (3, 3), stride=(1, 1)),
nn.ReLU(inplace=True),
nn.Flatten(),
)
self.pose_conv_output_dim = (
self.pose_conv.forward(
torch.zeros(
1, 2 * self.n_map_channels, self.vision_range, self.vision_range
)
)
.view(-1)
.size(0)
)
# projection layer
self.pose_proj1 = nn.Linear(self.pose_conv_output_dim, 1024)
self.pose_proj2_x = nn.Linear(1024, 128)
self.pose_proj2_z = nn.Linear(1024, 128)
self.pose_proj2_o = nn.Linear(1024, 128)
self.pose_proj3_x = nn.Linear(128, 1)
        self.pose_proj3_y = nn.Linear(128, 1)  # note: despite the name, this head predicts dz
self.pose_proj3_o = nn.Linear(128, 1)
if self.dropout > 0:
self.pose_dropout1 = nn.Dropout(self.dropout)
self.train()
@property
def device(self):
d = self.pose_proj1.weight.get_device()
if d < 0:
return torch.device("cpu")
return torch.device(d)
def train(self, mode: bool = True):
super().train(mode=mode)
if mode and self.freeze_resnet_batchnorm:
for module in self.bn_modules:
module.eval()
@property
def map_size(self):
return self.map_size_in_cm // self.resolution_in_cm
@property
def vision_range(self):
return self.vision_range_in_cm // self.resolution_in_cm
def image_to_egocentric_map_logits(
self,
images: Optional[torch.Tensor],
resnet_image_features: Optional[torch.Tensor] = None,
):
if resnet_image_features is None:
bs, _, _, _ = images.size()
resnet_image_features = self.resnet_normalizer(
self.resnet_l5(images[:, :3, :, :])
)
else:
bs = resnet_image_features.shape[0]
conv_output = self.conv(resnet_image_features)
proj1 = F.relu(self.proj1(conv_output.reshape(-1, self.conv_output_size)))
if self.dropout > 0:
proj1 = self.dropout1(proj1)
proj3 = F.relu(self.proj2(proj1))
deconv_input = proj3.view(
bs,
self.n_input_channels_for_deconv,
self.deconv_in_height,
self.deconv_in_width,
)
deconv_output = self.deconv(deconv_input)
return deconv_output
def allocentric_map_to_egocentric_view(
self, allocentric_map: torch.Tensor, xzr: torch.Tensor, padding_mode: str
):
# Index the egocentric viewpoints at the given xzr locations
with torch.no_grad():
allocentric_map = allocentric_map.float()
xzr = xzr.float()
theta = xzr[:, 2].float() * float(np.pi / 180)
# Here form the rotation matrix
cos_theta = torch.cos(theta)
sin_theta = torch.sin(theta)
rot_mat = torch.stack(
(
torch.stack((cos_theta, -sin_theta), -1),
torch.stack((sin_theta, cos_theta), -1),
),
1,
)
scaler = 2 * (100 / (self.resolution_in_cm * self.map_size))
offset_to_center_the_agent = scaler * xzr[:, :2].unsqueeze(-1) - 1
offset_to_top_of_image = rot_mat @ torch.FloatTensor([0, 1.0]).unsqueeze(
1
).to(self.device)
rotation_and_translate_mat = torch.cat(
(rot_mat, offset_to_top_of_image + offset_to_center_the_agent,), dim=-1,
)
ego_map = F.grid_sample(
allocentric_map,
F.affine_grid(
rotation_and_translate_mat.to(self.device), allocentric_map.shape,
),
padding_mode=padding_mode,
align_corners=False,
)
vr = self.vision_range
half_vr = vr // 2
center = self.map_size_in_cm // (2 * self.resolution_in_cm)
cropped = ego_map[:, :, :vr, (center - half_vr) : (center + half_vr)]
return cropped
def estimate_egocentric_dx_dz_dr(
self,
map_probs_egocentric: torch.Tensor,
last_map_probs_egocentric: torch.Tensor,
):
assert last_map_probs_egocentric.shape == map_probs_egocentric.shape
pose_est_input = torch.cat(
(map_probs_egocentric.detach(), last_map_probs_egocentric.detach()), dim=1
)
pose_conv_output = self.pose_conv(pose_est_input)
proj1 = F.relu(self.pose_proj1(pose_conv_output))
if self.dropout > 0:
proj1 = self.pose_dropout1(proj1)
proj2_x = F.relu(self.pose_proj2_x(proj1))
pred_dx = self.pose_proj3_x(proj2_x)
proj2_z = F.relu(self.pose_proj2_z(proj1))
pred_dz = self.pose_proj3_y(proj2_z)
proj2_o = F.relu(self.pose_proj2_o(proj1))
pred_do = self.pose_proj3_o(proj2_o)
return torch.cat((pred_dx, pred_dz, pred_do), dim=1)
@staticmethod
def update_allocentric_xzrs_with_egocentric_movement(
last_xzrs_allocentric: torch.Tensor, dx_dz_drs_egocentric: torch.Tensor,
):
new_xzrs_allocentric = last_xzrs_allocentric.clone()
theta = new_xzrs_allocentric[:, 2] * DEGREES_TO_RADIANS
sin_theta = torch.sin(theta)
cos_theta = torch.cos(theta)
new_xzrs_allocentric[:, :2] += torch.matmul(
torch.stack([cos_theta, -sin_theta, sin_theta, cos_theta], dim=-1).view(
-1, 2, 2
),
dx_dz_drs_egocentric[:, :2].unsqueeze(-1),
).squeeze(-1)
new_xzrs_allocentric[:, 2] += dx_dz_drs_egocentric[:, 2]
new_xzrs_allocentric[:, 2] = (
torch.fmod(new_xzrs_allocentric[:, 2] - 180.0, 360.0) + 180.0
)
new_xzrs_allocentric[:, 2] = (
torch.fmod(new_xzrs_allocentric[:, 2] + 180.0, 360.0) - 180.0
)
return new_xzrs_allocentric
def forward(
self,
images: Optional[torch.Tensor],
last_map_probs_allocentric: Optional[torch.Tensor],
last_xzrs_allocentric: Optional[torch.Tensor],
dx_dz_drs_egocentric: Optional[torch.Tensor],
last_map_logits_egocentric: Optional[torch.Tensor],
return_allocentric_maps=True,
resnet_image_features: Optional[torch.Tensor] = None,
) -> Dict[str, Any]:
"""Create allocentric/egocentric maps predictions given RGB image
inputs.
Here it is assumed that `last_xzrs_allocentric` has been re-centered so that (x, z) == (0,0)
corresponds to the top left of the returned map (with increasing x/z moving to the bottom right of the map).
Note that all maps are oriented so that:
* **Increasing x values** correspond to **increasing columns** in the map(s).
* **Increasing z values** correspond to **increasing rows** in the map(s).
Note that this may seem a bit weird as:
* "north" is pointing downwards in the map,
* if you picture yourself as the agent facing north (i.e. down) in the map, then moving to the right from
the agent's perspective will correspond to **increasing** which column the agent is at:
```
agent facing downwards - - > (dir. to the right of the agent, i.e. moving right corresponds to +cols)
|
|
v (dir. agent faces, i.e. moving ahead corresponds to +rows)
```
This may be the opposite of what you expect.
# Parameters
        images : A (# batches) x 3 x height x width tensor of RGB images. These should be
        normalized for use with a resnet model. See [here](https://pytorch.org/vision/stable/models.html)
        for information (see also the `use_resnet_normalization` parameter of the
        `allenact.base_abstractions.sensor.RGBSensor` sensor).
        last_map_probs_allocentric : A (# batches) x (map channels) x (map height) x (map width)
        tensor representing the collection of allocentric maps to be updated.
        last_xzrs_allocentric : A (# batches) x 3 tensor where `last_xzrs_allocentric[:, 0]`
        are the agent's (allocentric) x-coordinates on the previous step,
        `last_xzrs_allocentric[:, 1]` are the agent's (allocentric) z-coordinates from the previous
        step, and `last_xzrs_allocentric[:, 2]` are the agent's rotations (allocentric, in degrees)
        from the previous step.
        dx_dz_drs_egocentric : A (# batches) x 3 tensor representing the agent's change in x (in meters), z (in meters),
        and rotation (in degrees) from the previous step. Note that these changes are "egocentric" so that if the
        agent moved 1 meter ahead from its perspective this should correspond to a dz of +1.0 regardless of
        the agent's orientation (similarly moving right would result in a dx of +1.0). This
is ignored (and thus can be `None`) if you are using pose estimation
(i.e. `self.use_pose_estimation` is `True`) or if `return_allocentric_maps` is `False`.
last_map_logits_egocentric : The "egocentric_update" output when calling this function
on the last agent's step. I.e. this should be the egocentric map view of the agent
from the last step. This is used to compute the change in the agent's position rotation.
This is ignored (and thus can be `None`) if you do not wish to estimate the agent's pose
(i.e. `self.use_pose_estimation` is `False`).
return_allocentric_maps : Whether or not to generate new allocentric maps given `last_map_probs_allocentric`
and the new map estimates. Creating these new allocentric maps is expensive so better avoided when
not needed.
resnet_image_features : Sometimes you may wish to compute the ResNet image features yourself for use
in another part of your model. Rather than having to recompute them multiple times, you can
instead compute them once and pass them into this forward call (in this case the input `images`
parameter is ignored). Note that if you're using the `self.resnet_l5` module to compute these
features, be sure to also normalize them with `self.resnet_normalizer` if you have opted to
        `use_resnet_layernorm` when initializing this module.
# Returns
A dictionary with keys/values:
* "egocentric_update" - The egocentric map view for the given RGB image. This is what should
be used for computing losses in general.
* "map_logits_probs_update_no_grad" - The egocentric map view after it has been
rotated, translated, and moved into a full-sized allocentric map. This map has been
detached from the computation graph and so should not be used for gradient computations.
This will be `None` if `return_allocentric_maps` was `False`.
* "map_logits_probs_no_grad" - The newly updated allocentric map, this corresponds to
performing a pointwise maximum between `last_map_probs_allocentric` and the
above returned `map_probs_allocentric_update_no_grad`.
This will be `None` if `return_allocentric_maps` was `False`.
* "dx_dz_dr_egocentric_preds" - The predicted change in x, z, and rotation of the agent (from the
egocentric perspective of the agent).
* "xzr_allocentric_preds" - The (predicted if `self.use_pose_estimation == True`) allocentric
(x, z) position and rotation of the agent. This will equal `None` if `self.use_pose_estimation == False`
and `dx_dz_drs_egocentric` is `None`.
"""
# TODO: For consistency we should update things so that:
# "Furthermore, the rotation component of `last_xzrs_allocentric` and `dx_dz_drs_egocentric`
        #  should be specified in **degrees** with positive rotation corresponding to a **CLOCKWISE**
        #  rotation (this is the default used by many game engines)."
map_logits_egocentric = self.image_to_egocentric_map_logits(
images=images, resnet_image_features=resnet_image_features
)
map_probs_egocentric = torch.sigmoid(map_logits_egocentric)
dx_dz_dr_egocentric_preds = None
if last_map_logits_egocentric is not None:
dx_dz_dr_egocentric_preds = self.estimate_egocentric_dx_dz_dr(
map_probs_egocentric=map_probs_egocentric,
last_map_probs_egocentric=torch.sigmoid(last_map_logits_egocentric),
)
if self.use_pose_estimation:
            updated_xzrs_allocentric = self.update_allocentric_xzrs_with_egocentric_movement(
                last_xzrs_allocentric=last_xzrs_allocentric,
                dx_dz_drs_egocentric=dx_dz_dr_egocentric_preds,
            )
        elif dx_dz_drs_egocentric is not None:
            updated_xzrs_allocentric = self.update_allocentric_xzrs_with_egocentric_movement(
                last_xzrs_allocentric=last_xzrs_allocentric,
                dx_dz_drs_egocentric=dx_dz_drs_egocentric,
            )
        else:
            updated_xzrs_allocentric = None
if return_allocentric_maps:
# Aggregate egocentric map prediction in the allocentric map
# using the predicted pose (if `self.use_pose_estimation`) or the ground
# truth pose (if not `self.use_pose_estimation`)
with torch.no_grad():
# Rotate and translate the egocentric map view, we do this grid sampling
# at the level of probabilities as bad results can occur at the logit level
full_size_allocentric_map_probs_update = _move_egocentric_map_view_into_allocentric_position(
map_probs_egocentric=map_probs_egocentric,
                    xzrs_allocentric=updated_xzrs_allocentric,
allocentric_map_height_width=(self.map_size, self.map_size),
resolution_in_cm=self.resolution_in_cm,
)
map_probs_allocentric = torch.max(
last_map_probs_allocentric, full_size_allocentric_map_probs_update
)
else:
full_size_allocentric_map_probs_update = None
map_probs_allocentric = None
return {
"egocentric_update": map_logits_egocentric,
"map_probs_allocentric_update_no_grad": full_size_allocentric_map_probs_update,
"map_probs_allocentric_no_grad": map_probs_allocentric,
"dx_dz_dr_egocentric_preds": dx_dz_dr_egocentric_preds,
"xzr_allocentric_preds": updated_xzrs_allocentrc,
}
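# Hedged usage sketch (an illustrative addition, not original code): one forward
# pass of `ActiveNeuralSLAM` using ground-truth egomotion. The frame size,
# channel count, and `vision_range_in_cm` below are arbitrary example choices
# (the vision range is picked so that `vision_range % 8 == 0` as required).
def _example_active_neural_slam_step():
    model = ActiveNeuralSLAM(
        frame_height=224,
        frame_width=224,
        n_map_channels=2,
        vision_range_in_cm=320,
        use_pose_estimation=False,
        pretrained_resnet=False,
    )
    out = model.forward(
        images=torch.randn(1, 3, 224, 224),  # should be resnet-normalized in practice
        last_map_probs_allocentric=torch.zeros(1, 2, model.map_size, model.map_size),
        last_xzrs_allocentric=torch.tensor([[12.0, 12.0, 0.0]]),  # re-centered x, z, rot
        dx_dz_drs_egocentric=torch.zeros(1, 3),
        last_map_logits_egocentric=None,
        return_allocentric_maps=True,
    )
    # Egocentric logits are what losses are computed on; allocentric maps are detached.
    assert out["egocentric_update"].shape == (1, 2, model.vision_range, model.vision_range)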
def _move_egocentric_map_view_into_allocentric_position(
map_probs_egocentric: torch.Tensor,
xzrs_allocentric: torch.Tensor,
allocentric_map_height_width: Tuple[int, int],
resolution_in_cm: float,
):
"""Translate/rotate an egocentric map view into an allocentric map.
Let's say you have a collection of egocentric maps in a tensor of shape
`(# batches) x (# channels) x (# ego rows) x (# ego columns)`
where these are "egocentric" as we assume the agent is always
at the center of the map and facing "downwards", namely
* **ahead** of the agent should correspond to **increasing rows** in the map(s).
* **right** of the agent should correspond to **increasing columns** in the map(s).
Note that the above is a bit weird as, if you picture yourself as the agent facing
    downwards in the map, then moving to the right from the agent's perspective corresponds to increasing columns. Here's how things
should look if you plotted one of these egocentric maps:
```
center of map - - > (dir. to the right of the agent, i.e. moving right corresponds to +cols)
|
|
v (dir. agent faces, i.e. moving ahead corresponds to +rows)
```
This function is used to translate/rotate the above ego maps so that
they are in the right position/rotation in an allocentric map of size
`(# batches) x (# channels) x (# allocentric_map_height_width[0]) x (# allocentric_map_height_width[1])`.
Adapted from the get_grid function in https://github.com/devendrachaplot/Neural-SLAM.
# Parameters
map_probs_egocentric : Egocentric map views.
xzrs_allocentric : (# batches)x3 tensor with `xzrs_allocentric[:, 0]` being the x-coordinates (in meters),
`xzrs_allocentric[:, 1]` being the z-coordinates (in meters), and `xzrs_allocentric[:, 2]` being the rotation
(in degrees) of the agent in the allocentric reference frame. Here it is assumed that `xzrs_allocentric` has
been re-centered so that (x, z) == (0,0) corresponds to the top left of the returned map (with increasing
x/z moving to the bottom right of the map). Note that positive rotations are in the counterclockwise direction.
allocentric_map_height_width : Height/width of the allocentric map to be returned
resolution_in_cm : Resolution (in cm) of map to be returned (and of map_probs_egocentric). I.e.
`map_probs_egocentric[0,0,0:1,0:1]` should correspond to a `resolution_in_cm x resolution_in_cm`
square on the ground plane in the world.
# Returns
`(# batches) x (# channels) x (# allocentric_map_height_width[0]) x (# allocentric_map_height_width[1])`
tensor where the input `map_probs_egocentric` maps have been rotated/translated so that they
are in the positions specified by `xzrs_allocentric`.
"""
# TODO: For consistency we should update the rotations so they are in the clockwise direction.
# First we place the egocentric map view into the center
# of a map that has the same size as the allocentric map
nbatch, c, ego_h, ego_w = cast(
Tuple[int, int, int, int], map_probs_egocentric.shape
)
allo_h, allo_w = allocentric_map_height_width
max_view_range = math.sqrt((ego_w / 2.0) ** 2 + ego_h ** 2)
if min(allo_h, allo_w) / 2.0 < max_view_range:
raise NotImplementedError(
f"The shape of your egocentric view (ego_h, ego_w)==({ego_h, ego_w})"
f" is too large relative the size of the allocentric map (allo_h, allo_w)==({allo_h}, {allo_w})."
f" The height/width of your allocentric map should be at least {2 * max_view_range} to allow"
f" for no information to be lost when rotating the egocentric map."
)
full_size_ego_map_update_probs = map_probs_egocentric.new(
nbatch, c, *allocentric_map_height_width
).fill_(0)
assert (ego_h % 2, ego_w % 2, allo_h % 2, allo_w % 2) == (
0,
) * 4, "All map heights/widths should be divisible by 2."
x1 = allo_w // 2 - ego_w // 2
x2 = x1 + ego_w
z1 = allo_h // 2
z2 = z1 + ego_h
full_size_ego_map_update_probs[:, :, z1:z2, x1:x2] = map_probs_egocentric
# Now we'll rotate and translate `full_size_ego_map_update_probs`
# so that the egocentric map view is positioned where it should be
# in the allocentric coordinate frame
# To do this we first need to rescale our allocentric xz coordinates
# so that the center of the map is (0,0) and the top left corner is (-1, -1)
# as this is what's expected by the `affine_grid` function below.
rescaled_xzrs_allocentric = xzrs_allocentric.clone().detach().float()
rescaled_xzrs_allocentric[:, :2] *= (
100.0 / resolution_in_cm
) # Put x / z into map units rather than meters
rescaled_xzrs_allocentric[:, 0] /= allo_w / 2 # x corresponds to columns
rescaled_xzrs_allocentric[:, 1] /= allo_h / 2 # z corresponds to rows
rescaled_xzrs_allocentric[:, :2] -= 1.0 # Re-center
x = rescaled_xzrs_allocentric[:, 0]
z = rescaled_xzrs_allocentric[:, 1]
theta = (
-rescaled_xzrs_allocentric[:, 2] * DEGREES_TO_RADIANS
) # Notice the negative sign
cos_theta = theta.cos()
sin_theta = theta.sin()
zeroes = torch.zeros_like(cos_theta)
ones = torch.ones_like(cos_theta)
theta11 = torch.stack([cos_theta, -sin_theta, zeroes], 1)
theta12 = torch.stack([sin_theta, cos_theta, zeroes], 1)
theta1 = torch.stack([theta11, theta12], 1)
theta21 = torch.stack([ones, zeroes, x], 1)
theta22 = torch.stack([zeroes, ones, z], 1)
theta2 = torch.stack([theta21, theta22], 1)
grid_size = [nbatch, c, allo_h, allo_w]
rot_grid = F.affine_grid(theta1, grid_size)
trans_grid = F.affine_grid(theta2, grid_size)
return F.grid_sample(
F.grid_sample(
full_size_ego_map_update_probs,
rot_grid,
padding_mode="zeros",
align_corners=False,
),
trans_grid,
padding_mode="zeros",
align_corners=False,
)
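# Illustrative addition: placing an all-ones egocentric view at the center of an
# allocentric map with zero rotation should (up to bilinear interpolation error)
# preserve its total mass. The values below are arbitrary example choices.
if __name__ == "__main__":
    _allo = _move_egocentric_map_view_into_allocentric_position(
        map_probs_egocentric=torch.ones(1, 1, 16, 16),
        xzrs_allocentric=torch.tensor([[3.2, 3.2, 0.0]]),  # 3.2m == map center here
        allocentric_map_height_width=(64, 64),
        resolution_in_cm=10,
    )
    print(_allo.shape, float(_allo.sum()))  # torch.Size([1, 1, 64, 64]), ~256.0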
| allenact-main | allenact/embodiedai/mapping/mapping_models/active_neural_slam.py |
allenact-main | allenact/embodiedai/mapping/mapping_models/__init__.py |
|
allenact-main | allenact/embodiedai/preprocessors/__init__.py |
|
from typing import List, Callable, Optional, Any, cast, Dict
import gym
import numpy as np
import torch
import torch.nn as nn
from torchvision import models
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.utils.misc_utils import prepare_locals_for_super
class ResNetEmbedder(nn.Module):
def __init__(self, resnet, pool=True):
super().__init__()
self.model = resnet
self.pool = pool
self.eval()
def forward(self, x):
with torch.no_grad():
x = self.model.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.maxpool(x)
x = self.model.layer1(x)
x = self.model.layer2(x)
x = self.model.layer3(x)
x = self.model.layer4(x)
if not self.pool:
return x
else:
x = self.model.avgpool(x)
x = torch.flatten(x, 1)
return x
class ResNetPreprocessor(Preprocessor):
"""Preprocess RGB or depth image using a ResNet model."""
def __init__(
self,
input_uuids: List[str],
output_uuid: str,
input_height: int,
input_width: int,
output_height: int,
output_width: int,
output_dims: int,
pool: bool,
torchvision_resnet_model: Callable[..., models.ResNet] = models.resnet18,
device: Optional[torch.device] = None,
device_ids: Optional[List[torch.device]] = None,
**kwargs: Any,
):
def f(x, k):
assert k in x, "{} must be set in ResNetPreprocessor".format(k)
return x[k]
def optf(x, k, default):
return x[k] if k in x else default
self.input_height = input_height
self.input_width = input_width
self.output_height = output_height
self.output_width = output_width
self.output_dims = output_dims
self.pool = pool
self.make_model = torchvision_resnet_model
self.device = torch.device("cpu") if device is None else device
self.device_ids = device_ids or cast(
List[torch.device], list(range(torch.cuda.device_count()))
)
self._resnet: Optional[ResNetEmbedder] = None
low = -np.inf
high = np.inf
shape = (self.output_dims, self.output_height, self.output_width)
assert (
len(input_uuids) == 1
), "resnet preprocessor can only consume one observation type"
observation_space = gym.spaces.Box(low=low, high=high, shape=shape)
super().__init__(**prepare_locals_for_super(locals()))
@property
def resnet(self) -> ResNetEmbedder:
if self._resnet is None:
self._resnet = ResNetEmbedder(
self.make_model(pretrained=True).to(self.device), pool=self.pool
)
return self._resnet
def to(self, device: torch.device) -> "ResNetPreprocessor":
self._resnet = self.resnet.to(device)
self.device = device
return self
def process(self, obs: Dict[str, Any], *args: Any, **kwargs: Any) -> Any:
x = obs[self.input_uuids[0]].to(self.device).permute(0, 3, 1, 2) # bhwc -> bchw
# If the input is depth, repeat it across all 3 channels
if x.shape[1] == 1:
x = x.repeat(1, 3, 1, 1)
return self.resnet(x.to(self.device))
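# Hedged usage sketch (illustrative addition): constructing the preprocessor for
# pooled resnet18 features and running it on a dummy batch. The uuid strings are
# arbitrary example names; ImageNet weights are downloaded on first use.
if __name__ == "__main__":
    preprocessor = ResNetPreprocessor(
        input_uuids=["rgb"],
        output_uuid="rgb_resnet",
        input_height=224,
        input_width=224,
        output_height=1,
        output_width=1,
        output_dims=512,
        pool=True,
    )
    dummy_obs = {"rgb": torch.rand(4, 224, 224, 3)}  # bhwc, as `process` expects
    print(preprocessor.process(dummy_obs).shape)  # torch.Size([4, 512])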
| allenact-main | allenact/embodiedai/preprocessors/resnet.py |
from abc import abstractmethod, ABC
from typing import Optional, Tuple, Any, cast, Union, Sequence
import PIL
import gym
import numpy as np
from torchvision import transforms
from allenact.base_abstractions.misc import EnvType
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import SubTaskType
from allenact.utils.misc_utils import prepare_locals_for_super
from allenact.utils.tensor_utils import ScaleBothSides
IMAGENET_RGB_MEANS: Tuple[float, float, float] = (0.485, 0.456, 0.406)
IMAGENET_RGB_STDS: Tuple[float, float, float] = (0.229, 0.224, 0.225)
class VisionSensor(Sensor[EnvType, SubTaskType]):
def __init__(
self,
mean: Union[Sequence[float], np.ndarray, None] = None,
stdev: Union[Sequence[float], np.ndarray, None] = None,
height: Optional[int] = None,
width: Optional[int] = None,
uuid: str = "vision",
output_shape: Optional[Tuple[int, ...]] = None,
output_channels: Optional[int] = None,
unnormalized_infimum: float = -np.inf,
unnormalized_supremum: float = np.inf,
scale_first: bool = True,
**kwargs: Any
):
"""Initializer.
# Parameters
mean : The images will be normalized with the given mean
stdev : The images will be normalized with the given standard deviations.
height : If it's a non-negative integer and `width` is also non-negative integer, the image returned from the
environment will be rescaled to have `height` rows and `width` columns using bilinear sampling.
width : If it's a non-negative integer and `height` is also non-negative integer, the image returned from the
environment will be rescaled to have `height` rows and `width` columns using bilinear sampling.
uuid : The universally unique identifier for the sensor.
output_shape : Optional observation space shape (alternative to `output_channels`).
output_channels : Optional observation space number of channels (alternative to `output_shape`).
unnormalized_infimum : Lower limit(s) for the observation space range.
unnormalized_supremum : Upper limit(s) for the observation space range.
scale_first : Whether to scale image before normalization (if needed).
kwargs : Extra kwargs. Currently unused.
"""
self._norm_means = np.array(mean) if mean is not None else None
self._norm_sds = np.array(stdev) if stdev is not None else None
assert (self._norm_means is None) == (self._norm_sds is None), (
"In VisionSensor's config, "
"either both mean/stdev must be None or neither."
)
self._should_normalize = self._norm_means is not None
self._height = height
self._width = width
assert (self._width is None) == (self._height is None), (
"In VisionSensor's config, "
"either both height/width must be None or neither."
)
self._scale_first = scale_first
self.scaler: Optional[ScaleBothSides] = None
if self._width is not None:
self.scaler = ScaleBothSides(
width=cast(int, self._width), height=cast(int, self._height)
)
self.to_pil = transforms.ToPILImage() # assumes mode="RGB" for 3 channels
self._observation_space = self._make_observation_space(
output_shape=output_shape,
output_channels=output_channels,
unnormalized_infimum=unnormalized_infimum,
unnormalized_supremum=unnormalized_supremum,
)
assert int(PIL.__version__.split(".")[0]) != 7, (
"We found that Pillow version >=7.* has broken scaling,"
" please downgrade to version 6.2.1 or upgrade to >=8.0.0"
)
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _make_observation_space(
self,
output_shape: Optional[Tuple[int, ...]],
output_channels: Optional[int],
unnormalized_infimum: float,
unnormalized_supremum: float,
) -> gym.spaces.Box:
assert output_shape is None or output_channels is None, (
"In VisionSensor's config, "
"only one of output_shape and output_channels can be not None."
)
shape: Optional[Tuple[int, ...]] = None
if output_shape is not None:
shape = output_shape
elif self._height is not None and output_channels is not None:
shape = (
cast(int, self._height),
cast(int, self._width),
cast(int, output_channels),
)
if not self._should_normalize or shape is None or len(shape) == 1:
return gym.spaces.Box(
low=np.float32(unnormalized_infimum),
high=np.float32(unnormalized_supremum),
shape=shape,
)
else:
out_shape = shape[:-1] + (1,)
low = np.tile(
(unnormalized_infimum - cast(np.ndarray, self._norm_means))
/ cast(np.ndarray, self._norm_sds),
out_shape,
)
high = np.tile(
(unnormalized_supremum - cast(np.ndarray, self._norm_means))
/ cast(np.ndarray, self._norm_sds),
out_shape,
)
return gym.spaces.Box(low=np.float32(low), high=np.float32(high))
def _get_observation_space(self):
return self._observation_space
@property
def height(self) -> Optional[int]:
"""Height that input image will be rescale to have.
# Returns
The height as a non-negative integer or `None` if no rescaling is done.
"""
return self._height
@property
def width(self) -> Optional[int]:
"""Width that input image will be rescale to have.
# Returns
The width as a non-negative integer or `None` if no rescaling is done.
"""
return self._width
@abstractmethod
def frame_from_env(self, env: EnvType, task: Optional[SubTaskType]) -> np.ndarray:
raise NotImplementedError
def process_img(self, img: np.ndarray):
assert (
np.issubdtype(img.dtype, np.float32)
and (len(img.shape) == 2 or img.shape[-1] == 1)
) or (img.shape[-1] == 3 and np.issubdtype(img.dtype, np.uint8)), (
"Input frame must either have 3 channels and be of"
" type np.uint8 or have one channel and be of type np.float32"
)
if (
self._scale_first
and self.scaler is not None
and img.shape[:2] != (self._height, self._width)
):
img = np.array(self.scaler(self.to_pil(img)), dtype=img.dtype) # hwc
elif np.issubdtype(img.dtype, np.float32):
img = img.copy()
assert img.dtype in [np.uint8, np.float32]
if np.issubdtype(img.dtype, np.uint8):
img = img.astype(np.float32) / 255.0
if self._should_normalize:
img -= self._norm_means
img /= self._norm_sds
if (
(not self._scale_first)
and self.scaler is not None
and img.shape[:2] != (self._height, self._width)
):
img = np.array(self.scaler(self.to_pil(img)), dtype=np.float32) # hwc
return img
def get_observation(
self, env: EnvType, task: Optional[SubTaskType], *args: Any, **kwargs: Any
) -> Any:
return self.process_img(self.frame_from_env(env=env, task=task))
class RGBSensor(VisionSensor[EnvType, SubTaskType], ABC):
def __init__(
self,
use_resnet_normalization: bool = False,
mean: Optional[Union[np.ndarray, Sequence[float]]] = IMAGENET_RGB_MEANS,
stdev: Optional[Union[np.ndarray, Sequence[float]]] = IMAGENET_RGB_STDS,
height: Optional[int] = None,
width: Optional[int] = None,
uuid: str = "rgb",
output_shape: Optional[Tuple[int, ...]] = None,
output_channels: int = 3,
unnormalized_infimum: float = 0.0,
unnormalized_supremum: float = 1.0,
scale_first: bool = True,
**kwargs: Any
):
"""Initializer.
# Parameters
use_resnet_normalization : Whether to apply image normalization with the given `mean` and `stdev`.
mean : The images will be normalized with the given mean if `use_resnet_normalization` is True (default
`[0.485, 0.456, 0.406]`, i.e. the standard resnet normalization mean).
stdev : The images will be normalized with the given standard deviation if `use_resnet_normalization` is True
(default `[0.229, 0.224, 0.225]`, i.e. the standard resnet normalization standard deviation).
height: If it's a non-negative integer and `width` is also non-negative integer, the image returned from the
environment will be rescaled to have `height` rows and `width` columns using bilinear sampling.
width: If it's a non-negative integer and `height` is also non-negative integer, the image returned from the
environment will be rescaled to have `height` rows and `width` columns using bilinear sampling.
uuid: The universally unique identifier for the sensor.
output_shape: Optional observation space shape (alternative to `output_channels`).
output_channels: Optional observation space number of channels (alternative to `output_shape`).
unnormalized_infimum: Lower limit(s) for the observation space range.
unnormalized_supremum: Upper limit(s) for the observation space range.
scale_first: Whether to scale image before normalization (if needed).
kwargs : Extra kwargs. Currently unused.
"""
if not use_resnet_normalization:
mean, stdev = None, None
if isinstance(mean, tuple):
mean = np.array(mean, dtype=np.float32).reshape((1, 1, len(mean)))
if isinstance(stdev, tuple):
stdev = np.array(stdev, dtype=np.float32).reshape((1, 1, len(stdev)))
super().__init__(**prepare_locals_for_super(locals()))
class DepthSensor(VisionSensor[EnvType, SubTaskType], ABC):
def __init__(
self,
use_normalization: bool = False,
mean: Optional[Union[np.ndarray, float]] = 0.5,
stdev: Optional[Union[np.ndarray, float]] = 0.25,
height: Optional[int] = None,
width: Optional[int] = None,
uuid: str = "depth",
output_shape: Optional[Tuple[int, ...]] = None,
output_channels: int = 1,
unnormalized_infimum: float = 0.0,
unnormalized_supremum: float = 5.0,
scale_first: bool = True,
**kwargs: Any
):
"""Initializer.
# Parameters
use_normalization : Whether to apply image normalization with the given `mean` and `stdev`.
mean : The images will be normalized with the given mean if `use_normalization` is True (default 0.5).
stdev : The images will be normalized with the given standard deviation if `use_normalization` is True
(default 0.25).
        height: If it's a non-negative integer and `width` is also a non-negative integer, the image returned from the
            environment will be rescaled to have `height` rows and `width` columns using bilinear sampling.
        width: If it's a non-negative integer and `height` is also a non-negative integer, the image returned from the
            environment will be rescaled to have `height` rows and `width` columns using bilinear sampling.
uuid: The universally unique identifier for the sensor.
output_shape: Optional observation space shape (alternative to `output_channels`).
output_channels: Optional observation space number of channels (alternative to `output_shape`).
unnormalized_infimum: Lower limit(s) for the observation space range.
unnormalized_supremum: Upper limit(s) for the observation space range.
scale_first: Whether to scale image before normalization (if needed).
kwargs : Extra kwargs. Currently unused.
"""
if not use_normalization:
mean, stdev = None, None
if isinstance(mean, float):
mean = np.array(mean, dtype=np.float32).reshape(1, 1)
if isinstance(stdev, float):
stdev = np.array(stdev, dtype=np.float32).reshape(1, 1)
super().__init__(**prepare_locals_for_super(locals()))
def get_observation( # type: ignore
self, env: EnvType, task: Optional[SubTaskType], *args: Any, **kwargs: Any
) -> Any:
depth = super().get_observation(env, task, *args, **kwargs)
depth = np.expand_dims(depth, 2)
return depth
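# --- Illustrative sketch (not part of the original file) ---------------------
# A minimal concrete RGBSensor. The only abstract method is `frame_from_env`;
# here we assume a hypothetical environment exposing a `current_frame`
# attribute holding an HxWx3 np.uint8 array -- adapt the body to your env's API.
class ExampleCurrentFrameRGBSensor(RGBSensor):
    def frame_from_env(self, env, task=None) -> np.ndarray:
        # Return the raw uint8 frame; the inherited `process_img` then handles
        # optional rescaling and ImageNet-style normalization.
        return env.current_frame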
| allenact-main | allenact/embodiedai/sensors/vision_sensors.py |
allenact-main | allenact/embodiedai/sensors/__init__.py |
|
# Original work Copyright (c) Facebook, Inc. and its affiliates.
# Modified work Copyright (c) Allen Institute for AI
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Several of the models defined in this file are modified versions of those
found in https://github.com/joel99/habitat-pointnav-
aux/blob/master/habitat_baselines/"""
import torch
import torch.nn as nn
from allenact.embodiedai.aux_losses.losses import (
InverseDynamicsLoss,
TemporalDistanceLoss,
CPCALoss,
CPCASoftMaxLoss,
)
from allenact.utils.model_utils import FeatureEmbedding
class AuxiliaryModel(nn.Module):
"""The class of defining the models for all kinds of self-supervised
auxiliary tasks."""
def __init__(
self,
aux_uuid: str,
action_dim: int,
obs_embed_dim: int,
belief_dim: int,
action_embed_size: int = 4,
cpca_classifier_hidden_dim: int = 32,
cpca_softmax_dim: int = 128,
):
super().__init__()
self.aux_uuid = aux_uuid
self.action_dim = action_dim
self.obs_embed_dim = obs_embed_dim
self.belief_dim = belief_dim
self.action_embed_size = action_embed_size
self.cpca_classifier_hidden_dim = cpca_classifier_hidden_dim
self.cpca_softmax_dim = cpca_softmax_dim
self.initialize_model_given_aux_uuid(self.aux_uuid)
def initialize_model_given_aux_uuid(self, aux_uuid: str):
if aux_uuid == InverseDynamicsLoss.UUID:
self.init_inverse_dynamics()
elif aux_uuid == TemporalDistanceLoss.UUID:
self.init_temporal_distance()
elif CPCALoss.UUID in aux_uuid: # the CPCA family with various k
self.init_cpca()
elif CPCASoftMaxLoss.UUID in aux_uuid:
self.init_cpca_softmax()
else:
raise ValueError("Unknown Auxiliary Loss UUID")
def init_inverse_dynamics(self):
self.decoder = nn.Linear(
2 * self.obs_embed_dim + self.belief_dim, self.action_dim
)
def init_temporal_distance(self):
self.decoder = nn.Linear(2 * self.obs_embed_dim + self.belief_dim, 1)
def init_cpca(self):
## Auto-regressive model to predict future context
self.action_embedder = FeatureEmbedding(
self.action_dim + 1, self.action_embed_size
)
        # NOTE: the embedding has one extra entry because zero-padded (null) actions are mapped to index 0
self.context_model = nn.GRU(self.action_embed_size, self.belief_dim)
## Classifier to estimate mutual information
self.classifier = nn.Sequential(
nn.Linear(
self.belief_dim + self.obs_embed_dim, self.cpca_classifier_hidden_dim
),
nn.ReLU(),
nn.Linear(self.cpca_classifier_hidden_dim, 1),
)
def init_cpca_softmax(self):
# same as CPCA with extra MLP for contrastive losses.
###
self.action_embedder = FeatureEmbedding(
self.action_dim + 1, self.action_embed_size
)
        # NOTE: the embedding has one extra entry because zero-padded (null) actions are mapped to index 0
self.context_model = nn.GRU(self.action_embed_size, self.belief_dim)
## Classifier to estimate mutual information
self.visual_mlp = nn.Sequential(
nn.Linear(self.obs_embed_dim, self.cpca_classifier_hidden_dim),
nn.ReLU(),
nn.Linear(self.cpca_classifier_hidden_dim, self.cpca_softmax_dim),
)
self.belief_mlp = nn.Sequential(
nn.Linear(self.belief_dim, self.cpca_classifier_hidden_dim),
nn.ReLU(),
nn.Linear(self.cpca_classifier_hidden_dim, self.cpca_softmax_dim),
)
def forward(self, features: torch.FloatTensor):
if self.aux_uuid in [InverseDynamicsLoss.UUID, TemporalDistanceLoss.UUID]:
return self.decoder(features)
else:
raise NotImplementedError(
f"Auxiliary model with UUID {self.aux_uuid} does not support `forward` call."
)
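# --- Illustrative sketch (not part of the original file) ---------------------
# Constructing an AuxiliaryModel for the inverse-dynamics task and running a
# single forward pass; all dimensions below are arbitrary example values.
def _example_inverse_dynamics_forward() -> torch.Tensor:
    model = AuxiliaryModel(
        aux_uuid=InverseDynamicsLoss.UUID,
        action_dim=6,
        obs_embed_dim=128,
        belief_dim=256,
    )
    # Inverse dynamics decodes actions from [obs_t, obs_{t+1}, belief_t].
    features = torch.randn(1, 2 * 128 + 256)
    return model(features)  # action logits of shape (1, 6)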
| allenact-main | allenact/embodiedai/models/aux_models.py |
allenact-main | allenact/embodiedai/models/__init__.py |
|
"""Basic building block torch networks that can be used across a variety of
tasks."""
from typing import (
Sequence,
Dict,
Union,
cast,
List,
Callable,
Optional,
Tuple,
Any,
)
import gym
import numpy as np
import torch
from gym.spaces.dict import Dict as SpaceDict
import torch.nn as nn
from allenact.algorithms.onpolicy_sync.policy import ActorCriticModel, DistributionType
from allenact.base_abstractions.distributions import CategoricalDistr, Distr
from allenact.base_abstractions.misc import ActorCriticOutput, Memory
from allenact.utils.model_utils import make_cnn, compute_cnn_output
from allenact.utils.system import get_logger
class SimpleCNN(nn.Module):
"""A Simple N-Conv CNN followed by a fully connected layer. Takes in
observations (of type gym.spaces.dict) and produces an embedding of the
`rgb_uuid` and/or `depth_uuid` components.
# Attributes
observation_space : The observation_space of the agent, should have `rgb_uuid` or `depth_uuid` as
a component (otherwise it is a blind model).
output_size : The size of the embedding vector to produce.
"""
def __init__(
self,
observation_space: SpaceDict,
output_size: int,
rgb_uuid: Optional[str],
depth_uuid: Optional[str],
layer_channels: Sequence[int] = (32, 64, 32),
kernel_sizes: Sequence[Tuple[int, int]] = ((8, 8), (4, 4), (3, 3)),
layers_stride: Sequence[Tuple[int, int]] = ((4, 4), (2, 2), (1, 1)),
paddings: Sequence[Tuple[int, int]] = ((0, 0), (0, 0), (0, 0)),
dilations: Sequence[Tuple[int, int]] = ((1, 1), (1, 1), (1, 1)),
flatten: bool = True,
output_relu: bool = True,
):
"""Initializer.
# Parameters
observation_space : See class attributes documentation.
output_size : See class attributes documentation.
"""
super().__init__()
self.rgb_uuid = rgb_uuid
if self.rgb_uuid is not None:
assert self.rgb_uuid in observation_space.spaces
self._n_input_rgb = observation_space.spaces[self.rgb_uuid].shape[2]
assert self._n_input_rgb >= 0
else:
self._n_input_rgb = 0
self.depth_uuid = depth_uuid
if self.depth_uuid is not None:
assert self.depth_uuid in observation_space.spaces
self._n_input_depth = observation_space.spaces[self.depth_uuid].shape[2]
assert self._n_input_depth >= 0
else:
self._n_input_depth = 0
if not self.is_blind:
# hyperparameters for layers
self._cnn_layers_channels = list(layer_channels)
self._cnn_layers_kernel_size = list(kernel_sizes)
self._cnn_layers_stride = list(layers_stride)
self._cnn_layers_paddings = list(paddings)
self._cnn_layers_dilations = list(dilations)
if self._n_input_rgb > 0:
input_rgb_cnn_dims = np.array(
observation_space.spaces[self.rgb_uuid].shape[:2], dtype=np.float32
)
self.rgb_cnn = self.make_cnn_from_params(
output_size=output_size,
input_dims=input_rgb_cnn_dims,
input_channels=self._n_input_rgb,
flatten=flatten,
output_relu=output_relu,
)
if self._n_input_depth > 0:
input_depth_cnn_dims = np.array(
observation_space.spaces[self.depth_uuid].shape[:2],
dtype=np.float32,
)
self.depth_cnn = self.make_cnn_from_params(
output_size=output_size,
input_dims=input_depth_cnn_dims,
input_channels=self._n_input_depth,
flatten=flatten,
output_relu=output_relu,
)
def make_cnn_from_params(
self,
output_size: int,
input_dims: np.ndarray,
input_channels: int,
flatten: bool,
output_relu: bool,
) -> nn.Module:
output_dims = input_dims
for kernel_size, stride, padding, dilation in zip(
self._cnn_layers_kernel_size,
self._cnn_layers_stride,
self._cnn_layers_paddings,
self._cnn_layers_dilations,
):
# noinspection PyUnboundLocalVariable
output_dims = self._conv_output_dim(
dimension=output_dims,
padding=np.array(padding, dtype=np.float32),
dilation=np.array(dilation, dtype=np.float32),
kernel_size=np.array(kernel_size, dtype=np.float32),
stride=np.array(stride, dtype=np.float32),
)
# noinspection PyUnboundLocalVariable
cnn = make_cnn(
input_channels=input_channels,
layer_channels=self._cnn_layers_channels,
kernel_sizes=self._cnn_layers_kernel_size,
strides=self._cnn_layers_stride,
paddings=self._cnn_layers_paddings,
dilations=self._cnn_layers_dilations,
output_height=output_dims[0],
output_width=output_dims[1],
output_channels=output_size,
flatten=flatten,
output_relu=output_relu,
)
self.layer_init(cnn)
return cnn
@staticmethod
def _conv_output_dim(
dimension: Sequence[int],
padding: Sequence[int],
dilation: Sequence[int],
kernel_size: Sequence[int],
stride: Sequence[int],
) -> Tuple[int, ...]:
"""Calculates the output height and width based on the input height and
width to the convolution layer. For parameter definitions see.
[here](https://pytorch.org/docs/master/nn.html#torch.nn.Conv2d).
# Parameters
dimension : See above link.
padding : See above link.
dilation : See above link.
kernel_size : See above link.
stride : See above link.
"""
assert len(dimension) == 2
out_dimension = []
for i in range(len(dimension)):
out_dimension.append(
int(
np.floor(
(
(
dimension[i]
+ 2 * padding[i]
- dilation[i] * (kernel_size[i] - 1)
- 1
)
/ stride[i]
)
+ 1
)
)
)
return tuple(out_dimension)
@staticmethod
def layer_init(cnn) -> None:
"""Initialize layer parameters using Kaiming normal."""
for layer in cnn:
if isinstance(layer, (nn.Conv2d, nn.Linear)):
nn.init.kaiming_normal_(layer.weight, nn.init.calculate_gain("relu"))
if layer.bias is not None:
nn.init.constant_(layer.bias, val=0)
@property
def is_blind(self):
"""True if the observation space doesn't include `self.rgb_uuid` or
`self.depth_uuid`."""
return self._n_input_rgb + self._n_input_depth == 0
def forward(self, observations: Dict[str, torch.Tensor]): # type: ignore
if self.is_blind:
return None
def check_use_agent(new_setting):
if use_agent is not None:
assert (
use_agent is new_setting
), "rgb and depth must both use an agent dim or none"
return new_setting
cnn_output_list: List[torch.Tensor] = []
use_agent: Optional[bool] = None
if self.rgb_uuid is not None:
use_agent = check_use_agent(len(observations[self.rgb_uuid].shape) == 6)
cnn_output_list.append(
compute_cnn_output(self.rgb_cnn, observations[self.rgb_uuid])
)
if self.depth_uuid is not None:
use_agent = check_use_agent(len(observations[self.depth_uuid].shape) == 6)
cnn_output_list.append(
compute_cnn_output(self.depth_cnn, observations[self.depth_uuid])
)
if use_agent:
channels_dim = 3 # [step, sampler, agent, channel (, height, width)]
else:
channels_dim = 2 # [step, sampler, channel (, height, width)]
return torch.cat(cnn_output_list, dim=channels_dim)
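# --- Illustrative sketch (not part of the original file) ---------------------
# Building a SimpleCNN over an RGB-only observation space and embedding a
# batch of [step, sampler, H, W, C] frames; all sizes are example values only.
def _example_simple_cnn_forward() -> torch.Tensor:
    obs_space = SpaceDict(
        {
            "rgb": gym.spaces.Box(
                low=np.float32(0.0), high=np.float32(1.0), shape=(84, 84, 3)
            )
        }
    )
    cnn = SimpleCNN(
        observation_space=obs_space, output_size=256, rgb_uuid="rgb", depth_uuid=None
    )
    frames = torch.rand(4, 2, 84, 84, 3)  # [step, sampler, H, W, C]
    return cnn({"rgb": frames})  # embedding of shape [4, 2, 256]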
class RNNStateEncoder(nn.Module):
"""A simple RNN-based model playing a role in many baseline embodied-
navigation agents.
See `seq_forward` for more details of how this model is used.
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
rnn_type: str = "GRU",
trainable_masked_hidden_state: bool = False,
):
"""An RNN for encoding the state in RL. Supports masking the hidden
        state during various timesteps in the forward pass.
# Parameters
input_size : The input size of the RNN.
hidden_size : The hidden size.
num_layers : The number of recurrent layers.
rnn_type : The RNN cell type. Must be GRU or LSTM.
trainable_masked_hidden_state : If `True` the initial hidden state (used at the start of a Task)
is trainable (as opposed to being a vector of zeros).
"""
super().__init__()
self._num_recurrent_layers = num_layers
self._rnn_type = rnn_type
self.rnn = getattr(torch.nn, rnn_type)(
input_size=input_size, hidden_size=hidden_size, num_layers=num_layers
)
self.trainable_masked_hidden_state = trainable_masked_hidden_state
if trainable_masked_hidden_state:
self.init_hidden_state = nn.Parameter(
0.1 * torch.randn((num_layers, 1, hidden_size)), requires_grad=True
)
self.layer_init()
def layer_init(self):
"""Initialize the RNN parameters in the model."""
for name, param in self.rnn.named_parameters():
if "weight" in name:
nn.init.orthogonal_(param)
elif "bias" in name:
nn.init.constant_(param, 0)
@property
def num_recurrent_layers(self) -> int:
"""The number of recurrent layers in the network."""
return self._num_recurrent_layers * (2 if "LSTM" in self._rnn_type else 1)
def _pack_hidden(
self, hidden_states: Union[torch.FloatTensor, Sequence[torch.FloatTensor]]
) -> torch.FloatTensor:
"""Stacks hidden states in an LSTM together (if using a GRU rather than
an LSTM this is just the identity).
# Parameters
hidden_states : The hidden states to (possibly) stack.
"""
if "LSTM" in self._rnn_type:
hidden_states = cast(
torch.FloatTensor,
torch.cat([hidden_states[0], hidden_states[1]], dim=0),
)
return cast(torch.FloatTensor, hidden_states)
def _unpack_hidden(
self, hidden_states: torch.FloatTensor
) -> Union[torch.FloatTensor, Tuple[torch.FloatTensor, torch.FloatTensor]]:
"""Partial inverse of `_pack_hidden` (exact if there are 2 hidden
layers)."""
if "LSTM" in self._rnn_type:
new_hidden_states = (
hidden_states[0 : self._num_recurrent_layers],
hidden_states[self._num_recurrent_layers :],
)
return cast(Tuple[torch.FloatTensor, torch.FloatTensor], new_hidden_states)
return cast(torch.FloatTensor, hidden_states)
def _mask_hidden(
self,
hidden_states: Union[Tuple[torch.FloatTensor, ...], torch.FloatTensor],
masks: torch.FloatTensor,
) -> Union[Tuple[torch.FloatTensor, ...], torch.FloatTensor]:
"""Mask input hidden states given `masks`. Useful when masks represent
steps on which a task has completed.
# Parameters
hidden_states : The hidden states.
masks : Masks to apply to hidden states (see seq_forward).
# Returns
        Masked hidden states. Here masked hidden states will be replaced with
        all zeros (if `trainable_masked_hidden_state` was False) or otherwise
        with a learnable collection of parameters.
"""
if not self.trainable_masked_hidden_state:
if isinstance(hidden_states, tuple):
hidden_states = tuple(
cast(torch.FloatTensor, v * masks) for v in hidden_states
)
else:
hidden_states = cast(torch.FloatTensor, masks * hidden_states)
else:
if isinstance(hidden_states, tuple):
# noinspection PyTypeChecker
hidden_states = tuple(
v * masks # type:ignore
+ (1.0 - masks) * (self.init_hidden_state.repeat(1, v.shape[1], 1)) # type: ignore
for v in hidden_states # type:ignore
) # type: ignore
else:
# noinspection PyTypeChecker
hidden_states = masks * hidden_states + (1 - masks) * ( # type: ignore
self.init_hidden_state.repeat(1, hidden_states.shape[1], 1)
)
return hidden_states
def single_forward(
self,
x: torch.FloatTensor,
hidden_states: torch.FloatTensor,
masks: torch.FloatTensor,
) -> Tuple[
torch.FloatTensor, Union[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]
]:
"""Forward for a single-step input."""
(
x,
hidden_states,
masks,
mem_agent,
obs_agent,
nsteps,
nsamplers,
nagents,
) = self.adapt_input(x, hidden_states, masks)
unpacked_hidden_states = self._unpack_hidden(hidden_states)
x, unpacked_hidden_states = self.rnn(
x,
self._mask_hidden(
unpacked_hidden_states, cast(torch.FloatTensor, masks[0].view(1, -1, 1))
),
)
return self.adapt_result(
x,
self._pack_hidden(unpacked_hidden_states),
mem_agent,
obs_agent,
nsteps,
nsamplers,
nagents,
)
def adapt_input(
self,
x: torch.FloatTensor,
hidden_states: torch.FloatTensor,
masks: torch.FloatTensor,
) -> Tuple[
torch.FloatTensor,
torch.FloatTensor,
torch.FloatTensor,
bool,
bool,
int,
int,
int,
]:
nsteps, nsamplers = masks.shape[:2]
assert len(hidden_states.shape) in [
3,
4,
], "hidden_states must be [layer, sampler, hidden] or [layer, sampler, agent, hidden]"
assert len(x.shape) in [
3,
4,
], "observations must be [step, sampler, data] or [step, sampler, agent, data]"
nagents = 1
mem_agent: bool
if len(hidden_states.shape) == 4: # [layer, sampler, agent, hidden]
mem_agent = True
nagents = hidden_states.shape[2]
else: # [layer, sampler, hidden]
mem_agent = False
obs_agent: bool
if len(x.shape) == 4: # [step, sampler, agent, dims]
obs_agent = True
else: # [step, sampler, dims]
obs_agent = False
# Flatten (nsamplers, nagents)
x = x.view(nsteps, nsamplers * nagents, -1) # type:ignore
masks = masks.expand(-1, -1, nagents).reshape( # type:ignore
nsteps, nsamplers * nagents
)
# Flatten (nsamplers, nagents) and remove step dim
hidden_states = hidden_states.view( # type:ignore
self.num_recurrent_layers, nsamplers * nagents, -1
)
# noinspection PyTypeChecker
return x, hidden_states, masks, mem_agent, obs_agent, nsteps, nsamplers, nagents
def adapt_result(
self,
outputs: torch.FloatTensor,
hidden_states: torch.FloatTensor,
mem_agent: bool,
obs_agent: bool,
nsteps: int,
nsamplers: int,
nagents: int,
) -> Tuple[
torch.FloatTensor, torch.FloatTensor,
]:
output_dims = (nsteps, nsamplers) + ((nagents, -1) if obs_agent else (-1,))
hidden_dims = (self.num_recurrent_layers, nsamplers) + (
(nagents, -1) if mem_agent else (-1,)
)
outputs = cast(torch.FloatTensor, outputs.view(*output_dims))
hidden_states = cast(torch.FloatTensor, hidden_states.view(*hidden_dims),)
return outputs, hidden_states
def seq_forward( # type: ignore
self,
x: torch.FloatTensor,
hidden_states: torch.FloatTensor,
masks: torch.FloatTensor,
) -> Tuple[
torch.FloatTensor, Union[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]
]:
"""Forward for a sequence of length T.
# Parameters
x : (Steps, Samplers, Agents, -1) tensor.
hidden_states : The starting hidden states.
masks : A (Steps, Samplers, Agents) tensor.
The masks to be applied to hidden state at every timestep, equal to 0 whenever the previous step finalized
the task, 1 elsewhere.
"""
(
x,
hidden_states,
masks,
mem_agent,
obs_agent,
nsteps,
nsamplers,
nagents,
) = self.adapt_input(x, hidden_states, masks)
# steps in sequence which have zero for any episode. Assume t=0 has
# a zero in it.
has_zeros = (masks[1:] == 0.0).any(dim=-1).nonzero().squeeze().cpu()
        # +1 to correct for the offset introduced by masks[1:]
if has_zeros.dim() == 0:
# handle scalar
has_zeros = [has_zeros.item() + 1] # type: ignore
else:
has_zeros = (has_zeros + 1).numpy().tolist()
# add t=0 and t=T to the list
has_zeros = cast(List[int], [0] + has_zeros + [nsteps])
unpacked_hidden_states = self._unpack_hidden(
cast(torch.FloatTensor, hidden_states)
)
outputs = []
for i in range(len(has_zeros) - 1):
# process steps that don't have any zeros in masks together
start_idx = int(has_zeros[i])
end_idx = int(has_zeros[i + 1])
# noinspection PyTypeChecker
rnn_scores, unpacked_hidden_states = self.rnn(
x[start_idx:end_idx],
self._mask_hidden(
unpacked_hidden_states,
cast(torch.FloatTensor, masks[start_idx].view(1, -1, 1)),
),
)
outputs.append(rnn_scores)
return self.adapt_result(
cast(torch.FloatTensor, torch.cat(outputs, dim=0)),
self._pack_hidden(unpacked_hidden_states),
mem_agent,
obs_agent,
nsteps,
nsamplers,
nagents,
)
def forward( # type: ignore
self,
x: torch.FloatTensor,
hidden_states: torch.FloatTensor,
masks: torch.FloatTensor,
) -> Tuple[
torch.FloatTensor, Union[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]
]:
nsteps = masks.shape[0]
if nsteps == 1:
return self.single_forward(x, hidden_states, masks)
return self.seq_forward(x, hidden_states, masks)
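# --- Illustrative sketch (not part of the original file) ---------------------
# Running RNNStateEncoder over a short rollout. A zero entry in `masks` at
# step t resets that sampler's hidden state before step t is processed (i.e.
# a new episode began there). Shapes follow the [step, sampler, ...] layout.
def _example_rnn_state_encoder() -> None:
    encoder = RNNStateEncoder(input_size=32, hidden_size=64)
    nsteps, nsamplers = 5, 3
    x = torch.rand(nsteps, nsamplers, 32)
    hidden = torch.zeros(encoder.num_recurrent_layers, nsamplers, 64)
    masks = torch.ones(nsteps, nsamplers, 1)
    masks[2, 1] = 0.0  # sampler 1 starts a fresh episode at step 2
    out, hidden = encoder(x, hidden, masks)
    assert out.shape == (nsteps, nsamplers, 64)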
class LinearActorCritic(ActorCriticModel[CategoricalDistr]):
def __init__(
self,
input_uuid: str,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
):
super().__init__(action_space=action_space, observation_space=observation_space)
assert (
input_uuid in observation_space.spaces
), "LinearActorCritic expects only a single observational input."
self.input_uuid = input_uuid
box_space: gym.spaces.Box = observation_space[self.input_uuid]
assert isinstance(box_space, gym.spaces.Box), (
"LinearActorCritic requires that"
"observation space corresponding to the input uuid is a Box space."
)
assert len(box_space.shape) == 1
self.in_dim = box_space.shape[0]
self.linear = nn.Linear(self.in_dim, action_space.n + 1)
nn.init.orthogonal_(self.linear.weight)
nn.init.constant_(self.linear.bias, 0)
# noinspection PyMethodMayBeStatic
def _recurrent_memory_specification(self):
return None
def forward(self, observations, memory, prev_actions, masks):
out = self.linear(observations[self.input_uuid])
# noinspection PyArgumentList
return (
ActorCriticOutput(
# ensure [steps, samplers, ...]
distributions=CategoricalDistr(logits=out[..., :-1]),
# ensure [steps, samplers, flattened]
values=cast(torch.FloatTensor, out[..., -1:].view(*out.shape[:2], -1)),
extras={},
),
None,
)
class RNNActorCritic(ActorCriticModel[Distr]):
def __init__(
self,
input_uuid: str,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
hidden_size: int = 128,
num_layers: int = 1,
rnn_type: str = "GRU",
head_type: Callable[..., ActorCriticModel[Distr]] = LinearActorCritic,
):
super().__init__(action_space=action_space, observation_space=observation_space)
self.hidden_size = hidden_size
self.rnn_type = rnn_type
assert (
input_uuid in observation_space.spaces
), "LinearActorCritic expects only a single observational input."
self.input_uuid = input_uuid
box_space: gym.spaces.Box = observation_space[self.input_uuid]
assert isinstance(box_space, gym.spaces.Box), (
"RNNActorCritic requires that"
"observation space corresponding to the input uuid is a Box space."
)
assert len(box_space.shape) == 1
self.in_dim = box_space.shape[0]
self.state_encoder = RNNStateEncoder(
input_size=self.in_dim,
hidden_size=hidden_size,
num_layers=num_layers,
rnn_type=rnn_type,
trainable_masked_hidden_state=True,
)
self.head_uuid = "{}_{}".format("rnn", input_uuid)
self.ac_nonrecurrent_head: ActorCriticModel[Distr] = head_type(
input_uuid=self.head_uuid,
action_space=action_space,
observation_space=SpaceDict(
{
self.head_uuid: gym.spaces.Box(
low=np.float32(0.0), high=np.float32(1.0), shape=(hidden_size,)
)
}
),
)
self.memory_key = "rnn"
@property
def recurrent_hidden_state_size(self) -> int:
return self.hidden_size
@property
def num_recurrent_layers(self) -> int:
return self.state_encoder.num_recurrent_layers
def _recurrent_memory_specification(self):
return {
self.memory_key: (
(
("layer", self.num_recurrent_layers),
("sampler", None),
("hidden", self.recurrent_hidden_state_size),
),
torch.float32,
)
}
def forward( # type:ignore
self,
observations: Dict[str, Union[torch.FloatTensor, Dict[str, Any]]],
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
if self.memory_key not in memory:
get_logger().warning(
f"Key {self.memory_key} not found in memory,"
f" initializing this as all zeros."
)
obs = observations[self.input_uuid]
memory.check_append(
key=self.memory_key,
tensor=obs.new(
self.num_recurrent_layers,
obs.shape[1],
self.recurrent_hidden_state_size,
)
.float()
.zero_(),
sampler_dim=1,
)
rnn_out, mem_return = self.state_encoder(
x=observations[self.input_uuid],
hidden_states=memory.tensor(self.memory_key),
masks=masks,
)
# noinspection PyCallingNonCallable
out, _ = self.ac_nonrecurrent_head(
observations={self.head_uuid: rnn_out},
memory=None,
prev_actions=prev_actions,
masks=masks,
)
# noinspection PyArgumentList
return (
out,
memory.set_tensor(self.memory_key, mem_return),
)
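# --- Illustrative sketch (not part of the original file) ---------------------
# A LinearActorCritic over a 10-dim Box observation with 4 discrete actions.
# Its single linear layer emits n + 1 outputs: n action logits plus one value.
def _example_linear_actor_critic() -> None:
    obs_space = SpaceDict(
        {
            "vec": gym.spaces.Box(
                low=np.float32(-1.0), high=np.float32(1.0), shape=(10,)
            )
        }
    )
    ac = LinearActorCritic(
        input_uuid="vec",
        action_space=gym.spaces.Discrete(4),
        observation_space=obs_space,
    )
    obs = {"vec": torch.rand(5, 2, 10)}  # [step, sampler, dim]
    out, _ = ac(obs, memory=None, prev_actions=None, masks=None)
    assert out.values.shape == (5, 2, 1)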
| allenact-main | allenact/embodiedai/models/basic_models.py |
# Original work Copyright (c) Facebook, Inc. and its affiliates.
# Modified work Copyright (c) Allen Institute for AI
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Adapted from https://github.com/joel99/habitat-pointnav-aux/blob/master/habitat_baselines/
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from gym.spaces.dict import Dict as SpaceDict
from allenact.utils.model_utils import Flatten
from allenact.utils.system import get_logger
def conv3x3(in_planes, out_planes, stride=1, groups=1):
"""3x3 convolution with padding."""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False,
groups=groups,
)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution."""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
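# --- Illustrative sketch (not part of the original file) ---------------------
# conv3x3 uses padding=1, so spatial dims are preserved at stride 1 and
# halved (rounding down) at stride 2:
def _example_conv_shapes() -> None:
    x = torch.rand(1, 8, 16, 16)
    assert conv3x3(8, 16, stride=1)(x).shape == (1, 16, 16, 16)
    assert conv3x3(8, 16, stride=2)(x).shape == (1, 16, 8, 8)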
class BasicBlock(nn.Module):
expansion = 1
resneXt = False
def __init__(
self, inplanes, planes, ngroups, stride=1, downsample=None, cardinality=1,
):
super(BasicBlock, self).__init__()
self.convs = nn.Sequential(
conv3x3(inplanes, planes, stride, groups=cardinality),
nn.GroupNorm(ngroups, planes),
nn.ReLU(True),
conv3x3(planes, planes, groups=cardinality),
nn.GroupNorm(ngroups, planes),
)
self.downsample = downsample
self.relu = nn.ReLU(True)
def forward(self, x):
residual = x
out = self.convs(x)
if self.downsample is not None:
residual = self.downsample(x)
return self.relu(out + residual)
def _build_bottleneck_branch(inplanes, planes, ngroups, stride, expansion, groups=1):
return nn.Sequential(
conv1x1(inplanes, planes),
nn.GroupNorm(ngroups, planes),
nn.ReLU(True),
conv3x3(planes, planes, stride, groups=groups),
nn.GroupNorm(ngroups, planes),
nn.ReLU(True),
conv1x1(planes, planes * expansion),
nn.GroupNorm(ngroups, planes * expansion),
)
class SE(nn.Module):
def __init__(self, planes, r=16):
super().__init__()
self.squeeze = nn.AdaptiveAvgPool2d(1)
self.excite = nn.Sequential(
nn.Linear(planes, int(planes / r)),
nn.ReLU(True),
nn.Linear(int(planes / r), planes),
nn.Sigmoid(),
)
def forward(self, x):
b, c, _, _ = x.size()
x = self.squeeze(x)
x = x.view(b, c)
x = self.excite(x)
return x.view(b, c, 1, 1)
def _build_se_branch(planes, r=16):
return SE(planes, r)
class Bottleneck(nn.Module):
expansion = 4
resneXt = False
def __init__(
self, inplanes, planes, ngroups, stride=1, downsample=None, cardinality=1,
):
super().__init__()
self.convs = _build_bottleneck_branch(
inplanes, planes, ngroups, stride, self.expansion, groups=cardinality,
)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
def _impl(self, x):
identity = x
out = self.convs(x)
if self.downsample is not None:
identity = self.downsample(x)
return self.relu(out + identity)
def forward(self, x):
return self._impl(x)
class SEBottleneck(Bottleneck):
def __init__(
self, inplanes, planes, ngroups, stride=1, downsample=None, cardinality=1,
):
super().__init__(inplanes, planes, ngroups, stride, downsample, cardinality)
self.se = _build_se_branch(planes * self.expansion)
def _impl(self, x):
identity = x
out = self.convs(x)
out = self.se(out) * out
if self.downsample is not None:
identity = self.downsample(x)
return self.relu(out + identity)
class SEResNeXtBottleneck(SEBottleneck):
expansion = 2
resneXt = True
class ResNeXtBottleneck(Bottleneck):
expansion = 2
resneXt = True
class GroupNormResNet(nn.Module):
def __init__(self, in_channels, base_planes, ngroups, block, layers, cardinality=1):
super(GroupNormResNet, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(
in_channels,
base_planes,
kernel_size=7,
stride=2,
padding=3,
bias=False,
),
nn.GroupNorm(ngroups, base_planes),
nn.ReLU(True),
)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.cardinality = cardinality
self.inplanes = base_planes
if block.resneXt:
base_planes *= 2
self.layer1 = self._make_layer(block, ngroups, base_planes, layers[0])
self.layer2 = self._make_layer(
block, ngroups, base_planes * 2, layers[1], stride=2
)
self.layer3 = self._make_layer(
block, ngroups, base_planes * 2 * 2, layers[2], stride=2
)
self.layer4 = self._make_layer(
block, ngroups, base_planes * 2 * 2 * 2, layers[3], stride=2
)
self.final_channels = self.inplanes
self.final_spatial_compress = 1.0 / (2 ** 5)
def _make_layer(self, block, ngroups, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.GroupNorm(ngroups, planes * block.expansion),
)
layers = [
block(
self.inplanes,
planes,
ngroups,
stride,
downsample,
cardinality=self.cardinality,
)
]
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, ngroups))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def gnresnet18(in_channels, base_planes, ngroups):
model = GroupNormResNet(in_channels, base_planes, ngroups, BasicBlock, [2, 2, 2, 2])
return model
def gnresnet50(in_channels, base_planes, ngroups):
model = GroupNormResNet(in_channels, base_planes, ngroups, Bottleneck, [3, 4, 6, 3])
return model
def gnresneXt50(in_channels, base_planes, ngroups):
model = GroupNormResNet(
in_channels,
base_planes,
ngroups,
ResNeXtBottleneck,
[3, 4, 6, 3],
cardinality=int(base_planes / 2),
)
return model
def se_gnresnet50(in_channels, base_planes, ngroups):
model = GroupNormResNet(
in_channels, base_planes, ngroups, SEBottleneck, [3, 4, 6, 3]
)
return model
def se_gnresneXt50(in_channels, base_planes, ngroups):
model = GroupNormResNet(
in_channels,
base_planes,
ngroups,
SEResNeXtBottleneck,
[3, 4, 6, 3],
cardinality=int(base_planes / 2),
)
return model
def se_gnresneXt101(in_channels, base_planes, ngroups):
model = GroupNormResNet(
in_channels,
base_planes,
ngroups,
SEResNeXtBottleneck,
[3, 4, 23, 3],
cardinality=int(base_planes / 2),
)
return model
class GroupNormResNetEncoder(nn.Module):
def __init__(
self,
observation_space: SpaceDict,
rgb_uuid: Optional[str],
depth_uuid: Optional[str],
output_size: int,
baseplanes=32,
ngroups=32,
make_backbone=None,
):
super().__init__()
self._inputs = []
self.rgb_uuid = rgb_uuid
if self.rgb_uuid is not None:
assert self.rgb_uuid in observation_space.spaces
self._n_input_rgb = observation_space.spaces[self.rgb_uuid].shape[2]
assert self._n_input_rgb >= 0
self._inputs.append(self.rgb_uuid)
else:
self._n_input_rgb = 0
self.depth_uuid = depth_uuid
if self.depth_uuid is not None:
assert self.depth_uuid in observation_space.spaces
self._n_input_depth = observation_space.spaces[self.depth_uuid].shape[2]
assert self._n_input_depth >= 0
self._inputs.append(self.depth_uuid)
else:
self._n_input_depth = 0
if not self.is_blind:
spatial_size = (
observation_space.spaces[self._inputs[0]].shape[0] // 2
) # H (=W) / 2
# RGBD into one model
input_channels = self._n_input_rgb + self._n_input_depth # C
self.backbone = make_backbone(input_channels, baseplanes, ngroups)
final_spatial = int(
np.ceil(spatial_size * self.backbone.final_spatial_compress)
            )  # use np.ceil here; the original habitat code truncated via int()
after_compression_flat_size = 2048
num_compression_channels = int(
round(after_compression_flat_size / (final_spatial ** 2))
)
self.compression = nn.Sequential(
nn.Conv2d(
self.backbone.final_channels,
num_compression_channels,
kernel_size=3,
padding=1,
bias=False,
),
nn.GroupNorm(1, num_compression_channels),
nn.ReLU(True),
)
self.output_shape = (
num_compression_channels,
final_spatial,
final_spatial,
)
self.head = nn.Sequential(
Flatten(),
nn.Linear(np.prod(self.output_shape), output_size),
nn.ReLU(True),
)
self.layer_init()
@property
def is_blind(self):
return self._n_input_rgb + self._n_input_depth == 0
def layer_init(self):
for layer in self.modules():
if isinstance(layer, (nn.Conv2d, nn.Linear)):
nn.init.kaiming_normal_(layer.weight, nn.init.calculate_gain("relu"))
if layer.bias is not None:
nn.init.constant_(layer.bias, val=0)
get_logger().debug("Initializing resnet encoder")
def forward(self, observations):
if self.is_blind:
return None
        # TODO: the reshaping below follows compute_cnn_output(), but it is
        # hard to reuse that helper here since this forward cannot simply be
        # passed to it as the cnn parameter
nagents: Optional[int] = None
nsteps: Optional[int] = None
nsamplers: Optional[int] = None
assert len(self._inputs) > 0
cnn_input = []
for mode in self._inputs:
mode_obs = observations[mode]
assert len(mode_obs.shape) in [
5,
6,
], "CNN input must have shape [STEP, SAMPLER, (AGENT,) dim1, dim2, dim3]"
if len(mode_obs.shape) == 6:
nsteps, nsamplers, nagents = mode_obs.shape[:3]
else:
nsteps, nsamplers = mode_obs.shape[:2]
# Make FLAT_BATCH = nsteps * nsamplers (* nagents)
mode_obs = mode_obs.view(
(-1,) + mode_obs.shape[2 + int(nagents is not None) :]
)
# permute tensor to dimension [BATCH x CHANNEL x HEIGHT X WIDTH]
mode_obs = mode_obs.permute(0, 3, 1, 2)
cnn_input.append(mode_obs)
x = torch.cat(cnn_input, dim=1)
x = F.avg_pool2d(x, 2) # 2x downsampling
x = self.backbone(x) # (256, 4, 4)
x = self.compression(x) # (128, 4, 4)
x = self.head(x) # (2048) -> (hidden_size)
if nagents is not None:
x = x.reshape((nsteps, nsamplers, nagents,) + x.shape[1:])
else:
x = x.reshape((nsteps, nsamplers,) + x.shape[1:])
return x
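# --- Illustrative sketch (not part of the original file) ---------------------
# A GroupNorm ResNet-18 encoder over RGB observations. Frames are 2x
# average-pooled before the backbone, so a 128x128 frame reaches the backbone
# at 64x64; with `final_spatial_compress = 1/32` this yields 2x2 feature maps.
def _example_gn_resnet_encoder() -> torch.Tensor:
    import gym
    obs_space = SpaceDict(
        {
            "rgb": gym.spaces.Box(
                low=np.float32(0.0), high=np.float32(1.0), shape=(128, 128, 3)
            )
        }
    )
    encoder = GroupNormResNetEncoder(
        observation_space=obs_space,
        rgb_uuid="rgb",
        depth_uuid=None,
        output_size=512,
        make_backbone=gnresnet18,
    )
    frames = torch.rand(2, 3, 128, 128, 3)  # [step, sampler, H, W, C]
    return encoder({"rgb": frames})  # embedding of shape [2, 3, 512]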
| allenact-main | allenact/embodiedai/models/resnet.py |
# Original work Copyright (c) Facebook, Inc. and its affiliates.
# Modified work Copyright (c) Allen Institute for AI
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Adapted from https://github.com/joel99/habitat-pointnav-aux/blob/master/habitat_baselines/
import math
from typing import Tuple
import torch
import torch.nn as nn
class Fusion(nn.Module):
"""Base class of belief fusion model from Auxiliary Tasks Speed Up Learning
PointGoal Navigation (Ye, 2020) Child class should implement
`get_belief_weights` function to generate weights to fuse the beliefs from
all the auxiliary task into one."""
def __init__(self, hidden_size, obs_embed_size, num_tasks):
super().__init__()
self.hidden_size = hidden_size # H
self.obs_embed_size = obs_embed_size # Z
self.num_tasks = num_tasks # k
def forward(
self,
all_beliefs: torch.FloatTensor, # (T, N, H, K)
obs_embeds: torch.FloatTensor, # (T, N, Z)
) -> Tuple[torch.FloatTensor, torch.FloatTensor]: # (T, N, H), (T, N, K)
num_steps, num_samplers, _, _ = all_beliefs.shape
all_beliefs = all_beliefs.view(
num_steps * num_samplers, self.hidden_size, self.num_tasks
)
obs_embeds = obs_embeds.view(num_steps * num_samplers, -1)
weights = self.get_belief_weights(
all_beliefs=all_beliefs, obs_embeds=obs_embeds, # (T*N, H, K) # (T*N, Z)
).unsqueeze(
-1
) # (T*N, K, 1)
beliefs = torch.bmm(all_beliefs, weights) # (T*N, H, 1)
beliefs = beliefs.squeeze(-1).view(num_steps, num_samplers, self.hidden_size)
weights = weights.squeeze(-1).view(num_steps, num_samplers, self.num_tasks)
return beliefs, weights
def get_belief_weights(
self,
all_beliefs: torch.FloatTensor, # (T*N, H, K)
obs_embeds: torch.FloatTensor, # (T*N, Z)
) -> torch.FloatTensor: # (T*N, K)
raise NotImplementedError()
class AverageFusion(Fusion):
UUID = "avg"
def get_belief_weights(
self,
all_beliefs: torch.FloatTensor, # (T*N, H, K)
obs_embeds: torch.FloatTensor, # (T*N, Z)
) -> torch.FloatTensor: # (T*N, K)
batch_size = all_beliefs.shape[0]
weights = torch.ones(batch_size, self.num_tasks).to(all_beliefs)
weights /= self.num_tasks
return weights
class SoftmaxFusion(Fusion):
"""Situational Fusion of Visual Representation for Visual Navigation
https://arxiv.org/abs/1908.09073."""
UUID = "smax"
def __init__(self, hidden_size, obs_embed_size, num_tasks):
super().__init__(hidden_size, obs_embed_size, num_tasks)
# mapping from rnn input to task
# ignore beliefs
self.linear = nn.Linear(obs_embed_size, num_tasks)
def get_belief_weights(
self,
all_beliefs: torch.Tensor, # (T*N, H, K)
obs_embeds: torch.Tensor, # (T*N, Z)
) -> torch.Tensor: # (T*N, K)
scores = self.linear(obs_embeds) # (T*N, K)
weights = torch.softmax(scores, dim=-1)
return weights
class AttentiveFusion(Fusion):
"""Attention is All You Need https://arxiv.org/abs/1706.03762 i.e. scaled
dot-product attention."""
UUID = "attn"
def __init__(self, hidden_size, obs_embed_size, num_tasks):
super().__init__(hidden_size, obs_embed_size, num_tasks)
self.linear = nn.Linear(obs_embed_size, hidden_size)
def get_belief_weights(
self,
all_beliefs: torch.Tensor, # (T*N, H, K)
obs_embeds: torch.Tensor, # (T*N, Z)
) -> torch.Tensor: # (T*N, K)
queries = self.linear(obs_embeds).unsqueeze(1) # (T*N, 1, H)
scores = torch.bmm(queries, all_beliefs).squeeze(1) # (T*N, K)
weights = torch.softmax(
scores / math.sqrt(self.hidden_size), dim=-1
) # (T*N, K)
return weights
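# --- Illustrative sketch (not part of the original file) ---------------------
# Fusing K task-specific beliefs into a single belief with AttentiveFusion.
# `all_beliefs` is (T, N, H, K) and `obs_embeds` is (T, N, Z); the returned
# fused beliefs are (T, N, H) with per-task weights of shape (T, N, K).
def _example_attentive_fusion() -> None:
    T, N, H, Z, K = 4, 2, 64, 32, 3
    fusion = AttentiveFusion(hidden_size=H, obs_embed_size=Z, num_tasks=K)
    all_beliefs = torch.rand(T, N, H, K)
    obs_embeds = torch.rand(T, N, Z)
    beliefs, weights = fusion(all_beliefs, obs_embeds)
    assert beliefs.shape == (T, N, H) and weights.shape == (T, N, K)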
| allenact-main | allenact/embodiedai/models/fusion_models.py |
from collections import OrderedDict
from typing import Tuple, Dict, Optional, List, Sequence
from typing import TypeVar
import gym
import torch
import torch.nn as nn
from gym.spaces.dict import Dict as SpaceDict
from allenact.algorithms.onpolicy_sync.policy import (
ActorCriticModel,
LinearCriticHead,
LinearActorHead,
ObservationType,
DistributionType,
)
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import ActorCriticOutput, Memory
from allenact.embodiedai.aux_losses.losses import MultiAuxTaskNegEntropyLoss
from allenact.embodiedai.models.aux_models import AuxiliaryModel
from allenact.embodiedai.models.basic_models import RNNStateEncoder
from allenact.embodiedai.models.fusion_models import Fusion
from allenact.utils.model_utils import FeatureEmbedding
from allenact.utils.system import get_logger
FusionType = TypeVar("FusionType", bound=Fusion)
class VisualNavActorCritic(ActorCriticModel[CategoricalDistr]):
"""Base class of visual navigation / manipulation (or broadly, embodied AI)
model.
    Subclasses must implement the `forward_encoder` function.
"""
action_space: gym.spaces.Discrete
def __init__(
self,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
hidden_size=512,
multiple_beliefs=False,
beliefs_fusion: Optional[FusionType] = None,
auxiliary_uuids: Optional[List[str]] = None,
auxiliary_model_class=AuxiliaryModel,
):
super().__init__(action_space=action_space, observation_space=observation_space)
self._hidden_size = hidden_size
assert multiple_beliefs == (beliefs_fusion is not None)
self.multiple_beliefs = multiple_beliefs
self.beliefs_fusion = beliefs_fusion
self.auxiliary_uuids = auxiliary_uuids
if isinstance(self.auxiliary_uuids, list) and len(self.auxiliary_uuids) == 0:
self.auxiliary_uuids = None
# Define the placeholders in init function
self.state_encoders: Optional[nn.ModuleDict] = None
self.aux_models: Optional[nn.ModuleDict] = None
self.actor: Optional[LinearActorHead] = None
self.critic: Optional[LinearCriticHead] = None
self.prev_action_embedder: Optional[FeatureEmbedding] = None
self.fusion_model: Optional[nn.Module] = None
self.belief_names: Optional[Sequence[str]] = None
self.auxiliary_model_class = auxiliary_model_class
def create_state_encoders(
self,
obs_embed_size: int,
prev_action_embed_size: int,
num_rnn_layers: int,
rnn_type: str,
add_prev_actions: bool,
add_prev_action_null_token: bool,
trainable_masked_hidden_state=False,
):
rnn_input_size = obs_embed_size
self.prev_action_embedder = FeatureEmbedding(
input_size=int(add_prev_action_null_token) + self.action_space.n,
output_size=prev_action_embed_size if add_prev_actions else 0,
)
if add_prev_actions:
rnn_input_size += prev_action_embed_size
        state_encoders = OrderedDict()  # preserve insertion order in py3.6
if self.multiple_beliefs: # multiple belief model
for aux_uuid in self.auxiliary_uuids:
state_encoders[aux_uuid] = RNNStateEncoder(
rnn_input_size,
self._hidden_size,
num_layers=num_rnn_layers,
rnn_type=rnn_type,
trainable_masked_hidden_state=trainable_masked_hidden_state,
)
# create fusion model
self.fusion_model = self.beliefs_fusion(
hidden_size=self._hidden_size,
obs_embed_size=obs_embed_size,
num_tasks=len(self.auxiliary_uuids),
)
else: # single belief model
state_encoders["single_belief"] = RNNStateEncoder(
rnn_input_size,
self._hidden_size,
num_layers=num_rnn_layers,
rnn_type=rnn_type,
trainable_masked_hidden_state=trainable_masked_hidden_state,
)
self.state_encoders = nn.ModuleDict(state_encoders)
self.belief_names = list(self.state_encoders.keys())
get_logger().info(
"there are {} belief models: {}".format(
len(self.belief_names), self.belief_names
)
)
def load_state_dict(self, state_dict, **kwargs):
new_state_dict = OrderedDict()
for key in state_dict.keys():
if "state_encoder." in key: # old key name
new_key = key.replace("state_encoder.", "state_encoders.single_belief.")
elif "goal_visual_encoder.embed_class" in key:
new_key = key.replace(
"goal_visual_encoder.embed_class", "goal_visual_encoder.embed_goal"
)
else:
new_key = key
new_state_dict[new_key] = state_dict[key]
return super().load_state_dict(new_state_dict, **kwargs) # compatible in keys
def create_actorcritic_head(self):
self.actor = LinearActorHead(self._hidden_size, self.action_space.n)
self.critic = LinearCriticHead(self._hidden_size)
def create_aux_models(self, obs_embed_size: int, action_embed_size: int):
if self.auxiliary_uuids is None:
return
aux_models = OrderedDict()
for aux_uuid in self.auxiliary_uuids:
aux_models[aux_uuid] = self.auxiliary_model_class(
aux_uuid=aux_uuid,
action_dim=self.action_space.n,
obs_embed_dim=obs_embed_size,
belief_dim=self._hidden_size,
action_embed_size=action_embed_size,
)
self.aux_models = nn.ModuleDict(aux_models)
@property
def num_recurrent_layers(self):
"""Number of recurrent hidden layers."""
return list(self.state_encoders.values())[0].num_recurrent_layers
@property
def recurrent_hidden_state_size(self):
"""The recurrent hidden state size of a single model."""
return self._hidden_size
def _recurrent_memory_specification(self):
return {
memory_key: (
(
("layer", self.num_recurrent_layers),
("sampler", None),
("hidden", self.recurrent_hidden_state_size),
),
torch.float32,
)
for memory_key in self.belief_names
}
def forward_encoder(self, observations: ObservationType) -> torch.FloatTensor:
raise NotImplementedError("Obs Encoder Not Implemented")
def fuse_beliefs(
self, beliefs_dict: Dict[str, torch.FloatTensor], obs_embeds: torch.FloatTensor,
) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
all_beliefs = torch.stack(list(beliefs_dict.values()), dim=-1) # (T, N, H, k)
if self.multiple_beliefs: # call the fusion model
return self.fusion_model(all_beliefs=all_beliefs, obs_embeds=obs_embeds)
# single belief
beliefs = all_beliefs.squeeze(-1) # (T,N,H)
return beliefs, None
def forward( # type:ignore
self,
observations: ObservationType,
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
"""Processes input batched observations to produce new actor and critic
values. Processes input batched observations (along with prior hidden
states, previous actions, and masks denoting which recurrent hidden
states should be masked) and returns an `ActorCriticOutput` object
containing the model's policy (distribution over actions) and
evaluation of the current state (value).
# Parameters
observations : Batched input observations.
memory : `Memory` containing the hidden states from initial timepoints.
prev_actions : Tensor of previous actions taken.
masks : Masks applied to hidden states. See `RNNStateEncoder`.
# Returns
Tuple of the `ActorCriticOutput` and recurrent hidden state.
"""
# 1.1 use perception model (i.e. encoder) to get observation embeddings
obs_embeds = self.forward_encoder(observations)
# 1.2 use embedding model to get prev_action embeddings
if self.prev_action_embedder.input_size == self.action_space.n + 1:
# In this case we have a unique embedding for the start of an episode
prev_actions_embeds = self.prev_action_embedder(
torch.where(
condition=0 != masks.view(*prev_actions.shape),
input=prev_actions + 1,
other=torch.zeros_like(prev_actions),
)
)
else:
prev_actions_embeds = self.prev_action_embedder(prev_actions)
joint_embeds = torch.cat((obs_embeds, prev_actions_embeds), dim=-1) # (T, N, *)
# 2. use RNNs to get single/multiple beliefs
beliefs_dict = {}
for key, model in self.state_encoders.items():
beliefs_dict[key], rnn_hidden_states = model(
joint_embeds, memory.tensor(key), masks
)
memory.set_tensor(key, rnn_hidden_states) # update memory here
# 3. fuse beliefs for multiple belief models
beliefs, task_weights = self.fuse_beliefs(
beliefs_dict, obs_embeds
) # fused beliefs
# 4. prepare output
extras = (
{
aux_uuid: {
"beliefs": (
beliefs_dict[aux_uuid] if self.multiple_beliefs else beliefs
),
"obs_embeds": obs_embeds,
"aux_model": (
self.aux_models[aux_uuid]
if aux_uuid in self.aux_models
else None
),
}
for aux_uuid in self.auxiliary_uuids
}
if self.auxiliary_uuids is not None
else {}
)
if self.multiple_beliefs:
extras[MultiAuxTaskNegEntropyLoss.UUID] = task_weights
actor_critic_output = ActorCriticOutput(
distributions=self.actor(beliefs),
values=self.critic(beliefs),
extras=extras,
)
return actor_critic_output, memory
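# --- Illustrative sketch (not part of the original file) ---------------------
# A minimal concrete VisualNavActorCritic: embed a flat observation with one
# linear layer and use a single ("single_belief") RNN. The "flat" uuid and all
# sizes below are hypothetical example values.
class ExampleFlatNavActorCritic(VisualNavActorCritic):
    def __init__(self, action_space, observation_space, obs_dim=32, hidden_size=128):
        super().__init__(
            action_space=action_space,
            observation_space=observation_space,
            hidden_size=hidden_size,
        )
        self.obs_embedder = nn.Linear(obs_dim, hidden_size)
        self.create_state_encoders(
            obs_embed_size=hidden_size,
            prev_action_embed_size=16,
            num_rnn_layers=1,
            rnn_type="GRU",
            add_prev_actions=True,
            add_prev_action_null_token=True,
        )
        self.create_actorcritic_head()
        self.create_aux_models(obs_embed_size=hidden_size, action_embed_size=16)
    def forward_encoder(self, observations: ObservationType) -> torch.FloatTensor:
        return self.obs_embedder(observations["flat"])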
| allenact-main | allenact/embodiedai/models/visual_nav_models.py |
allenact-main | allenact/embodiedai/storage/__init__.py |
|
import math
import random
from collections import defaultdict
from typing import Union, Tuple, Optional, Dict, Callable, cast, Sequence
import torch
import torch.nn.functional as F
from allenact.algorithms.onpolicy_sync.policy import ObservationType
from allenact.algorithms.onpolicy_sync.storage import (
MiniBatchStorageMixin,
ExperienceStorage,
)
from allenact.base_abstractions.misc import (
GenericAbstractLoss,
ModelType,
Memory,
LossOutput,
)
from allenact.utils.misc_utils import unzip, partition_sequence
def _index_recursive(d: Dict, key: Union[str, Tuple[str, ...]]):
if isinstance(key, str):
return d[key]
for k in key:
d = d[k]
return d
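# --- Illustrative sketch (not part of the original file) ---------------------
# `_index_recursive` supports both flat string keys and nested tuple keys:
def _example_index_recursive() -> None:
    nested = {"sensors": {"rgb": 1}}
    assert _index_recursive(nested, "sensors") == {"rgb": 1}
    assert _index_recursive(nested, ("sensors", "rgb")) == 1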
class InverseDynamicsVDRLoss(GenericAbstractLoss):
def __init__(
self,
compute_action_logits_fn: Callable,
img0_key: str,
img1_key: str,
action_key: str,
):
self.compute_action_logits_fn = compute_action_logits_fn
self.img0_key = img0_key
self.img1_key = img1_key
self.action_key = action_key
def loss(
self,
*,
model: ModelType,
batch: ObservationType,
batch_memory: Memory,
stream_memory: Memory,
) -> LossOutput:
action_logits = self.compute_action_logits_fn(
model=model, img0=batch[self.img0_key], img1=batch[self.img1_key],
)
loss = F.cross_entropy(action_logits, target=batch[self.action_key])
return LossOutput(
value=loss,
info={"cross_entropy": loss.item()},
per_epoch_info={},
batch_memory=batch_memory,
stream_memory=stream_memory,
bsize=int(batch[self.img0_key].shape[0]),
)
class DiscreteVisualDynamicsReplayStorage(ExperienceStorage, MiniBatchStorageMixin):
def __init__(
self,
image_uuid: Union[str, Tuple[str, ...]],
action_success_uuid: Optional[Union[str, Tuple[str, ...]]],
nactions: int,
num_to_store_per_action: int,
max_to_save_per_episode: int,
target_batch_size: int,
extra_targets: Optional[Sequence] = None,
):
self.image_uuid = image_uuid
self.action_success_uuid = action_success_uuid
self.nactions = nactions
self.num_to_store_per_action = num_to_store_per_action
self.max_to_save_per_episode = max_to_save_per_episode
self.target_batch_size = target_batch_size
self.extra_targets = extra_targets if extra_targets is not None else []
self._prev_imgs: Optional[torch.Tensor] = None
self.action_to_saved_transitions = {i: [] for i in range(nactions)}
self.action_to_num_seen = {i: 0 for i in range(nactions)}
self.task_sampler_to_actions_already_sampled = defaultdict(lambda: set())
self.device = torch.device("cpu")
self._total_samples_returned_in_batches = 0
@property
def total_experiences(self):
return self._total_samples_returned_in_batches
def set_partition(self, index: int, num_parts: int):
self.num_to_store_per_action = math.ceil(
self.num_to_store_per_action / num_parts
)
self.target_batch_size = math.ceil(self.target_batch_size / num_parts)
def initialize(self, *, observations: ObservationType, **kwargs):
self._prev_imgs = None
self.add(observations=observations, actions=None, masks=None)
def batched_experience_generator(self, num_mini_batch: int):
triples = [
(i0, a, i1)
for a, v in self.action_to_saved_transitions.items()
for (i0, i1) in v
]
random.shuffle(triples)
if len(triples) == 0:
return
parts = partition_sequence(
triples, math.ceil(len(triples) / self.target_batch_size)
)
for part in parts:
img0s, actions, img1s = unzip(part, n=3)
img0 = torch.stack([i0.to(self.device) for i0 in img0s], 0)
action = torch.tensor(actions, device=self.device)
img1 = torch.stack([i1.to(self.device) for i1 in img1s], 0)
self._total_samples_returned_in_batches += img0.shape[0]
yield {"img0": img0, "action": action, "img1": img1}
def add(
self,
*,
observations: ObservationType,
actions: Optional[torch.Tensor],
masks: Optional[torch.Tensor],
**kwargs,
):
cur_imgs = cast(
torch.Tensor, _index_recursive(d=observations, key=self.image_uuid).cpu()
)
if self._prev_imgs is not None:
actions = actions.view(-1).cpu().numpy()
masks = masks.view(-1).cpu().numpy()
if self.action_success_uuid is not None:
action_successes = (
observations[self.action_success_uuid].cpu().view(-1).numpy()
)
else:
action_successes = [True] * actions.shape[0]
extra = {}
for et in self.extra_targets:
extra[et] = observations[et][0].cpu().numpy()
nsamplers = actions.shape[0]
assert nsamplers == masks.shape[0]
for i, (a, m, action_success) in enumerate(
zip(actions, masks, action_successes)
):
actions_already_sampled_in_ep = self.task_sampler_to_actions_already_sampled[
i
]
if (
m != 0
and action_success
and (
len(actions_already_sampled_in_ep)
<= self.max_to_save_per_episode
)
and a not in actions_already_sampled_in_ep
): # Not the start of a new episode/task -> self._prev_imgs[i] corresponds to cur_imgs[i]
saved_transitions = self.action_to_saved_transitions[a]
if len(saved_transitions) < self.num_to_store_per_action:
saved_transitions.append((self._prev_imgs[i], cur_imgs[i]))
else:
saved_transitions[
random.randint(0, len(saved_transitions) - 1)
] = (
self._prev_imgs[i],
cur_imgs[i],
)
# Reservoir sampling transitions
# a = int(a)
# saved_transitions = self.action_to_saved_transitions[a]
# num_seen = self.action_to_num_seen[a]
# if num_seen < self.triples_to_save_per_action:
# saved_transitions.append((self._prev_imgs[i], cur_imgs[i]))
# else:
# index = random.randint(0, num_seen)
# if index < self.triples_to_save_per_action:
# saved_transitions[index] = (self._prev_imgs[i], cur_imgs[i])
actions_already_sampled_in_ep.add(a)
self.action_to_num_seen[a] += 1
else:
actions_already_sampled_in_ep.clear()
self._prev_imgs = cur_imgs
def before_updates(self, **kwargs):
pass
def after_updates(self, **kwargs):
pass
def to(self, device: torch.device):
self.device = device
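# --- Illustrative sketch (not part of the original file) ---------------------
# A hypothetical `compute_action_logits_fn` for InverseDynamicsVDRLoss. It
# receives the model plus a batch of stored image pairs and must return one
# row of action logits per pair. The `visual_encoder` and
# `inverse_dynamics_head` attribute names are made up for illustration.
def example_compute_action_logits(
    model, img0: torch.Tensor, img1: torch.Tensor
) -> torch.Tensor:
    emb0 = model.visual_encoder(img0)
    emb1 = model.visual_encoder(img1)
    return model.inverse_dynamics_head(torch.cat([emb0, emb1], dim=-1))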
| allenact-main | allenact/embodiedai/storage/vdr_storage.py |
allenact-main | allenact/embodiedai/aux_losses/__init__.py |
|
# Original work Copyright (c) Facebook, Inc. and its affiliates.
# Modified work Copyright (c) Allen Institute for AI
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Defining the auxiliary loss for actor critic type models.
Several of the losses defined in this file are modified versions of those found in
https://github.com/joel99/habitat-pointnav-aux/blob/master/habitat_baselines/
"""
import abc
from typing import Dict, cast, Tuple, Sequence
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from allenact.algorithms.onpolicy_sync.losses.abstract_loss import (
AbstractActorCriticLoss,
ObservationType,
)
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import ActorCriticOutput
def _bernoulli_subsample_mask_like(masks, p=0.1):
return (torch.rand_like(masks) <= p).float()
class MultiAuxTaskNegEntropyLoss(AbstractActorCriticLoss):
"""Used in multiple auxiliary tasks setting.
Add a negative entropy loss over all the task weights.
"""
UUID = "multitask_entropy" # make sure this is unique
def __init__(self, task_names: Sequence[str], *args, **kwargs):
super().__init__(*args, **kwargs)
self.num_tasks = len(task_names)
self.task_names = task_names
def loss( # type: ignore
self,
step_count: int,
batch: ObservationType,
actor_critic_output: ActorCriticOutput[CategoricalDistr],
*args,
**kwargs,
) -> Tuple[torch.FloatTensor, Dict[str, float]]:
task_weights = actor_critic_output.extras[self.UUID]
task_weights = task_weights.view(-1, self.num_tasks)
entropy = CategoricalDistr(task_weights).entropy()
avg_loss = (-entropy).mean()
avg_task_weights = task_weights.mean(dim=0) # (K)
outputs = {"entropy_loss": cast(torch.Tensor, avg_loss).item()}
for i in range(self.num_tasks):
outputs["weight_" + self.task_names[i]] = cast(
torch.Tensor, avg_task_weights[i]
).item()
return (
avg_loss,
outputs,
)
class AuxiliaryLoss(AbstractActorCriticLoss):
"""Base class of auxiliary loss.
Any auxiliary task loss should inherit from it, and implement the
`get_aux_loss` function.
"""
def __init__(self, auxiliary_uuid: str, *args, **kwargs):
super().__init__(*args, **kwargs)
self.auxiliary_uuid = auxiliary_uuid
def loss( # type: ignore
self,
step_count: int,
batch: ObservationType,
actor_critic_output: ActorCriticOutput[CategoricalDistr],
*args,
**kwargs,
) -> Tuple[torch.Tensor, Dict[str, float]]:
# auxiliary loss
return self.get_aux_loss(
**actor_critic_output.extras[self.auxiliary_uuid],
observations=batch["observations"],
actions=batch["actions"],
masks=batch["masks"],
)
@abc.abstractmethod
def get_aux_loss(
self,
aux_model: nn.Module,
observations: ObservationType,
obs_embeds: torch.Tensor,
actions: torch.Tensor,
beliefs: torch.Tensor,
masks: torch.Tensor,
*args,
**kwargs,
):
raise NotImplementedError()
def _propagate_final_beliefs_to_all_steps(
beliefs: torch.Tensor, masks: torch.Tensor, num_sampler: int, num_steps: int,
):
final_beliefs = torch.zeros_like(beliefs) # (T, B, *)
start_locs_list = []
end_locs_list = []
for i in range(num_sampler):
        # shift right by one: a zero at masks[t + 1] means step t ended an episode; the first entry is ignored
end_locs = torch.where(masks[1:, i] == 0)[0] # maybe [], dtype=torch.Long
start_locs = torch.cat(
[torch.tensor([0]).to(end_locs), end_locs + 1]
) # add the first element
start_locs_list.append(start_locs)
end_locs = torch.cat(
[end_locs, torch.tensor([num_steps - 1]).to(end_locs)]
) # add the last element
end_locs_list.append(end_locs)
for st, ed in zip(start_locs, end_locs):
final_beliefs[st : ed + 1, i] = beliefs[ed, i]
return final_beliefs, start_locs_list, end_locs_list
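# --- Illustrative sketch (not part of the original file) ---------------------
# Worked example: with 5 steps, one sampler, and masks [1, 1, 0, 1, 1] (an
# episode ends after step 1 and a new one starts at step 2), steps 0-1 get
# the belief from step 1 and steps 2-4 get the belief from step 4.
def _example_propagate_final_beliefs() -> None:
    beliefs = torch.arange(5, dtype=torch.float).view(5, 1, 1)  # (T, B, 1)
    masks = torch.tensor([1.0, 1.0, 0.0, 1.0, 1.0]).view(5, 1)
    final, _, _ = _propagate_final_beliefs_to_all_steps(
        beliefs, masks, num_sampler=1, num_steps=5
    )
    assert final.view(-1).tolist() == [1.0, 1.0, 4.0, 4.0, 4.0]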
class InverseDynamicsLoss(AuxiliaryLoss):
"""Auxiliary task of Inverse Dynamics from Auxiliary Tasks Speed Up
Learning PointGoal Navigation (Ye, 2020) https://arxiv.org/abs/2007.04561
originally from Curiosity-driven Exploration by Self-supervised Prediction
(Pathak, 2017) https://arxiv.org/abs/1705.05363."""
UUID = "InvDyn"
def __init__(
self, subsample_rate: float = 0.2, subsample_min_num: int = 10, *args, **kwargs
):
"""Subsample the valid samples by the rate of `subsample_rate`, if the
total num of the valid samples is larger than `subsample_min_num`."""
super().__init__(auxiliary_uuid=self.UUID, *args, **kwargs)
self.cross_entropy_loss = nn.CrossEntropyLoss(reduction="none")
self.subsample_rate = subsample_rate
self.subsample_min_num = subsample_min_num
def get_aux_loss(
self,
aux_model: nn.Module,
observations: ObservationType,
obs_embeds: torch.FloatTensor,
actions: torch.FloatTensor,
beliefs: torch.FloatTensor,
masks: torch.FloatTensor,
*args,
**kwargs,
):
## we discard the last action in the batch
num_steps, num_sampler = actions.shape # T, B
actions = cast(torch.LongTensor, actions)
actions = actions[:-1] # (T-1, B)
## find the final belief state based on masks
        # the final beliefs are recovered from the precomputed `beliefs` tensor
        # rather than via extra model.forward calls, which are compute-heavy
masks = masks.squeeze(-1) # (T, B)
final_beliefs, _, _ = _propagate_final_beliefs_to_all_steps(
beliefs, masks, num_sampler, num_steps,
)
## compute CE loss
decoder_in = torch.cat(
[obs_embeds[:-1], obs_embeds[1:], final_beliefs[:-1]], dim=2
) # (T-1, B, *)
preds = aux_model(decoder_in) # (T-1, B, A)
        # cross entropy loss requires the class dim at position 1
loss = self.cross_entropy_loss(
preds.view((num_steps - 1) * num_sampler, -1), # ((T-1)*B, A)
actions.flatten(), # ((T-1)*B,)
)
loss = loss.view(num_steps - 1, num_sampler) # (T-1, B)
# def vanilla_valid_losses(loss, num_sampler, end_locs_batch):
# ## this is just used to verify the vectorized version works correctly.
# ## not used for experimentation
# valid_losses = []
# for i in range(num_sampler):
# end_locs = end_locs_batch[i]
# for j in range(len(end_locs)):
# if j == 0:
# start_loc = 0
# else:
# start_loc = end_locs[j - 1] + 1
# end_loc = end_locs[j]
        #             if end_loc - start_loc <= 0:  # the episode is only 1 step
# continue
# valid_losses.append(loss[start_loc:end_loc, i])
# if len(valid_losses) == 0:
# valid_losses = torch.zeros(1, dtype=torch.float).to(loss)
# else:
# valid_losses = torch.cat(valid_losses) # (sum m, )
# return valid_losses
# valid_losses = masks[1:] * loss # (T-1, B)
# valid_losses0 = vanilla_valid_losses(loss, num_sampler, end_locs_batch)
# assert valid_losses0.sum() == valid_losses.sum()
num_valid_losses = torch.count_nonzero(masks[1:])
if num_valid_losses < self.subsample_min_num: # don't subsample
subsample_rate = 1.0
else:
subsample_rate = self.subsample_rate
loss_masks = masks[1:] * _bernoulli_subsample_mask_like(
masks[1:], subsample_rate
)
num_valid_losses = torch.count_nonzero(loss_masks)
avg_loss = (loss * loss_masks).sum() / torch.clamp(num_valid_losses, min=1.0)
return (
avg_loss,
{"total": cast(torch.Tensor, avg_loss).item(),},
)
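# A minimal sketch (editor's addition) of the shape contract that
# `InverseDynamicsLoss.get_aux_loss` assumes of `aux_model`: it maps
# concatenated (obs_t, obs_{t+1}, belief_t) features to A action logits.
# The `nn.Linear` below is an illustrative stand-in, not the library's decoder.
def _demo_inverse_dynamics_decoder_contract():
    import torch
    import torch.nn as nn

    T, B, H_O, H_B, A = 5, 2, 8, 16, 4  # illustrative sizes
    aux_model = nn.Linear(2 * H_O + H_B, A)
    obs_embeds = torch.randn(T, B, H_O)
    final_beliefs = torch.randn(T, B, H_B)
    decoder_in = torch.cat(
        [obs_embeds[:-1], obs_embeds[1:], final_beliefs[:-1]], dim=2
    )  # (T-1, B, 2*H_O + H_B)
    preds = aux_model(decoder_in)  # (T-1, B, A) action logits
    assert preds.shape == (T - 1, B, A)
    return preds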
class TemporalDistanceLoss(AuxiliaryLoss):
"""Auxiliary task of Temporal Distance from Auxiliary Tasks Speed Up
Learning PointGoal Navigation (Ye, 2020)
https://arxiv.org/abs/2007.04561."""
UUID = "TempDist"
    def __init__(self, num_pairs: int = 8, episode_len_min: int = 5, *args, **kwargs):
super().__init__(auxiliary_uuid=self.UUID, *args, **kwargs)
self.num_pairs = num_pairs
        self.episode_len_min = float(episode_len_min)
def get_aux_loss(
self,
aux_model: nn.Module,
observations: ObservationType,
obs_embeds: torch.FloatTensor,
actions: torch.FloatTensor,
beliefs: torch.FloatTensor,
masks: torch.FloatTensor,
*args,
**kwargs,
):
## we discard the last action in the batch
num_steps, num_sampler = actions.shape # T, B
## find the final belief state based on masks
        # the final beliefs are recovered from the precomputed `beliefs` tensor
        # rather than via extra model.forward calls, which are compute-heavy
masks = masks.squeeze(-1) # (T, B)
(
final_beliefs,
start_locs_list,
end_locs_list,
) = _propagate_final_beliefs_to_all_steps(
beliefs, masks, num_sampler, num_steps,
)
## also find the locs_batch of shape (M, 3)
# the last dim: [0] is on num_sampler loc, [1] and [2] is start and end locs
# of one episode
# in other words: at locs_batch[m, 0] in num_sampler dim, there exists one episode
# starting from locs_batch[m, 1], ends at locs_batch[m, 2] (included)
locs_batch = []
for i in range(num_sampler):
locs_batch.append(
torch.stack(
[
i * torch.ones_like(start_locs_list[i]),
start_locs_list[i],
end_locs_list[i],
],
dim=-1,
)
) # shape (M[i], 3)
locs_batch = torch.cat(locs_batch) # shape (M, 3)
temporal_dist_max = (
locs_batch[:, 2] - locs_batch[:, 1]
).float() # end - start, (M)
        # create a normalizer that is 0 for too-short episodes, else 1/(episode length)
        normalizer = torch.where(
            temporal_dist_max > self.episode_len_min,
1.0 / temporal_dist_max,
torch.tensor([0]).to(temporal_dist_max),
) # (M)
# sample valid pairs: sampled_pairs shape (M, num_pairs, 3)
# where M is the num of total episodes in the batch
        locs = locs_batch.cpu().numpy()  # as torch.randint only supports int bounds, not tensors
sampled_pairs = np.random.randint(
np.repeat(locs[:, [1]], 2 * self.num_pairs, axis=-1), # (M, 2*k)
np.repeat(locs[:, [2]] + 1, 2 * self.num_pairs, axis=-1), # (M, 2*k)
).reshape(
(-1, self.num_pairs, 2)
) # (M, k, 2)
sampled_pairs_batch = torch.from_numpy(sampled_pairs).to(
locs_batch
) # (M, k, 2)
num_sampler_batch = locs_batch[:, [0]].expand(
-1, 2 * self.num_pairs
) # (M, 1) -> (M, 2*k)
num_sampler_batch = num_sampler_batch.reshape(
-1, self.num_pairs, 2
) # (M, k, 2)
sampled_obs_embeds = obs_embeds[
sampled_pairs_batch, num_sampler_batch
] # (M, k, 2, H1)
sampled_final_beliefs = final_beliefs[
sampled_pairs_batch, num_sampler_batch
] # (M, k, 2, H2)
features = torch.cat(
[
sampled_obs_embeds[:, :, 0],
sampled_obs_embeds[:, :, 1],
sampled_final_beliefs[:, :, 0],
],
dim=-1,
) # (M, k, 2*H1 + H2)
pred_temp_dist = aux_model(features).squeeze(-1) # (M, k)
true_temp_dist = (
sampled_pairs_batch[:, :, 1] - sampled_pairs_batch[:, :, 0]
).float() # (M, k)
pred_error = (pred_temp_dist - true_temp_dist) * normalizer.unsqueeze(1)
loss = 0.5 * (pred_error).pow(2)
avg_loss = loss.mean()
return (
avg_loss,
{"total": cast(torch.Tensor, avg_loss).item(),},
)
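# A small sketch (editor's addition) of the vectorized pair sampling used
# above: `np.random.randint` broadcasts array-valued bounds, so each episode
# (row) draws its indices from its own [start, end] range.
def _demo_per_episode_pair_sampling():
    import numpy as np

    np.random.seed(0)
    k = 3
    locs = np.array([[0, 0, 4], [1, 5, 9]])  # rows of (sampler, start, end)
    pairs = np.random.randint(
        np.repeat(locs[:, [1]], 2 * k, axis=-1),  # per-episode lower bounds
        np.repeat(locs[:, [2]] + 1, 2 * k, axis=-1),  # upper bounds (end inclusive)
    ).reshape(-1, k, 2)  # (M=2, k=3, 2)
    assert pairs[0].min() >= 0 and pairs[0].max() <= 4
    assert pairs[1].min() >= 5 and pairs[1].max() <= 9
    return pairs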
class CPCALoss(AuxiliaryLoss):
"""Auxiliary task of CPC|A from Auxiliary Tasks Speed Up Learning PointGoal
Navigation (Ye, 2020) https://arxiv.org/abs/2007.04561 originally from
Neural Predictive Belief Representations (Guo, 2018)
https://arxiv.org/abs/1811.06407."""
UUID = "CPCA"
def __init__(
self, planning_steps: int = 8, subsample_rate: float = 0.2, *args, **kwargs
):
super().__init__(auxiliary_uuid=self.UUID, *args, **kwargs)
self.planning_steps = planning_steps
self.subsample_rate = subsample_rate
self.cross_entropy_loss = nn.BCEWithLogitsLoss(reduction="none")
def get_aux_loss(
self,
aux_model: nn.Module,
observations: ObservationType,
obs_embeds: torch.Tensor,
actions: torch.Tensor,
beliefs: torch.Tensor,
masks: torch.Tensor,
*args,
**kwargs,
):
# prepare for autoregressive inputs: c_{t+1:t+k} = GRU(b_t, a_{t:t+k-1}) <-> z_{t+k}
## where b_t = RNN(b_{t-1}, z_t, a_{t-1}), prev action is optional
num_steps, num_sampler, obs_embed_size = obs_embeds.shape # T, N, H_O
assert 0 < self.planning_steps <= num_steps
## prepare positive and negatives that sample from all the batch
positives = obs_embeds # (T, N, -1)
negative_inds = torch.randperm(num_steps * num_sampler).to(positives.device)
negatives = torch.gather( # input[index[i,j]][j]
positives.view(num_steps * num_sampler, -1),
dim=0,
index=negative_inds.view(num_steps * num_sampler, 1).expand(
num_steps * num_sampler, positives.shape[-1]
),
).view(
num_steps, num_sampler, -1
) # (T, N, -1)
## prepare action sequences and initial beliefs
action_embedding = aux_model.action_embedder(actions) # (T, N, -1)
action_embed_size = action_embedding.size(-1)
action_padding = torch.zeros(
self.planning_steps - 1, num_sampler, action_embed_size
).to(
action_embedding
) # (k-1, N, -1)
action_padded = torch.cat(
(action_embedding, action_padding), dim=0
) # (T+k-1, N, -1)
## unfold function will create consecutive action sequences
action_seq = (
action_padded.unfold(dimension=0, size=self.planning_steps, step=1)
.permute(3, 0, 1, 2)
.view(self.planning_steps, num_steps * num_sampler, action_embed_size)
) # (k, T*N, -1)
## beliefs GRU output
beliefs = beliefs.view(num_steps * num_sampler, -1).unsqueeze(0) # (1, T*N, -1)
# get future contexts c_{t+1:t+k} = GRU(b_t, a_{t:t+k-1})
future_contexts_all, _ = aux_model.context_model(
action_seq, beliefs
) # (k, T*N, -1)
        ## NOTE: future_contexts_all covers steps t+1 through t+k, not t through t+k-1
        future_contexts_all = future_contexts_all.view(
            self.planning_steps, num_steps, num_sampler, -1
        ).permute(
            1, 0, 2, 3
        )  # (T, k, N, -1)
# get all the classifier scores I(c_{t+1:t+k}; z_{t+1:t+k})
positives_padding = torch.zeros(
self.planning_steps, num_sampler, obs_embed_size
).to(
positives
) # (k, N, -1)
positives_padded = torch.cat(
(positives[1:], positives_padding), dim=0
) # (T+k-1, N, -1)
positives_expanded = positives_padded.unfold(
dimension=0, size=self.planning_steps, step=1
).permute(
0, 3, 1, 2
) # (T, k, N, -1)
positives_logits = aux_model.classifier(
torch.cat([positives_expanded, future_contexts_all], -1)
) # (T, k, N, 1)
positive_loss = self.cross_entropy_loss(
positives_logits, torch.ones_like(positives_logits)
) # (T, k, N, 1)
negatives_padding = torch.zeros(
self.planning_steps, num_sampler, obs_embed_size
).to(
negatives
) # (k, N, -1)
negatives_padded = torch.cat(
(negatives[1:], negatives_padding), dim=0
) # (T+k-1, N, -1)
negatives_expanded = negatives_padded.unfold(
dimension=0, size=self.planning_steps, step=1
).permute(
0, 3, 1, 2
) # (T, k, N, -1)
negatives_logits = aux_model.classifier(
torch.cat([negatives_expanded, future_contexts_all], -1)
) # (T, k, N, 1)
negative_loss = self.cross_entropy_loss(
negatives_logits, torch.zeros_like(negatives_logits)
) # (T, k, N, 1)
# Masking to get valid scores
        ## masks: note which timesteps in [1, T+k] could have valid queries, at each distance k (note the offset by 1)
## we will extract the **diagonals** as valid_masks from masks later as below
## the vertical axis is (absolute) real timesteps, the horizontal axis is (relative) planning timesteps
## | - - - - - |
## | . |
## | , . |
## | . , . |
## | , . , . |
## | , . , . |
## | , . , |
## | , . |
## | , |
## | - - - - - |
masks = masks.squeeze(-1) # (T, N)
pred_masks = torch.ones(
num_steps + self.planning_steps,
self.planning_steps,
num_sampler,
1,
dtype=torch.bool,
).to(
beliefs.device
) # (T+k, k, N, 1)
pred_masks[
num_steps - 1 :
] = False # GRU(b_t, a_{t:t+k-1}) is invalid when t >= T, as we don't have real z_{t+1}
for j in range(1, self.planning_steps + 1): # for j-step predictions
            pred_masks[
                : j - 1, j - 1
            ] = False  # Remove the upper triangle above the diagonal (but I think this is unnecessary for valid_masks)
for n in range(num_sampler):
has_zeros_batch = torch.where(masks[:, n] == 0)[0]
# in j-step prediction, timesteps z -> z + j are disallowed as those are the first j timesteps of a new episode
# z-> z-1 because of pred_masks being offset by 1
for z in has_zeros_batch:
pred_masks[
z - 1 : z - 1 + j, j - 1, n
] = False # can affect j timesteps
# instead of the whole range, we actually are only comparing a window i:i+k for each query/target i - for each, select the appropriate k
# we essentially gather diagonals from this full mask, t of them, k long
valid_diagonals = [
torch.diagonal(pred_masks, offset=-i) for i in range(num_steps)
] # pull the appropriate k per timestep
valid_masks = (
torch.stack(valid_diagonals, dim=0).permute(0, 3, 1, 2).float()
) # (T, N, 1, k) -> (T, k, N, 1)
# print(valid_masks.int().squeeze(-1)); print(masks) # verify its correctness
loss_masks = valid_masks * _bernoulli_subsample_mask_like(
valid_masks, self.subsample_rate
) # (T, k, N, 1)
num_valid_losses = torch.count_nonzero(loss_masks)
avg_positive_loss = (positive_loss * loss_masks).sum() / torch.clamp(
num_valid_losses, min=1.0
)
avg_negative_loss = (negative_loss * loss_masks).sum() / torch.clamp(
num_valid_losses, min=1.0
)
avg_loss = avg_positive_loss + avg_negative_loss
return (
avg_loss,
{
"total": cast(torch.Tensor, avg_loss).item(),
"positive_loss": cast(torch.Tensor, avg_positive_loss).item(),
"negative_loss": cast(torch.Tensor, avg_negative_loss).item(),
},
)
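# A minimal sketch (editor's addition) of the `unfold` trick used above:
# padding a T-step sequence with k-1 zeros and unfolding along time yields,
# for every source step t, the window of the next k entries.
def _demo_unfold_action_windows():
    import torch

    T, N, H, k = 4, 1, 1, 3
    a = torch.arange(1.0, T + 1).view(T, N, H)  # "actions" 1..T
    padded = torch.cat([a, torch.zeros(k - 1, N, H)], dim=0)  # (T+k-1, N, H)
    seq = (
        padded.unfold(dimension=0, size=k, step=1)  # (T, N, H, k)
        .permute(3, 0, 1, 2)  # (k, T, N, H)
        .reshape(k, T * N, H)
    )
    # the window starting at t=0 is [1, 2, 3]; at t=T-1 it is [4, 0, 0] (padding)
    assert seq[:, 0, 0].tolist() == [1.0, 2.0, 3.0]
    assert seq[:, T - 1, 0].tolist() == [4.0, 0.0, 0.0]
    return seq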
class CPCASoftMaxLoss(AuxiliaryLoss):
"""Auxiliary task of CPC|A with multi class softmax."""
UUID = "cpcA_SOFTMAX"
def __init__(
self,
planning_steps: int = 8,
subsample_rate: float = 1,
allow_skipping: bool = True,
*args,
**kwargs,
):
super().__init__(auxiliary_uuid=self.UUID, *args, **kwargs)
self.planning_steps = planning_steps
self.subsample_rate = subsample_rate
self.cross_entropy_loss = nn.CrossEntropyLoss(
reduction="none"
) # nn.BCEWithLogitsLoss(reduction="none")
self.allow_skipping = allow_skipping
def get_aux_loss(
self,
aux_model: nn.Module,
observations: ObservationType,
obs_embeds: torch.Tensor,
actions: torch.Tensor,
beliefs: torch.Tensor,
masks: torch.Tensor,
*args,
**kwargs,
):
# prepare for autoregressive inputs: c_{t+1:t+k} = GRU(b_t, a_{t:t+k-1}) <-> z_{t+k}
## where b_t = RNN(b_{t-1}, z_t, a_{t-1}), prev action is optional
num_steps, num_samplers, obs_embed_size = obs_embeds.shape # T, N, H_O
        ## visual observations across all num_steps
if not (0 < self.planning_steps <= num_steps):
if self.allow_skipping:
return 0, {}
else:
raise RuntimeError(
f"Insufficient planning steps: self.planning_steps {self.planning_steps} must"
f" be greater than zero and less than or equal to num_steps {num_steps}."
)
## prepare action sequences and initial beliefs
action_embedding = aux_model.action_embedder(actions) # (T, N, -1)
action_embed_size = action_embedding.size(-1)
action_padding = torch.zeros(
self.planning_steps - 1,
num_samplers,
action_embed_size,
device=action_embedding.device,
) # (k-1, N, -1)
action_padded = torch.cat(
(action_embedding, action_padding), dim=0
) # (T+k-1, N, -1)
## unfold function will create consecutive action sequences
action_seq = (
action_padded.unfold(dimension=0, size=self.planning_steps, step=1)
.permute(3, 0, 1, 2)
.view(self.planning_steps, num_steps * num_samplers, action_embed_size)
) # (k, T*N, -1)
## beliefs GRU output
obs_embeds = aux_model.visual_mlp(obs_embeds) # (T, N, 128)
beliefs = beliefs.view(1, num_steps * num_samplers, -1) # (1, T*N, -1)
# get future contexts c_{t+1:t+k} = GRU(b_t, a_{t:t+k-1})
future_contexts_all, _ = aux_model.context_model(
action_seq, beliefs
) # (k, T*N, -1)
future_contexts_all = aux_model.belief_mlp(future_contexts_all) # (k, T*N, 128)
future_contexts_all = future_contexts_all.view(-1, 128) # (k*T*N, 128)
obs_embeds = obs_embeds.view(
num_steps * num_samplers, obs_embeds.shape[-1]
).permute(
1, 0
) # (-1, T*N)
visual_logits = torch.matmul(future_contexts_all, obs_embeds)
visual_log_probs = F.log_softmax(visual_logits, dim=1) ## (k*T*N, T*N)
target = torch.zeros(
(self.planning_steps, num_steps, num_samplers),
dtype=torch.long,
device=beliefs.device,
) # (k, T, N)
loss_mask = torch.zeros(
(self.planning_steps, num_steps, num_samplers), device=beliefs.device
) # (k, T, N)
num_valid_before = 0
for j in range(num_samplers):
for i in range(num_steps):
index = i * num_samplers + j
if i == 0 or masks[i, j].item() == 0:
num_valid_before = 0
continue
num_valid_before += 1
for back in range(min(num_valid_before, self.planning_steps)):
target[back, i - (back + 1), j] = index
loss_mask[back, i - (back + 1), j] = 1.0
target = target.view(-1) # (k*T*N,)
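        # note (editor's addition): `CrossEntropyLoss` applies log_softmax
        # internally; since log_softmax is idempotent, passing the
        # log-probabilities from above is redundant but not incorrect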
loss_value = self.cross_entropy_loss(visual_log_probs, target)
loss_value = loss_value.view(
self.planning_steps, num_steps, num_samplers, 1
) # (k, T, N, 1)
loss_mask = loss_mask.unsqueeze(-1) # (k, T, N, 1)
loss_valid_masks = loss_mask * _bernoulli_subsample_mask_like(
loss_mask, self.subsample_rate
) # (k, T, N, 1)
num_valid_losses = torch.count_nonzero(loss_valid_masks)
avg_multi_class_loss = (loss_value * loss_valid_masks).sum() / torch.clamp(
num_valid_losses, min=1.0
)
return (
avg_multi_class_loss,
{"total": cast(torch.Tensor, avg_multi_class_loss).item(),},
)
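# A tiny worked example (editor's addition) of the softmax-CPC target layout
# built above: with T=3, N=1, k=2 and no episode breaks, the positive class
# for a `back`-step prediction made at source step t is the flat index of
# step t + back + 1.
def _demo_softmax_cpc_targets():
    import torch

    k, T, N = 2, 3, 1
    masks = torch.ones(T, N, 1)
    target = torch.zeros(k, T, N, dtype=torch.long)
    loss_mask = torch.zeros(k, T, N)
    num_valid_before = 0
    for j in range(N):
        for i in range(T):
            index = i * N + j
            if i == 0 or masks[i, j].item() == 0:
                num_valid_before = 0
                continue
            num_valid_before += 1
            for back in range(min(num_valid_before, k)):
                target[back, i - (back + 1), j] = index
                loss_mask[back, i - (back + 1), j] = 1.0
    # 1-step predictions: step 0 targets step 1, step 1 targets step 2
    assert target[0, :, 0].tolist() == [1, 2, 0]
    assert loss_mask[0, :, 0].tolist() == [1.0, 1.0, 0.0]
    # 2-step predictions: only step 0 can target step 2
    assert target[1, :, 0].tolist() == [2, 0, 0]
    return target, loss_mask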
######## CPCA Softmax variants ######
class CPCA1SoftMaxLoss(CPCASoftMaxLoss):
UUID = "cpcA_SOFTMAX_1"
def __init__(self, subsample_rate: float = 1, *args, **kwargs):
super().__init__(
planning_steps=1, subsample_rate=subsample_rate, *args, **kwargs
)
class CPCA2SoftMaxLoss(CPCASoftMaxLoss):
UUID = "cpcA_SOFTMAX_2"
def __init__(self, subsample_rate: float = 1, *args, **kwargs):
super().__init__(
planning_steps=2, subsample_rate=subsample_rate, *args, **kwargs
)
class CPCA4SoftMaxLoss(CPCASoftMaxLoss):
UUID = "cpcA_SOFTMAX_4"
def __init__(self, subsample_rate: float = 1, *args, **kwargs):
super().__init__(
planning_steps=4, subsample_rate=subsample_rate, *args, **kwargs
)
class CPCA8SoftMaxLoss(CPCASoftMaxLoss):
UUID = "cpcA_SOFTMAX_8"
def __init__(self, subsample_rate: float = 1, *args, **kwargs):
super().__init__(
planning_steps=8, subsample_rate=subsample_rate, *args, **kwargs
)
class CPCA16SoftMaxLoss(CPCASoftMaxLoss):
UUID = "cpcA_SOFTMAX_16"
def __init__(self, subsample_rate: float = 1, *args, **kwargs):
super().__init__(
planning_steps=16, subsample_rate=subsample_rate, *args, **kwargs
)
###########
class CPCA1Loss(CPCALoss):
UUID = "CPCA_1"
def __init__(self, subsample_rate: float = 0.2, *args, **kwargs):
super().__init__(
planning_steps=1, subsample_rate=subsample_rate, *args, **kwargs
)
class CPCA2Loss(CPCALoss):
UUID = "CPCA_2"
def __init__(self, subsample_rate: float = 0.2, *args, **kwargs):
super().__init__(
planning_steps=2, subsample_rate=subsample_rate, *args, **kwargs
)
class CPCA4Loss(CPCALoss):
UUID = "CPCA_4"
def __init__(self, subsample_rate: float = 0.2, *args, **kwargs):
super().__init__(
planning_steps=4, subsample_rate=subsample_rate, *args, **kwargs
)
class CPCA8Loss(CPCALoss):
UUID = "CPCA_8"
def __init__(self, subsample_rate: float = 0.2, *args, **kwargs):
super().__init__(
planning_steps=8, subsample_rate=subsample_rate, *args, **kwargs
)
class CPCA16Loss(CPCALoss):
UUID = "CPCA_16"
def __init__(self, subsample_rate: float = 0.2, *args, **kwargs):
super().__init__(
planning_steps=16, subsample_rate=subsample_rate, *args, **kwargs
)
| allenact-main | allenact/embodiedai/aux_losses/losses.py |
"""Defines the `ExperimentConfig` abstract class used as the basis of all
experiments."""
import abc
from typing import Dict, Any, Optional, List, Union, Sequence, Tuple, cast
import torch
import torch.nn as nn
from allenact.base_abstractions.preprocessor import SensorPreprocessorGraph
from allenact.base_abstractions.task import TaskSampler
from allenact.utils.experiment_utils import TrainingPipeline, Builder
from allenact.utils.system import get_logger
from allenact.utils.viz_utils import VizSuite
def split_processes_onto_devices(nprocesses: int, ndevices: int):
assert (
nprocesses == 0 or nprocesses >= ndevices
), "NUM_PROCESSES {} < ndevices {}".format(nprocesses, ndevices)
res = [0] * ndevices
for it in range(nprocesses):
res[it % ndevices] += 1
return res
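# A quick usage sketch (editor's addition): processes are dealt round-robin
# onto devices, so any remainder lands on the lowest-indexed devices.
def _demo_split_processes_onto_devices():
    assert split_processes_onto_devices(nprocesses=5, ndevices=2) == [3, 2]
    assert split_processes_onto_devices(nprocesses=0, ndevices=3) == [0, 0, 0]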
class MachineParams(object):
def __init__(
self,
nprocesses: Union[int, Sequence[int]],
devices: Union[
None, int, str, torch.device, Sequence[Union[int, str, torch.device]]
] = None,
sensor_preprocessor_graph: Optional[
Union[SensorPreprocessorGraph, Builder[SensorPreprocessorGraph]]
] = None,
sampler_devices: Union[
None, int, str, torch.device, Sequence[Union[int, str, torch.device]]
] = None,
visualizer: Optional[Union[VizSuite, Builder[VizSuite]]] = None,
        gpu_ids: Optional[Union[int, Sequence[int]]] = None,
local_worker_ids: Optional[List[int]] = None,
):
assert (
gpu_ids is None or devices is None
), "only one of `gpu_ids` or `devices` should be set."
if gpu_ids is not None:
get_logger().warning(
"The `gpu_ids` parameter will be deprecated, use `devices` instead."
)
devices = gpu_ids
self.nprocesses = (
nprocesses if isinstance(nprocesses, Sequence) else (nprocesses,)
)
self.devices: Tuple[torch.device, ...] = self._standardize_devices(
devices=devices, nworkers=len(self.nprocesses)
)
self._sensor_preprocessor_graph_maybe_builder = sensor_preprocessor_graph
self.sampler_devices: Tuple[torch.device, ...] = (
None
if sampler_devices is None
else self._standardize_devices(
devices=sampler_devices, nworkers=len(self.nprocesses)
)
)
self._visualizer_maybe_builder = visualizer
self._sensor_preprocessor_graph_cached: Optional[SensorPreprocessorGraph] = None
self._visualizer_cached: Optional[VizSuite] = None
self.local_worker_ids: Optional[List[int]] = None
self.set_local_worker_ids(local_worker_ids)
def set_local_worker_ids(self, local_worker_ids: Optional[List[int]]):
self.local_worker_ids = local_worker_ids or list(range(len(self.devices)))
assert all(0 <= id < len(self.devices) for id in self.local_worker_ids), (
f"Passed {len(self.local_worker_ids)} local worker ids {self.local_worker_ids}"
f" for {len(self.devices)} total devices (workers)"
)
@classmethod
def instance_from(
cls, machine_params: Union["MachineParams", Dict[str, Any]]
) -> "MachineParams":
if isinstance(machine_params, cls):
return machine_params
assert isinstance(machine_params, Dict)
return cls(**machine_params)
@staticmethod
def _standardize_devices(
devices: Optional[
Union[int, str, torch.device, Sequence[Union[int, str, torch.device]]]
],
nworkers: int,
) -> Tuple[torch.device, ...]:
if devices is None or (isinstance(devices, Sequence) and len(devices) == 0):
devices = torch.device("cpu")
if not isinstance(devices, Sequence):
devices = (devices,) * nworkers
assert len(devices) == nworkers, (
f"The number of devices (len({devices})={len(devices)})"
f" must equal the number of workers ({nworkers})"
)
devices = tuple(
torch.device("cpu") if d == -1 else torch.device(d) for d in devices # type: ignore
)
for d in devices:
if d != torch.device("cpu"):
try:
torch.cuda.get_device_capability(d) # type: ignore
except Exception:
raise RuntimeError(
f"It appears the cuda device {d} is not available on your system."
)
return cast(Tuple[torch.device, ...], devices)
@property
def sensor_preprocessor_graph(self) -> Optional[SensorPreprocessorGraph]:
if self._sensor_preprocessor_graph_maybe_builder is None:
return None
if self._sensor_preprocessor_graph_cached is None:
if isinstance(self._sensor_preprocessor_graph_maybe_builder, Builder):
self._sensor_preprocessor_graph_cached = (
self._sensor_preprocessor_graph_maybe_builder()
)
else:
self._sensor_preprocessor_graph_cached = (
self._sensor_preprocessor_graph_maybe_builder
)
return self._sensor_preprocessor_graph_cached
def set_visualizer(self, viz: VizSuite):
if self._visualizer_cached is None:
self._visualizer_maybe_builder = viz
else:
get_logger().warning("Ignoring viz (already instantiated)")
@property
def visualizer(self) -> Optional[VizSuite]:
if self._visualizer_maybe_builder is None:
return None
if self._visualizer_cached is None:
if isinstance(self._visualizer_maybe_builder, Builder):
self._visualizer_cached = self._visualizer_maybe_builder()
else:
self._visualizer_cached = self._visualizer_maybe_builder
return self._visualizer_cached
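# A minimal usage sketch (editor's addition, CPU-only so it runs without CUDA):
# `MachineParams` broadcasts scalar arguments into per-worker tuples, and `-1`
# is normalized to the CPU device.
def _demo_machine_params():
    mp = MachineParams(nprocesses=2, devices=torch.device("cpu"))
    assert mp.nprocesses == (2,)
    assert mp.devices == (torch.device("cpu"),)
    assert mp.local_worker_ids == [0]
    assert MachineParams.instance_from({"nprocesses": 1, "devices": -1}).devices == (
        torch.device("cpu"),
    )
    return mp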
class FrozenClassVariables(abc.ABCMeta):
"""Metaclass for ExperimentConfig.
Ensures ExperimentConfig class-level attributes cannot be modified.
ExperimentConfig attributes can still be modified at the object
level.
"""
def __setattr__(cls, attr, value):
if isinstance(cls, type) and (
attr != "__abstractmethods__" and not attr.startswith("_abc_")
):
raise RuntimeError(
"Cannot edit class-level attributes.\n"
"Changing the values of class-level attributes is disabled in ExperimentConfig classes.\n"
"This is to prevent problems that can occur otherwise when using multiprocessing.\n"
"If you wish to change the value of a configuration, please do so for an instance of that"
" configuration.\nTriggered by attempting to modify {}".format(
cls.__name__
)
)
else:
super().__setattr__(attr, value)
class ExperimentConfig(metaclass=FrozenClassVariables):
"""Abstract class used to define experiments.
Instead of using yaml or text files, experiments in our framework
are defined as a class. In particular, to define an experiment one
must define a new class inheriting from this class which implements
all of the below methods. The below methods will then be called when
running the experiment.
"""
@abc.abstractmethod
def tag(self) -> str:
"""A string describing the experiment."""
raise NotImplementedError()
@abc.abstractmethod
def training_pipeline(self, **kwargs) -> TrainingPipeline:
"""Creates the training pipeline.
# Parameters
kwargs : Extra kwargs. Currently unused.
# Returns
        An instantiated `TrainingPipeline` object.
"""
raise NotImplementedError()
@abc.abstractmethod
def machine_params(
self, mode="train", **kwargs
) -> Union[MachineParams, Dict[str, Any]]:
"""Parameters used to specify machine information.
Machine information includes at least (1) the number of processes
        to train with and (2) the gpu device indices to use.
        # Parameters
        mode : Whether or not the machine parameters should be those for
"train", "valid", or "test".
kwargs : Extra kwargs.
# Returns
A dictionary of the form `{"nprocesses": ..., "gpu_ids": ..., ...}`.
Here `nprocesses` must be a non-negative integer, `gpu_ids` must
be a sequence of non-negative integers (if empty, then everything
will be run on the cpu).
"""
raise NotImplementedError()
@abc.abstractmethod
def create_model(self, **kwargs) -> nn.Module:
"""Create the neural model."""
raise NotImplementedError()
@abc.abstractmethod
def make_sampler_fn(self, **kwargs) -> TaskSampler:
"""Create the TaskSampler given keyword arguments.
These `kwargs` will be generated by one of
`ExperimentConfig.train_task_sampler_args`,
`ExperimentConfig.valid_task_sampler_args`, or
`ExperimentConfig.test_task_sampler_args` depending on whether
the user has chosen to train, validate, or test.
"""
raise NotImplementedError()
def train_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
"""Specifies the training parameters for the `process_ind`th training
process.
        These parameters are meant to be passed as keyword arguments to `ExperimentConfig.make_sampler_fn`
to generate a task sampler.
# Parameters
process_ind : The unique index of the training process (`0 ≤ process_ind < total_processes`).
total_processes : The total number of training processes.
devices : Gpu devices (if any) to use.
seeds : The seeds to use, if any.
deterministic_cudnn : Whether or not to use deterministic cudnn.
# Returns
The parameters for `make_sampler_fn`
"""
raise NotImplementedError()
def valid_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
"""Specifies the validation parameters for the `process_ind`th
validation process.
See `ExperimentConfig.train_task_sampler_args` for parameter
definitions.
"""
raise NotImplementedError()
def test_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
"""Specifies the test parameters for the `process_ind`th test process.
See `ExperimentConfig.train_task_sampler_args` for parameter
definitions.
"""
raise NotImplementedError()
| allenact-main | allenact/base_abstractions/experiment_config.py |
import abc
from typing import (
Dict,
Any,
TypeVar,
Sequence,
NamedTuple,
Optional,
List,
Union,
Generic,
)
import attr
import torch
EnvType = TypeVar("EnvType")
DistributionType = TypeVar("DistributionType")
ModelType = TypeVar("ModelType")
ObservationType = Dict[str, Union[torch.Tensor, Dict[str, Any]]]
class RLStepResult(NamedTuple):
observation: Optional[Any]
reward: Optional[Union[float, List[float]]]
done: Optional[bool]
info: Optional[Dict[str, Any]]
def clone(self, new_info: Dict[str, Any]):
return RLStepResult(
observation=self.observation
if "observation" not in new_info
else new_info["observation"],
reward=self.reward if "reward" not in new_info else new_info["reward"],
done=self.done if "done" not in new_info else new_info["done"],
info=self.info if "info" not in new_info else new_info["info"],
)
def merge(self, other: "RLStepResult"):
return RLStepResult(
observation=self.observation
if other.observation is None
else other.observation,
reward=self.reward if other.reward is None else other.reward,
done=self.done if other.done is None else other.done,
info={
**(self.info if self.info is not None else {}),
                **(other.info if other.info is not None else {}),
},
)
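# A small usage sketch (editor's addition): `clone` overrides only the fields
# named in `new_info`, while `merge` prefers the other result's non-None
# fields and unions the two `info` dicts.
def _demo_rl_step_result():
    a = RLStepResult(observation={"rgb": 0}, reward=1.0, done=False, info={"a": 1})
    b = a.clone({"reward": 2.0})
    assert b.reward == 2.0 and b.observation == {"rgb": 0}
    c = a.merge(RLStepResult(observation=None, reward=None, done=True, info={"b": 2}))
    assert c.reward == 1.0 and c.done and c.info == {"a": 1, "b": 2}
    return c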
class ActorCriticOutput(tuple, Generic[DistributionType]):
distributions: DistributionType
values: torch.FloatTensor
extras: Dict[str, Any]
# noinspection PyTypeChecker
def __new__(
cls,
distributions: DistributionType,
values: torch.FloatTensor,
extras: Dict[str, Any],
):
self = tuple.__new__(cls, (distributions, values, extras))
self.distributions = distributions
self.values = values
self.extras = extras
return self
def __repr__(self) -> str:
return (
f"Group(distributions={self.distributions},"
f" values={self.values},"
f" extras={self.extras})"
)
class Memory(Dict):
def __init__(self, *args, **kwargs):
super().__init__()
if len(args) > 0:
assert len(args) == 1, (
"Only one of Sequence[Tuple[str, Tuple[torch.Tensor, int]]]"
"or Dict[str, Tuple[torch.Tensor, int]] accepted as unnamed args"
)
if isinstance(args[0], Sequence):
for key, tensor_dim in args[0]:
assert (
len(tensor_dim) == 2
), "Only Tuple[torch.Tensor, int]] accepted as second item in Tuples"
tensor, dim = tensor_dim
self.check_append(key, tensor, dim)
elif isinstance(args[0], Dict):
for key in args[0]:
assert (
len(args[0][key]) == 2
), "Only Tuple[torch.Tensor, int]] accepted as values in Dict"
tensor, dim = args[0][key]
self.check_append(key, tensor, dim)
elif len(kwargs) > 0:
for key in kwargs:
assert (
len(kwargs[key]) == 2
), "Only Tuple[torch.Tensor, int]] accepted as keyword arg"
tensor, dim = kwargs[key]
self.check_append(key, tensor, dim)
def check_append(
self, key: str, tensor: torch.Tensor, sampler_dim: int
) -> "Memory":
"""Appends a new memory type given its identifier, its memory tensor
and its sampler dim.
# Parameters
key: string identifier of the memory type
tensor: memory tensor
sampler_dim: sampler dimension
# Returns
Updated Memory
"""
assert isinstance(key, str), "key {} must be str".format(key)
assert isinstance(
tensor, torch.Tensor
), "tensor {} must be torch.Tensor".format(tensor)
assert isinstance(sampler_dim, int), "sampler_dim {} must be int".format(
sampler_dim
)
assert key not in self, "Reused key {}".format(key)
assert (
0 <= sampler_dim < len(tensor.shape)
), "Got sampler_dim {} for tensor with shape {}".format(
sampler_dim, tensor.shape
)
self[key] = (tensor, sampler_dim)
return self
def tensor(self, key: str) -> torch.Tensor:
"""Returns the memory tensor for a given memory type.
# Parameters
key: string identifier of the memory type
# Returns
Memory tensor for type `key`
"""
assert key in self, "Missing key {}".format(key)
return self[key][0]
def sampler_dim(self, key: str) -> int:
"""Returns the sampler dimension for the given memory type.
# Parameters
key: string identifier of the memory type
# Returns
The sampler dim
"""
assert key in self, "Missing key {}".format(key)
return self[key][1]
def sampler_select(self, keep: Sequence[int]) -> "Memory":
"""Equivalent to PyTorch index_select along the `sampler_dim` of each
memory type.
# Parameters
keep: a list of sampler indices to keep
# Returns
Selected memory
"""
res = Memory()
valid = False
for name in self:
sampler_dim = self.sampler_dim(name)
tensor = self.tensor(name)
assert len(keep) == 0 or (
0 <= min(keep) and max(keep) < tensor.shape[sampler_dim]
), "Got min(keep)={} max(keep)={} for memory type {} with shape {}, dim {}".format(
min(keep), max(keep), name, tensor.shape, sampler_dim
)
if tensor.shape[sampler_dim] > len(keep):
tensor = tensor.index_select(
dim=sampler_dim,
index=torch.as_tensor(
list(keep), dtype=torch.int64, device=tensor.device
),
)
res.check_append(name, tensor, sampler_dim)
valid = True
if valid:
return res
return self
def set_tensor(self, key: str, tensor: torch.Tensor) -> "Memory":
"""Replaces tensor for given key with an updated version.
# Parameters
key: memory type identifier to update
tensor: updated tensor
# Returns
Updated memory
"""
assert key in self, "Missing key {}".format(key)
assert (
tensor.shape == self[key][0].shape
), "setting tensor with shape {} for former {}".format(
tensor.shape, self[key][0].shape
)
self[key] = (tensor, self[key][1])
return self
def step_select(self, step: int) -> "Memory":
"""Equivalent to slicing with length 1 for the `step` (i.e first)
dimension in rollouts storage.
# Parameters
step: step to keep
# Returns
Sliced memory with a single step
"""
res = Memory()
for key in self:
tensor = self.tensor(key)
assert (
tensor.shape[0] > step
), "attempting to access step {} for memory type {} of shape {}".format(
step, key, tensor.shape
)
if step != -1:
res.check_append(
key, self.tensor(key)[step : step + 1, ...], self.sampler_dim(key)
)
else:
res.check_append(
key, self.tensor(key)[step:, ...], self.sampler_dim(key)
)
return res
def step_squeeze(self, step: int) -> "Memory":
"""Equivalent to simple indexing for the `step` (i.e first) dimension
in rollouts storage.
# Parameters
step: step to keep
# Returns
Sliced memory with a single step (and squeezed step dimension)
"""
res = Memory()
for key in self:
tensor = self.tensor(key)
assert (
tensor.shape[0] > step
), "attempting to access step {} for memory type {} of shape {}".format(
step, key, tensor.shape
)
res.check_append(
key, self.tensor(key)[step, ...], self.sampler_dim(key) - 1
)
return res
def slice(
self,
dim: int,
start: Optional[int] = None,
stop: Optional[int] = None,
step: int = 1,
) -> "Memory":
"""Slicing for dimensions that have same extents in all memory types.
It also accepts negative indices.
# Parameters
dim: the dimension to slice
start: the index of the first item to keep if given (default 0 if None)
stop: the index of the first item to discard if given (default tensor size along `dim` if None)
step: the increment between consecutive indices (default 1)
# Returns
Sliced memory
"""
checked = False
total: Optional[int] = None
res = Memory()
for key in self:
tensor = self.tensor(key)
assert (
len(tensor.shape) > dim
), f"attempting to access dim {dim} for memory type {key} of shape {tensor.shape}"
if not checked:
total = tensor.shape[dim]
checked = True
assert (
total == tensor.shape[dim]
), f"attempting to slice along non-uniform dimension {dim}"
if start is not None or stop is not None or step != 1:
slice_tuple = (
(slice(None),) * dim
+ (slice(start, stop, step),)
+ (slice(None),) * (len(tensor.shape) - (1 + dim))
)
sliced_tensor = tensor[slice_tuple]
res.check_append(
key=key, tensor=sliced_tensor, sampler_dim=self.sampler_dim(key),
)
else:
res.check_append(
key, tensor, self.sampler_dim(key),
)
return res
def to(self, device: torch.device) -> "Memory":
for key in self:
tensor = self.tensor(key)
if tensor.device != device:
self.set_tensor(key, tensor.to(device))
return self
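# A brief usage sketch (editor's addition): each `Memory` entry pairs a tensor
# with the index of its sampler dimension, which `sampler_select` uses to keep
# a subset of samplers consistently across all entries.
def _demo_memory_sampler_select():
    mem = Memory()
    mem.check_append(
        "rnn_hidden", torch.zeros(1, 4, 8), sampler_dim=1
    )  # (layers, samplers, hidden)
    kept = mem.sampler_select([0, 2])
    assert kept.tensor("rnn_hidden").shape == (1, 2, 8)
    assert kept.sampler_dim("rnn_hidden") == 1
    return kept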
class Loss(abc.ABC):
pass
@attr.s(kw_only=True)
class LossOutput:
value: torch.Tensor = attr.ib()
info: Dict[str, Union[float, int]] = attr.ib()
per_epoch_info: Dict[str, Union[float, int]] = attr.ib()
batch_memory: Memory = attr.ib()
stream_memory: Memory = attr.ib()
bsize: int = attr.ib()
class GenericAbstractLoss(Loss):
# noinspection PyMethodOverriding
@abc.abstractmethod
def loss( # type: ignore
self,
*, # No positional arguments
model: ModelType,
batch: ObservationType,
batch_memory: Memory,
stream_memory: Memory,
) -> LossOutput:
"""Computes the loss.
Loss after processing a batch of data with (part of) a model (possibly with memory).
We support two different types of memory: `batch_memory` and `stream_memory` that can be
used to compute losses and share computation.
## `batch_memory`
During the update phase of training, the following
steps happen in order:
1. A `batch` of data is sampled from an `ExperienceStorage` (which stores data possibly collected during previous
rollout steps).
2. This `batch` is passed to each of the specified `GenericAbstractLoss`'s and is used, along with the `model`,
to compute each such loss.
3. The losses are summed together, gradients are computed by backpropagation, and an update step is taken.
        4. The process loops back to (1) with a new batch until the update phase is complete.
        Now suppose that the computation used by a `GenericAbstractLoss` (`LossA`) can be shared across multiple of the
`GenericAbstractLoss`'s (`LossB`, ...). For instance, `LossA` might run the visual encoder of `model` across
all the images contained in `batch` so that it can compute a classification loss while `LossB` would like to
run the same visual encoder on the same images to compute a depth-prediction loss. Without having some sort
of memory, you would need to rerun this visual encoder on all images multiple times, wasting computational
        resources. This is where `batch_memory` comes in: `LossA` can store the visual representations it computed
in `batch_memory` and then `LossB` can access them. Note that the `batch_memory` will be reinitialized after
each new `batch` is sampled.
## `stream_memory`
As described above, `batch_memory` treats each batch as its own independent collection of data. But what if
your `ExperienceStorage` samples its batches in a streaming fashion? E.g. your `ExperienceStorage`
might be a fixed collection of expert trajectories for use with imitation learning. In this case you can't
simply treat each batch independently: you might want to save information from one batch to use in another.
The simplest case of this would be if your agent `model` uses an RNN and produces a recurrent hidden state.
In this case, the hidden state from the end of one batch should be used at the start of computations for the
next batch. To allow for this, you can use the `stream_memory`. `stream_memory` is not cleared across
batches but, **importantly**, `stream_memory` is detached from the computation graph after each backpropagation
step so that the size of the computation graph does not grow unboundedly.
# Parameters
model: model to run on data batch (both assumed to be on the same device)
batch: data to use as input for model (already on the same device as model)
batch_memory: See above.
stream_memory: See above.
# Returns
        A `LossOutput` with:
        value: total loss
        info: additional information about the current loss
        per_epoch_info: additional information to record once per epoch
        batch_memory: `batch_memory` memory after processing current data batch, see above.
        stream_memory: `stream_memory` memory after processing current data batch, see above.
        bsize: batch size
"""
raise NotImplementedError()
| allenact-main | allenact/base_abstractions/misc.py |
# Original work Copyright (c) Facebook, Inc. and its affiliates.
# Modified work Copyright (c) Allen Institute for AI
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Defines the primary data structures by which agents interact with their
environment."""
import abc
from typing import Any, Dict, Generic, List, Optional, Sequence, Tuple, TypeVar, Union
import gym
import numpy as np
from gym.spaces.dict import Dict as SpaceDict
from allenact.base_abstractions.misc import RLStepResult
from allenact.base_abstractions.sensor import Sensor, SensorSuite
from allenact.utils.misc_utils import deprecated
EnvType = TypeVar("EnvType")
class Task(Generic[EnvType]):
"""An abstract class defining a, goal directed, 'task.' Agents interact
with their environment through a task by taking a `step` after which they
receive new observations, rewards, and (potentially) other useful
information.
A Task is a helpful generalization of the OpenAI gym's `Env` class
and allows for multiple tasks (e.g. point and object navigation) to
be defined on a single environment (e.g. AI2-THOR).
# Attributes
env : The environment.
sensor_suite: Collection of sensors formed from the `sensors` argument in the initializer.
task_info : Dictionary of (k, v) pairs defining task goals and other task information.
    max_steps : The maximum number of steps an agent can take in the task before it is considered failed.
observation_space: The observation space returned on each step from the sensors.
"""
env: EnvType
sensor_suite: SensorSuite[EnvType]
task_info: Dict[str, Any]
max_steps: int
observation_space: SpaceDict
def __init__(
self,
env: EnvType,
sensors: Union[SensorSuite, Sequence[Sensor]],
task_info: Dict[str, Any],
max_steps: int,
**kwargs
) -> None:
self.env = env
self.sensor_suite = (
SensorSuite(sensors) if not isinstance(sensors, SensorSuite) else sensors
)
self.task_info = task_info
self.max_steps = max_steps
self.observation_space = self.sensor_suite.observation_spaces
self._num_steps_taken = 0
self._total_reward: Union[float, List[float]] = 0.0
def get_observations(self, **kwargs) -> Any:
return self.sensor_suite.get_observations(env=self.env, task=self, **kwargs)
@property
@abc.abstractmethod
def action_space(self) -> gym.Space:
"""Task's action space.
# Returns
The action space for the task.
"""
raise NotImplementedError()
@abc.abstractmethod
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
"""Render the current task state.
Rendered task state can come in any supported modes.
# Parameters
mode : The mode in which to render. For example, you might have a 'rgb'
mode that renders the agent's egocentric viewpoint or a 'dev' mode
returning additional information.
args : Extra args.
kwargs : Extra kwargs.
# Returns
        A numpy array corresponding to the requested render.
"""
raise NotImplementedError()
def _increment_num_steps_taken(self) -> None:
"""Helper function that increases the number of steps counter by
one."""
self._num_steps_taken += 1
def step(self, action: Any) -> RLStepResult:
"""Take an action in the environment (one per agent).
Takes the action in the environment and returns
observations (& rewards and any additional information)
corresponding to the agent's new state. Note that this function
should not be overwritten without care (instead
implement the `_step` function).
# Parameters
action : The action to take, should be of the same form as specified by `self.action_space`.
# Returns
A `RLStepResult` object encoding the new observations, reward, and
(possibly) additional information.
"""
assert not self.is_done()
sr = self._step(action=action)
# If reward is Sequence, it's assumed to follow the same order imposed by spaces' flatten operation
if isinstance(sr.reward, Sequence):
if isinstance(self._total_reward, Sequence):
for it, rew in enumerate(sr.reward):
self._total_reward[it] += float(rew)
else:
self._total_reward = [float(r) for r in sr.reward]
else:
self._total_reward += float(sr.reward) # type:ignore
self._increment_num_steps_taken()
# TODO: We need a better solution to the below. It's not a good idea
# to pre-increment the step counter as this might play poorly with `_step`
# if it relies on some aspect of the current number of steps taken.
return sr.clone({"done": sr.done or self.is_done()})
@abc.abstractmethod
def _step(self, action: Any) -> RLStepResult:
"""Helper function called by `step` to take a step by each agent in the
environment.
Takes the action in the environment and returns
observations (& rewards and any additional information)
corresponding to the agent's new state. This function is called
by the (public) `step` function and is what should be implemented
        when defining your new task. Having `_step` be separate from `step`
        is useful as this allows the `step` method to perform bookkeeping (e.g.
        keeping track of the number of steps) in one place; without a separate
        `_step` method, everyone implementing `step` would need to copy this bookkeeping code.
# Parameters
action : The action to take.
# Returns
A `RLStepResult` object encoding the new observations, reward, and
(possibly) additional information.
"""
raise NotImplementedError()
def reached_max_steps(self) -> bool:
"""Has the agent reached the maximum number of steps."""
return self.num_steps_taken() >= self.max_steps
@abc.abstractmethod
def reached_terminal_state(self) -> bool:
"""Has the agent reached a terminal state (excluding reaching the
maximum number of steps)."""
raise NotImplementedError()
def is_done(self) -> bool:
"""Did the agent reach a terminal state or performed the maximum number
of steps."""
return self.reached_terminal_state() or self.reached_max_steps()
def num_steps_taken(self) -> int:
"""Number of steps taken by the agent in the task so far."""
return self._num_steps_taken
@deprecated
def action_names(self) -> Tuple[str, ...]:
"""Action names of the Task instance.
This function has been deprecated and will be removed.
This function is a hold-over from when the `Task`
        abstraction only considered `gym.spaces.Discrete` action spaces (in which
        case it makes sense to name these actions).
This implementation of `action_names` requires that a `class_action_names`
method has been defined. This method should be overwritten if `class_action_names`
requires key word arguments to determine the number of actions.
"""
if hasattr(self, "class_action_names"):
return self.class_action_names()
else:
raise NotImplementedError(
"`action_names` requires that a function `class_action_names` be defined."
" This said, please do not use this functionality as it has been deprecated and will be removed."
" If you would like an `action_names` function for your task, feel free to define one"
" with the knowledge that the AllenAct internals will ignore it."
)
@abc.abstractmethod
def close(self) -> None:
"""Closes the environment and any other files opened by the Task (if
applicable)."""
raise NotImplementedError()
def metrics(self) -> Dict[str, Any]:
"""Computes metrics related to the task after the task's completion.
By default this function is automatically called during training
and the reported metrics logged to tensorboard.
# Returns
A dictionary where every key is a string (the metric's
name) and the value is the value of the metric.
"""
return {
"ep_length": self.num_steps_taken(),
"reward": self.cumulative_reward,
"task_info": self.task_info,
}
def query_expert(self, **kwargs) -> Tuple[Any, bool]:
"""(Deprecated) Query the expert policy for this task.
The new correct way to include this functionality is through the definition of a class
derived from `allenact.base_abstractions.sensor.AbstractExpertActionSensor` or
`allenact.base_abstractions.sensor.AbstractExpertPolicySensor`, where a
`query_expert` method must be defined.
# Returns
A tuple (x, y) where x is the expert action (or policy) and y is False \
if the expert could not determine the optimal action (otherwise True). Here y \
is used for masking. Even when y is False, x should still lie in the space of \
possible values (e.g. if x is the expert policy then x should be the correct length, \
sum to 1, and have non-negative entries).
"""
return None, False
@property
def cumulative_reward(self) -> float:
"""Mean per-agent total cumulative in the task so far.
# Returns
Mean per-agent cumulative reward as a float.
"""
return (
np.mean(self._total_reward).item()
if isinstance(self._total_reward, Sequence)
else self._total_reward
)
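# A minimal concrete `Task` sketch (editor's addition, illustrative only): a
# toy single-action task over a dummy `None` environment that terminates after
# two steps, showing how `step` wraps `_step` with reward accumulation and
# step counting.
class _ToyTask(Task[None]):
    @property
    def action_space(self) -> gym.Space:
        return gym.spaces.Discrete(1)

    def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
        return np.zeros((1, 1, 3), dtype=np.uint8)

    def _step(self, action: Any) -> RLStepResult:
        done = self.num_steps_taken() + 1 >= 2  # terminal after two steps
        return RLStepResult(observation={}, reward=1.0, done=done, info=None)

    def reached_terminal_state(self) -> bool:
        return self.num_steps_taken() >= 2

    def close(self) -> None:
        pass


def _demo_toy_task():
    task = _ToyTask(env=None, sensors=[], task_info={}, max_steps=10)
    first = task.step(action=0)
    assert task.num_steps_taken() == 1 and not first.done
    second = task.step(action=0)
    assert second.done and task.cumulative_reward == 2.0
    return task.metrics()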
SubTaskType = TypeVar("SubTaskType", bound=Task)
class TaskSampler(abc.ABC):
"""Abstract class defining a how new tasks are sampled."""
@property
@abc.abstractmethod
def length(self) -> Union[int, float]:
"""Length.
# Returns
Number of total tasks remaining that can be sampled. Can be
float('inf').
"""
raise NotImplementedError()
@property
@abc.abstractmethod
def last_sampled_task(self) -> Optional[Task]:
"""Get the most recently sampled Task.
# Returns
The most recently sampled Task.
"""
raise NotImplementedError()
@abc.abstractmethod
def next_task(self, force_advance_scene: bool = False) -> Optional[Task]:
"""Get the next task in the sampler's stream.
# Parameters
force_advance_scene : Used to (if applicable) force the task sampler to
use a new scene for the next task. This is useful if, during training,
you would like to train with one scene for some number of steps and
then explicitly control when you begin training with the next scene.
# Returns
The next Task in the sampler's stream if a next task exists. Otherwise None.
"""
raise NotImplementedError()
@abc.abstractmethod
def close(self) -> None:
"""Closes any open environments or streams.
Should be run when done sampling.
"""
raise NotImplementedError()
@property
@abc.abstractmethod
def all_observation_spaces_equal(self) -> bool:
"""Checks if all observation spaces of tasks that can be sampled are
equal.
This will almost always simply return `True`. A case in which it should
return `False` includes, for example, a setting where you design
a `TaskSampler` that can generate different types of tasks, i.e.
point navigation tasks and object navigation tasks. In this case, these
different tasks may output different types of observations.
# Returns
True if all Tasks that can be sampled by this sampler have the
same observation space. Otherwise False.
"""
raise NotImplementedError()
@abc.abstractmethod
def reset(self) -> None:
"""Resets task sampler to its original state (except for any seed)."""
raise NotImplementedError()
@abc.abstractmethod
def set_seed(self, seed: int) -> None:
"""Sets new RNG seed.
# Parameters
seed : New seed.
"""
raise NotImplementedError()
| allenact-main | allenact/base_abstractions/task.py |
# Original work Copyright (c) Facebook, Inc. and its affiliates.
# Modified work Copyright (c) Allen Institute for AI
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
from typing import (
Generic,
Dict,
Any,
Optional,
TYPE_CHECKING,
TypeVar,
Sequence,
Union,
Tuple,
cast,
)
import abc
import gym
import gym.spaces as gyms
import numpy as np
from torch.distributions.utils import lazy_property
from allenact.base_abstractions.misc import EnvType
from allenact.utils import spaces_utils as su
from allenact.utils.misc_utils import prepare_locals_for_super
from allenact.utils.system import get_logger
if TYPE_CHECKING:
from allenact.base_abstractions.task import SubTaskType
else:
SubTaskType = TypeVar("SubTaskType", bound="Task")
SpaceDict = gyms.Dict
class Sensor(Generic[EnvType, SubTaskType]):
"""Represents a sensor that provides data from the environment to agent.
    The user of this class needs to implement the `get_observation` method and
    is also required to set the below attributes:
# Attributes
uuid : universally unique id.
observation_space : ``gym.Space`` object corresponding to observation of
sensor.
"""
uuid: str
observation_space: gym.Space
def __init__(self, uuid: str, observation_space: gym.Space, **kwargs: Any) -> None:
self.uuid = uuid
self.observation_space = observation_space
def get_observation(
self, env: EnvType, task: Optional[SubTaskType], *args: Any, **kwargs: Any
) -> Any:
"""Returns observations from the environment (or task).
# Parameters
env : The environment the sensor is used upon.
task : (Optionally) a Task from which the sensor should get data.
# Returns
Current observation for Sensor.
"""
raise NotImplementedError()
class SensorSuite(Generic[EnvType]):
"""Represents a set of sensors, with each sensor being identified through a
unique id.
# Attributes
sensors: list containing sensors for the environment, uuid of each
sensor must be unique.
"""
sensors: Dict[str, Sensor[EnvType, Any]]
observation_spaces: gyms.Dict
def __init__(self, sensors: Sequence[Sensor]) -> None:
"""Initializer.
# Parameters
        sensors : The sensors that will be included in the suite.
"""
self.sensors = OrderedDict()
spaces: OrderedDict[str, gym.Space] = OrderedDict()
for sensor in sensors:
assert (
sensor.uuid not in self.sensors
), "'{}' is duplicated sensor uuid".format(sensor.uuid)
self.sensors[sensor.uuid] = sensor
spaces[sensor.uuid] = sensor.observation_space
self.observation_spaces = SpaceDict(spaces=spaces)
def get(self, uuid: str) -> Sensor:
"""Return sensor with the given `uuid`.
# Parameters
uuid : The unique id of the sensor
# Returns
The sensor with unique id `uuid`.
"""
return self.sensors[uuid]
def get_observations(
self, env: EnvType, task: Optional[SubTaskType], **kwargs: Any
) -> Dict[str, Any]:
"""Get all observations corresponding to the sensors in the suite.
# Parameters
env : The environment from which to get the observation.
task : (Optionally) the task from which to get the observation.
# Returns
Data from all sensors packaged inside a Dict.
"""
return {
uuid: sensor.get_observation(env=env, task=task, **kwargs) # type: ignore
for uuid, sensor in self.sensors.items()
}
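# A minimal usage sketch (editor's addition): a constant-valued sensor wired
# into a `SensorSuite`, showing the uuid-keyed observation dict it produces.
def _demo_sensor_suite():
    class _ConstSensor(Sensor[Any, Any]):
        def __init__(self) -> None:
            super().__init__(
                uuid="const",
                observation_space=gym.spaces.Box(low=0.0, high=1.0, shape=(1,)),
            )

        def get_observation(self, env, task, *args, **kwargs):
            return np.array([0.5], dtype=np.float32)

    suite = SensorSuite([_ConstSensor()])
    obs = suite.get_observations(env=None, task=None)
    assert set(obs) == {"const"} and float(obs["const"][0]) == 0.5
    return obs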
class AbstractExpertSensor(Sensor[EnvType, SubTaskType], abc.ABC):
"""Base class for sensors that obtain the expert action for a given task
(if available)."""
ACTION_POLICY_LABEL: str = "action_or_policy"
EXPERT_SUCCESS_LABEL: str = "expert_success"
_NO_GROUPS_LABEL: str = "__dummy_expert_group__"
def __init__(
self,
action_space: Optional[Union[gym.Space, int]] = None,
uuid: str = "expert_sensor_type_uuid",
expert_args: Optional[Dict[str, Any]] = None,
nactions: Optional[int] = None,
use_dict_as_groups: bool = True,
**kwargs: Any,
) -> None:
"""Initialize an `ExpertSensor`.
# Parameters
action_space : The action space of the agent. This is necessary in order for this sensor
to know what its output observation space is.
uuid : A string specifying the unique ID of this sensor.
expert_args : This sensor obtains an expert action from the task by calling the `query_expert`
method of the task. `expert_args` are any keyword arguments that should be passed to the
`query_expert` method when called.
nactions : [DEPRECATED] The number of actions available to the agent, corresponds to an `action_space`
of `gym.spaces.Discrete(nactions)`.
use_dict_as_groups : Whether to use the top-level action_space of type `gym.spaces.Dict` as action groups.
"""
if isinstance(action_space, int):
action_space = gym.spaces.Discrete(action_space)
elif action_space is None:
assert (
nactions is not None
), "One of `action_space` or `nactions` must be not `None`."
get_logger().warning(
"The `nactions` parameter to `AbstractExpertSensor` is deprecated and will be removed, please use"
" the `action_space` parameter instead."
)
action_space = gym.spaces.Discrete(nactions)
self.action_space = action_space
self.use_groups = (
isinstance(action_space, gym.spaces.Dict) and use_dict_as_groups
)
self.group_spaces = (
self.action_space
if self.use_groups
else OrderedDict([(self._NO_GROUPS_LABEL, self.action_space,)])
)
self.expert_args: Dict[str, Any] = expert_args or {}
assert (
"expert_sensor_group_name" not in self.expert_args
), "`expert_sensor_group_name` is reserved for `AbstractExpertSensor`"
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
@classmethod
@abc.abstractmethod
def flagged_group_space(cls, group_space: gym.spaces.Space) -> gym.spaces.Dict:
"""gym space resulting from wrapping the given action space (or a
derived space, as in `AbstractExpertPolicySensor`) together with a
binary action space corresponding to an expert success flag, in a Dict
space.
# Parameters
group_space : The source action space to be (optionally used to derive a policy space,) flagged and wrapped
"""
raise NotImplementedError
@classmethod
def flagged_space(
cls, action_space: gym.spaces.Space, use_dict_as_groups: bool = True
) -> gym.spaces.Dict:
"""gym space resulting from wrapping the given action space (or every
        highest-level entry in a Dict action space), together with a binary
        action space corresponding to an expert success flag, in a Dict space.
# Parameters
action_space : The agent's action space (to be flagged and wrapped)
use_dict_as_groups : Flag enabling every highest-level entry in a Dict action space to be independently flagged.
"""
use_groups = isinstance(action_space, gym.spaces.Dict) and use_dict_as_groups
if not use_groups:
return cls.flagged_group_space(action_space)
else:
return gym.spaces.Dict(
[
(group_space, cls.flagged_group_space(action_space[group_space]),)
for group_space in cast(gym.spaces.Dict, action_space)
]
)
def _get_observation_space(self) -> gym.spaces.Dict:
"""The observation space of the expert sensor.
For the most basic discrete agent's ExpertActionSensor, it will
equal `gym.spaces.Dict([ (self.ACTION_POLICY_LABEL,
self.action_space), (self.EXPERT_SUCCESS_LABEL,
gym.spaces.Discrete(2))])`, where the first entry hosts the
expert action index and the second equals 0 if and only if the
expert failed to generate a true expert action.
"""
return self.flagged_space(self.action_space, use_dict_as_groups=self.use_groups)
@lazy_property
def _zeroed_observation(self) -> Union[OrderedDict, Tuple]:
# AllenAct-style flattened space (to easily generate an all-zeroes action as an array)
flat_space = su.flatten_space(self.observation_space)
        # convert to torch (`torch_point`) so `Discrete` entries unflatten correctly for the zeroed output
flat_zeroed = su.torch_point(flat_space, np.zeros_like(flat_space.sample()))
# unflatten zeroed output and convert to numpy
return su.numpy_point(
self.observation_space, su.unflatten(self.observation_space, flat_zeroed)
)
def flatten_output(self, unflattened):
return (
su.flatten(
self.observation_space,
su.torch_point(self.observation_space, unflattened),
)
.cpu()
.numpy()
)
@abc.abstractmethod
def query_expert(
self, task: SubTaskType, expert_sensor_group_name: Optional[str],
) -> Tuple[Any, bool]:
"""Query the expert for the given task (and optional group name).
# Returns
A tuple (x, y) where x is the expert action or policy and y is False \
if the expert could not determine the optimal action (otherwise True). Here y \
is used for masking. Even when y is False, x should still lie in the space of \
possible values (e.g. if x is the expert policy then x should be the correct length, \
sum to 1, and have non-negative entries).
"""
raise NotImplementedError
def get_observation(
self, env: EnvType, task: SubTaskType, *args: Any, **kwargs: Any
) -> Union[OrderedDict, Tuple]:
# If the task is completed, we needn't (perhaps can't) find the expert
# action from the (current) terminal state.
if task.is_done():
return self.flatten_output(self._zeroed_observation)
actions_or_policies = OrderedDict()
for group_name in self.group_spaces:
action_or_policy, expert_was_successful = self.query_expert(
task=task, expert_sensor_group_name=group_name
)
actions_or_policies[group_name] = OrderedDict(
[
(self.ACTION_POLICY_LABEL, action_or_policy),
(self.EXPERT_SUCCESS_LABEL, expert_was_successful),
]
)
return self.flatten_output(
actions_or_policies
if self.use_groups
else actions_or_policies[self._NO_GROUPS_LABEL]
)
class AbstractExpertActionSensor(AbstractExpertSensor, abc.ABC):
def __init__(
self,
action_space: Optional[Union[gym.Space, int]] = None,
uuid: str = "expert_action",
expert_args: Optional[Dict[str, Any]] = None,
nactions: Optional[int] = None,
use_dict_as_groups: bool = True,
**kwargs: Any,
) -> None:
super().__init__(**prepare_locals_for_super(locals()))
@classmethod
def flagged_group_space(cls, group_space: gym.spaces.Space) -> gym.spaces.Dict:
"""gym space resulting from wrapping the given action space, together
with a binary action space corresponding to an expert success flag, in
a Dict space.
# Parameters
group_space : The action space to be flagged and wrapped
"""
return gym.spaces.Dict(
[
(cls.ACTION_POLICY_LABEL, group_space),
(cls.EXPERT_SUCCESS_LABEL, gym.spaces.Discrete(2)),
]
)
class ExpertActionSensor(AbstractExpertActionSensor):
"""(Deprecated) A sensor that obtains the expert action from a given task
(if available)."""
def query_expert(
self, task: SubTaskType, expert_sensor_group_name: Optional[str]
) -> Tuple[Any, bool]:
return task.query_expert(
**self.expert_args, expert_sensor_group_name=expert_sensor_group_name
)
class AbstractExpertPolicySensor(AbstractExpertSensor, abc.ABC):
def __init__(
self,
action_space: Optional[Union[gym.Space, int]] = None,
uuid: str = "expert_policy",
expert_args: Optional[Dict[str, Any]] = None,
nactions: Optional[int] = None,
use_dict_as_groups: bool = True,
**kwargs: Any,
) -> None:
super().__init__(**prepare_locals_for_super(locals()))
@classmethod
def flagged_group_space(cls, group_space: gym.spaces.Space) -> gym.spaces.Dict:
"""gym space resulting from wrapping the policy space corresponding to
`allenact.utils.spaces_utils.policy_space(group_space)` together with a
binary action space corresponding to an expert success flag, in a Dict
space.
# Parameters
group_space : The source action space to be used to derive a policy space, flagged and wrapped
"""
return gym.spaces.Dict(
[
(cls.ACTION_POLICY_LABEL, su.policy_space(group_space)),
(cls.EXPERT_SUCCESS_LABEL, gym.spaces.Discrete(2)),
]
)
class ExpertPolicySensor(AbstractExpertPolicySensor):
"""(Deprecated) A sensor that obtains the expert policy from a given task
(if available)."""
def query_expert(
self, task: SubTaskType, expert_sensor_group_name: Optional[str]
) -> Tuple[Any, bool]:
return task.query_expert(
**self.expert_args, expert_sensor_group_name=expert_sensor_group_name
)
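# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library API): how `flagged_space` wraps
# an agent's action space together with a binary expert-success flag. The
# 5-action `Discrete` space below is a hypothetical example.
def _expert_sensor_space_sketch() -> None:
    space = ExpertActionSensor.flagged_space(gym.spaces.Discrete(5))
    # `space` is a Dict space with two entries:
    #   space[ExpertActionSensor.ACTION_POLICY_LABEL]  -> Discrete(5)
    #   space[ExpertActionSensor.EXPERT_SUCCESS_LABEL] -> Discrete(2)
    assert isinstance(space, gym.spaces.Dict)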
| allenact-main | allenact/base_abstractions/sensor.py |
allenact-main | allenact/base_abstractions/__init__.py |
|
import abc
from typing import List, Any, Dict
from typing import Sequence
from typing import Union
import gym
import networkx as nx
import torch
from gym.spaces import Dict as SpaceDict
from allenact.utils.experiment_utils import Builder
class Preprocessor(abc.ABC):
"""Represents a preprocessor that transforms data from a sensor or another
preprocessor to the input of agents or other preprocessors. The user of
this class needs to implement the `process` method and is also
required to set the attributes below:
# Attributes:
input_uuids : List of input universally unique ids.
uuid : Universally unique id.
observation_space : ``gym.Space`` object corresponding to processed observation spaces.
"""
input_uuids: List[str]
uuid: str
observation_space: gym.Space
def __init__(
self,
input_uuids: List[str],
output_uuid: str,
observation_space: gym.Space,
**kwargs: Any
) -> None:
self.uuid = output_uuid
self.input_uuids = input_uuids
self.observation_space = observation_space
@abc.abstractmethod
def process(self, obs: Dict[str, Any], *args: Any, **kwargs: Any) -> Any:
"""Returns processed observations from sensors or other preprocessors.
# Parameters
obs : Dict with available observations and processed observations.
# Returns
Processed observation.
"""
raise NotImplementedError()
@abc.abstractmethod
def to(self, device: torch.device) -> "Preprocessor":
raise NotImplementedError()
class SensorPreprocessorGraph:
"""Represents a graph of preprocessors, with each preprocessor being
identified through a universally unique id.
Allows for the construction of observations that are a function of
sensor readings. For instance, perhaps rather than giving your agent
a raw RGB image, you'd rather first pass that image through a pre-trained
convolutional network and only give your agent the resulting features
(see e.g. the `ResNetPreprocessor` class).
# Attributes
preprocessors : List containing preprocessors with required input uuids; the output uuid of each
preprocessor must be unique.
observation_spaces: The observation spaces of the values returned when calling `get_observations`.
By default (see the `additional_output_uuids` parameter to change this default) the observations
returned by the `SensorPreprocessorGraph` **include only the sink nodes** of the graph (i.e.
those that are not used by any other preprocessor).
Thus if one of the input preprocessors takes as input the `'YOUR_SENSOR_UUID'` sensor, then
`'YOUR_SENSOR_UUID'` will not be returned when calling `get_observations`.
device: The `torch.device` upon which the preprocessors are run.
"""
preprocessors: Dict[str, Preprocessor]
observation_spaces: SpaceDict
device: torch.device
def __init__(
self,
source_observation_spaces: SpaceDict,
preprocessors: Sequence[Union[Preprocessor, Builder[Preprocessor]]],
additional_output_uuids: Sequence[str] = tuple(),
) -> None:
"""Initializer.
# Parameters
source_observation_spaces : The observation spaces of all sensors before preprocessing.
This generally should be the output of `SensorSuite.observation_spaces`.
preprocessors : The preprocessors that will be included in the graph.
additional_output_uuids: As described in the documentation for this class, the observations
returned when calling `get_observations` only include, by default, those observations
that are not processed by any preprocessor. If you'd like to include observations that
would otherwise not be included, the uuids of these sensors should be included as
a sequence of strings here.
"""
self.device: torch.device = torch.device("cpu")
obs_spaces: Dict[str, gym.Space] = {
k: source_observation_spaces[k] for k in source_observation_spaces
}
self.preprocessors: Dict[str, Preprocessor] = {}
for preprocessor in preprocessors:
if isinstance(preprocessor, Builder):
preprocessor = preprocessor()
assert (
preprocessor.uuid not in self.preprocessors
), "'{}' is duplicated preprocessor uuid".format(preprocessor.uuid)
self.preprocessors[preprocessor.uuid] = preprocessor
obs_spaces[preprocessor.uuid] = preprocessor.observation_space
g = nx.DiGraph()
for k in obs_spaces:
g.add_node(k)
for k in self.preprocessors:
for j in self.preprocessors[k].input_uuids:
g.add_edge(j, k)
assert nx.is_directed_acyclic_graph(
g
), "preprocessors do not form a direct acyclic graph"
# noinspection PyCallingNonCallable
self.observation_spaces = SpaceDict(
spaces={
uuid: obs_spaces[uuid]
for uuid in obs_spaces
if uuid in additional_output_uuids or g.out_degree(uuid) == 0
}
)
# ensure dependencies are precomputed
self.compute_order = [n for n in nx.dfs_preorder_nodes(g)]
def get(self, uuid: str) -> Preprocessor:
"""Return preprocessor with the given `uuid`.
# Parameters
uuid : The unique id of the preprocessor.
# Returns
The preprocessor with unique id `uuid`.
"""
return self.preprocessors[uuid]
def to(self, device: torch.device) -> "SensorPreprocessorGraph":
for k, v in self.preprocessors.items():
self.preprocessors[k] = v.to(device)
self.device = device
return self
def get_observations(
self, obs: Dict[str, Any], *args: Any, **kwargs: Any
) -> Dict[str, Any]:
"""Get processed observations.
# Returns
Collect observations processed from all sensors and return them packaged inside a Dict.
"""
for uuid in self.compute_order:
if uuid not in obs:
obs[uuid] = self.preprocessors[uuid].process(obs)
return {uuid: obs[uuid] for uuid in self.observation_spaces}
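# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library API): a trivial preprocessor
# consuming a hypothetical "raw" observation. Because "raw" feeds a
# preprocessor, only the sink uuid "processed" is exposed by
# `get_observations` (unless "raw" is listed in `additional_output_uuids`).
class _IdentityPreprocessorSketch(Preprocessor):
    def process(self, obs: Dict[str, Any], *args: Any, **kwargs: Any) -> Any:
        return obs[self.input_uuids[0]]

    def to(self, device: torch.device) -> "Preprocessor":
        return self


def _sensor_preprocessor_graph_sketch() -> None:
    source_spaces = SpaceDict(
        {"raw": gym.spaces.Box(low=0.0, high=1.0, shape=(4,))}
    )
    graph = SensorPreprocessorGraph(
        source_observation_spaces=source_spaces,
        preprocessors=[
            _IdentityPreprocessorSketch(
                input_uuids=["raw"],
                output_uuid="processed",
                observation_space=source_spaces["raw"],
            )
        ],
    )
    assert set(graph.observation_spaces.spaces.keys()) == {"processed"}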
class PreprocessorGraph(SensorPreprocessorGraph):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
raise DeprecationWarning(
"`PreprocessorGraph` has been deprecated, use `SensorPreprocessorGraph` instead."
)
class ObservationSet:
def __init__(self, *args, **kwargs) -> None:
raise DeprecationWarning(
"`ObservationSet` has been deprecated. Use `SensorPreprocessorGraph` instead."
)
| allenact-main | allenact/base_abstractions/preprocessor.py |
import abc
from collections import OrderedDict
from typing import Any, Union, Callable, TypeVar, Dict, Optional, cast, Protocol
import gym
import torch
import torch.nn as nn
from torch.distributions.utils import lazy_property
from allenact.algorithms.onpolicy_sync.misc import TrackingInfoType
from allenact.base_abstractions.sensor import AbstractExpertActionSensor as Expert
from allenact.utils import spaces_utils as su
from allenact.utils.misc_utils import all_unique
TeacherForcingAnnealingType = TypeVar("TeacherForcingAnnealingType")
"""
Modify standard PyTorch distributions so they are compatible with this code.
"""
class Distr(abc.ABC):
@abc.abstractmethod
def log_prob(self, actions: Any):
"""Return the log probability/ies of the provided action/s."""
raise NotImplementedError()
@abc.abstractmethod
def entropy(self):
"""Return the entropy or entropies."""
raise NotImplementedError()
@abc.abstractmethod
def sample(self, sample_shape=torch.Size()):
"""Sample actions."""
raise NotImplementedError()
def mode(self):
"""If available, return the action(s) with highest probability.
It will only be called if using deterministic agents.
"""
raise NotImplementedError()
class CategoricalDistr(torch.distributions.Categorical, Distr):
"""A categorical distribution extending PyTorch's Categorical.
probs or logits are assumed to be passed with step and sampler
dimensions as in: [step, samplers, ...]
"""
def mode(self):
return self._param.argmax(dim=-1, keepdim=False) # match sample()'s shape
def log_prob(self, value: torch.Tensor):
if value.shape == self.logits.shape[:-1]:
return super(CategoricalDistr, self).log_prob(value=value)
elif value.shape == self.logits.shape[:-1] + (1,):
return (
super(CategoricalDistr, self)
.log_prob(value=value.squeeze(-1))
.unsqueeze(-1)
)
else:
raise NotImplementedError(
"Broadcasting in categorical distribution is disabled as it often leads"
f" to unexpected results. We have that `value.shape == {value.shape}` but"
f" expected a shape of "
f" `self.logits.shape[:-1] == {self.logits.shape[:-1]}` or"
f" `self.logits.shape[:-1] + (1,) == {self.logits.shape[:-1] + (1,)}`"
)
@lazy_property
def log_probs_tensor(self):
return torch.log_softmax(self.logits, dim=-1)
@lazy_property
def probs_tensor(self):
return torch.softmax(self.logits, dim=-1)
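# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library API): the [step, samplers, ...]
# layout expected by `CategoricalDistr`. The shapes below (1 step, 2 samplers,
# 4 actions) are hypothetical.
def _categorical_distr_sketch() -> None:
    logits = torch.randn(1, 2, 4)  # [step, samplers, num_actions]
    distr = CategoricalDistr(logits=logits)
    actions = distr.sample()  # shape [1, 2]
    log_probs = distr.log_prob(actions)  # shape [1, 2]
    assert actions.shape == log_probs.shape == (1, 2)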
class ConditionalDistr(Distr):
"""Action distribution conditional which is conditioned on other
information (i.e. part of a hierarchical distribution)
# Attributes
action_group_name : the identifier of the group of actions (`OrderedDict`) produced by this `ConditionalDistr`
"""
action_group_name: str
def __init__(
self,
distr_conditioned_on_input_fn_or_instance: Union[Callable, Distr],
action_group_name: str,
*distr_conditioned_on_input_args,
**distr_conditioned_on_input_kwargs,
):
"""Initialize an ConditionalDistr.
# Parameters
distr_conditioned_on_input_fn_or_instance : Either a `Distr` instance, or a callable that returns
the conditioned `Distr` given previously sampled actions.
action_group_name : the identifier of the group of actions (`OrderedDict`) produced by this `ConditionalDistr`
distr_conditioned_on_input_args : positional arguments for Callable `distr_conditioned_on_input_fn_or_instance`
distr_conditioned_on_input_kwargs : keyword arguments for Callable `distr_conditioned_on_input_fn_or_instance`
"""
self.distr: Optional[Distr] = None
self.distr_conditioned_on_input_fn: Optional[Callable] = None
self.distr_conditioned_on_input_args = distr_conditioned_on_input_args
self.distr_conditioned_on_input_kwargs = distr_conditioned_on_input_kwargs
if isinstance(distr_conditioned_on_input_fn_or_instance, Distr):
self.distr = distr_conditioned_on_input_fn_or_instance
else:
self.distr_conditioned_on_input_fn = (
distr_conditioned_on_input_fn_or_instance
)
self.action_group_name = action_group_name
def log_prob(self, actions):
return self.distr.log_prob(actions)
def entropy(self):
return self.distr.entropy()
def condition_on_input(self, **ready_actions):
if self.distr is None:
assert all(
key not in self.distr_conditioned_on_input_kwargs
for key in ready_actions
)
self.distr = self.distr_conditioned_on_input_fn(
*self.distr_conditioned_on_input_args,
**self.distr_conditioned_on_input_kwargs,
**ready_actions,
)
def reset(self):
if (self.distr is not None) and (
self.distr_conditioned_on_input_fn is not None
):
self.distr = None
def sample(self, sample_shape=torch.Size()) -> OrderedDict:
return OrderedDict([(self.action_group_name, self.distr.sample(sample_shape))])
def mode(self) -> OrderedDict:
return OrderedDict([(self.action_group_name, self.distr.mode())])
class SequentialDistr(Distr):
def __init__(self, *conditional_distrs: ConditionalDistr):
action_group_names = [cd.action_group_name for cd in conditional_distrs]
assert all_unique(
action_group_names
), f"All conditional distribution `action_group_name`, must be unique, given names {action_group_names}"
self.conditional_distrs = conditional_distrs
def sample(self, sample_shape=torch.Size()):
actions = OrderedDict()
for cd in self.conditional_distrs:
cd.condition_on_input(**actions)
actions.update(cd.sample(sample_shape=sample_shape))
return actions
def mode(self):
actions = OrderedDict()
for cd in self.conditional_distrs:
cd.condition_on_input(**actions)
actions.update(cd.mode())
return actions
def conditional_entropy(self):
total = 0
for cd in self.conditional_distrs:
total = total + cd.entropy()
return total
def entropy(self):
raise NotImplementedError(
"Please use 'conditional_entropy' instead of 'entropy' as the `entropy_method_name` "
"parameter in your loss when using `SequentialDistr`."
)
def log_prob(
self, actions: Dict[str, Any], return_dict: bool = False
) -> Union[torch.Tensor, Dict[str, torch.Tensor]]:
assert len(actions) == len(
self.conditional_distrs
), f"{len(self.conditional_distrs)} conditional distributions for {len(actions)} action groups"
res: Union[int, torch.Tensor, Dict[str, torch.Tensor]] = (
0 if not return_dict else OrderedDict()
)
for cd in self.conditional_distrs:
cd.condition_on_input(**actions)
current_log_prob = cd.log_prob(actions[cd.action_group_name])
if not return_dict:
res = res + current_log_prob
else:
res[cd.action_group_name] = current_log_prob
return res
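# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library API): a two-level hierarchical
# distribution in which the second group's distribution depends on the first
# group's sampled action. The group names ("coarse", "fine") and all shapes
# are hypothetical.
def _sequential_distr_sketch() -> None:
    def make_fine_distr(coarse: torch.Tensor) -> CategoricalDistr:
        # Condition the fine-grained logits on the sampled coarse action.
        fine_logits = torch.randn(1, 2, 4) + coarse.unsqueeze(-1).float()
        return CategoricalDistr(logits=fine_logits)

    distr = SequentialDistr(
        ConditionalDistr(CategoricalDistr(logits=torch.randn(1, 2, 3)), "coarse"),
        ConditionalDistr(make_fine_distr, "fine"),
    )
    actions = distr.sample()  # OrderedDict with "coarse" and "fine" entries
    log_prob = distr.log_prob(actions)  # summed over both action groups
    assert log_prob.shape == (1, 2)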
class TrackingCallback(Protocol):
def __call__(self, type: TrackingInfoType, info: Dict[str, Any], n: int):
...
class TeacherForcingDistr(Distr):
def __init__(
self,
distr: Distr,
obs: Dict[str, Any],
action_space: gym.spaces.Space,
num_active_samplers: Optional[int],
approx_steps: Optional[int],
teacher_forcing: Optional[TeacherForcingAnnealingType],
tracking_callback: Optional[TrackingCallback],
always_enforce: bool = False,
):
self.distr = distr
self.is_sequential = isinstance(self.distr, SequentialDistr)
# action_space is a gym.spaces.Dict for SequentialDistr, or any gym.Space for other Distr
self.action_space = action_space
self.num_active_samplers = num_active_samplers
self.approx_steps = approx_steps
self.teacher_forcing = teacher_forcing
self.tracking_callback = tracking_callback
self.always_enforce = always_enforce
assert (
"expert_action" in obs
), "When using teacher forcing, obs must contain an `expert_action` uuid"
obs_space = Expert.flagged_space(
self.action_space, use_dict_as_groups=self.is_sequential
)
self.expert = su.unflatten(obs_space, obs["expert_action"])
def enforce(
self,
sample: Any,
action_space: gym.spaces.Space,
teacher: OrderedDict,
teacher_force_info: Optional[Dict[str, Any]],
action_name: Optional[str] = None,
):
actions = su.flatten(action_space, sample)
assert (
len(actions.shape) == 3
), f"Got flattened actions with shape {actions.shape} (it should be [1 x `samplers` x `flatdims`])"
if self.num_active_samplers is not None:
assert actions.shape[1] == self.num_active_samplers
expert_actions = su.flatten(action_space, teacher[Expert.ACTION_POLICY_LABEL])
assert (
expert_actions.shape == actions.shape
), f"expert actions shape {expert_actions.shape} doesn't match the model's {actions.shape}"
# expert_success is 0 if the expert action could not be computed and otherwise equals 1.
expert_action_exists_mask = teacher[Expert.EXPERT_SUCCESS_LABEL]
if not self.always_enforce:
teacher_forcing_mask = (
torch.distributions.bernoulli.Bernoulli(
torch.tensor(self.teacher_forcing(self.approx_steps))
)
.sample(expert_action_exists_mask.shape)
.long()
.to(actions.device)
) * expert_action_exists_mask
else:
teacher_forcing_mask = expert_action_exists_mask
if teacher_force_info is not None:
teacher_force_info[
"teacher_ratio/sampled{}".format(
f"_{action_name}" if action_name is not None else ""
)
] = (teacher_forcing_mask.float().mean().item())
extended_shape = teacher_forcing_mask.shape + (1,) * (
len(actions.shape) - len(teacher_forcing_mask.shape)
)
actions = torch.where(
teacher_forcing_mask.byte().view(extended_shape), expert_actions, actions
)
return su.unflatten(action_space, actions)
def log_prob(self, actions: Any):
return self.distr.log_prob(actions)
def entropy(self):
return self.distr.entropy()
def conditional_entropy(self):
if hasattr(self.distr, "conditional_entropy"):
return self.distr.conditional_entropy()
raise NotImplementedError(
f"`conditional_entropy` is not defined for {self.distr}."
)
def sample(self, sample_shape=torch.Size()):
teacher_force_info: Optional[Dict[str, Any]] = None
if self.approx_steps is not None:
teacher_force_info = {
"teacher_ratio/enforced": self.teacher_forcing(self.approx_steps),
}
if self.is_sequential:
res = OrderedDict()
for cd in cast(SequentialDistr, self.distr).conditional_distrs:
cd.condition_on_input(**res)
action_group_name = cd.action_group_name
res[action_group_name] = self.enforce(
cd.sample(sample_shape)[action_group_name],
cast(gym.spaces.Dict, self.action_space)[action_group_name],
self.expert[action_group_name],
teacher_force_info,
action_group_name,
)
else:
res = self.enforce(
self.distr.sample(sample_shape),
self.action_space,
self.expert,
teacher_force_info,
)
if self.tracking_callback is not None and self.num_active_samplers is not None:
self.tracking_callback(
type=TrackingInfoType.TEACHER_FORCING,
info=teacher_force_info,
n=self.num_active_samplers,
)
return res
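# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library API): the core mixing rule used
# by `TeacherForcingDistr.enforce`. A Bernoulli mask (with rate given by the
# annealed teacher-forcing probability), zeroed wherever the expert failed,
# selects expert actions over sampled ones. All tensors below are
# hypothetical.
def _teacher_forcing_mix_sketch() -> None:
    sampled = torch.tensor([[0, 1, 2]])  # [step, samplers]
    expert = torch.tensor([[2, 2, 2]])
    expert_success = torch.tensor([[1, 0, 1]])  # 0 means the expert failed
    tf_prob = 0.5  # annealed enforcement probability

    mask = (
        torch.distributions.Bernoulli(torch.tensor(tf_prob))
        .sample(expert_success.shape)
        .long()
    ) * expert_success
    mixed = torch.where(mask.bool(), expert, sampled)
    assert mixed.shape == sampled.shape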
class AddBias(nn.Module):
"""Adding bias parameters to input values."""
def __init__(self, bias: torch.FloatTensor):
"""Initializer.
# Parameters
bias : data to use as the initial values of the bias.
"""
super(AddBias, self).__init__()
self._bias = nn.Parameter(bias.unsqueeze(1), requires_grad=True)
def forward(self, x: torch.FloatTensor) -> torch.FloatTensor: # type: ignore
"""Adds the stored bias parameters to `x`."""
assert x.dim() in [2, 4]
if x.dim() == 2:
bias = self._bias.t().view(1, -1)
else:
bias = self._bias.t().view(1, -1, 1, 1)
return x + bias # type:ignore
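# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library API): `AddBias` adds learnable
# per-channel biases; the shapes below are hypothetical.
def _add_bias_sketch() -> None:
    add_bias = AddBias(torch.zeros(3))
    out = add_bias(torch.randn(2, 3))  # 2D input: bias broadcast as [1, 3]
    assert out.shape == (2, 3)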
| allenact-main | allenact/base_abstractions/distributions.py |
import abc
from typing import List, Dict, Any, Sequence, Optional
from allenact.base_abstractions.experiment_config import ExperimentConfig
from allenact.base_abstractions.sensor import Sensor
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
class Callback(abc.ABC):
def setup(
self,
name: str,
config: ExperimentConfig,
mode: Literal["train", "valid", "test"],
**kwargs,
) -> None:
"""Called once before training begins."""
def on_train_log(
self,
*,
metrics: List[Dict[str, Any]],
metric_means: Dict[str, float],
tasks_data: List[Any],
step: int,
scalar_name_to_total_experiences_key: Dict[str, str],
**kwargs,
) -> None:
"""Called once train is supposed to log."""
def on_valid_log(
self,
*,
metrics: Dict[str, Any],
metric_means: Dict[str, float],
tasks_data: List[Any],
step: int,
scalar_name_to_total_experiences_key: Dict[str, str],
checkpoint_file_name: str,
**kwargs,
) -> None:
"""Called after validation ends."""
def on_test_log(
self,
*,
metrics: Dict[str, Any],
metric_means: Dict[str, float],
tasks_data: List[Any],
step: int,
scalar_name_to_total_experiences_key: Dict[str, str],
checkpoint_file_name: str,
**kwargs,
) -> None:
"""Called after test ends."""
def after_save_project_state(self, base_dir: str) -> None:
"""Called after saving the project state in base_dir."""
def callback_sensors(self) -> Optional[Sequence[Sensor]]:
"""Determines the data returned to the `tasks_data` parameter in the
above *_log functions."""
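# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library API): a minimal callback that
# reports mean training metrics. The hook arguments are supplied by the
# runner; the print statement stands in for any user-side logging backend.
class _PrintMetricsCallbackSketch(Callback):
    def setup(
        self,
        name: str,
        config: ExperimentConfig,
        mode: Literal["train", "valid", "test"],
        **kwargs,
    ) -> None:
        self.name = name

    def on_train_log(
        self,
        *,
        metrics: List[Dict[str, Any]],
        metric_means: Dict[str, float],
        tasks_data: List[Any],
        step: int,
        scalar_name_to_total_experiences_key: Dict[str, str],
        **kwargs,
    ) -> None:
        print(f"[{self.name}] step={step}: {metric_means}")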
| allenact-main | allenact/base_abstractions/callbacks.py |
allenact-main | allenact/algorithms/__init__.py |
|
from enum import Enum
from typing import Dict, Any, Optional
import attr
class TrackingInfoType(Enum):
LOSS = "loss"
TEACHER_FORCING = "teacher_forcing"
UPDATE_INFO = "update_info"
@attr.s(kw_only=True)
class TrackingInfo:
type: TrackingInfoType = attr.ib()
info: Dict[str, Any] = attr.ib()
n: int = attr.ib()
storage_uuid: Optional[str] = attr.ib()
stage_component_uuid: Optional[str] = attr.ib()
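# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library API): constructing a
# `TrackingInfo` record as a training engine might after a loss update; all
# values below are hypothetical.
def _tracking_info_sketch() -> TrackingInfo:
    return TrackingInfo(
        type=TrackingInfoType.LOSS,
        info={"ppo_total": 0.12},
        n=64,
        storage_uuid="onpolicy",
        stage_component_uuid=None,
    )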
| allenact-main | allenact/algorithms/onpolicy_sync/misc.py |
"""Defines the reinforcement learning `OnPolicyRunner`."""
import copy
import enum
import glob
import importlib.util
import inspect
import itertools
import json
import math
import os
import pathlib
import queue
import random
import signal
import subprocess
import sys
import time
import traceback
from collections import defaultdict
from multiprocessing.context import BaseContext
from multiprocessing.process import BaseProcess
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, Set
import filelock
import numpy as np
import torch
import torch.multiprocessing as mp
from setproctitle import setproctitle as ptitle
from torch.distributions.utils import lazy_property
from allenact.algorithms.onpolicy_sync.engine import (
TEST_MODE_STR,
TRAIN_MODE_STR,
VALID_MODE_STR,
OnPolicyInference,
OnPolicyRLEngine,
OnPolicyTrainer,
)
from allenact.base_abstractions.callbacks import Callback
from allenact.base_abstractions.experiment_config import ExperimentConfig, MachineParams
from allenact.base_abstractions.sensor import Sensor
from allenact.utils.experiment_utils import (
LoggingPackage,
ScalarMeanTracker,
set_deterministic_cudnn,
set_seed,
)
from allenact.utils.misc_utils import (
NumpyJSONEncoder,
all_equal,
get_git_diff_of_project,
)
from allenact.utils.model_utils import md5_hash_of_state_dict
from allenact.utils.system import find_free_port, get_logger
from allenact.utils.tensor_utils import SummaryWriter
from allenact.utils.viz_utils import VizSuite
CONFIG_KWARGS_STR = "__CONFIG_KWARGS__"
class SaveDirFormat(enum.Enum):
"""Directory formats that can be used when saving tensorboard logs,
checkpoints, etc.
during training/evaluation.
FLAT: the first-level directories are logs, checkpoints, metrics, etc.; the second-level directories are the time strings of each experiment.
NESTED: the opposite of FLAT.
"""
FLAT = "FLAT"
NESTED = "NESTED"
# Has results queue (aggregated per trainer), checkpoints queue and mp context
# Instantiates train, validate, and test workers
# Logging
# Saves configs, makes folder for trainer models
class OnPolicyRunner(object):
def __init__(
self,
config: ExperimentConfig,
output_dir: str,
loaded_config_src_files: Optional[Dict[str, str]],
seed: Optional[int] = None,
mode: str = "train",
deterministic_cudnn: bool = False,
deterministic_agents: bool = False,
mp_ctx: Optional[BaseContext] = None,
multiprocessing_start_method: str = "default",
extra_tag: str = "",
disable_tensorboard: bool = False,
disable_config_saving: bool = False,
distributed_ip_and_port: str = "127.0.0.1:0",
distributed_preemption_threshold: float = 0.7,
machine_id: int = 0,
save_dir_fmt: SaveDirFormat = SaveDirFormat.FLAT,
callbacks_paths: Optional[str] = None,
):
self.config = config
self.output_dir = output_dir
self.loaded_config_src_files = loaded_config_src_files
self.seed = seed if seed is not None else random.randint(0, 2**31 - 1)
self.deterministic_cudnn = deterministic_cudnn
self.distributed_preemption_threshold = distributed_preemption_threshold
if multiprocessing_start_method == "default":
if torch.cuda.is_available():
multiprocessing_start_method = "forkserver"
else:
# Spawn seems to play nicer with cpus and debugging
multiprocessing_start_method = "spawn"
self.mp_ctx = self.init_context(mp_ctx, multiprocessing_start_method)
self.extra_tag = extra_tag
self.mode = mode.lower().strip()
self.visualizer: Optional[VizSuite] = None
self.deterministic_agents = deterministic_agents
self.disable_tensorboard = disable_tensorboard
self.disable_config_saving = disable_config_saving
assert self.mode in [
TRAIN_MODE_STR,
TEST_MODE_STR,
], "Only 'train' and 'test' modes supported in runner"
if self.deterministic_cudnn:
set_deterministic_cudnn()
set_seed(self.seed)
self.queues: Optional[Dict[str, mp.Queue]] = None
self.processes: Dict[str, List[Union[BaseProcess, mp.Process]]] = defaultdict(
list
)
self.current_checkpoint = None
self._local_start_time_str: Optional[str] = None
self._is_closed: bool = False
self._collect_valid_results: bool = False
self.distributed_ip_and_port = distributed_ip_and_port
self.machine_id = machine_id
self.save_dir_fmt = save_dir_fmt
self.callbacks_paths = callbacks_paths
@lazy_property
def callbacks(self):
return self.setup_callback_classes(self.callbacks_paths)
@property
def local_start_time_str(self) -> str:
if self._local_start_time_str is None:
raise RuntimeError(
"Local start time string does not exist as neither `start_train()` or `start_test()`"
" has been called on this runner."
)
return self._local_start_time_str
@property
def running_validation(self):
pipeline = self.config.training_pipeline()
return (
sum(
MachineParams.instance_from(
self.config.machine_params(VALID_MODE_STR)
).nprocesses
)
> 0
or (
pipeline.rollout_storage_uuid is None
and len(pipeline.valid_pipeline_stage.loss_names) > 0
)
) and self.machine_id == 0
@staticmethod
def init_context(
mp_ctx: Optional[BaseContext] = None,
multiprocessing_start_method: str = "forkserver",
valid_start_methods: Tuple[str, ...] = ("forkserver", "spawn", "fork"),
):
if mp_ctx is None:
assert multiprocessing_start_method in valid_start_methods, (
f"multiprocessing_start_method must be one of {valid_start_methods}."
f" Got '{multiprocessing_start_method}'"
)
mp_ctx = mp.get_context(multiprocessing_start_method)
elif multiprocessing_start_method != mp_ctx.get_start_method():
get_logger().warning(
f"ignoring multiprocessing_start_method '{multiprocessing_start_method}'"
f" and using given context with '{mp_ctx.get_start_method()}'"
)
return mp_ctx
def setup_callback_classes(self, callbacks: Optional[str]) -> Set[Callback]:
"""Get a list of Callback classes from a comma-separated list of files,
paths, and/or functions.
After separating the `callbacks` into a list of strings, each string should either
be a:
1. Name of a function defined on the experiment config that, when called, returns an
object of type `Callback`.
2. Path to a python file containing a single class that inherits from `Callback`.
3. Module path (e.g. `path.to.module`) where this module contains a single class that
inherits from `Callback`.
"""
if callbacks == "" or callbacks is None:
return set()
setup_dict = dict(
name=f"{self.experiment_name}/{self.local_start_time_str}",
config=self.config,
mode=self.mode,
)
callback_objects = set()
files = callbacks.split(",")
for filename in files:
# Check if the `filename` is a function on the config
if not any(k in filename for k in [".", "/"]):
callback_func = getattr(self.config, filename, None)
if callback_func is not None:
callback = callback_func()
callback.setup(**setup_dict)
callback_objects.add(callback)
continue
# Otherwise find the Callback class in the file or module
module_path = filename.replace("/", ".")
if module_path.endswith(".py"):
module_path = module_path[:-3]
module = importlib.import_module(module_path)
classes = inspect.getmembers(module, inspect.isclass)
callback_classes = [
mod_class[1]
for mod_class in classes
if issubclass(mod_class[1], Callback)
]
assert len(callback_classes) == 1, (
f"Expected a single callback class in {filename}, but found {len(callback_classes)}."
f" These classes were found: {callback_classes}."
)
for mod_class in callback_classes:
# NOTE: initialize the callback class
callback = mod_class()
callback.setup(**setup_dict)
callback_objects.add(callback)
return callback_objects
def _acquire_unique_local_start_time_string(self) -> str:
"""Creates a (unique) local start time string for this experiment.
Ensures through file locks that the local start time string
produced is unique. This implies that, if one has many
experiments starting in parallel, at most one will be started
every second (as the local start time string only records the
time up to the current second).
"""
os.makedirs(self.output_dir, exist_ok=True)
start_time_string_lock_path = os.path.abspath(
os.path.join(self.output_dir, ".allenact_start_time_string.lock")
)
try:
with filelock.FileLock(start_time_string_lock_path, timeout=60):
last_start_time_string_path = os.path.join(
self.output_dir, ".allenact_last_start_time_string"
)
pathlib.Path(last_start_time_string_path).touch()
with open(last_start_time_string_path, "r") as f:
last_start_time_string_list = f.readlines()
while True:
candidate_str = time.strftime(
"%Y-%m-%d_%H-%M-%S", time.localtime(time.time())
)
if (
len(last_start_time_string_list) == 0
or last_start_time_string_list[0].strip() != candidate_str
):
break
time.sleep(0.2)
with open(last_start_time_string_path, "w") as f:
f.write(candidate_str)
except filelock.Timeout as e:
get_logger().exception(
f"Could not acquire the lock for {start_time_string_lock_path} for 60 seconds,"
" this suggests an unexpected deadlock. Please close all AllenAct training processes,"
" delete this lockfile, and try again."
)
raise e
assert candidate_str is not None
return candidate_str
def worker_devices(self, mode: str):
machine_params: MachineParams = MachineParams.instance_from(
self.config.machine_params(mode)
)
devices = machine_params.devices
assert all_equal(devices) or all(
d.index >= 0 for d in devices
), f"Cannot have a mix of CPU and GPU devices (`devices == {devices}`)"
get_logger().info(f"Using {len(devices)} {mode} workers on devices {devices}")
return devices
def local_worker_ids(self, mode: str):
machine_params: MachineParams = MachineParams.instance_from(
self.config.machine_params(mode, machine_id=self.machine_id)
)
ids = machine_params.local_worker_ids
get_logger().info(
f"Using local worker ids {ids} (total {len(ids)} workers in machine {self.machine_id})"
)
return ids
def init_visualizer(self, mode: str):
if not self.disable_tensorboard:
# Note: Avoid instantiating anything in machine_params (use Builder if needed)
machine_params = MachineParams.instance_from(
self.config.machine_params(mode)
)
self.visualizer = machine_params.visualizer
@staticmethod
def init_process(mode: str, id: int, to_close_on_termination: OnPolicyRLEngine):
ptitle(f"{mode}-{id}")
def create_handler(termination_type: str):
def handler(_signo, _frame):
prefix = f"{termination_type} signal sent to worker {mode}-{id}."
if to_close_on_termination.is_closed:
get_logger().info(
f"{prefix} Worker {mode}-{id} is already closed, exiting."
)
sys.exit(0)
elif not to_close_on_termination.is_closing:
get_logger().info(
f"{prefix} Forcing worker {mode}-{id} to close and exiting."
)
# noinspection PyBroadException
try:
to_close_on_termination.close(True)
except Exception:
get_logger().error(
f"Error occurred when closing the RL engine used by work {mode}-{id}."
f" We cannot recover from this and will simply exit. The exception:\n"
f"{traceback.format_exc()}"
)
sys.exit(1)
sys.exit(0)
else:
get_logger().info(
f"{prefix} Worker {mode}-{id} is already closing, ignoring this signal."
)
return handler
signal.signal(signal.SIGTERM, create_handler("Termination"))
signal.signal(signal.SIGINT, create_handler("Interrupt"))
@staticmethod
def init_worker(engine_class, args, kwargs):
mode = kwargs["mode"]
id = kwargs["worker_id"]
worker = None
try:
worker = engine_class(*args, **kwargs)
except Exception:
get_logger().error(f"Encountered Exception. Terminating {mode} worker {id}")
get_logger().exception(traceback.format_exc())
kwargs["results_queue"].put((f"{mode}_stopped", 1 + id))
finally:
return worker
@lazy_property
def _get_callback_sensors(self) -> List[Sensor]:
callback_sensors: List[Sensor] = []
for c in self.callbacks:
sensors = c.callback_sensors()
if sensors is not None:
callback_sensors.extend(sensors)
return callback_sensors
@staticmethod
def train_loop(
id: int = 0,
checkpoint: Optional[str] = None,
restart_pipeline: bool = False,
valid_on_initial_weights: bool = False,
*engine_args,
**engine_kwargs,
):
engine_kwargs["mode"] = TRAIN_MODE_STR
engine_kwargs["worker_id"] = id
engine_kwargs_for_print = {
k: (v if k != "initial_model_state_dict" else "[SUPPRESSED]")
for k, v in engine_kwargs.items()
}
get_logger().info(f"train {id} args {engine_kwargs_for_print}")
trainer: OnPolicyTrainer = OnPolicyRunner.init_worker(
engine_class=OnPolicyTrainer, args=engine_args, kwargs=engine_kwargs
)
if trainer is not None:
OnPolicyRunner.init_process("Train", id, to_close_on_termination=trainer)
trainer.train(
checkpoint_file_name=checkpoint,
restart_pipeline=restart_pipeline,
valid_on_initial_weights=valid_on_initial_weights,
)
@staticmethod
def valid_loop(id: int = 0, *engine_args, **engine_kwargs):
engine_kwargs["mode"] = VALID_MODE_STR
engine_kwargs["worker_id"] = id
get_logger().info(f"valid {id} args {engine_kwargs}")
valid = OnPolicyRunner.init_worker(
engine_class=OnPolicyInference, args=engine_args, kwargs=engine_kwargs
)
if valid is not None:
OnPolicyRunner.init_process("Valid", id, to_close_on_termination=valid)
valid.process_checkpoints() # gets checkpoints via queue
@staticmethod
def test_loop(id: int = 0, *engine_args, **engine_kwargs):
engine_kwargs["mode"] = TEST_MODE_STR
engine_kwargs["worker_id"] = id
get_logger().info(f"test {id} args {engine_kwargs}")
test = OnPolicyRunner.init_worker(OnPolicyInference, engine_args, engine_kwargs)
if test is not None:
OnPolicyRunner.init_process("Test", id, to_close_on_termination=test)
test.process_checkpoints() # gets checkpoints via queue
def _initialize_start_train_or_start_test(self):
self._is_closed = False
if self.queues is not None:
for k, q in self.queues.items():
try:
out = q.get(timeout=1)
raise RuntimeError(
f"{k} queue was not empty before starting new training/testing (contained {out})."
f" This should not happen, please report how you obtained this error"
f" by creating an issue at https://github.com/allenai/allenact/issues."
)
except queue.Empty:
pass
self.queues = {
"results": self.mp_ctx.Queue(),
"checkpoints": self.mp_ctx.Queue(),
}
self._local_start_time_str = self._acquire_unique_local_start_time_string()
def get_port(self):
passed_port = int(self.distributed_ip_and_port.split(":")[1])
if passed_port == 0:
assert (
self.machine_id == 0
), "Only runner with `machine_id` == 0 can search for a free port."
distributed_port = find_free_port(
self.distributed_ip_and_port.split(":")[0]
)
else:
distributed_port = passed_port
get_logger().info(
f"Engines on machine_id == {self.machine_id} using port {distributed_port} and seed {self.seed}"
)
return distributed_port
def start_train(
self,
checkpoint: Optional[str] = None,
restart_pipeline: bool = False,
max_sampler_processes_per_worker: Optional[int] = None,
save_ckpt_after_every_pipeline_stage: bool = True,
collect_valid_results: bool = False,
valid_on_initial_weights: bool = False,
try_restart_after_task_error: bool = False,
):
self._initialize_start_train_or_start_test()
self._collect_valid_results = collect_valid_results
if not self.disable_config_saving:
self.save_project_state()
devices = self.worker_devices(TRAIN_MODE_STR)
num_workers = len(devices)
# Be extra careful to ensure that all models start
# with the same initializations.
set_seed(self.seed)
initial_model_state_dict = self.config.create_model(
sensor_preprocessor_graph=MachineParams.instance_from(
self.config.machine_params(self.mode)
).sensor_preprocessor_graph
).state_dict()
distributed_port = 0 if num_workers == 1 else self.get_port()
if (
num_workers > 1
and "NCCL_ASYNC_ERROR_HANDLING" not in os.environ
and "NCCL_BLOCKING_WAIT" not in os.environ
):
# This ensures the NCCL distributed backend will throw errors
# if we timeout at a call to `barrier()`
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
worker_ids = self.local_worker_ids(TRAIN_MODE_STR)
model_hash = None
for trainer_id in worker_ids:
training_kwargs = dict(
id=trainer_id,
checkpoint=checkpoint,
restart_pipeline=restart_pipeline,
experiment_name=self.experiment_name,
config=self.config,
callback_sensors=self._get_callback_sensors,
results_queue=self.queues["results"],
checkpoints_queue=self.queues["checkpoints"]
if self.running_validation
else None,
checkpoints_dir=self.checkpoint_dir(),
seed=self.seed,
deterministic_cudnn=self.deterministic_cudnn,
mp_ctx=self.mp_ctx,
num_workers=num_workers,
device=devices[trainer_id],
distributed_ip=self.distributed_ip_and_port.split(":")[0],
distributed_port=distributed_port,
max_sampler_processes_per_worker=max_sampler_processes_per_worker,
save_ckpt_after_every_pipeline_stage=save_ckpt_after_every_pipeline_stage,
initial_model_state_dict=initial_model_state_dict
if model_hash is None
else model_hash,
first_local_worker_id=worker_ids[0],
distributed_preemption_threshold=self.distributed_preemption_threshold,
valid_on_initial_weights=valid_on_initial_weights,
try_restart_after_task_error=try_restart_after_task_error,
)
train: BaseProcess = self.mp_ctx.Process(
target=self.train_loop,
kwargs=training_kwargs,
)
try:
train.start()
except (ValueError, OSError, ConnectionRefusedError, EOFError) as e:
# If the `initial_model_state_dict` is too large we sometimes
# run into errors passing it with multiprocessing. In such cases
# we instead hash the state_dict and confirm, in each engine worker, that
# this hash equals the model the engine worker instantiates.
if (
(isinstance(e, ValueError) and e.args[0] == "too many fds")
or (isinstance(e, OSError) and e.errno == 22)
or (isinstance(e, ConnectionRefusedError) and e.errno == 111)
or isinstance(e, EOFError)
):
model_hash = md5_hash_of_state_dict(initial_model_state_dict)
training_kwargs["initial_model_state_dict"] = model_hash
train = self.mp_ctx.Process(
target=self.train_loop,
kwargs=training_kwargs,
)
train.start()
else:
raise e
self.processes[TRAIN_MODE_STR].append(train)
get_logger().info(
f"Started {len(self.processes[TRAIN_MODE_STR])} train processes"
)
# Validation
if self.running_validation:
device = self.worker_devices(VALID_MODE_STR)[0]
self.init_visualizer(VALID_MODE_STR)
valid: BaseProcess = self.mp_ctx.Process(
target=self.valid_loop,
args=(0,),
kwargs=dict(
config=self.config,
callback_sensors=self._get_callback_sensors,
results_queue=self.queues["results"],
checkpoints_queue=self.queues["checkpoints"],
seed=12345,  # TODO allow same order for randomly sampled tasks? Is this useful anyway?
deterministic_cudnn=self.deterministic_cudnn,
deterministic_agents=self.deterministic_agents,
mp_ctx=self.mp_ctx,
device=device,
max_sampler_processes_per_worker=max_sampler_processes_per_worker,
),
)
valid.start()
self.processes[VALID_MODE_STR].append(valid)
get_logger().info(
f"Started {len(self.processes[VALID_MODE_STR])} valid processes"
)
else:
get_logger().info(
"No processes allocated to validation, no validation will be run."
)
metrics_file_template: Optional[str] = None
if self._collect_valid_results:
metrics_dir = self.metric_path(self.local_start_time_str)
os.makedirs(metrics_dir, exist_ok=True)
suffix = f"__valid_{self.local_start_time_str}"
metrics_file_template = os.path.join(
metrics_dir, "metrics" + suffix + "{:012d}.json"
) # template for training steps
get_logger().info(
f"Saving valid metrics with template {metrics_file_template}"
)
# Check output file can be written
with open(metrics_file_template.format(0), "w") as f:
json.dump([], f, indent=4, sort_keys=True, cls=NumpyJSONEncoder)
valid_results = self.log_and_close(
start_time_str=self.local_start_time_str,
nworkers=len(worker_ids), # TODO num_workers once we forward metrics,
metrics_file=metrics_file_template,
)
if not self._collect_valid_results:
return self.local_start_time_str
else:
return self.local_start_time_str, valid_results
def start_test(
self,
checkpoint_path_dir_or_pattern: str,
infer_output_dir: bool = False,
approx_ckpt_step_interval: Optional[Union[float, int]] = None,
max_sampler_processes_per_worker: Optional[int] = None,
inference_expert: bool = False,
) -> List[Dict]:
# Tester always runs on a single machine
assert (
self.machine_id == 0
), f"Received `machine_id={self.machine_id} for test. Only one machine supported."
assert isinstance(
checkpoint_path_dir_or_pattern, str
), "Must provide a --checkpoint path or pattern to test on."
self.extra_tag += (
"__" * (len(self.extra_tag) > 0) + "enforced_test_expert"
) * inference_expert
self._initialize_start_train_or_start_test()
devices = self.worker_devices(TEST_MODE_STR)
self.init_visualizer(TEST_MODE_STR)
num_testers = len(devices)
distributed_port = 0
if num_testers > 1:
distributed_port = find_free_port()
# Tester always runs on a single machine
for tester_it in range(num_testers):
test: BaseProcess = self.mp_ctx.Process(
target=self.test_loop,
args=(tester_it,),
kwargs=dict(
config=self.config,
callback_sensors=self._get_callback_sensors,
results_queue=self.queues["results"],
checkpoints_queue=self.queues["checkpoints"],
seed=12345,  # TODO allow same order for randomly sampled tasks? Is this useful anyway?
deterministic_cudnn=self.deterministic_cudnn,
deterministic_agents=self.deterministic_agents,
mp_ctx=self.mp_ctx,
num_workers=num_testers,
device=devices[tester_it],
max_sampler_processes_per_worker=max_sampler_processes_per_worker,
distributed_port=distributed_port,
enforce_expert=inference_expert,
),
)
test.start()
self.processes[TEST_MODE_STR].append(test)
get_logger().info(
f"Started {len(self.processes[TEST_MODE_STR])} test processes"
)
checkpoint_paths = self.get_checkpoint_files(
checkpoint_path_dir_or_pattern=checkpoint_path_dir_or_pattern,
approx_ckpt_step_interval=approx_ckpt_step_interval,
)
steps = [self.step_from_checkpoint(cp) for cp in checkpoint_paths]
get_logger().info(f"Running test on {len(steps)} steps {steps}")
for checkpoint_path in checkpoint_paths:
# Make all testers work on each checkpoint
for tester_it in range(num_testers):
self.queues["checkpoints"].put(("eval", checkpoint_path))
# Signal all testers to terminate cleanly
for _ in range(num_testers):
self.queues["checkpoints"].put(("quit", None))
if self.save_dir_fmt == SaveDirFormat.NESTED:
if infer_output_dir: # NOTE: we change output_dir here
self.output_dir = self.checkpoint_log_folder_str(checkpoint_paths[0])
suffix = ""
elif self.save_dir_fmt == SaveDirFormat.FLAT:
suffix = f"__test_{self.local_start_time_str}"
else:
raise NotImplementedError
metrics_dir = self.metric_path(self.local_start_time_str)
os.makedirs(metrics_dir, exist_ok=True)
metrics_file_path = os.path.join(metrics_dir, "metrics" + suffix + ".json")
get_logger().info(f"Saving test metrics in {metrics_file_path}")
# Check output file can be written
with open(metrics_file_path, "w") as f:
json.dump([], f, indent=4, sort_keys=True, cls=NumpyJSONEncoder)
return self.log_and_close(
start_time_str=self.checkpoint_start_time_str(checkpoint_paths[0]),
nworkers=num_testers,
test_steps=steps,
metrics_file=metrics_file_path,
)
@staticmethod
def checkpoint_start_time_str(checkpoint_file_name):
parts = checkpoint_file_name.split(os.path.sep)
assert len(parts) > 1, f"{checkpoint_file_name} is not a valid checkpoint path"
start_time_str = parts[-2]
get_logger().info(f"Using checkpoint start time {start_time_str}")
return start_time_str
@staticmethod
def checkpoint_log_folder_str(checkpoint_file_name):
parts = checkpoint_file_name.split(os.path.sep)
assert len(parts) > 1, f"{checkpoint_file_name} is not a valid checkpoint path"
log_folder_str = os.path.sep.join(parts[:-2]) # remove checkpoints/*.pt
get_logger().info(f"Using log folder {log_folder_str}")
return log_folder_str
@property
def experiment_name(self):
if len(self.extra_tag) > 0:
return f"{self.config.tag()}_{self.extra_tag}"
return self.config.tag()
def checkpoint_dir(
self, start_time_str: Optional[str] = None, create_if_none: bool = True
):
path_parts = [
self.config.tag()
if self.extra_tag == ""
else os.path.join(self.config.tag(), self.extra_tag),
start_time_str or self.local_start_time_str,
]
if self.save_dir_fmt == SaveDirFormat.NESTED:
folder = os.path.join(
self.output_dir,
*path_parts,
"checkpoints",
)
elif self.save_dir_fmt == SaveDirFormat.FLAT:
folder = os.path.join(
self.output_dir,
"checkpoints",
*path_parts,
)
else:
raise NotImplementedError
if create_if_none:
os.makedirs(folder, exist_ok=True)
return folder
def log_writer_path(self, start_time_str: str) -> str:
if self.save_dir_fmt == SaveDirFormat.NESTED:
if self.mode == TEST_MODE_STR:
return os.path.join(
self.output_dir,
"test",
self.config.tag(),
self.local_start_time_str,
)
path = os.path.join(
self.output_dir,
self.config.tag()
if self.extra_tag == ""
else os.path.join(self.config.tag(), self.extra_tag),
start_time_str,
"train_tb",
)
return path
elif self.save_dir_fmt == SaveDirFormat.FLAT:
path = os.path.join(
self.output_dir,
"tb",
self.config.tag()
if self.extra_tag == ""
else os.path.join(self.config.tag(), self.extra_tag),
start_time_str,
)
if self.mode == TEST_MODE_STR:
path = os.path.join(path, "test", self.local_start_time_str)
return path
else:
raise NotImplementedError
def metric_path(self, start_time_str: str) -> str:
if self.save_dir_fmt == SaveDirFormat.NESTED:
return os.path.join(
self.output_dir,
"test",
self.config.tag(),
start_time_str,
)
elif self.save_dir_fmt == SaveDirFormat.FLAT:
return os.path.join(
self.output_dir,
"metrics",
self.config.tag()
if self.extra_tag == ""
else os.path.join(self.config.tag(), self.extra_tag),
start_time_str,
)
else:
raise NotImplementedError
def save_project_state(self):
path_parts = [
self.config.tag()
if self.extra_tag == ""
else os.path.join(self.config.tag(), self.extra_tag),
self.local_start_time_str,
]
if self.save_dir_fmt == SaveDirFormat.NESTED:
base_dir = os.path.join(
self.output_dir,
*path_parts,
"used_configs",
)
elif self.save_dir_fmt == SaveDirFormat.FLAT:
base_dir = os.path.join(
self.output_dir,
"used_configs",
*path_parts,
)
else:
raise NotImplementedError
os.makedirs(base_dir, exist_ok=True)
# Saving current git diff
try:
sha, diff_str = get_git_diff_of_project()
with open(os.path.join(base_dir, f"{sha}.patch"), "w") as f:
f.write(diff_str)
get_logger().info(f"Git diff saved to {base_dir}")
except subprocess.CalledProcessError:
get_logger().warning(
"Failed to get a git diff of the current project."
f" Is it possible that {os.getcwd()} is not under version control?"
)
# Saving configs
if self.loaded_config_src_files is not None:
for src_path in self.loaded_config_src_files:
if src_path == CONFIG_KWARGS_STR:
# We also save key-word arguments passed to the experiment
# initializer.
save_path = os.path.join(base_dir, "config_kwargs.json")
assert not os.path.exists(
save_path
), f"{save_path} should not already exist."
with open(save_path, "w") as f:
json.dump(json.loads(self.loaded_config_src_files[src_path]), f)
continue
assert os.path.isfile(src_path), f"Config file {src_path} not found"
src_path = os.path.abspath(src_path)
# To prevent overwriting files with the same name, we loop
# here until we find a prefix (if necessary) to prevent
# name collisions.
k = -1
while True:
prefix = "" if k == -1 else f"namecollision{k}__"
k += 1
dst_path = os.path.join(
base_dir,
f"{prefix}{os.path.basename(src_path)}",
)
if not os.path.exists(dst_path):
os.makedirs(os.path.dirname(dst_path), exist_ok=True)
with open(src_path, "r") as f:
file_contents = f.read()
with open(dst_path, "w") as f:
f.write(
f"### THIS FILE ORIGINALLY LOCATED AT '{src_path}'\n\n{file_contents}"
)
break
get_logger().info(f"Config files saved to {base_dir}")
for callback in self.callbacks:
callback.after_save_project_state(base_dir=base_dir)
def _update_keys(
self,
d: Union[Dict[str, Any], str],
tag_if_not_a_loss: str,
mode: str,
stage_component_uuid: Optional[str] = None,
) -> Union[Dict[str, Any], str]:
midfix = "-" if stage_component_uuid is None else f"-{stage_component_uuid}-"
def _convert(key: str):
if key.startswith("losses/"):
return f"{mode}{midfix}{key}"
else:
return f"{mode}{midfix}{tag_if_not_a_loss}/{key}"
if isinstance(d, str):
return _convert(d)
return {_convert(k): v for k, v in d.items()}
def _process_logging_packages(
self,
log_writer: Optional[SummaryWriter],
pkgs: List[LoggingPackage],
last_steps: Optional[int],
last_storage_uuid_to_total_experiences: Optional[Dict[str, int]],
last_time: Optional[float],
all_results: Optional[List[Any]] = None,
):
mode = pkgs[0].mode
assert all(
pkg.mode == mode for pkg in pkgs
), "All logging packages must be the same mode."
assert mode == self.mode or (
mode == VALID_MODE_STR and self.mode == TRAIN_MODE_STR
), (
"Logging package mode must match the logger mode except when training where the logging package may"
"be of mode 'valid'."
)
training = mode == TRAIN_MODE_STR # Are we logging training packages
current_time = time.time()
training_steps = pkgs[0].training_steps
storage_uuid_to_total_experiences = pkgs[0].storage_uuid_to_total_experiences
callback_metric_means = dict()
def update_keys_misc(
key_or_dict: Union[str, Dict[str, Any]],
stage_component_uuid: Optional[str] = None,
):
# Important to use mode and not self.mode here
return self._update_keys(
d=key_or_dict,
tag_if_not_a_loss="misc",
mode=mode,
stage_component_uuid=stage_component_uuid,
)
def update_keys_metric(
key_or_dict: Union[str, Dict[str, Any]],
stage_component_uuid: Optional[str] = None,
):
# Important to use mode and not self.mode here
return self._update_keys(
d=key_or_dict,
tag_if_not_a_loss="metrics",
mode=mode,
stage_component_uuid=stage_component_uuid,
)
if training and log_writer is not None:
log_writer.add_scalar(
tag=update_keys_misc("pipeline_stage"),
scalar_value=pkgs[0].pipeline_stage,
global_step=training_steps,
)
callback_metric_means[update_keys_misc("pipeline_stage")] = pkgs[
0
].pipeline_stage
storage_uuid_to_total_experiences_key = {}
for storage_uuid, val in storage_uuid_to_total_experiences.items():
total_experiences_key = update_keys_misc(
f"{storage_uuid}_total_experiences"
)
storage_uuid_to_total_experiences_key[storage_uuid] = total_experiences_key
if training and log_writer is not None:
log_writer.add_scalar(
tag=total_experiences_key,
scalar_value=val,
global_step=training_steps,
)
callback_metric_means[total_experiences_key] = val
metrics_and_info_tracker = ScalarMeanTracker()
scalar_name_to_total_storage_experience = {}
scalar_name_to_total_experiences_key = {}
storage_uuid_to_stage_component_uuids = defaultdict(lambda: set())
metric_dicts_list, render, checkpoint_file_name = [], {}, []
tasks_callback_data = []
for pkg in pkgs:
metrics_and_info_tracker.add_scalars(
scalars=update_keys_metric(pkg.metrics_tracker.means()),
n=update_keys_metric(pkg.metrics_tracker.counts()),
)
tasks_callback_data.extend(pkg.task_callback_data)
metric_dicts_list.extend(pkg.metric_dicts)
if pkg.viz_data is not None:
render.update(pkg.viz_data)
checkpoint_file_name.append(pkg.checkpoint_file_name)
for (
(stage_component_uuid, storage_uuid),
info_tracker,
) in pkg.info_trackers.items():
if stage_component_uuid is not None:
storage_uuid_to_stage_component_uuids[storage_uuid].add(
stage_component_uuid
)
info_means = update_keys_misc(
info_tracker.means(),
stage_component_uuid,
)
info_counts = update_keys_misc(
info_tracker.counts(),
stage_component_uuid,
)
metrics_and_info_tracker.add_scalars(
scalars=info_means,
n=info_counts,
)
total_exp_for_storage = pkg.storage_uuid_to_total_experiences[
storage_uuid
]
if stage_component_uuid is None:
assert total_exp_for_storage == training_steps
for scalar_name in info_means:
if scalar_name in scalar_name_to_total_storage_experience:
assert (
total_exp_for_storage
== scalar_name_to_total_storage_experience[scalar_name]
), (
f"For metric {scalar_name}: there is disagreement between the training steps parameter"
f" across different workers ({total_exp_for_storage} !="
f" {scalar_name_to_total_storage_experience[scalar_name]}). This suggests an error in "
f" AllenAct, please report this issue at https://github.com/allenai/allenact/issues."
)
else:
scalar_name_to_total_storage_experience[
scalar_name
] = total_exp_for_storage
scalar_name_to_total_experiences_key[
scalar_name
] = storage_uuid_to_total_experiences_key[storage_uuid]
assert all_equal(
checkpoint_file_name
), f"All {mode} logging packages must have the same checkpoint_file_name."
message = [
f"{mode.upper()}: {training_steps} rollout steps ({pkgs[0].storage_uuid_to_total_experiences})"
]
metrics_and_info_means = metrics_and_info_tracker.means()
callback_metric_means.update(metrics_and_info_means)
for k in sorted(
metrics_and_info_means.keys(),
key=lambda mean_key: (mean_key.count("/"), mean_key),
):
if log_writer is not None:
log_writer.add_scalar(
tag=k,
scalar_value=metrics_and_info_means[k],
global_step=scalar_name_to_total_storage_experience.get(
k, training_steps
),
)
short_key = (
"/".join(k.split("/")[1:])
if k.startswith(f"{mode}-") and "/" in k
else k
)
message.append(f"{short_key} {metrics_and_info_means[k]:.3g}")
if training:
# Log information about FPS and EPS (experiences per second, for non-rollout storage).
# Not needed during testing or validation.
message += [f"elapsed_time {(current_time - last_time):.3g}s"]
if last_steps > 0:
fps = (training_steps - last_steps) / (current_time - last_time)
message += [f"approx_fps {fps:.3g}"]
approx_fps_key = update_keys_misc("approx_fps")
if log_writer is not None:
log_writer.add_scalar(approx_fps_key, fps, training_steps)
callback_metric_means[approx_fps_key] = fps
for (
storage_uuid,
last_total_exp,
) in last_storage_uuid_to_total_experiences.items():
if storage_uuid in storage_uuid_to_total_experiences:
cur_total_exp = storage_uuid_to_total_experiences[storage_uuid]
eps = (cur_total_exp - last_total_exp) / (current_time - last_time)
message += [f"{storage_uuid}/approx_eps {eps:.3g}"]
for stage_component_uuid in storage_uuid_to_stage_component_uuids[
storage_uuid
]:
approx_eps_key = update_keys_misc(
f"approx_eps",
stage_component_uuid,
)
callback_metric_means[approx_eps_key] = eps
scalar_name_to_total_experiences_key[
approx_eps_key
] = storage_uuid_to_total_experiences_key[storage_uuid]
if log_writer is not None:
log_writer.add_scalar(
approx_eps_key,
eps,
cur_total_exp,
)
metrics_and_info_means_with_metrics_dicts_list = copy.deepcopy(
metrics_and_info_means
)
metrics_and_info_means_with_metrics_dicts_list.update(
{"training_steps": training_steps, "tasks": metric_dicts_list}
)
if all_results is not None:
all_results.append(metrics_and_info_means_with_metrics_dicts_list)
num_tasks = sum([pkg.num_non_empty_metrics_dicts_added for pkg in pkgs])
num_tasks_completed_key = update_keys_misc("num_tasks_completed_since_last_log")
if log_writer is not None:
log_writer.add_scalar(num_tasks_completed_key, num_tasks, training_steps)
callback_metric_means[num_tasks_completed_key] = num_tasks
message.append(f"new_tasks_completed {num_tasks}")
if not training:
message.append(f"checkpoint {checkpoint_file_name[0]}")
get_logger().info(" ".join(message))
for callback in self.callbacks:
if mode == TRAIN_MODE_STR:
callback.on_train_log(
metrics=metric_dicts_list,
metric_means=callback_metric_means,
step=training_steps,
tasks_data=tasks_callback_data,
scalar_name_to_total_experiences_key=scalar_name_to_total_experiences_key,
)
if mode == VALID_MODE_STR:
callback.on_valid_log(
metrics=metrics_and_info_means_with_metrics_dicts_list,
metric_means=callback_metric_means,
step=training_steps,
checkpoint_file_name=checkpoint_file_name[0],
tasks_data=tasks_callback_data,
scalar_name_to_total_experiences_key=scalar_name_to_total_experiences_key,
)
if mode == TEST_MODE_STR:
callback.on_test_log(
metrics=metrics_and_info_means_with_metrics_dicts_list,
metric_means=callback_metric_means,
step=training_steps,
checkpoint_file_name=checkpoint_file_name[0],
tasks_data=tasks_callback_data,
scalar_name_to_total_experiences_key=scalar_name_to_total_experiences_key,
)
if self.visualizer is not None:
self.visualizer.log(
log_writer=log_writer,
task_outputs=metric_dicts_list,
render=render,
num_steps=training_steps,
)
return training_steps, storage_uuid_to_total_experiences, current_time
def process_valid_package(
self,
log_writer: Optional[SummaryWriter],
pkg: LoggingPackage,
all_results: Optional[List[Any]] = None,
):
return self._process_logging_packages(
log_writer=log_writer,
pkgs=[pkg],
last_steps=None,
last_storage_uuid_to_total_experiences=None,
last_time=None,
all_results=all_results,
)
def process_train_packages(
self,
log_writer: Optional[SummaryWriter],
pkgs: List[LoggingPackage],
last_steps: int,
last_storage_uuid_to_total_experiences: Dict[str, int],
last_time: float,
):
return self._process_logging_packages(
log_writer=log_writer,
pkgs=pkgs,
last_steps=last_steps,
last_storage_uuid_to_total_experiences=last_storage_uuid_to_total_experiences,
last_time=last_time,
)
def process_test_packages(
self,
log_writer: Optional[SummaryWriter],
pkgs: List[LoggingPackage],
all_results: Optional[List[Any]] = None,
):
return self._process_logging_packages(
log_writer=log_writer,
pkgs=pkgs,
last_steps=None,
last_storage_uuid_to_total_experiences=None,
last_time=None,
all_results=all_results,
)
def log_and_close(
self,
start_time_str: str,
nworkers: int,
test_steps: Sequence[int] = (),
metrics_file: Optional[str] = None,
) -> List[Dict]:
ptitle(f"AllenAct-Logging-{self.local_start_time_str}")
finalized = False
log_writer: Optional[SummaryWriter] = None
if not self.disable_tensorboard:
log_writer = SummaryWriter(
log_dir=self.log_writer_path(start_time_str),
filename_suffix=f"__{self.mode}_{self.local_start_time_str}",
)
# To aggregate/buffer metrics from trainers/testers
collected: List[LoggingPackage] = []
last_train_steps = 0
last_storage_uuid_to_total_experiences = {}
last_train_time = time.time()
# test_steps = sorted(test_steps, reverse=True)
eval_results: List[Dict] = []
unfinished_workers = nworkers
try:
while True:
try:
package: Union[
LoggingPackage, Union[Tuple[str, Any], Tuple[str, Any, Any]]
] = self.queues["results"].get(timeout=1)
if isinstance(package, LoggingPackage):
pkg_mode = package.mode
if pkg_mode == TRAIN_MODE_STR:
collected.append(package)
if len(collected) >= nworkers:
collected = sorted(
collected,
key=lambda pkg: (
pkg.training_steps,
*sorted(
pkg.storage_uuid_to_total_experiences.items()
),
),
)
if (
collected[nworkers - 1].training_steps
== collected[0].training_steps
and collected[
nworkers - 1
].storage_uuid_to_total_experiences
== collected[0].storage_uuid_to_total_experiences
): # ensure all workers have provided the same training_steps and total_experiences
(
last_train_steps,
last_storage_uuid_to_total_experiences,
last_train_time,
) = self.process_train_packages(
log_writer=log_writer,
pkgs=collected[:nworkers],
last_steps=last_train_steps,
last_storage_uuid_to_total_experiences=last_storage_uuid_to_total_experiences,
last_time=last_train_time,
)
collected = collected[nworkers:]
elif len(collected) > 2 * nworkers:
get_logger().warning(
f"Unable to aggregate train packages from all {nworkers} workers"
f"after {len(collected)} packages collected"
)
elif (
pkg_mode == VALID_MODE_STR
): # they all come from a single worker
if (
package.training_steps is not None
): # no validation samplers
self.process_valid_package(
log_writer=log_writer,
pkg=package,
all_results=eval_results
if self._collect_valid_results
else None,
)
if metrics_file is not None:
with open(
metrics_file.format(package.training_steps), "w"
) as f:
json.dump(
eval_results[-1],
f,
indent=4,
sort_keys=True,
cls=NumpyJSONEncoder,
)
get_logger().info(
"Written valid results file {}".format(
metrics_file.format(
package.training_steps
),
)
)
if (
finalized and self.queues["checkpoints"].empty()
): # assume queue is actually empty after trainer finished and no checkpoints in queue
break
elif pkg_mode == TEST_MODE_STR:
collected.append(package)
if len(collected) >= nworkers:
collected = sorted(
collected, key=lambda x: x.training_steps
) # sort by num_steps
if (
collected[nworkers - 1].training_steps
== collected[0].training_steps
): # ensure nworkers have provided the same num_steps
self.process_test_packages(
log_writer=log_writer,
pkgs=collected[:nworkers],
all_results=eval_results,
)
collected = collected[nworkers:]
with open(metrics_file, "w") as f:
json.dump(
eval_results,
f,
indent=4,
sort_keys=True,
cls=NumpyJSONEncoder,
)
get_logger().info(
f"Updated {metrics_file} up to checkpoint"
f" {test_steps[len(eval_results) - 1]}"
)
else:
get_logger().error(
f"Runner received unknown package of type {pkg_mode}"
)
else:
pkg_mode = package[0]
if pkg_mode == "train_stopped":
if package[1] == 0:
finalized = True
if not self.running_validation:
get_logger().info(
"Terminating runner after trainer done (no validation)"
)
break
else:
raise Exception(
f"Train worker {package[1] - 1} abnormally terminated"
)
elif pkg_mode == "valid_stopped":
raise Exception(
f"Valid worker {package[1] - 1} abnormally terminated"
)
elif pkg_mode == "test_stopped":
if package[1] == 0:
unfinished_workers -= 1
if unfinished_workers == 0:
get_logger().info(
"Last tester finished. Terminating"
)
finalized = True
break
else:
raise RuntimeError(
f"Test worker {package[1] - 1} abnormally terminated"
)
else:
get_logger().error(
f"Runner received invalid package tuple {package}"
)
except queue.Empty as _:
if all(
p.exitcode is not None
for p in itertools.chain(*self.processes.values())
):
break
except KeyboardInterrupt:
get_logger().info("KeyboardInterrupt. Terminating runner.")
except Exception:
get_logger().error("Encountered Exception. Terminating runner.")
get_logger().exception(traceback.format_exc())
finally:
if finalized:
get_logger().info("Done")
if log_writer is not None:
log_writer.close()
self.close()
return eval_results
def get_checkpoint_files(
self,
checkpoint_path_dir_or_pattern: str,
approx_ckpt_step_interval: Optional[int] = None,
):
if os.path.isdir(checkpoint_path_dir_or_pattern):
# The fragment is a path to a directory; let's use this directory
# as the base dir to search for checkpoints
checkpoint_path_dir_or_pattern = os.path.join(
checkpoint_path_dir_or_pattern, "*.pt"
)
ckpt_paths = glob.glob(checkpoint_path_dir_or_pattern, recursive=True)
if len(ckpt_paths) == 0:
raise FileNotFoundError(
f"Could not find any checkpoints at {os.path.abspath(checkpoint_path_dir_or_pattern)}, is it possible"
f" the path has been mispecified?"
)
step_count_ckpt_pairs = [(self.step_from_checkpoint(p), p) for p in ckpt_paths]
step_count_ckpt_pairs.sort()
ckpts_paths = [p for _, p in step_count_ckpt_pairs]
step_counts = np.array([sc for sc, _ in step_count_ckpt_pairs])
if approx_ckpt_step_interval is not None:
assert (
approx_ckpt_step_interval > 0
), "`approx_ckpt_step_interval` must be >0"
inds_to_eval = set()
for i in range(
math.ceil(step_count_ckpt_pairs[-1][0] / approx_ckpt_step_interval) + 1
):
inds_to_eval.add(
int(np.argmin(np.abs(step_counts - i * approx_ckpt_step_interval)))
)
ckpts_paths = [ckpts_paths[ind] for ind in sorted(list(inds_to_eval))]
return ckpts_paths
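# Illustrative sketch of the subsampling above (the step counts and interval are
# assumptions, not from a real run): with checkpoints at steps 100k, 200k, ..., 1M
# and `approx_ckpt_step_interval=300_000`, we keep, for each multiple of the
# interval, the checkpoint whose step count is closest to it:
#
#   import numpy as np
#   step_counts = np.arange(1, 11) * 100_000
#   keep = {int(np.argmin(np.abs(step_counts - i * 300_000))) for i in range(5)}
#   # keep == {0, 2, 5, 8, 9}, i.e. steps 100k, 300k, 600k, 900k, and 1M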
@staticmethod
def step_from_checkpoint(ckpt_path: str) -> int:
parts = os.path.basename(ckpt_path).split("__")
for part in parts:
if "steps_" in part:
possible_num = part.split("_")[-1].split(".")[0]
if possible_num.isdigit():
return int(possible_num)
get_logger().warning(
f"The checkpoint {os.path.basename(ckpt_path)} does not follow the checkpoint naming convention"
f" used by AllenAct. As a fall back we must load the checkpoint into memory to find the"
f" training step count, this may increase startup time if the checkpoints are large or many"
f" must be loaded in sequence."
)
ckpt = torch.load(ckpt_path, map_location="cpu")
return ckpt["total_steps"]
def close(self, verbose=True):
if self._is_closed:
return
def logif(s: Union[str, Exception]):
if verbose:
if isinstance(s, str):
get_logger().info(s)
elif isinstance(s, Exception):
get_logger().exception(traceback.format_exc())
else:
raise NotImplementedError()
# First send termination signals
for process_type in self.processes:
for it, process in enumerate(self.processes[process_type]):
if process.is_alive():
logif(f"Terminating {process_type} {it}")
process.terminate()
# Now join processes
for process_type in self.processes:
for it, process in enumerate(self.processes[process_type]):
try:
logif(f"Joining {process_type} {it}")
process.join(1)
logif(f"Closed {process_type} {it}")
except Exception as e:
logif(f"Exception raised when closing {process_type} {it}")
logif(e)
self.processes.clear()
self._is_closed = True
def __del__(self):
self.close(verbose=True)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close(verbose=True)
| allenact-main | allenact/algorithms/onpolicy_sync/runner.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
from collections import OrderedDict
from typing import TypeVar, Generic, Tuple, Optional, Union, Dict, List, Any
import gym
import torch
from gym.spaces.dict import Dict as SpaceDict
import torch.nn as nn
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import ActorCriticOutput, Memory
DistributionType = TypeVar("DistributionType")
MemoryDimType = Tuple[str, Optional[int]]
MemoryShapeType = Tuple[MemoryDimType, ...]
MemorySpecType = Tuple[MemoryShapeType, torch.dtype]
FullMemorySpecType = Dict[str, MemorySpecType]
ObservationType = Dict[str, Union[torch.Tensor, Dict[str, Any]]]
ActionType = Union[torch.Tensor, OrderedDict, Tuple, int]
class ActorCriticModel(Generic[DistributionType], nn.Module):
"""Abstract class defining a deep (recurrent) actor critic agent.
When defining a new agent, you should subclass this class and implement the abstract methods.
# Attributes
action_space : The space of actions available to the agent. This is of type `gym.spaces.Space`.
observation_space: The observation space expected by the agent. This is of type `gym.spaces.dict.Dict`.
"""
def __init__(self, action_space: gym.Space, observation_space: SpaceDict):
"""Initializer.
# Parameters
action_space : The space of actions available to the agent.
observation_space: The observation space expected by the agent.
"""
super().__init__()
self.action_space = action_space
self.observation_space = observation_space
self.memory_spec: Optional[List[Optional[FullMemorySpecType]]] = None
@property
def recurrent_memory_specification(self) -> Optional[FullMemorySpecType]:
"""The memory specification for the `ActorCriticModel`. See docs for
`_recurrent_memory_shape`
# Returns
The memory specification from `_recurrent_memory_shape`.
"""
if self.memory_spec is None:
self.memory_spec = [self._recurrent_memory_specification()]
spec = self.memory_spec[0]
if spec is None:
return None
for key in spec:
dims, _ = spec[key]
dim_names = [d[0] for d in dims]
assert (
"step" not in dim_names
), "`step` is automatically added and cannot be reused"
assert "sampler" in dim_names, "`sampler` dim must be defined"
return self.memory_spec[0]
@abc.abstractmethod
def _recurrent_memory_specification(self) -> Optional[FullMemorySpecType]:
"""Implementation of memory specification for the `ActorCriticModel`.
# Returns
If None, it indicates the model is memory-less.
Otherwise, it is a one-level dictionary (a map) with string keys (memory type identification) and
tuple values (memory type specification). Each specification tuple contains:
1. Memory type named shape, e.g.
`(("layer", 1), ("sampler", None), ("agent", 2), ("hidden", 32))`
for a two-agent GRU memory, where
the `sampler` dimension placeholder *always* precedes the optional `agent` dimension;
the optional `agent` dimension has the number of agents in the model and is *always* the one after
`sampler` if present;
and `layer` and `hidden` correspond to the standard RNN hidden state parametrization.
2. The data type, e.g. `torch.float32`.
The `sampler` dimension placeholder is mandatory for all memories.
For a single-agent ActorCritic model it is often more convenient to skip the agent dimension, e.g.
`(("layer", 1), ("sampler", None), ("hidden", 32))` for a GRU memory.
"""
raise NotImplementedError()
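# A minimal sketch of a concrete implementation matching the specification
# documented above (the key name "rnn" and the sizes are assumptions chosen
# for illustration):
#
#   def _recurrent_memory_specification(self) -> Optional[FullMemorySpecType]:
#       # Single-agent GRU-style memory: 1 layer, per-sampler, 512-dim hidden.
#       return {
#           "rnn": (
#               (("layer", 1), ("sampler", None), ("hidden", 512)),
#               torch.float32,
#           )
#       }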
@abc.abstractmethod
def forward( # type:ignore
self,
observations: ObservationType,
memory: Memory,
prev_actions: ActionType,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
"""Transforms input observations (& previous hidden state) into action
probabilities and the state value.
# Parameters
observations : Multi-level map from key strings to tensors of shape [steps, samplers, (agents,) ...] with the
current observations.
memory : `Memory` object with recurrent memory. The shape of each tensor is determined by the corresponding
entry in `_recurrent_memory_specification`.
prev_actions : ActionType with tensors of shape [steps, samplers, ...] with the previous actions.
masks : tensor of shape [steps, samplers, agents, 1] with zeros indicating steps where a new episode/task
starts.
# Returns
A tuple whose first element is an object of class ActorCriticOutput which stores
the agents' probability distribution over possible actions (shape [steps, samplers, ...]),
the agents' value for the state (shape [steps, samplers, ..., 1]), and any extra information needed for
loss computations. The second element is an optional `Memory`, which is only used in models with recurrent
memory.
"""
raise NotImplementedError()
class LinearActorCriticHead(nn.Module):
def __init__(self, input_size: int, num_actions: int):
super().__init__()
self.input_size = input_size
self.num_actions = num_actions
self.actor_and_critic = nn.Linear(input_size, 1 + num_actions)
nn.init.orthogonal_(self.actor_and_critic.weight)
nn.init.constant_(self.actor_and_critic.bias, 0)
def forward(self, x) -> Tuple[CategoricalDistr, torch.Tensor]:
out = self.actor_and_critic(x)
logits = out[..., :-1]
values = out[..., -1:]
# noinspection PyArgumentList
return (
# logits are [step, sampler, ...]
CategoricalDistr(logits=logits),
# values are [step, sampler, flattened]
values.view(*values.shape[:2], -1),
)
class LinearCriticHead(nn.Module):
def __init__(self, input_size: int):
super().__init__()
self.fc = nn.Linear(input_size, 1)
nn.init.orthogonal_(self.fc.weight)
nn.init.constant_(self.fc.bias, 0)
def forward(self, x):
return self.fc(x).view(*x.shape[:2], -1) # [steps, samplers, flattened]
class LinearActorHead(nn.Module):
def __init__(self, num_inputs: int, num_outputs: int):
super().__init__()
self.linear = nn.Linear(num_inputs, num_outputs)
nn.init.orthogonal_(self.linear.weight, gain=0.01)
nn.init.constant_(self.linear.bias, 0)
def forward(self, x: torch.FloatTensor): # type: ignore
x = self.linear(x) # type:ignore
# noinspection PyArgumentList
return CategoricalDistr(logits=x) # logits are [step, sampler, ...]
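# Usage sketch for the heads above (the sizes and tensors are assumptions): given
# features of shape [steps, samplers, hidden], the heads produce a policy
# distribution and value estimates with matching leading dimensions.
#
#   hidden_size, num_actions = 128, 5
#   actor = LinearActorHead(hidden_size, num_actions)
#   critic = LinearCriticHead(hidden_size)
#   x = torch.randn(4, 2, hidden_size)  # [steps=4, samplers=2, hidden]
#   distr = actor(x)                    # CategoricalDistr with logits [4, 2, 5]
#   values = critic(x)                  # tensor of shape [4, 2, 1]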
| allenact-main | allenact/algorithms/onpolicy_sync/policy.py |
allenact-main | allenact/algorithms/onpolicy_sync/__init__.py |
|
"""Defines the reinforcement learning `OnPolicyRLEngine`."""
import datetime
import logging
import numbers
import os
import random
import time
import traceback
from functools import partial
from multiprocessing.context import BaseContext
from typing import Any, Dict, List, Optional, Sequence, Union, cast
import filelock
import torch
import torch.distributed as dist # type: ignore
import torch.distributions # type: ignore
import torch.multiprocessing as mp # type: ignore
import torch.nn as nn
import torch.optim as optim
# noinspection PyProtectedMember
from torch._C._distributed_c10d import ReduceOp
from allenact.algorithms.onpolicy_sync.misc import TrackingInfo, TrackingInfoType
from allenact.base_abstractions.sensor import Sensor
from allenact.utils.misc_utils import str2bool
from allenact.utils.model_utils import md5_hash_of_state_dict
try:
# noinspection PyProtectedMember,PyUnresolvedReferences
from torch.optim.lr_scheduler import _LRScheduler
except (ImportError, ModuleNotFoundError):
raise ImportError("`_LRScheduler` was not found in `torch.optim.lr_scheduler`")
from allenact.algorithms.onpolicy_sync.losses.abstract_loss import (
AbstractActorCriticLoss,
)
from allenact.algorithms.onpolicy_sync.policy import ActorCriticModel
from allenact.algorithms.onpolicy_sync.storage import (
ExperienceStorage,
MiniBatchStorageMixin,
RolloutStorage,
StreamingStorageMixin,
)
from allenact.algorithms.onpolicy_sync.vector_sampled_tasks import (
COMPLETE_TASK_CALLBACK_KEY,
COMPLETE_TASK_METRICS_KEY,
SingleProcessVectorSampledTasks,
VectorSampledTasks,
)
from allenact.base_abstractions.distributions import TeacherForcingDistr
from allenact.base_abstractions.experiment_config import ExperimentConfig, MachineParams
from allenact.base_abstractions.misc import (
ActorCriticOutput,
GenericAbstractLoss,
Memory,
RLStepResult,
)
from allenact.utils import spaces_utils as su
from allenact.utils.experiment_utils import (
LoggingPackage,
PipelineStage,
ScalarMeanTracker,
StageComponent,
TrainingPipeline,
set_deterministic_cudnn,
set_seed,
)
from allenact.utils.system import get_logger
from allenact.utils.tensor_utils import batch_observations, detach_recursively
from allenact.utils.viz_utils import VizSuite
try:
# When debugging we don't want to timeout in the VectorSampledTasks
# noinspection PyPackageRequirements
import pydevd
DEBUGGING = str2bool(os.getenv("ALLENACT_DEBUG", "true"))
except ImportError:
DEBUGGING = str2bool(os.getenv("ALLENACT_DEBUG", "false"))
DEBUG_VST_TIMEOUT: Optional[int] = (lambda x: int(x) if x is not None else x)(
os.getenv("ALLENACT_DEBUG_VST_TIMEOUT", None)
)
TRAIN_MODE_STR = "train"
VALID_MODE_STR = "valid"
TEST_MODE_STR = "test"
class OnPolicyRLEngine(object):
"""The reinforcement learning primary controller.
This `OnPolicyRLEngine` class handles all training, validation, and
testing as well as logging and checkpointing. You are not expected
to instantiate this class yourself; instead you should define an
experiment which will then be used to instantiate an
`OnPolicyRLEngine` and perform any desired tasks.
"""
def __init__(
self,
experiment_name: str,
config: ExperimentConfig,
results_queue: mp.Queue, # to output aggregated results
checkpoints_queue: Optional[
mp.Queue
], # to write/read (trainer/evaluator) ready checkpoints
checkpoints_dir: str,
mode: str = "train",
callback_sensors: Optional[Sequence[Sensor]] = None,
seed: Optional[int] = None,
deterministic_cudnn: bool = False,
mp_ctx: Optional[BaseContext] = None,
worker_id: int = 0,
num_workers: int = 1,
device: Union[str, torch.device, int] = "cpu",
distributed_ip: str = "127.0.0.1",
distributed_port: int = 0,
deterministic_agents: bool = False,
max_sampler_processes_per_worker: Optional[int] = None,
initial_model_state_dict: Optional[Union[Dict[str, Any], int]] = None,
try_restart_after_task_error: bool = False,
**kwargs,
):
"""Initializer.
# Parameters
config : The ExperimentConfig defining the experiment to run.
output_dir : Root directory at which checkpoints and logs should be saved.
seed : Seed used to encourage deterministic behavior (it is difficult to ensure
completely deterministic behavior due to CUDA issues and nondeterminism
in environments).
mode : "train", "valid", or "test".
deterministic_cudnn : Whether to use deterministic cudnn. If `True` this may lower
training performance; this is necessary (but not sufficient) if you desire
deterministic behavior.
extra_tag : An additional label to add to the experiment when saving tensorboard logs.
"""
self.config = config
self.results_queue = results_queue
self.checkpoints_queue = checkpoints_queue
self.mp_ctx = mp_ctx
self.checkpoints_dir = checkpoints_dir
self.worker_id = worker_id
self.num_workers = num_workers
self.device = torch.device("cpu") if device == -1 else torch.device(device) # type: ignore
if self.device != torch.device("cpu"):
torch.cuda.set_device(device)
self.distributed_ip = distributed_ip
self.distributed_port = distributed_port
self.try_restart_after_task_error = try_restart_after_task_error
self.mode = mode.lower().strip()
assert self.mode in [
TRAIN_MODE_STR,
VALID_MODE_STR,
TEST_MODE_STR,
], f"Only {TRAIN_MODE_STR}, {VALID_MODE_STR}, {TEST_MODE_STR}, modes supported"
self.callback_sensors = callback_sensors
self.deterministic_cudnn = deterministic_cudnn
if self.deterministic_cudnn:
set_deterministic_cudnn()
self.seed = seed
set_seed(self.seed)
self.experiment_name = experiment_name
assert (
max_sampler_processes_per_worker is None
or max_sampler_processes_per_worker >= 1
), "`max_sampler_processes_per_worker` must be either `None` or a positive integer."
self.max_sampler_processes_per_worker = max_sampler_processes_per_worker
machine_params = config.machine_params(self.mode)
self.machine_params: MachineParams
if isinstance(machine_params, MachineParams):
self.machine_params = machine_params
else:
self.machine_params = MachineParams(**machine_params)
self.num_samplers_per_worker = self.machine_params.nprocesses
self.num_samplers = self.num_samplers_per_worker[self.worker_id]
self._vector_tasks: Optional[
Union[VectorSampledTasks, SingleProcessVectorSampledTasks]
] = None
self.sensor_preprocessor_graph = None
self.actor_critic: Optional[ActorCriticModel] = None
create_model_kwargs = {}
if self.machine_params.sensor_preprocessor_graph is not None:
self.sensor_preprocessor_graph = self.machine_params.sensor_preprocessor_graph.to(
self.device
)
create_model_kwargs[
"sensor_preprocessor_graph"
] = self.sensor_preprocessor_graph
set_seed(self.seed)
self.actor_critic = cast(
ActorCriticModel, self.config.create_model(**create_model_kwargs),
).to(self.device)
if initial_model_state_dict is not None:
if isinstance(initial_model_state_dict, int):
assert (
md5_hash_of_state_dict(self.actor_critic.state_dict())
== initial_model_state_dict
), (
f"Could not reproduce the correct model state dict on worker {self.worker_id} despite seeding."
f" Please ensure that your model's initialization is reproducable when `set_seed(...)`"
f"] has been called with a fixed seed before initialization."
)
else:
self.actor_critic.load_state_dict(
state_dict=cast(
"OrderedDict[str, Tensor]", initial_model_state_dict
)
)
else:
assert mode != TRAIN_MODE_STR or self.num_workers == 1, (
"When training with multiple workers you must pass a,"
" non-`None` value for the `initial_model_state_dict` argument."
)
if get_logger().level == logging.DEBUG:
model_hash = md5_hash_of_state_dict(self.actor_critic.state_dict())
get_logger().debug(
f"[{self.mode} worker {self.worker_id}] model weights hash: {model_hash}"
)
self.is_distributed = False
self.store: Optional[torch.distributed.TCPStore] = None # type:ignore
if self.num_workers > 1:
self.store = torch.distributed.TCPStore( # type:ignore
host_name=self.distributed_ip,
port=self.distributed_port,
world_size=self.num_workers,
is_master=self.worker_id == 0,
timeout=datetime.timedelta(
seconds=3 * (DEBUG_VST_TIMEOUT if DEBUGGING else 1 * 60) + 300
),
)
cpu_device = self.device == torch.device("cpu") # type:ignore
# "gloo" required during testing to ensure that `barrier()` doesn't time out.
backend = "gloo" if cpu_device or self.mode == TEST_MODE_STR else "nccl"
get_logger().debug(
f"Worker {self.worker_id}: initializing distributed {backend} backend with device {self.device}."
)
dist.init_process_group( # type:ignore
backend=backend,
store=self.store,
rank=self.worker_id,
world_size=self.num_workers,
# During testing, we sometimes found that default timeout was too short
# resulting in the run terminating surprisingly, we increase it here.
timeout=datetime.timedelta(minutes=3000)
if (self.mode == TEST_MODE_STR or DEBUGGING)
else dist.default_pg_timeout,
)
self.is_distributed = True
self.deterministic_agents = deterministic_agents
self._is_closing: bool = (
False # Useful for letting the RL runner know if this is closing
)
self._is_closed: bool = False
# Keeping track of metrics and losses during training/inference
self.single_process_metrics: List = []
self.single_process_task_callback_data: List = []
self.tracking_info_list: List[TrackingInfo] = []
# Variables that will only be instantiated in the trainer
self.optimizer: Optional[optim.optimizer.Optimizer] = None
# noinspection PyProtectedMember
self.lr_scheduler: Optional[_LRScheduler] = None
self.insufficient_data_for_update: Optional[
torch.distributed.PrefixStore
] = None
# Training pipeline will be instantiated during training and inference.
# During inference however, it will be instantiated anew on each run of `run_eval`
# and will be set to `None` after the eval run is complete.
self.training_pipeline: Optional[TrainingPipeline] = None
@property
def vector_tasks(
self,
) -> Union[VectorSampledTasks, SingleProcessVectorSampledTasks]:
if self._vector_tasks is None and self.num_samplers > 0:
if self.is_distributed:
total_processes = sum(
self.num_samplers_per_worker
) # TODO this will break the fixed seed for multi-device test
else:
total_processes = self.num_samplers
seeds = self.worker_seeds(
total_processes,
initial_seed=self.seed, # do not update the RNG state (creation might happen after seed resetting)
)
# TODO: The `self.max_sampler_processes_per_worker == 1` case below would be
# great to have but it does not play nicely with us wanting to kill things
# using SIGTERM/SIGINT signals. Would be nice to figure out a solution to
# this at some point.
# if self.max_sampler_processes_per_worker == 1:
# # No need to instantiate a new task sampler processes if we're
# # restricted to one sampler process for this worker.
# self._vector_tasks = SingleProcessVectorSampledTasks(
# make_sampler_fn=self.config.make_sampler_fn,
# sampler_fn_args_list=self.get_sampler_fn_args(seeds),
# )
# else:
self._vector_tasks = VectorSampledTasks(
make_sampler_fn=self.config.make_sampler_fn,
sampler_fn_args=self.get_sampler_fn_args(seeds),
callback_sensors=self.callback_sensors,
multiprocessing_start_method="forkserver"
if self.mp_ctx is None
else None,
mp_ctx=self.mp_ctx,
max_processes=self.max_sampler_processes_per_worker,
read_timeout=DEBUG_VST_TIMEOUT if DEBUGGING else 1 * 60,
)
return self._vector_tasks
@staticmethod
def worker_seeds(nprocesses: int, initial_seed: Optional[int]) -> List[int]:
"""Create a collection of seeds for workers without modifying the RNG
state."""
rstate = None # type:ignore
if initial_seed is not None:
rstate = random.getstate()
random.seed(initial_seed)
seeds = [random.randint(0, (2 ** 31) - 1) for _ in range(nprocesses)]
if initial_seed is not None:
random.setstate(rstate)
return seeds
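# Example of the behavior above: the same `initial_seed` always yields the same
# per-worker seeds, and the global `random` module's state is left untouched.
#
#   seeds_a = OnPolicyRLEngine.worker_seeds(3, initial_seed=42)
#   seeds_b = OnPolicyRLEngine.worker_seeds(3, initial_seed=42)
#   assert seeds_a == seeds_b  # deterministic given the initial seed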
def get_sampler_fn_args(self, seeds: Optional[List[int]] = None):
sampler_devices = self.machine_params.sampler_devices
if self.mode == TRAIN_MODE_STR:
fn = self.config.train_task_sampler_args
elif self.mode == VALID_MODE_STR:
fn = self.config.valid_task_sampler_args
elif self.mode == TEST_MODE_STR:
fn = self.config.test_task_sampler_args
else:
raise NotImplementedError(
f"self.mode must be one of {TRAIN_MODE_STR}, {VALID_MODE_STR}, or {TEST_MODE_STR}."
)
if self.is_distributed:
total_processes = sum(self.num_samplers_per_worker)
process_offset = sum(self.num_samplers_per_worker[: self.worker_id])
else:
total_processes = self.num_samplers
process_offset = 0
sampler_devices_as_ints: Optional[List[int]] = None
if (
self.is_distributed or self.mode == TEST_MODE_STR
) and self.device.index is not None:
sampler_devices_as_ints = [self.device.index]
elif sampler_devices is not None:
sampler_devices_as_ints = [
-1 if sd.index is None else sd.index for sd in sampler_devices
]
return [
fn(
process_ind=process_offset + it,
total_processes=total_processes,
devices=sampler_devices_as_ints,
seeds=seeds,
)
for it in range(self.num_samplers)
]
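# Worked example of the offset arithmetic above (the worker/sampler counts are
# assumptions): in the distributed setting with
#
#   num_samplers_per_worker = [4, 4, 2]
#   worker_id = 1
#   total_processes = sum(num_samplers_per_worker)             # 10
#   process_offset = sum(num_samplers_per_worker[:worker_id])  # 4
#
# worker 1's samplers receive `process_ind` values 4, 5, 6, and 7 out of 10.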
def checkpoint_load(
self, ckpt: Union[str, Dict[str, Any]], restart_pipeline: bool
) -> Dict[str, Union[Dict[str, Any], torch.Tensor, float, int, str, List]]:
if isinstance(ckpt, str):
get_logger().info(
f"[{self.mode} worker {self.worker_id}] Loading checkpoint from {ckpt}"
)
# Map location CPU is almost always better than mapping to a CUDA device.
ckpt = torch.load(os.path.abspath(ckpt), map_location="cpu")
ckpt = cast(
Dict[str, Union[Dict[str, Any], torch.Tensor, float, int, str, List]], ckpt,
)
self.actor_critic.load_state_dict(ckpt["model_state_dict"]) # type:ignore
if "training_pipeline_state_dict" in ckpt and not restart_pipeline:
self.training_pipeline.load_state_dict(
cast(Dict[str, Any], ckpt["training_pipeline_state_dict"])
)
return ckpt
# aggregates task metrics currently in queue
def aggregate_task_metrics(
self, logging_pkg: LoggingPackage, num_tasks: int = -1,
) -> LoggingPackage:
if num_tasks > 0:
if len(self.single_process_metrics) != num_tasks:
error_msg = (
"shorter"
if len(self.single_process_metrics) < num_tasks
else "longer"
)
get_logger().error(
f"Metrics out is {error_msg} than expected number of tasks."
" This should only happen if a positive number of `num_tasks` were"
" set during testing but the queue did not contain this number of entries."
" Please file an issue at https://github.com/allenai/allenact/issues."
)
num_empty_tasks_dequeued = 0
for metrics_dict in self.single_process_metrics:
num_empty_tasks_dequeued += not logging_pkg.add_metrics_dict(
single_task_metrics_dict=metrics_dict
)
self.single_process_metrics = []
if num_empty_tasks_dequeued != 0:
get_logger().warning(
f"Discarded {num_empty_tasks_dequeued} empty task metrics"
)
return logging_pkg
def _preprocess_observations(self, batched_observations):
if self.sensor_preprocessor_graph is None:
return batched_observations
return self.sensor_preprocessor_graph.get_observations(batched_observations)
def remove_paused(self, observations):
paused, keep, running = [], [], []
for it, obs in enumerate(observations):
if obs is None:
paused.append(it)
else:
keep.append(it)
running.append(obs)
for p in reversed(paused):
self.vector_tasks.pause_at(p)
# Group samplers along new dim:
batch = batch_observations(running, device=self.device)
return len(paused), keep, batch
def initialize_storage_and_viz(
self,
storage_to_initialize: Optional[Sequence[ExperienceStorage]],
visualizer: Optional[VizSuite] = None,
):
keep: Optional[List] = None
if visualizer is not None or (
storage_to_initialize is not None
and any(isinstance(s, RolloutStorage) for s in storage_to_initialize)
):
# Get initial observations; these are needed to visualize and/or to initialize a rollout storage.
observations = self.vector_tasks.get_observations()
npaused, keep, batch = self.remove_paused(observations)
observations = (
self._preprocess_observations(batch) if len(keep) > 0 else batch
)
assert npaused == 0, f"{npaused} samplers are paused during initialization."
num_samplers = len(keep)
else:
observations = {}
num_samplers = 0
npaused = 0
recurrent_memory_specification = (
self.actor_critic.recurrent_memory_specification
)
if storage_to_initialize is not None:
for s in storage_to_initialize:
s.to(self.device)
s.set_partition(index=self.worker_id, num_parts=self.num_workers)
s.initialize(
observations=observations,
num_samplers=num_samplers,
recurrent_memory_specification=recurrent_memory_specification,
action_space=self.actor_critic.action_space,
)
if visualizer is not None and num_samplers > 0:
visualizer.collect(vector_task=self.vector_tasks, alive=keep)
return npaused
@property
def num_active_samplers(self):
if self.vector_tasks is None:
return 0
return self.vector_tasks.num_unpaused_tasks
def act(
self,
rollout_storage: RolloutStorage,
dist_wrapper_class: Optional[type] = None,
):
with torch.no_grad():
agent_input = rollout_storage.agent_input_for_next_step()
actor_critic_output, memory = self.actor_critic(**agent_input)
distr = actor_critic_output.distributions
if dist_wrapper_class is not None:
distr = dist_wrapper_class(distr=distr, obs=agent_input["observations"])
actions = distr.sample() if not self.deterministic_agents else distr.mode()
return actions, actor_critic_output, memory, agent_input["observations"]
def aggregate_and_send_logging_package(
self,
tracking_info_list: List[TrackingInfo],
logging_pkg: Optional[LoggingPackage] = None,
send_logging_package: bool = True,
):
if logging_pkg is None:
logging_pkg = LoggingPackage(
mode=self.mode,
training_steps=self.training_pipeline.total_steps,
pipeline_stage=self.training_pipeline.current_stage_index,
storage_uuid_to_total_experiences=self.training_pipeline.storage_uuid_to_total_experiences,
)
self.aggregate_task_metrics(logging_pkg=logging_pkg)
for callback_dict in self.single_process_task_callback_data:
logging_pkg.task_callback_data.append(callback_dict)
self.single_process_task_callback_data = []
for tracking_info in tracking_info_list:
if tracking_info.n < 0:
get_logger().warning(
f"Obtained a train_info_dict with {tracking_info.n} elements."
f" Full info: ({tracking_info.type}, {tracking_info.info}, {tracking_info.n})."
)
else:
tracking_info_dict = tracking_info.info
if tracking_info.type == TrackingInfoType.LOSS:
tracking_info_dict = {
f"losses/{k}": v for k, v in tracking_info_dict.items()
}
logging_pkg.add_info_dict(
info_dict=tracking_info_dict,
n=tracking_info.n,
stage_component_uuid=tracking_info.stage_component_uuid,
storage_uuid=tracking_info.storage_uuid,
)
if send_logging_package:
self.results_queue.put(logging_pkg)
return logging_pkg
@staticmethod
def _active_memory(memory, keep):
return memory.sampler_select(keep) if memory is not None else memory
def probe(self, dones: List[bool], npaused, period=100000):
"""Debugging util. When called from
self.collect_step_across_all_task_samplers(...), calls render for the
0-th task sampler of the 0-th distributed worker, for the first episode
beginning at least `period` steps after the beginning of the previously
rendered one.
For valid, train, it currently renders all episodes for the 0-th task sampler of the
0-th distributed worker. If this is not wanted, it must be hard-coded for now below.
# Parameters
dones : dones list from self.collect_step_across_all_task_samplers(...)
npaused : number of newly paused tasks returned by self.remove_paused(...)
period : minimal spacing in sampled steps between the beginning of episodes to be shown.
"""
sampler_id = 0
done = dones[sampler_id]
if self.mode != TRAIN_MODE_STR:
setattr(
self, "_probe_npaused", getattr(self, "_probe_npaused", 0) + npaused
)
if self._probe_npaused == self.num_samplers: # type:ignore
del self._probe_npaused # type:ignore
return
period = 0
if self.worker_id == 0:
if done:
if period > 0 and (
getattr(self, "_probe_steps", None) is None
or (
self._probe_steps < 0 # type:ignore
and (
self.training_pipeline.total_steps
+ self._probe_steps # type:ignore
)
>= period
)
):
self._probe_steps = self.training_pipeline.total_steps
if period == 0 or (
getattr(self, "_probe_steps", None) is not None
and self._probe_steps >= 0
and ((self.training_pipeline.total_steps - self._probe_steps) < period)
):
if (
period == 0
or not done
or self._probe_steps == self.training_pipeline.total_steps
):
self.vector_tasks.call_at(sampler_id, "render", ["human"])
else:
# noinspection PyAttributeOutsideInit
self._probe_steps = -self._probe_steps
def collect_step_across_all_task_samplers(
self,
rollout_storage_uuid: str,
uuid_to_storage: Dict[str, ExperienceStorage],
visualizer=None,
dist_wrapper_class=None,
) -> int:
rollout_storage = cast(RolloutStorage, uuid_to_storage[rollout_storage_uuid])
actions, actor_critic_output, memory, _ = self.act(
rollout_storage=rollout_storage, dist_wrapper_class=dist_wrapper_class,
)
# Flatten actions
flat_actions = su.flatten(self.actor_critic.action_space, actions)
assert len(flat_actions.shape) == 3, (
"Distribution samples must include step and task sampler dimensions [step, sampler, ...]. The simplest way"
"to accomplish this is to pass param tensors (like `logits` in a `CategoricalDistr`) with these dimensions"
"to the Distribution."
)
# Convert flattened actions into list of actions and send them
outputs: List[RLStepResult] = self.vector_tasks.step(
su.action_list(self.actor_critic.action_space, flat_actions)
)
# Save after task completion metrics
for step_result in outputs:
if step_result.info is not None:
if COMPLETE_TASK_METRICS_KEY in step_result.info:
self.single_process_metrics.append(
step_result.info[COMPLETE_TASK_METRICS_KEY]
)
del step_result.info[COMPLETE_TASK_METRICS_KEY]
if COMPLETE_TASK_CALLBACK_KEY in step_result.info:
self.single_process_task_callback_data.append(
step_result.info[COMPLETE_TASK_CALLBACK_KEY]
)
del step_result.info[COMPLETE_TASK_CALLBACK_KEY]
rewards: Union[List, torch.Tensor]
observations, rewards, dones, infos = [list(x) for x in zip(*outputs)]
rewards = torch.tensor(
rewards, dtype=torch.float, device=self.device, # type:ignore
)
# We want rewards to have dimensions [sampler, reward]
if len(rewards.shape) == 1:
# Rewards are of shape [sampler,]
rewards = rewards.unsqueeze(-1)
elif len(rewards.shape) > 1:
raise NotImplementedError()
# If done then clean the history of observations.
masks = (
1.0
- torch.tensor(
dones, dtype=torch.float32, device=self.device, # type:ignore
)
).view(
-1, 1
) # [sampler, 1]
npaused, keep, batch = self.remove_paused(observations)
# TODO self.probe(...) can be useful for debugging (we might want to control it from main?)
# self.probe(dones, npaused)
if npaused > 0:
if self.mode == TRAIN_MODE_STR:
raise NotImplementedError(
"When trying to get a new task from a task sampler (using the `.next_task()` method)"
" the task sampler returned `None`. This is not currently supported during training"
" (and almost certainly a bug in the implementation of the task sampler or in the "
" initialization of the task sampler for training)."
)
for s in uuid_to_storage.values():
if isinstance(s, RolloutStorage):
s.sampler_select(keep)
to_add_to_storage = dict(
observations=self._preprocess_observations(batch)
if len(keep) > 0
else batch,
memory=self._active_memory(memory, keep),
actions=flat_actions[0, keep],
action_log_probs=actor_critic_output.distributions.log_prob(actions)[
0, keep
],
value_preds=actor_critic_output.values[0, keep],
rewards=rewards[keep],
masks=masks[keep],
)
for storage in uuid_to_storage.values():
storage.add(**to_add_to_storage)
# TODO we always miss tensors for the last action in the last episode of each worker
if visualizer is not None:
if len(keep) > 0:
visualizer.collect(
rollout=rollout_storage,
vector_task=self.vector_tasks,
alive=keep,
actor_critic=actor_critic_output,
)
else:
visualizer.collect(actor_critic=actor_critic_output)
return npaused
def distributed_weighted_sum(
self,
to_share: Union[torch.Tensor, float, int],
weight: Union[torch.Tensor, float, int],
):
"""Weighted sum of scalar across distributed workers."""
if self.is_distributed:
aggregate = torch.tensor(to_share * weight).to(self.device)
dist.all_reduce(aggregate)
return aggregate.item()
else:
if abs(1 - weight) > 1e-5:
get_logger().warning(
f"Scaling non-distributed value with weight {weight}"
)
return torch.tensor(to_share * weight).item()
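# Sketch of using the weighted sum above to form a global average (names such as
# `local_mean`, `local_bsize`, and `global_bsize` are assumptions): each worker
# scales its local mean by its share of the global batch, and the all-reduce
# sums these shares into the global mean.
#
#   global_mean = engine.distributed_weighted_sum(
#       to_share=local_mean, weight=local_bsize / global_bsize
#   )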
def distributed_reduce(
self, to_share: Union[torch.Tensor, float, int], op: ReduceOp
):
"""Weighted sum of scalar across distributed workers."""
if self.is_distributed:
aggregate = torch.tensor(to_share).to(self.device)
dist.all_reduce(aggregate, op=op)
return aggregate.item()
else:
return torch.tensor(to_share).item()
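# Sketch of a max-reduction with the helper above (the timing variable is an
# assumption): e.g., finding the slowest worker's elapsed time per rollout.
#
#   slowest = engine.distributed_reduce(local_elapsed_seconds, op=ReduceOp.MAX)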
def backprop_step(
self,
total_loss: torch.Tensor,
max_grad_norm: float,
local_to_global_batch_size_ratio: float = 1.0,
):
raise NotImplementedError
def save_error_data(self, batch: Dict[str, Any]):
raise NotImplementedError
@property
def step_count(self) -> int:
if (
self.training_pipeline.current_stage is None
): # Might occur during testing when all stages are complete
return 0
return self.training_pipeline.current_stage.steps_taken_in_stage
def compute_losses_track_them_and_backprop(
self,
stage: PipelineStage,
stage_component: StageComponent,
storage: ExperienceStorage,
skip_backprop: bool = False,
):
training = self.mode == TRAIN_MODE_STR
assert training or skip_backprop
if training and self.is_distributed:
self.insufficient_data_for_update.set(
"insufficient_data_for_update", str(0)
)
dist.barrier(
device_ids=None
if self.device == torch.device("cpu")
else [self.device.index]
)
training_settings = stage_component.training_settings
loss_names = stage_component.loss_names
losses = [self.training_pipeline.get_loss(ln) for ln in loss_names]
loss_weights = [stage.uuid_to_loss_weight[ln] for ln in loss_names]
loss_update_repeats_list = training_settings.update_repeats
if isinstance(loss_update_repeats_list, numbers.Integral):
loss_update_repeats_list = [loss_update_repeats_list] * len(loss_names)
if skip_backprop and isinstance(storage, MiniBatchStorageMixin):
if loss_update_repeats_list != [1] * len(loss_names):
loss_update_repeats_list = [1] * len(loss_names)
get_logger().warning(
"Does not make sense to do multiple updates when"
" skip_backprop is `True` and you are using a storage of type"
" `MiniBatchStorageMixin`. This is likely a problem caused by"
" using a custom valid/test stage component that is inheriting its"
" TrainingSettings from the TrainingPipeline's TrainingSettings. We will override"
" the requested number of updates repeats (which was"
f" {dict(zip(loss_names, loss_update_repeats_list))}) to be 1 for all losses."
)
enough_data_for_update = True
for current_update_repeat_index in range(
max(loss_update_repeats_list, default=0)
):
if isinstance(storage, MiniBatchStorageMixin):
batch_iterator = storage.batched_experience_generator(
num_mini_batch=training_settings.num_mini_batch
)
elif isinstance(storage, StreamingStorageMixin):
assert (
training_settings.num_mini_batch is None
or training_settings.num_mini_batch == 1
)
def single_batch_generator(streaming_storage: StreamingStorageMixin):
try:
yield cast(
StreamingStorageMixin, streaming_storage
).next_batch()
except EOFError:
if not training:
raise
if streaming_storage.empty():
yield None
else:
cast(
StreamingStorageMixin, streaming_storage
).reset_stream()
stage.stage_component_uuid_to_stream_memory[
stage_component.uuid
].clear()
yield cast(
StreamingStorageMixin, streaming_storage
).next_batch()
batch_iterator = single_batch_generator(streaming_storage=storage)
else:
raise NotImplementedError(
f"Storage {storage} must be a subclass of `MiniBatchStorageMixin` or `StreamingStorageMixin`."
)
for batch in batch_iterator:
if batch is None:
# This should only happen in a `StreamingStorageMixin` when it cannot
# generate an initial batch or when we are in testing/validation and
# we've reached the end of the dataset over which to test/validate.
if training:
assert isinstance(storage, StreamingStorageMixin)
get_logger().warning(
f"Worker {self.worker_id}: could not run update in {storage}, potentially because"
f" not enough data has been accumulated to be able to fill an initial batch."
)
else:
pass
enough_data_for_update = False
if training and self.is_distributed:
self.insufficient_data_for_update.add(
"insufficient_data_for_update",
1 * (not enough_data_for_update),
)
dist.barrier(
device_ids=None
if self.device == torch.device("cpu")
else [self.device.index]
)
if (
int(
self.insufficient_data_for_update.get(
"insufficient_data_for_update"
)
)
!= 0
):
enough_data_for_update = False
break
info: Dict[str, float] = {}
bsize: Optional[int] = None
total_loss: Optional[torch.Tensor] = None
actor_critic_output_for_batch: Optional[ActorCriticOutput] = None
batch_memory = Memory()
for loss, loss_name, loss_weight, max_update_repeats_for_loss in zip(
losses, loss_names, loss_weights, loss_update_repeats_list
):
if current_update_repeat_index >= max_update_repeats_for_loss:
continue
if isinstance(loss, AbstractActorCriticLoss):
bsize = batch["bsize"]
if actor_critic_output_for_batch is None:
try:
actor_critic_output_for_batch, _ = self.actor_critic(
observations=batch["observations"],
memory=batch["memory"],
prev_actions=batch["prev_actions"],
masks=batch["masks"],
)
except ValueError:
save_path = self.save_error_data(batch=batch)
get_logger().error(
f"Encountered a value error! Likely because of nans in the output/input."
f" Saving all error information to {save_path}."
)
raise
loss_return = loss.loss(
step_count=self.step_count,
batch=batch,
actor_critic_output=actor_critic_output_for_batch,
)
per_epoch_info = {}
if len(loss_return) == 2:
current_loss, current_info = loss_return
elif len(loss_return) == 3:
current_loss, current_info, per_epoch_info = loss_return
else:
raise NotImplementedError
elif isinstance(loss, GenericAbstractLoss):
loss_output = loss.loss(
model=self.actor_critic,
batch=batch,
batch_memory=batch_memory,
stream_memory=stage.stage_component_uuid_to_stream_memory[
stage_component.uuid
],
)
current_loss = loss_output.value
current_info = loss_output.info
per_epoch_info = loss_output.per_epoch_info
batch_memory = loss_output.batch_memory
stage.stage_component_uuid_to_stream_memory[
stage_component.uuid
] = loss_output.stream_memory
bsize = loss_output.bsize
else:
raise NotImplementedError(
f"Loss of type {type(loss)} is not supported. Losses must be subclasses of"
f" `AbstractActorCriticLoss` or `GenericAbstractLoss`."
)
if total_loss is None:
total_loss = loss_weight * current_loss
else:
total_loss = total_loss + loss_weight * current_loss
for key, value in current_info.items():
info[f"{loss_name}/{key}"] = value
if per_epoch_info is not None:
for key, value in per_epoch_info.items():
if max(loss_update_repeats_list, default=0) > 1:
info[
f"{loss_name}/{key}_epoch{current_update_repeat_index:02d}"
] = value
info[f"{loss_name}/{key}_combined"] = value
else:
info[f"{loss_name}/{key}"] = value
assert total_loss is not None, (
f"No {stage_component.uuid} losses specified for training in stage"
f" {self.training_pipeline.current_stage_index}"
)
total_loss_scalar = total_loss.item()
info[f"total_loss"] = total_loss_scalar
self.tracking_info_list.append(
TrackingInfo(
type=TrackingInfoType.LOSS,
info=info,
n=bsize,
storage_uuid=stage_component.storage_uuid,
stage_component_uuid=stage_component.uuid,
)
)
to_track = {
"rollout_epochs": max(loss_update_repeats_list, default=0),
"worker_batch_size": bsize,
}
aggregate_bsize = None
if training:
aggregate_bsize = self.distributed_weighted_sum(bsize, 1)
to_track["global_batch_size"] = aggregate_bsize
to_track["lr"] = self.optimizer.param_groups[0]["lr"]
if training_settings.num_mini_batch is not None:
to_track[
"rollout_num_mini_batch"
] = training_settings.num_mini_batch
for k, v in to_track.items():
# We need to set `n` to 1 for `worker_batch_size` below as we're trying to record the
# average batch size per worker, not the average per worker weighted by the size of the batches
# of those workers.
self.tracking_info_list.append(
TrackingInfo(
type=TrackingInfoType.UPDATE_INFO,
info={k: v},
n=1 if k == "worker_batch_size" else bsize,
storage_uuid=stage_component.storage_uuid,
stage_component_uuid=stage_component.uuid,
)
)
if not skip_backprop:
self.backprop_step(
total_loss=total_loss,
max_grad_norm=training_settings.max_grad_norm,
local_to_global_batch_size_ratio=bsize / aggregate_bsize,
)
stage.stage_component_uuid_to_stream_memory[
stage_component.uuid
] = detach_recursively(
input=stage.stage_component_uuid_to_stream_memory[
stage_component.uuid
],
inplace=True,
)
def close(self, verbose=True):
self._is_closing = True
if "_is_closed" in self.__dict__ and self._is_closed:
return
def logif(s: Union[str, Exception]):
if verbose:
if isinstance(s, str):
get_logger().info(s)
elif isinstance(s, Exception):
get_logger().error(traceback.format_exc())
else:
raise NotImplementedError()
if "_vector_tasks" in self.__dict__ and self._vector_tasks is not None:
try:
logif(
f"[{self.mode} worker {self.worker_id}] Closing OnPolicyRLEngine.vector_tasks."
)
self._vector_tasks.close()
logif(f"[{self.mode} worker {self.worker_id}] Closed.")
except Exception as e:
logif(
f"[{self.mode} worker {self.worker_id}] Exception raised when closing OnPolicyRLEngine.vector_tasks:"
)
logif(e)
self._is_closed = True
self._is_closing = False
@property
def is_closed(self):
return self._is_closed
@property
def is_closing(self):
return self._is_closing
def __del__(self):
self.close(verbose=False)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close(verbose=False)
class OnPolicyTrainer(OnPolicyRLEngine):
def __init__(
self,
experiment_name: str,
config: ExperimentConfig,
results_queue: mp.Queue,
checkpoints_queue: Optional[mp.Queue],
checkpoints_dir: str = "",
seed: Optional[int] = None,
deterministic_cudnn: bool = False,
mp_ctx: Optional[BaseContext] = None,
worker_id: int = 0,
num_workers: int = 1,
device: Union[str, torch.device, int] = "cpu",
distributed_ip: str = "127.0.0.1",
distributed_port: int = 0,
deterministic_agents: bool = False,
distributed_preemption_threshold: float = 0.7,
max_sampler_processes_per_worker: Optional[int] = None,
save_ckpt_after_every_pipeline_stage: bool = True,
first_local_worker_id: int = 0,
**kwargs,
):
kwargs["mode"] = TRAIN_MODE_STR
super().__init__(
experiment_name=experiment_name,
config=config,
results_queue=results_queue,
checkpoints_queue=checkpoints_queue,
checkpoints_dir=checkpoints_dir,
seed=seed,
deterministic_cudnn=deterministic_cudnn,
mp_ctx=mp_ctx,
worker_id=worker_id,
num_workers=num_workers,
device=device,
distributed_ip=distributed_ip,
distributed_port=distributed_port,
deterministic_agents=deterministic_agents,
max_sampler_processes_per_worker=max_sampler_processes_per_worker,
**kwargs,
)
self.save_ckpt_after_every_pipeline_stage = save_ckpt_after_every_pipeline_stage
self.actor_critic.train()
self.training_pipeline: TrainingPipeline = config.training_pipeline()
if self.num_workers != 1:
# Ensure that we're only using early stopping criterions in the non-distributed setting.
if any(
stage.early_stopping_criterion is not None
for stage in self.training_pipeline.pipeline_stages
):
raise NotImplementedError(
"Early stopping criterions are currently only allowed when using a single training worker, i.e."
" no distributed (multi-GPU) training. If this is a feature you'd like please create an issue"
" at https://github.com/allenai/allenact/issues or (even better) create a pull request with this "
" feature and we'll be happy to review it."
)
self.optimizer: optim.optimizer.Optimizer = (
self.training_pipeline.optimizer_builder(
params=[p for p in self.actor_critic.parameters() if p.requires_grad]
)
)
# noinspection PyProtectedMember
self.lr_scheduler: Optional[_LRScheduler] = None
if self.training_pipeline.lr_scheduler_builder is not None:
self.lr_scheduler = self.training_pipeline.lr_scheduler_builder(
optimizer=self.optimizer
)
if self.is_distributed:
# Tracks how many workers have finished their rollout
self.num_workers_done = torch.distributed.PrefixStore( # type:ignore
"num_workers_done", self.store
)
# Tracks the number of steps taken by each worker in current rollout
self.num_workers_steps = torch.distributed.PrefixStore( # type:ignore
"num_workers_steps", self.store
)
self.distributed_preemption_threshold = distributed_preemption_threshold
# Flag for finished worker in current epoch
self.offpolicy_epoch_done = torch.distributed.PrefixStore( # type:ignore
"offpolicy_epoch_done", self.store
)
# Flag for finished worker in current epoch with custom component
self.insufficient_data_for_update = torch.distributed.PrefixStore( # type:ignore
"insufficient_data_for_update", self.store
)
else:
self.num_workers_done = None
self.num_workers_steps = None
self.distributed_preemption_threshold = 1.0
self.offpolicy_epoch_done = None
# Keeping track of training state
self.former_steps: Optional[int] = None
self.last_log: Optional[int] = None
self.last_save: Optional[int] = None
# The `self._last_aggregated_train_task_metrics` attribute defined
# below is used for early stopping criterion computations
self._last_aggregated_train_task_metrics: ScalarMeanTracker = (
ScalarMeanTracker()
)
self.first_local_worker_id = first_local_worker_id
def advance_seed(
self, seed: Optional[int], return_same_seed_per_worker=False
) -> Optional[int]:
if seed is None:
return seed
seed = (seed ^ (self.training_pipeline.total_steps + 1)) % (
2 ** 31 - 1
) # same seed for all workers
if (not return_same_seed_per_worker) and (
self.mode == TRAIN_MODE_STR or self.mode == TEST_MODE_STR
):
return self.worker_seeds(self.num_workers, seed)[
self.worker_id
] # doesn't modify the current rng state
else:
return self.worker_seeds(1, seed)[0] # doesn't modify the current rng state
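# Example of the advancement scheme above (the step count is an assumption): the
# base seed is XOR-mixed with the current total step count, so re-seeding at
# different points in training yields different, yet reproducible, seeds.
#
#   total_steps = 12_345
#   advanced = (42 ^ (total_steps + 1)) % (2 ** 31 - 1)  # same for all workers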
def deterministic_seeds(self) -> None:
if self.seed is not None:
set_seed(self.advance_seed(self.seed)) # known state for all workers
seeds = self.worker_seeds(
self.num_samplers, None
) # use the latest seed for workers and update rng state
if self.vector_tasks is not None:
self.vector_tasks.set_seeds(seeds)
def save_error_data(self, batch: Dict[str, Any]) -> str:
model_path = os.path.join(
self.checkpoints_dir,
"error_for_exp_{}__stage_{:02d}__steps_{:012d}.pt".format(
self.experiment_name,
self.training_pipeline.current_stage_index,
self.training_pipeline.total_steps,
),
)
with filelock.FileLock(
os.path.join(self.checkpoints_dir, "error.lock"), timeout=60
):
if not os.path.exists(model_path):
save_dict = {
"model_state_dict": self.actor_critic.state_dict(), # type:ignore
"total_steps": self.training_pipeline.total_steps, # Total steps including current stage
"optimizer_state_dict": self.optimizer.state_dict(), # type: ignore
"training_pipeline_state_dict": self.training_pipeline.state_dict(),
"trainer_seed": self.seed,
"batch": batch,
}
if self.lr_scheduler is not None:
save_dict["scheduler_state"] = cast(
_LRScheduler, self.lr_scheduler
).state_dict()
torch.save(save_dict, model_path)
return model_path
def aggregate_and_send_logging_package(
self,
tracking_info_list: List[TrackingInfo],
logging_pkg: Optional[LoggingPackage] = None,
send_logging_package: bool = True,
):
logging_pkg = super().aggregate_and_send_logging_package(
tracking_info_list=tracking_info_list,
logging_pkg=logging_pkg,
send_logging_package=send_logging_package,
)
if self.mode == TRAIN_MODE_STR:
# Technically self.mode should always be "train" here (as this is the training engine),
# this conditional is defensive
self._last_aggregated_train_task_metrics.add_scalars(
scalars=logging_pkg.metrics_tracker.means(),
n=logging_pkg.metrics_tracker.counts(),
)
return logging_pkg
def checkpoint_save(self, pipeline_stage_index: Optional[int] = None) -> str:
model_path = os.path.join(
self.checkpoints_dir,
"exp_{}__stage_{:02d}__steps_{:012d}.pt".format(
self.experiment_name,
self.training_pipeline.current_stage_index
if pipeline_stage_index is None
else pipeline_stage_index,
self.training_pipeline.total_steps,
),
)
save_dict = {
"model_state_dict": self.actor_critic.state_dict(), # type:ignore
"total_steps": self.training_pipeline.total_steps, # Total steps including current stage
"optimizer_state_dict": self.optimizer.state_dict(), # type: ignore
"training_pipeline_state_dict": self.training_pipeline.state_dict(),
"trainer_seed": self.seed,
}
if self.lr_scheduler is not None:
save_dict["scheduler_state"] = cast(
_LRScheduler, self.lr_scheduler
).state_dict()
torch.save(save_dict, model_path)
return model_path
def checkpoint_load(
self, ckpt: Union[str, Dict[str, Any]], restart_pipeline: bool = False
) -> Dict[str, Union[Dict[str, Any], torch.Tensor, float, int, str, List]]:
if restart_pipeline:
if "training_pipeline_state_dict" in ckpt:
del ckpt["training_pipeline_state_dict"]
ckpt = super().checkpoint_load(ckpt, restart_pipeline=restart_pipeline)
if restart_pipeline:
self.training_pipeline.restart_pipeline()
else:
self.seed = cast(int, ckpt["trainer_seed"])
self.optimizer.load_state_dict(ckpt["optimizer_state_dict"]) # type: ignore
if self.lr_scheduler is not None and "scheduler_state" in ckpt:
self.lr_scheduler.load_state_dict(ckpt["scheduler_state"]) # type: ignore
self.deterministic_seeds()
return ckpt
@property
def step_count(self):
return self.training_pipeline.current_stage.steps_taken_in_stage
@step_count.setter
def step_count(self, val: int) -> None:
self.training_pipeline.current_stage.steps_taken_in_stage = val
@property
def log_interval(self):
return self.training_pipeline.current_stage.training_settings.metric_accumulate_interval
@property
def approx_steps(self):
if self.is_distributed:
# the actual number of steps gets synchronized after each rollout
return (
self.step_count - self.former_steps
) * self.num_workers + self.former_steps
else:
return self.step_count # this is actually accurate
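    # Worked example for the distributed branch above: with `num_workers == 4`,
    # `former_steps == 100`, and a local `step_count == 110`, `approx_steps`
    # returns (110 - 100) * 4 + 100 == 140, i.e. the local progress since the
    # last synchronization extrapolated to all workers.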
def act(
self,
rollout_storage: RolloutStorage,
dist_wrapper_class: Optional[type] = None,
):
if self.training_pipeline.current_stage.teacher_forcing is not None:
assert dist_wrapper_class is None
def tracking_callback(type: TrackingInfoType, info: Dict[str, Any], n: int):
self.tracking_info_list.append(
TrackingInfo(
type=type,
info=info,
n=n,
storage_uuid=self.training_pipeline.rollout_storage_uuid,
stage_component_uuid=None,
)
)
dist_wrapper_class = partial(
TeacherForcingDistr,
action_space=self.actor_critic.action_space,
num_active_samplers=self.num_active_samplers,
approx_steps=self.approx_steps,
teacher_forcing=self.training_pipeline.current_stage.teacher_forcing,
tracking_callback=tracking_callback,
)
actions, actor_critic_output, memory, step_observation = super().act(
rollout_storage=rollout_storage, dist_wrapper_class=dist_wrapper_class,
)
self.step_count += self.num_active_samplers
return actions, actor_critic_output, memory, step_observation
    def advantage_stats(self, advantages: torch.Tensor) -> Dict[str, torch.Tensor]:
        r"""Computes the mean and standard deviation of advantages (possibly over multiple workers).
        For multiple workers, this method is equivalent to first gathering all workers' copies of
        the advantages and then computing the mean and standard deviation over that concatenation.
# Parameters
advantages: Tensors to compute mean and variance over. Assumed to be solely the
worker's local copy of this tensor, the resultant mean and variance will be computed
as though _all_ workers' versions of this tensor were concatenated together in
distributed training.
"""
# Step count has already been updated with the steps from all workers
global_rollout_steps = self.step_count - self.former_steps
if self.is_distributed:
summed_advantages = advantages.sum()
dist.all_reduce(summed_advantages)
mean = summed_advantages / global_rollout_steps
summed_squares = (advantages - mean).pow(2).sum()
dist.all_reduce(summed_squares)
std = (summed_squares / (global_rollout_steps - 1)).sqrt()
else:
# noinspection PyArgumentList
mean, std = advantages.mean(), advantages.std()
return {"mean": mean, "std": std}
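    # A minimal sketch of the distributed branch above, assuming two workers
    # with local advantage tensors `a0` and `a1` (names illustrative):
    #     mean == torch.cat([a0, a1]).mean()
    #     std  == torch.cat([a0, a1]).std()
    # i.e. the two `all_reduce` calls reproduce the statistics one would get by
    # concatenating all workers' advantages locally.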
def backprop_step(
self,
total_loss: torch.Tensor,
max_grad_norm: float,
local_to_global_batch_size_ratio: float = 1.0,
):
self.optimizer.zero_grad() # type: ignore
if isinstance(total_loss, torch.Tensor):
total_loss.backward()
if self.is_distributed:
# From https://github.com/pytorch/pytorch/issues/43135
reductions, all_params = [], []
for p in self.actor_critic.parameters():
                    # One could also organize grads into larger buckets to make all_reduce more efficient
if p.requires_grad:
if p.grad is None:
p.grad = torch.zeros_like(p.data)
                        else:  # p.grad exists; rescale it by the local-to-global batch size ratio since we're distributed
p.grad = p.grad * local_to_global_batch_size_ratio
reductions.append(
dist.all_reduce(p.grad, async_op=True,) # sum
) # synchronize
all_params.append(p)
for reduction, p in zip(reductions, all_params):
reduction.wait()
nn.utils.clip_grad_norm_(
self.actor_critic.parameters(), max_norm=max_grad_norm, # type: ignore
)
self.optimizer.step() # type: ignore
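    # A minimal sketch of the distributed gradient logic above, assuming two
    # workers whose local batches each make up half of the global batch: both
    # scale their local gradient by `local_to_global_batch_size_ratio == 0.5`,
    # so the summing `all_reduce` yields 0.5 * g0 + 0.5 * g1 (g0, g1
    # illustrative), i.e. the gradient of the loss averaged over the global batch.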
def _save_checkpoint_then_send_checkpoint_for_validation_and_update_last_save_counter(
self, pipeline_stage_index: Optional[int] = None
):
self.deterministic_seeds()
if self.worker_id == self.first_local_worker_id:
model_path = self.checkpoint_save(pipeline_stage_index=pipeline_stage_index)
if self.checkpoints_queue is not None:
self.checkpoints_queue.put(("eval", model_path))
self.last_save = self.training_pipeline.total_steps
def run_pipeline(self, valid_on_initial_weights: bool = False):
cur_stage_training_settings = (
self.training_pipeline.current_stage.training_settings
)
rollout_storage = self.training_pipeline.rollout_storage
uuid_to_storage = self.training_pipeline.current_stage_storage
self.initialize_storage_and_viz(
storage_to_initialize=cast(
List[ExperienceStorage], list(uuid_to_storage.values())
)
)
self.tracking_info_list.clear()
self.last_log = self.training_pipeline.total_steps
if self.last_save is None:
self.last_save = self.training_pipeline.total_steps
should_save_checkpoints = (
self.checkpoints_dir != ""
and cur_stage_training_settings.save_interval is not None
and cur_stage_training_settings.save_interval > 0
)
already_saved_checkpoint = False
if (
valid_on_initial_weights
and should_save_checkpoints
and self.checkpoints_queue is not None
):
if self.worker_id == self.first_local_worker_id:
model_path = self.checkpoint_save()
if self.checkpoints_queue is not None:
self.checkpoints_queue.put(("eval", model_path))
while True:
pipeline_stage_changed = self.training_pipeline.before_rollout(
train_metrics=self._last_aggregated_train_task_metrics
) # This is `False` at the very start of training, i.e. pipeline starts with a stage initialized
self._last_aggregated_train_task_metrics.reset()
training_is_complete = self.training_pipeline.current_stage is None
# `training_is_complete` should imply `pipeline_stage_changed`
assert pipeline_stage_changed or not training_is_complete
# Saving checkpoints and initializing storage when the pipeline stage changes
if pipeline_stage_changed:
# Here we handle saving a checkpoint after a pipeline stage ends. We
# do this:
# (1) after every pipeline stage if the `self.save_ckpt_after_every_pipeline_stage`
# boolean is True, and
# (2) when we have reached the end of ALL training (i.e. all stages are complete).
if (
should_save_checkpoints
and ( # Might happen if the `save_interval` was hit just previously, see below
not already_saved_checkpoint
)
and (
self.save_ckpt_after_every_pipeline_stage
or training_is_complete
)
):
self._save_checkpoint_then_send_checkpoint_for_validation_and_update_last_save_counter(
pipeline_stage_index=self.training_pipeline.current_stage_index
- 1
if not training_is_complete
else len(self.training_pipeline.pipeline_stages) - 1
)
# If training is complete, break out
if training_is_complete:
break
# Here we handle updating our training settings after a pipeline stage ends.
# Update the training settings we're using
cur_stage_training_settings = (
self.training_pipeline.current_stage.training_settings
)
# If the pipeline stage changed we must initialize any new custom storage and
# stop updating any custom storage that is no longer in use (this second bit
# is done by simply updating `uuid_to_storage` to the new custom storage objects).
new_uuid_to_storage = self.training_pipeline.current_stage_storage
storage_to_initialize = [
s
for uuid, s in new_uuid_to_storage.items()
if uuid
not in uuid_to_storage # Don't initialize storage already in use
]
self.initialize_storage_and_viz(
storage_to_initialize=storage_to_initialize,
)
uuid_to_storage = new_uuid_to_storage
already_saved_checkpoint = False
if self.is_distributed:
self.num_workers_done.set("done", str(0))
self.num_workers_steps.set("steps", str(0))
# Ensure all workers are done before incrementing num_workers_{steps, done}
dist.barrier(
device_ids=None
if self.device == torch.device("cpu")
else [self.device.index]
)
self.former_steps = self.step_count
former_storage_experiences = {
k: v.total_experiences
for k, v in self.training_pipeline.current_stage_storage.items()
}
if self.training_pipeline.rollout_storage_uuid is None:
# In this case we're not expecting to collect storage experiences, i.e. everything
# will be off-policy.
# self.step_count is normally updated by the `self.collect_step_across_all_task_samplers`
# call below, but since we're not collecting onpolicy experiences, we need to update
# it here. The step count here is now just effectively a count of the number of times
# we've called `compute_losses_track_them_and_backprop` below.
self.step_count += 1
before_update_info = dict(
next_value=None,
use_gae=cur_stage_training_settings.use_gae,
gamma=cur_stage_training_settings.gamma,
tau=cur_stage_training_settings.gae_lambda,
adv_stats_callback=self.advantage_stats,
)
else:
vector_tasks_already_restarted = False
step = -1
while step < cur_stage_training_settings.num_steps - 1:
step += 1
try:
num_paused = self.collect_step_across_all_task_samplers(
rollout_storage_uuid=self.training_pipeline.rollout_storage_uuid,
uuid_to_storage=uuid_to_storage,
)
except (TimeoutError, EOFError) as e:
if (
not self.try_restart_after_task_error
) or self.mode != TRAIN_MODE_STR:
                            # A bare `raise` here re-raises the caught exception with its
                            # original traceback intact.
raise
elif vector_tasks_already_restarted:
raise RuntimeError(
f"[{self.mode} worker {self.worker_id}] `vector_tasks` has timed out twice in the same"
f" rollout. This suggests that this error was not recoverable. Timeout exception:\n{traceback.format_exc()}"
)
else:
get_logger().warning(
f"[{self.mode} worker {self.worker_id}] `vector_tasks` appears to have crashed during"
f" training due to an {type(e).__name__} error. You have set"
f" `try_restart_after_task_error` to `True` so we will attempt to restart these tasks from"
f" the beginning. USE THIS FEATURE AT YOUR OWN"
f" RISK. Exception:\n{traceback.format_exc()}."
)
self.vector_tasks.close()
self._vector_tasks = None
vector_tasks_already_restarted = True
for (
storage
) in self.training_pipeline.current_stage_storage.values():
storage.after_updates()
self.initialize_storage_and_viz(
storage_to_initialize=cast(
List[ExperienceStorage],
list(uuid_to_storage.values()),
)
)
step = -1
continue
                    # A more informative error message should already have been raised in
# `collect_step_across_all_task_samplers` if `num_paused != 0` here but this serves
# as a sanity check.
assert num_paused == 0
if self.is_distributed:
# Preempt stragglers
                        # Each worker will stop collecting steps for the current rollout once at least
                        # 100 * distributed_preemption_threshold percent of workers have finished collecting
                        # their rollout steps, provided it has collected at least 25% but less than 90% of its steps.
num_done = int(self.num_workers_done.get("done"))
if (
num_done
> self.distributed_preemption_threshold * self.num_workers
and 0.25 * cur_stage_training_settings.num_steps
<= step
< 0.9 * cur_stage_training_settings.num_steps
):
get_logger().debug(
f"[{self.mode} worker {self.worker_id}] Preempted after {step}"
f" steps (out of {cur_stage_training_settings.num_steps})"
f" with {num_done} workers done"
)
break
with torch.no_grad():
actor_critic_output, _ = self.actor_critic(
**rollout_storage.agent_input_for_next_step()
)
self.training_pipeline.rollout_count += 1
if self.is_distributed:
# Mark that a worker is done collecting experience
self.num_workers_done.add("done", 1)
self.num_workers_steps.add(
"steps", self.step_count - self.former_steps
)
# Ensure all workers are done before updating step counter
dist.barrier(
device_ids=None
if self.device == torch.device("cpu")
else [self.device.index]
)
ndone = int(self.num_workers_done.get("done"))
assert (
ndone == self.num_workers
), f"# workers done {ndone} != # workers {self.num_workers}"
# get the actual step_count
self.step_count = (
int(self.num_workers_steps.get("steps")) + self.former_steps
)
before_update_info = dict(
next_value=actor_critic_output.values.detach(),
use_gae=cur_stage_training_settings.use_gae,
gamma=cur_stage_training_settings.gamma,
tau=cur_stage_training_settings.gae_lambda,
adv_stats_callback=self.advantage_stats,
)
# Prepare storage for iteration during updates
for storage in self.training_pipeline.current_stage_storage.values():
storage.before_updates(**before_update_info)
for sc in self.training_pipeline.current_stage.stage_components:
component_storage = uuid_to_storage[sc.storage_uuid]
# before_update = time.time()
self.compute_losses_track_them_and_backprop(
stage=self.training_pipeline.current_stage,
stage_component=sc,
storage=component_storage,
)
# after_update = time.time()
# delta = after_update - before_update
# get_logger().info(
# f"Worker {self.worker_id}: {sc.uuid} took {delta:.2g}s ({sc.training_settings.update_repeats}"
# f" repeats * {sc.training_settings.num_mini_batch} batches)"
# )
for storage in self.training_pipeline.current_stage_storage.values():
storage.after_updates()
# We update the storage step counts saved in
# `self.training_pipeline.current_stage.storage_uuid_to_steps_taken_in_stage` here rather than with
# `self.steps` above because some storage step counts may only change after the update calls above.
# This may seem a bit weird but consider a storage that corresponds to a fixed dataset
# used for imitation learning. For such a dataset, the "steps" will only increase as
# new batches are sampled during update calls.
# Note: We don't need to sort the keys below to ensure that distributed updates happen correctly
# as `self.training_pipeline.current_stage_storage` is an ordered `dict`.
# First we calculate the change in counts (possibly aggregating across devices)
change_in_storage_experiences = {}
for k in sorted(self.training_pipeline.current_stage_storage.keys()):
delta = (
self.training_pipeline.current_stage_storage[k].total_experiences
- former_storage_experiences[k]
)
assert delta >= 0
change_in_storage_experiences[k] = self.distributed_weighted_sum(
to_share=delta, weight=1
)
# Then we update `self.training_pipeline.current_stage.storage_uuid_to_steps_taken_in_stage` with the above
# computed changes.
for storage_uuid, delta in change_in_storage_experiences.items():
self.training_pipeline.current_stage.storage_uuid_to_steps_taken_in_stage[
storage_uuid
] += delta
if self.lr_scheduler is not None:
self.lr_scheduler.step(epoch=self.training_pipeline.total_steps)
if (
self.training_pipeline.total_steps - self.last_log >= self.log_interval
or self.training_pipeline.current_stage.is_complete
):
self.aggregate_and_send_logging_package(
tracking_info_list=self.tracking_info_list
)
self.tracking_info_list.clear()
self.last_log = self.training_pipeline.total_steps
# Here we handle saving a checkpoint every `save_interval` steps, saving after
# a pipeline stage completes is controlled above
if should_save_checkpoints and (
self.training_pipeline.total_steps - self.last_save
>= cur_stage_training_settings.save_interval
):
self._save_checkpoint_then_send_checkpoint_for_validation_and_update_last_save_counter()
already_saved_checkpoint = True
if (
cur_stage_training_settings.advance_scene_rollout_period is not None
) and (
self.training_pipeline.rollout_count
% cur_stage_training_settings.advance_scene_rollout_period
== 0
):
get_logger().info(
f"[{self.mode} worker {self.worker_id}] Force advance"
f" tasks with {self.training_pipeline.rollout_count} rollouts"
)
self.vector_tasks.next_task(force_advance_scene=True)
self.initialize_storage_and_viz(
storage_to_initialize=cast(
List[ExperienceStorage], list(uuid_to_storage.values())
)
)
def train(
self,
checkpoint_file_name: Optional[str] = None,
restart_pipeline: bool = False,
valid_on_initial_weights: bool = False,
):
assert (
self.mode == TRAIN_MODE_STR
), "train only to be called from a train instance"
training_completed_successfully = False
# noinspection PyBroadException
try:
if checkpoint_file_name is not None:
self.checkpoint_load(checkpoint_file_name, restart_pipeline)
self.run_pipeline(valid_on_initial_weights=valid_on_initial_weights)
training_completed_successfully = True
except KeyboardInterrupt:
get_logger().info(
f"[{self.mode} worker {self.worker_id}] KeyboardInterrupt, exiting."
)
except Exception as e:
get_logger().error(
f"[{self.mode} worker {self.worker_id}] Encountered {type(e).__name__}, exiting."
)
get_logger().error(traceback.format_exc())
finally:
if training_completed_successfully:
if self.worker_id == 0:
self.results_queue.put(("train_stopped", 0))
get_logger().info(
f"[{self.mode} worker {self.worker_id}] Training finished successfully."
)
else:
self.results_queue.put(("train_stopped", 1 + self.worker_id))
self.close()
class OnPolicyInference(OnPolicyRLEngine):
def __init__(
self,
config: ExperimentConfig,
results_queue: mp.Queue, # to output aggregated results
checkpoints_queue: mp.Queue, # to write/read (trainer/evaluator) ready checkpoints
checkpoints_dir: str = "",
mode: str = "valid", # or "test"
seed: Optional[int] = None,
deterministic_cudnn: bool = False,
mp_ctx: Optional[BaseContext] = None,
device: Union[str, torch.device, int] = "cpu",
deterministic_agents: bool = False,
worker_id: int = 0,
num_workers: int = 1,
distributed_port: int = 0,
enforce_expert: bool = False,
**kwargs,
):
super().__init__(
experiment_name="",
config=config,
results_queue=results_queue,
checkpoints_queue=checkpoints_queue,
checkpoints_dir=checkpoints_dir,
mode=mode,
seed=seed,
deterministic_cudnn=deterministic_cudnn,
mp_ctx=mp_ctx,
deterministic_agents=deterministic_agents,
device=device,
worker_id=worker_id,
num_workers=num_workers,
distributed_port=distributed_port,
**kwargs,
)
self.enforce_expert = enforce_expert
def run_eval(
self,
checkpoint_file_path: str,
rollout_steps: int = 100,
visualizer: Optional[VizSuite] = None,
update_secs: float = 20.0,
verbose: bool = False,
) -> LoggingPackage:
assert self.actor_critic is not None, "called `run_eval` with no actor_critic"
# Sanity check that we haven't entered an invalid state. During eval the training_pipeline
# should be only set in this function and always unset at the end of it.
assert self.training_pipeline is None, (
"`training_pipeline` should be `None` before calling `run_eval`."
" This is necessary as we want to initialize new storages."
)
self.training_pipeline = self.config.training_pipeline()
ckpt = self.checkpoint_load(checkpoint_file_path, restart_pipeline=False)
total_steps = cast(int, ckpt["total_steps"])
eval_pipeline_stage = cast(
PipelineStage,
getattr(self.training_pipeline, f"{self.mode}_pipeline_stage"),
)
assert (
len(eval_pipeline_stage.stage_components) <= 1
), "Only one StageComponent is supported during inference."
uuid_to_storage = self.training_pipeline.get_stage_storage(eval_pipeline_stage)
assert len(uuid_to_storage) > 0, (
"No storage found for eval pipeline stage, this is a bug in AllenAct,"
" please submit an issue on GitHub (https://github.com/allenai/allenact/issues)."
)
uuid_to_rollout_storage = {
uuid: storage
for uuid, storage in uuid_to_storage.items()
if isinstance(storage, RolloutStorage)
}
uuid_to_non_rollout_storage = {
uuid: storage
for uuid, storage in uuid_to_storage.items()
if not isinstance(storage, RolloutStorage)
}
if len(uuid_to_rollout_storage) > 1 or len(uuid_to_non_rollout_storage) > 1:
raise NotImplementedError(
"Only one RolloutStorage and non-RolloutStorage object is allowed within an evaluation pipeline stage."
" If you'd like to evaluate against multiple storages please"
" submit an issue on GitHub (https://github.com/allenai/allenact/issues). For the moment you'll need"
" to evaluate against these storages separately."
)
rollout_storage = self.training_pipeline.rollout_storage
if visualizer is not None:
assert visualizer.empty()
num_paused = self.initialize_storage_and_viz(
storage_to_initialize=cast(
List[ExperienceStorage], list(uuid_to_storage.values())
),
visualizer=visualizer,
)
assert num_paused == 0, f"{num_paused} tasks paused when initializing eval"
if rollout_storage is not None:
num_tasks = sum(
self.vector_tasks.command(
"sampler_attr", ["length"] * self.num_active_samplers
)
) + ( # We need to add this as the first tasks have already been sampled
self.num_active_samplers
)
else:
num_tasks = 0
        # get_logger().debug(f"worker {self.worker_id} number of tasks {num_tasks}")
steps = 0
self.actor_critic.eval()
last_time: float = time.time()
init_time: float = last_time
frames: int = 0
if verbose:
get_logger().info(
f"[{self.mode} worker {self.worker_id}] Running evaluation on {num_tasks} tasks"
f" for ckpt {checkpoint_file_path}"
)
if self.enforce_expert:
dist_wrapper_class = partial(
TeacherForcingDistr,
action_space=self.actor_critic.action_space,
num_active_samplers=None,
approx_steps=None,
teacher_forcing=None,
tracking_callback=None,
always_enforce=True,
)
else:
dist_wrapper_class = None
logging_pkg = LoggingPackage(
mode=self.mode,
training_steps=total_steps,
storage_uuid_to_total_experiences=self.training_pipeline.storage_uuid_to_total_experiences,
)
should_compute_onpolicy_losses = (
len(eval_pipeline_stage.loss_names) > 0
and eval_pipeline_stage.stage_components[0].storage_uuid
== self.training_pipeline.rollout_storage_uuid
)
while self.num_active_samplers > 0:
frames += self.num_active_samplers
num_newly_paused = self.collect_step_across_all_task_samplers(
rollout_storage_uuid=self.training_pipeline.rollout_storage_uuid,
uuid_to_storage=uuid_to_rollout_storage,
visualizer=visualizer,
dist_wrapper_class=dist_wrapper_class,
)
steps += 1
if should_compute_onpolicy_losses and num_newly_paused > 0:
# The `collect_step_across_all_task_samplers` method will automatically drop
                # parts of the rollout storage that correspond to paused tasks (namely by calling
# `rollout_storage.sampler_select(UNPAUSED_TASK_INDS)`). This makes sense when you don't need to
# compute losses for tasks but is a bit limiting here as we're throwing away data before
# using it to compute losses. As changing this is non-trivial we'll just warn the user
# for now.
get_logger().warning(
f"[{self.mode} worker {self.worker_id}] {num_newly_paused * rollout_storage.step} steps"
f" will be dropped when computing losses in evaluation. This is a limitation of the current"
f" implementation of rollout collection in AllenAct. If you'd like to see this"
f" functionality improved please submit an issue on GitHub"
f" (https://github.com/allenai/allenact/issues)."
)
if self.num_active_samplers == 0 or steps % rollout_steps == 0:
if should_compute_onpolicy_losses and self.num_active_samplers > 0:
with torch.no_grad():
actor_critic_output, _ = self.actor_critic(
**rollout_storage.agent_input_for_next_step()
)
before_update_info = dict(
next_value=actor_critic_output.values.detach(),
use_gae=eval_pipeline_stage.training_settings.use_gae,
gamma=eval_pipeline_stage.training_settings.gamma,
tau=eval_pipeline_stage.training_settings.gae_lambda,
adv_stats_callback=lambda advantages: {
"mean": advantages.mean(),
"std": advantages.std(),
},
)
# Prepare storage for iteration during loss computation
for storage in uuid_to_rollout_storage.values():
storage.before_updates(**before_update_info)
# Compute losses
with torch.no_grad():
for sc in eval_pipeline_stage.stage_components:
self.compute_losses_track_them_and_backprop(
stage=eval_pipeline_stage,
stage_component=sc,
storage=uuid_to_rollout_storage[sc.storage_uuid],
skip_backprop=True,
)
for storage in uuid_to_rollout_storage.values():
storage.after_updates()
cur_time = time.time()
if self.num_active_samplers == 0 or cur_time - last_time >= update_secs:
logging_pkg = self.aggregate_and_send_logging_package(
tracking_info_list=self.tracking_info_list,
logging_pkg=logging_pkg,
send_logging_package=False,
)
self.tracking_info_list.clear()
if verbose:
npending: int
lengths: List[int]
if self.num_active_samplers > 0:
lengths = self.vector_tasks.command(
"sampler_attr", ["length"] * self.num_active_samplers,
)
npending = sum(lengths)
else:
lengths = []
npending = 0
est_time_to_complete = (
"{:.2f}".format(
(
(cur_time - init_time)
* (npending / (num_tasks - npending))
/ 60
)
)
if npending != num_tasks
else "???"
)
get_logger().info(
f"[{self.mode} worker {self.worker_id}]"
f" For ckpt {checkpoint_file_path}"
f" {frames / (cur_time - init_time):.1f} fps,"
f" {npending}/{num_tasks} tasks pending ({lengths})."
f" ~{est_time_to_complete} min. to complete."
)
if logging_pkg.num_non_empty_metrics_dicts_added != 0:
get_logger().info(
", ".join(
[
f"[{self.mode} worker {self.worker_id}]"
f" num_{self.mode}_tasks_complete {logging_pkg.num_non_empty_metrics_dicts_added}",
*[
f"{k} {v:.3g}"
for k, v in logging_pkg.metrics_tracker.means().items()
],
*[
f"{k0[1]}/{k1} {v1:.3g}"
for k0, v0 in logging_pkg.info_trackers.items()
for k1, v1 in v0.means().items()
],
]
)
)
last_time = cur_time
get_logger().info(
f"[{self.mode} worker {self.worker_id}] Task evaluation complete, all task samplers paused."
)
if rollout_storage is not None:
self.vector_tasks.resume_all()
self.vector_tasks.set_seeds(self.worker_seeds(self.num_samplers, self.seed))
self.vector_tasks.reset_all()
logging_pkg = self.aggregate_and_send_logging_package(
tracking_info_list=self.tracking_info_list,
logging_pkg=logging_pkg,
send_logging_package=False,
)
self.tracking_info_list.clear()
logging_pkg.viz_data = (
visualizer.read_and_reset() if visualizer is not None else None
)
should_compute_offpolicy_losses = (
len(eval_pipeline_stage.loss_names) > 0
and not should_compute_onpolicy_losses
)
if should_compute_offpolicy_losses:
# In this case we are evaluating a non-rollout storage, e.g. some off-policy data
get_logger().info(
f"[{self.mode} worker {self.worker_id}] Non-rollout storage detected, will now compute losses"
f" using this storage."
)
offpolicy_eval_done = False
while not offpolicy_eval_done:
before_update_info = dict(
next_value=None,
use_gae=eval_pipeline_stage.training_settings.use_gae,
gamma=eval_pipeline_stage.training_settings.gamma,
tau=eval_pipeline_stage.training_settings.gae_lambda,
adv_stats_callback=lambda advantages: {
"mean": advantages.mean(),
"std": advantages.std(),
},
)
# Prepare storage for iteration during loss computation
for storage in uuid_to_non_rollout_storage.values():
storage.before_updates(**before_update_info)
# Compute losses
assert len(eval_pipeline_stage.stage_components) == 1
try:
for sc in eval_pipeline_stage.stage_components:
with torch.no_grad():
self.compute_losses_track_them_and_backprop(
stage=eval_pipeline_stage,
stage_component=sc,
storage=uuid_to_non_rollout_storage[sc.storage_uuid],
skip_backprop=True,
)
except EOFError:
offpolicy_eval_done = True
for storage in uuid_to_non_rollout_storage.values():
storage.after_updates()
total_bsize = sum(
tif.info.get("worker_batch_size", 0)
for tif in self.tracking_info_list
)
logging_pkg = self.aggregate_and_send_logging_package(
tracking_info_list=self.tracking_info_list,
logging_pkg=logging_pkg,
send_logging_package=False,
)
self.tracking_info_list.clear()
cur_time = time.time()
if verbose and (cur_time - last_time >= update_secs):
get_logger().info(
f"[{self.mode} worker {self.worker_id}]"
f" For ckpt {checkpoint_file_path}"
f" {total_bsize / (cur_time - init_time):.1f} its/sec."
)
                    if len(logging_pkg.info_trackers) != 0:
get_logger().info(
", ".join(
[
f"[{self.mode} worker {self.worker_id}]"
f" num_{self.mode}_iters_complete {total_bsize}",
*[
f"{'/'.join(k0)}/{k1} {v1:.3g}"
for k0, v0 in logging_pkg.info_trackers.items()
for k1, v1 in v0.means().items()
],
]
)
)
last_time = cur_time
# Call after_updates here to reset all storages
for storage in uuid_to_storage.values():
storage.after_updates()
# Set the training pipeline to `None` so that the storages do not
# persist across calls to `run_eval`
self.training_pipeline = None
logging_pkg.checkpoint_file_name = checkpoint_file_path
return logging_pkg
@staticmethod
def skip_to_latest(checkpoints_queue: mp.Queue, command: Optional[str], data):
assert (
checkpoints_queue is not None
), "Attempting to process checkpoints queue but this queue is `None`."
cond = True
while cond:
sentinel = ("skip.AUTO.sentinel", time.time())
checkpoints_queue.put(
sentinel
            )  # safe since the single "valid" process is the only consumer
forwarded = False
while not forwarded:
new_command: Optional[str]
new_data: Any
(
new_command,
new_data,
) = checkpoints_queue.get() # block until next command arrives
if new_command == command:
data = new_data
elif new_command == sentinel[0]:
assert (
new_data == sentinel[1]
), f"Wrong sentinel found: {new_data} vs {sentinel[1]}"
forwarded = True
else:
raise ValueError(
f"Unexpected command {new_command} with data {new_data}"
)
time.sleep(1)
cond = not checkpoints_queue.empty()
return data
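    # Sketch of the mechanism above: a unique sentinel is enqueued, then
    # messages are consumed until that sentinel reappears; every intervening
    # message matching `command` overwrites `data`, so only the most recently
    # queued checkpoint survives. The outer loop repeats this while the queue
    # remains non-empty.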
def process_checkpoints(self):
assert (
self.mode != TRAIN_MODE_STR
), "process_checkpoints only to be called from a valid or test instance"
assert (
self.checkpoints_queue is not None
), "Attempting to process checkpoints queue but this queue is `None`."
visualizer: Optional[VizSuite] = None
finalized = False
# noinspection PyBroadException
try:
while True:
command: Optional[str]
ckp_file_path: Any
(
command,
ckp_file_path,
) = self.checkpoints_queue.get() # block until first command arrives
# get_logger().debug(
# "{} {} command {} data {}".format(
# self.mode, self.worker_id, command, data
# )
# )
if command == "eval":
if self.mode == VALID_MODE_STR:
                        # Skip to the latest checkpoint. This is safe because:
                        # 1. there is only one consumer in valid mode
                        # 2. no quit/exit/close message is issued by the runner or the trainer
ckp_file_path = self.skip_to_latest(
checkpoints_queue=self.checkpoints_queue,
command=command,
data=ckp_file_path,
)
if (
visualizer is None
and self.machine_params.visualizer is not None
):
visualizer = self.machine_params.visualizer
eval_package = self.run_eval(
checkpoint_file_path=ckp_file_path,
visualizer=visualizer,
verbose=True,
update_secs=20 if self.mode == TEST_MODE_STR else 5 * 60,
)
self.results_queue.put(eval_package)
if self.is_distributed:
dist.barrier()
elif command in ["quit", "exit", "close"]:
finalized = True
break
else:
raise NotImplementedError()
except KeyboardInterrupt:
get_logger().info(
f"[{self.mode} worker {self.worker_id}] KeyboardInterrupt, exiting."
)
except Exception as e:
get_logger().error(
f"[{self.mode} worker {self.worker_id}] Encountered {type(e).__name__}, exiting."
)
get_logger().error(traceback.format_exc())
finally:
if finalized:
if self.mode == TEST_MODE_STR:
self.results_queue.put(("test_stopped", 0))
get_logger().info(
f"[{self.mode} worker {self.worker_id}] Complete, all checkpoints processed."
)
else:
if self.mode == TEST_MODE_STR:
self.results_queue.put(("test_stopped", self.worker_id + 1))
self.close(verbose=self.mode == TEST_MODE_STR)
| allenact-main | allenact/algorithms/onpolicy_sync/engine.py |
# Original work Copyright (c) Facebook, Inc. and its affiliates.
# Modified work Copyright (c) Allen Institute for AI
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import signal
import time
import traceback
from multiprocessing.connection import Connection
from multiprocessing.context import BaseContext
from multiprocessing.process import BaseProcess
from threading import Thread
from typing import (
Any,
Callable,
Dict,
Generator,
Iterator,
List,
Optional,
Sequence,
Set,
Tuple,
Union,
cast,
)
import numpy as np
from gym.spaces.dict import Dict as SpaceDict
from setproctitle import setproctitle as ptitle
from allenact.base_abstractions.misc import RLStepResult
from allenact.base_abstractions.sensor import SensorSuite, Sensor
from allenact.base_abstractions.task import TaskSampler
from allenact.utils.misc_utils import partition_sequence
from allenact.utils.system import get_logger
from allenact.utils.tensor_utils import tile_images
try:
# Use torch.multiprocessing if we can.
# We have yet to find a reason to not use it and
# you are required to use it when sending a torch.Tensor
# between processes
import torch.multiprocessing as mp
except ImportError:
import multiprocessing as mp # type: ignore
DEFAULT_MP_CONTEXT_TYPE = "forkserver"
COMPLETE_TASK_METRICS_KEY = "__AFTER_TASK_METRICS__"
COMPLETE_TASK_CALLBACK_KEY = "__AFTER_TASK_CALLBACK__"
STEP_COMMAND = "step"
NEXT_TASK_COMMAND = "next_task"
RENDER_COMMAND = "render"
CLOSE_COMMAND = "close"
OBSERVATION_SPACE_COMMAND = "observation_space"
ACTION_SPACE_COMMAND = "action_space"
CALL_COMMAND = "call"
SAMPLER_COMMAND = "call_sampler"
ATTR_COMMAND = "attr"
SAMPLER_ATTR_COMMAND = "sampler_attr"
RESET_COMMAND = "reset"
SEED_COMMAND = "seed"
PAUSE_COMMAND = "pause"
RESUME_COMMAND = "resume"
class DelaySignalHandling:
# Modified from https://stackoverflow.com/a/21919644
def __init__(self):
self.int_signal_received: Optional[Any] = None
self.term_signal_received: Optional[Any] = None
self.old_int_handler = None
self.old_term_handler = None
def __enter__(self):
self.int_signal_received: Optional[Any] = None
self.term_signal_received: Optional[Any] = None
self.old_int_handler = signal.signal(signal.SIGINT, self.int_handler)
self.old_term_handler = signal.signal(signal.SIGTERM, self.term_handler)
def int_handler(self, sig, frame):
self.int_signal_received = (sig, frame)
get_logger().debug("SIGINT received. Delaying KeyboardInterrupt.")
def term_handler(self, sig, frame):
self.term_signal_received = (sig, frame)
get_logger().debug("SIGTERM received. Delaying termination.")
def __exit__(self, type, value, traceback):
signal.signal(signal.SIGINT, self.old_int_handler)
signal.signal(signal.SIGTERM, self.old_term_handler)
if self.term_signal_received:
# For some reason there appear to be cases where the original termination
# handler is not callable. It is unclear to me exactly why this is the case
# but here we add a guard to double check that the handler is callable and,
# if it's not, we re-send the termination signal to the process and let
# the python internals handle it (note that we've already reset the termination
            # handler to what it was originally above in the signal.signal(...) code).
if callable(self.old_term_handler):
self.old_term_handler(*self.term_signal_received)
else:
get_logger().debug(
"Termination handler could not be called after delaying signal handling."
f" Resending the SIGTERM signal. Last (sig, frame) == ({self.term_signal_received})."
)
os.kill(os.getpid(), signal.SIGTERM)
if self.int_signal_received:
if callable(self.old_int_handler):
self.old_int_handler(*self.int_signal_received)
else:
signal.default_int_handler(*self.int_signal_received)
class VectorSampledTasks:
"""Vectorized collection of tasks. Creates multiple processes where each
process runs its own TaskSampler. Each process generates one Task from its
TaskSampler at a time and this class allows for interacting with these
tasks in a vectorized manner. When a task on a process completes, the
process samples another task from its task sampler. All the tasks are
synchronized (for step and new_task methods).
# Attributes
make_sampler_fn : function which creates a single TaskSampler.
sampler_fn_args : sequence of dictionaries describing the args
to pass to make_sampler_fn on each individual process.
auto_resample_when_done : automatically sample a new Task from the TaskSampler when
the Task completes. If False, a new Task will not be resampled until all
Tasks on all processes have completed. This functionality is provided for seamless training
of vectorized Tasks.
multiprocessing_start_method : the multiprocessing method used to
spawn worker processes. Valid methods are
        ``{'spawn', 'forkserver', 'fork'}``. ``'forkserver'`` is the
        recommended method as it works well with CUDA. If
        ``'fork'`` is used, the subprocesses must be started before
        any other GPU usage.
"""
observation_space: SpaceDict
_workers: List[Union[mp.Process, Thread, BaseProcess]]
_is_waiting: bool
_num_task_samplers: int
_auto_resample_when_done: bool
_mp_ctx: BaseContext
_connection_read_fns: List[Callable[[], Any]]
_connection_write_fns: List[Callable[[Any], None]]
_read_timeout: Optional[float]
def __init__(
self,
make_sampler_fn: Callable[..., TaskSampler],
sampler_fn_args: Sequence[Dict[str, Any]] = None,
callback_sensors: Optional[Sequence[Sensor]] = None,
auto_resample_when_done: bool = True,
multiprocessing_start_method: Optional[str] = "forkserver",
mp_ctx: Optional[BaseContext] = None,
should_log: bool = True,
max_processes: Optional[int] = None,
read_timeout: Optional[
float
] = 60, # Seconds to wait for a task to return a response before timing out
) -> None:
self._is_waiting = False
self._is_closed = True
self.should_log = should_log
self.max_processes = max_processes
self.read_timeout = read_timeout
assert (
sampler_fn_args is not None and len(sampler_fn_args) > 0
), "number of processes to be created should be greater than 0"
self._num_task_samplers = len(sampler_fn_args)
self._num_processes = (
self._num_task_samplers
if max_processes is None
else min(max_processes, self._num_task_samplers)
)
self._auto_resample_when_done = auto_resample_when_done
assert (multiprocessing_start_method is None) != (
mp_ctx is None
), "Exactly one of `multiprocessing_start_method`, and `mp_ctx` must be not None."
if multiprocessing_start_method is not None:
assert multiprocessing_start_method in self._valid_start_methods, (
"multiprocessing_start_method must be one of {}. Got '{}'"
).format(self._valid_start_methods, multiprocessing_start_method)
self._mp_ctx = mp.get_context(multiprocessing_start_method)
else:
self._mp_ctx = cast(BaseContext, mp_ctx)
self.npaused_per_process = [0] * self._num_processes
self.sampler_index_to_process_ind_and_subprocess_ind: Optional[
List[List[int]]
] = None
self._reset_sampler_index_to_process_ind_and_subprocess_ind()
self._workers: Optional[List[Union[mp.Process, Thread, BaseProcess]]] = None
for args in sampler_fn_args:
args["mp_ctx"] = self._mp_ctx
(
connection_poll_fns,
connection_read_fns,
self._connection_write_fns,
) = self._spawn_workers( # noqa
make_sampler_fn=make_sampler_fn,
sampler_fn_args_list=[
args_list for args_list in self._partition_to_processes(sampler_fn_args)
],
callback_sensor_suite=(
SensorSuite(callback_sensors)
if isinstance(callback_sensors, Sequence)
else callback_sensors
),
)
self._connection_read_fns = [
self._create_read_function_with_timeout(
read_fn=read_fn, poll_fn=poll_fn, timeout=self.read_timeout
)
for read_fn, poll_fn in zip(connection_read_fns, connection_poll_fns)
]
self._is_closed = False
for write_fn in self._connection_write_fns:
write_fn((OBSERVATION_SPACE_COMMAND, None))
# Note that we increase the read timeout below as initialization can take some time
observation_spaces = [
space
for read_fn in self._connection_read_fns
for space in read_fn(timeout_to_use=5 * self.read_timeout if self.read_timeout is not None else None) # type: ignore
]
if any(os is None for os in observation_spaces):
            raise NotImplementedError(
                "It appears that the `all_observation_spaces_equal`"
                " property is not True for some task sampler created by"
" VectorSampledTasks. This is not currently supported."
)
if any(observation_spaces[0] != os for os in observation_spaces):
raise NotImplementedError(
"It appears that the observation spaces of the samplers"
" created in VectorSampledTasks are not equal."
" This is not currently supported."
)
self.observation_space = observation_spaces[0]
for write_fn in self._connection_write_fns:
write_fn((ACTION_SPACE_COMMAND, None))
self.action_spaces = [
space for read_fn in self._connection_read_fns for space in read_fn()
]
@staticmethod
def _create_read_function_with_timeout(
*,
read_fn: Callable[[], Any],
poll_fn: Callable[[float], bool],
timeout: Optional[float],
) -> Callable[[], Any]:
def read_with_timeout(timeout_to_use: Optional[float] = timeout):
if timeout_to_use is not None:
# noinspection PyArgumentList
if not poll_fn(timeout=timeout_to_use):
raise TimeoutError(
f"Did not receive output from `VectorSampledTask` worker for {timeout_to_use} seconds."
)
return read_fn()
return read_with_timeout
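    # A hedged usage sketch (names illustrative): given a multiprocessing
    # `Connection` `conn`, one could build a guarded reader via
    #     read = VectorSampledTasks._create_read_function_with_timeout(
    #         read_fn=conn.recv, poll_fn=conn.poll, timeout=60
    #     )
    # after which `read()` raises a `TimeoutError` if no data arrives in time.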
def _reset_sampler_index_to_process_ind_and_subprocess_ind(self):
self.sampler_index_to_process_ind_and_subprocess_ind = [
[i, j]
for i, part in enumerate(
partition_sequence([1] * self._num_task_samplers, self._num_processes)
)
for j in range(len(part))
]
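    # For example, assuming `partition_sequence` splits 5 samplers across 2
    # processes into parts of sizes (3, 2), the mapping above becomes
    #     [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1]]
    # i.e. sampler 3 is the 0th sampler handled by process 1.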
def _partition_to_processes(self, seq: Union[Iterator, Sequence]):
subparts_list: List[List] = [[] for _ in range(self._num_processes)]
seq = list(seq)
assert len(seq) == len(self.sampler_index_to_process_ind_and_subprocess_ind)
for sampler_index, (process_ind, subprocess_ind) in enumerate(
self.sampler_index_to_process_ind_and_subprocess_ind
):
assert len(subparts_list[process_ind]) == subprocess_ind
subparts_list[process_ind].append(seq[sampler_index])
return subparts_list
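    # Continuing the (assumed) example above, `_partition_to_processes(["a", "b", "c", "d", "e"])`
    # would then return [["a", "b", "c"], ["d", "e"]], grouping per-sampler
    # values by the process that owns each sampler.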
@property
def is_closed(self) -> bool:
"""Has the vector task been closed."""
return self._is_closed
@property
    def num_unpaused_tasks(self) -> int:
        """Number of unpaused task samplers.
        # Returns
        Number of unpaused task samplers.
"""
return self._num_task_samplers - sum(self.npaused_per_process)
@property
    def mp_ctx(self):
        """Get the multiprocessing context used by the vector task.
# Returns
The multiprocessing context.
"""
return self._mp_ctx
@staticmethod
def _task_sampling_loop_worker(
worker_id: Union[int, str],
connection_read_fn: Callable,
connection_write_fn: Callable,
make_sampler_fn: Callable[..., TaskSampler],
sampler_fn_args_list: List[Dict[str, Any]],
callback_sensor_suite: Optional[SensorSuite],
auto_resample_when_done: bool,
should_log: bool,
child_pipe: Optional[Connection] = None,
parent_pipe: Optional[Connection] = None,
    ) -> None:
        """Process worker for creating and interacting with the
Tasks/TaskSampler."""
ptitle(f"VectorSampledTask: {worker_id}")
sp_vector_sampled_tasks = SingleProcessVectorSampledTasks(
make_sampler_fn=make_sampler_fn,
sampler_fn_args_list=sampler_fn_args_list,
callback_sensor_suite=callback_sensor_suite,
auto_resample_when_done=auto_resample_when_done,
should_log=should_log,
)
if parent_pipe is not None:
parent_pipe.close() # Means this pipe will close when the calling process closes it
try:
while True:
read_input = connection_read_fn()
# TODO: Was the below necessary?
# with DelaySignalHandling():
# # Delaying signal handling here is necessary to ensure that we don't
# # (when processing a SIGTERM/SIGINT signal) attempt to send data to
# # a generator while it is already processing other data.
if len(read_input) == 3:
sampler_index, command, data = read_input
assert command != CLOSE_COMMAND, "Must close all processes at once."
assert (
command != RESUME_COMMAND
), "Must resume all task samplers at once."
if command == PAUSE_COMMAND:
sp_vector_sampled_tasks.pause_at(sampler_index=sampler_index)
connection_write_fn("done")
else:
connection_write_fn(
sp_vector_sampled_tasks.command_at(
sampler_index=sampler_index, command=command, data=data,
)
)
else:
commands, data_list = read_input
assert (
commands != PAUSE_COMMAND
), "Cannot pause all task samplers at once."
if commands == CLOSE_COMMAND:
# Will close the `sp_vector_sampled_tasks` in the `finally` clause below
break
elif commands == RESUME_COMMAND:
sp_vector_sampled_tasks.resume_all()
connection_write_fn("done")
else:
if isinstance(commands, str):
commands = [
commands
] * sp_vector_sampled_tasks.num_unpaused_tasks
connection_write_fn(
sp_vector_sampled_tasks.command(
commands=commands, data_list=data_list
)
)
except KeyboardInterrupt:
if should_log:
get_logger().info(f"Worker {worker_id} KeyboardInterrupt")
except Exception as e:
get_logger().error(
f"Worker {worker_id} encountered an exception:\n{traceback.format_exc()}"
)
raise e
finally:
try:
sp_vector_sampled_tasks.close()
except Exception:
pass
if child_pipe is not None:
child_pipe.close()
if should_log:
get_logger().info(f"Worker {worker_id} closing.")
def _spawn_workers(
self,
make_sampler_fn: Callable[..., TaskSampler],
sampler_fn_args_list: Sequence[Sequence[Dict[str, Any]]],
callback_sensor_suite: Optional[SensorSuite],
) -> Tuple[
List[Callable[[], bool]], List[Callable[[], Any]], List[Callable[[Any], None]]
]:
parent_connections, worker_connections = zip(
*[self._mp_ctx.Pipe(duplex=True) for _ in range(self._num_processes)]
)
self._workers = []
k = 0
id: Union[int, str]
for id, (worker_conn, parent_conn, current_sampler_fn_args_list) in enumerate(
zip(worker_connections, parent_connections, sampler_fn_args_list)
):
if len(current_sampler_fn_args_list) != 1:
id = f"{id}({k}-{k + len(current_sampler_fn_args_list) - 1})"
k += len(current_sampler_fn_args_list)
if self.should_log:
get_logger().info(
f"Starting {id}-th VectorSampledTask worker with args {current_sampler_fn_args_list}"
)
ps = self._mp_ctx.Process( # type: ignore
target=self._task_sampling_loop_worker,
kwargs=dict(
worker_id=id,
connection_read_fn=worker_conn.recv,
connection_write_fn=worker_conn.send,
make_sampler_fn=make_sampler_fn,
sampler_fn_args_list=current_sampler_fn_args_list,
callback_sensor_suite=callback_sensor_suite,
auto_resample_when_done=self._auto_resample_when_done,
should_log=self.should_log,
child_pipe=worker_conn,
parent_pipe=parent_conn,
),
)
self._workers.append(ps)
ps.daemon = True
ps.start()
worker_conn.close() # Means this pipe will close when the child process closes it
time.sleep(
0.1
) # Useful to ensure things don't lock up when spawning many envs
return (
[p.poll for p in parent_connections],
[p.recv for p in parent_connections],
[p.send for p in parent_connections],
)
    def next_task(self, **kwargs):
        """Move to the next Task for all TaskSamplers.
# Parameters
kwargs : key word arguments passed to the `next_task` function of the samplers.
# Returns
List of initial observations for each of the new tasks.
"""
return self.command(
commands=NEXT_TASK_COMMAND, data_list=[kwargs] * self.num_unpaused_tasks
)
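    # Example usage (mirroring the training engine above): force every sampler
    # to advance to a new scene with
    #     vector_tasks.next_task(force_advance_scene=True)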
def get_observations(self):
"""Get observations for all unpaused tasks.
# Returns
List of observations for each of the unpaused tasks.
"""
return self.call(["get_observations"] * self.num_unpaused_tasks,)
def command_at(
self, sampler_index: int, command: str, data: Optional[Any] = None
    ) -> Any:
        """Runs the command on the selected task and returns the result.
        # Parameters
        sampler_index : Index of the task sampler on which to run the command.
        command : The command to run.
        data : Optional data to pass along with the command.
        # Returns
        Result of the command.
"""
self._is_waiting = True
(
process_ind,
subprocess_ind,
) = self.sampler_index_to_process_ind_and_subprocess_ind[sampler_index]
self._connection_write_fns[process_ind]((subprocess_ind, command, data))
result = self._connection_read_fns[process_ind]()
self._is_waiting = False
return result
def call_at(
self,
sampler_index: int,
function_name: str,
function_args: Optional[List[Any]] = None,
) -> Any:
"""Calls a function (which is passed by name) on the selected task and
returns the result.
# Parameters
        sampler_index : Which task to call the function on.
function_name : The name of the function to call on the task.
function_args : Optional function args.
# Returns
Result of calling the function.
"""
return self.command_at(
sampler_index=sampler_index,
command=CALL_COMMAND,
data=(function_name, function_args),
)
    def next_task_at(self, sampler_index: int) -> List[RLStepResult]:
        """Move to the next Task from the TaskSampler at the given sampler
        index in the vector.
        # Parameters
        sampler_index : Index of the sampler whose task should be advanced.
        # Returns
        List of length one containing the observations of the newly sampled task.
"""
return [
self.command_at(
sampler_index=sampler_index, command=NEXT_TASK_COMMAND, data=None
)
]
    def step_at(self, sampler_index: int, action: Any) -> List[RLStepResult]:
        """Step in the task at the given sampler index in the vector.
        # Parameters
        sampler_index : Index of the sampler whose task should be stepped.
action : The action to take.
# Returns
List containing the output of step method on the task in the indexed process.
"""
return [
self.command_at(
sampler_index=sampler_index, command=STEP_COMMAND, data=action
)
]
def async_step(self, actions: Sequence[Any]) -> None:
"""Asynchronously step in the vectorized Tasks.
# Parameters
actions : actions to be performed in the vectorized Tasks.
"""
self._is_waiting = True
for write_fn, action in zip(
self._connection_write_fns, self._partition_to_processes(actions)
):
write_fn((STEP_COMMAND, action))
    def wait_step(self) -> List[Dict[str, Any]]:
        """Wait until all asynchronously stepped processes have returned their
        results."""
observations = []
for read_fn in self._connection_read_fns:
observations.extend(read_fn())
self._is_waiting = False
return observations
def step(self, actions: Sequence[Any]):
"""Perform actions in the vectorized tasks.
# Parameters
actions: List of size _num_samplers containing action to be taken in each task.
# Returns
List of outputs from the step method of tasks.
"""
self.async_step(actions)
return self.wait_step()
def reset_all(self):
"""Reset all task samplers to their initial state (except for the RNG
seed)."""
self.command(commands=RESET_COMMAND, data_list=None)
def set_seeds(self, seeds: List[int]):
"""Sets new tasks' RNG seeds.
# Parameters
seeds: List of size _num_samplers containing new RNG seeds.
"""
self.command(commands=SEED_COMMAND, data_list=seeds)
def close(self) -> None:
if self._is_closed:
return
if self._is_waiting:
for read_fn in self._connection_read_fns:
try:
# noinspection PyArgumentList
read_fn(0) # Time out immediately
except Exception:
pass
for write_fn in self._connection_write_fns:
try:
write_fn((CLOSE_COMMAND, None))
except Exception:
pass
for process in self._workers:
try:
process.join(timeout=0.1)
except Exception:
pass
for process in self._workers:
if process.is_alive():
process.kill()
self._is_closed = True
    def pause_at(self, sampler_index: int) -> None:
        """Pauses computation on the Task at sampler index `sampler_index`
        without destroying the Task. This is useful to avoid stepping all Tasks
        when only some are still active (for example during the last samples of
        a running eval).
        # Parameters
        sampler_index : which sampler to pause. All sampler indices after this
        one will be shifted down by one.
"""
if self._is_waiting:
for read_fn in self._connection_read_fns:
read_fn()
(
process_ind,
subprocess_ind,
) = self.sampler_index_to_process_ind_and_subprocess_ind[sampler_index]
self.command_at(sampler_index=sampler_index, command=PAUSE_COMMAND, data=None)
for i in range(
sampler_index + 1, len(self.sampler_index_to_process_ind_and_subprocess_ind)
):
other_process_and_sub_process_inds = self.sampler_index_to_process_ind_and_subprocess_ind[
i
]
if other_process_and_sub_process_inds[0] == process_ind:
other_process_and_sub_process_inds[1] -= 1
else:
break
self.sampler_index_to_process_ind_and_subprocess_ind.pop(sampler_index)
self.npaused_per_process[process_ind] += 1
def resume_all(self) -> None:
"""Resumes any paused processes."""
self._is_waiting = True
for connection_write_fn in self._connection_write_fns:
connection_write_fn((RESUME_COMMAND, None))
for connection_read_fn in self._connection_read_fns:
connection_read_fn()
self._is_waiting = False
self._reset_sampler_index_to_process_ind_and_subprocess_ind()
for i in range(len(self.npaused_per_process)):
self.npaused_per_process[i] = 0
def command(
self, commands: Union[List[str], str], data_list: Optional[List]
    ) -> List[Any]:
        """Runs the given commands (with their associated data) across all
        unpaused task samplers and returns the list of results."""
self._is_waiting = True
if isinstance(commands, str):
commands = [commands] * self.num_unpaused_tasks
if data_list is None:
data_list = [None] * self.num_unpaused_tasks
for write_fn, subcommands, subdata_list in zip(
self._connection_write_fns,
self._partition_to_processes(commands),
self._partition_to_processes(data_list),
):
write_fn((subcommands, subdata_list))
results = []
for read_fn in self._connection_read_fns:
results.extend(read_fn())
self._is_waiting = False
return results
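    # Example usage (mirroring `run_eval` in the engine): query the number of
    # remaining tasks in every unpaused sampler via
    #     lengths = vector_tasks.command("sampler_attr", ["length"] * n)
    # where `n` is the number of unpaused samplers.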
def call(
self,
function_names: Union[str, List[str]],
function_args_list: Optional[List[Any]] = None,
) -> List[Any]:
"""Calls a list of functions (which are passed by name) on the
corresponding task (by index).
# Parameters
        function_names : The names of the functions to call on the tasks.
function_args_list : List of function args for each function.
If provided, len(function_args_list) should be as long as len(function_names).
# Returns
List of results of calling the functions.
"""
self._is_waiting = True
if isinstance(function_names, str):
function_names = [function_names] * self.num_unpaused_tasks
if function_args_list is None:
function_args_list = [None] * len(function_names)
assert len(function_names) == len(function_args_list)
func_names_and_args_list = zip(function_names, function_args_list)
for write_fn, func_names_and_args in zip(
self._connection_write_fns,
self._partition_to_processes(func_names_and_args_list),
):
write_fn((CALL_COMMAND, func_names_and_args))
results = []
for read_fn in self._connection_read_fns:
results.extend(read_fn())
self._is_waiting = False
return results
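    # Example usage: `vector_tasks.call(["get_observations"] * n)` gathers the
    # current observations from every unpaused task (cf. `get_observations`
    # above); `n` here denotes the number of unpaused samplers.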
def attr_at(self, sampler_index: int, attr_name: str) -> Any:
"""Gets the attribute (specified by name) on the selected task and
returns it.
# Parameters
        sampler_index : Which task to get the attribute from.
        attr_name : The name of the attribute to get from the task.
        # Returns
        The value of the attribute.
"""
return self.command_at(sampler_index, command=ATTR_COMMAND, data=attr_name)
def attr(self, attr_names: Union[List[str], str]) -> List[Any]:
"""Gets the attributes (specified by name) on the tasks.
# Parameters
        attr_names : The names of the attributes to get from the tasks.
        # Returns
        List of the attribute values.
"""
if isinstance(attr_names, str):
attr_names = [attr_names] * self.num_unpaused_tasks
return self.command(commands=ATTR_COMMAND, data_list=attr_names)
def render(
self, mode: str = "human", *args, **kwargs
) -> Union[np.ndarray, None, List[np.ndarray]]:
"""Render observations from all Tasks in a tiled image or list of
images."""
images = self.command(
commands=RENDER_COMMAND,
data_list=[(args, {"mode": "rgb", **kwargs})] * self.num_unpaused_tasks,
)
if mode == "raw_rgb_list":
return images
tile = tile_images(images)
if mode == "human":
import cv2
cv2.imshow("vectask", tile[:, :, ::-1])
cv2.waitKey(1)
return None
elif mode == "rgb_array":
return tile
else:
raise NotImplementedError
@property
def _valid_start_methods(self) -> Set[str]:
return {"forkserver", "spawn", "fork"}
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
class SingleProcessVectorSampledTasks(object):
"""Vectorized collection of tasks.
Simultaneously handles the state of multiple TaskSamplers and their associated tasks.
Allows for interacting with these tasks in a vectorized manner. When a task completes,
another task is sampled from the appropriate task sampler. All the tasks are
synchronized (for step and new_task methods).
# Attributes
make_sampler_fn : function which creates a single TaskSampler.
sampler_fn_args : sequence of dictionaries describing the args
to pass to make_sampler_fn on each individual process.
auto_resample_when_done : automatically sample a new Task from the TaskSampler when
the Task completes. If False, a new Task will not be resampled until all
Tasks on all processes have completed. This functionality is provided for seamless training
of vectorized Tasks.
"""
observation_space: SpaceDict
_vector_task_generators: List[Generator]
_num_task_samplers: int
_auto_resample_when_done: bool
def __init__(
self,
make_sampler_fn: Callable[..., TaskSampler],
sampler_fn_args_list: Sequence[Dict[str, Any]] = None,
callback_sensor_suite: Optional[SensorSuite] = None,
auto_resample_when_done: bool = True,
should_log: bool = True,
) -> None:
self._is_closed = True
assert (
sampler_fn_args_list is not None and len(sampler_fn_args_list) > 0
), "number of processes to be created should be greater than 0"
self._num_task_samplers = len(sampler_fn_args_list)
self._auto_resample_when_done = auto_resample_when_done
self.should_log = should_log
self._vector_task_generators: List[Generator] = self._create_generators(
make_sampler_fn=make_sampler_fn,
sampler_fn_args=[{"mp_ctx": None, **args} for args in sampler_fn_args_list],
callback_sensor_suite=callback_sensor_suite,
)
self._is_closed = False
observation_spaces = [
vsi.send((OBSERVATION_SPACE_COMMAND, None))
for vsi in self._vector_task_generators
]
if any(os is None for os in observation_spaces):
            raise NotImplementedError(
                "It appears that the `all_observation_spaces_equal`"
                " property is not True for some task sampler created by"
" VectorSampledTasks. This is not currently supported."
)
if any(observation_spaces[0] != os for os in observation_spaces):
raise NotImplementedError(
"It appears that the observation spaces of the samplers"
" created in VectorSampledTasks are not equal."
" This is not currently supported."
)
self.observation_space = observation_spaces[0]
self.action_spaces = [
vsi.send((ACTION_SPACE_COMMAND, None))
for vsi in self._vector_task_generators
]
self._paused: List[Tuple[int, Generator]] = []
@property
def is_closed(self) -> bool:
"""Has the vector task been closed."""
return self._is_closed
@property
def mp_ctx(self) -> Optional[BaseContext]:
return None
@property
    def num_unpaused_tasks(self) -> int:
        """Number of unpaused task samplers.
        # Returns
        Number of unpaused task samplers.
"""
return self._num_task_samplers - len(self._paused)
@staticmethod
def _task_sampling_loop_generator_fn(
worker_id: int,
make_sampler_fn: Callable[..., TaskSampler],
sampler_fn_args: Dict[str, Any],
callback_sensor_suite: Optional[SensorSuite],
auto_resample_when_done: bool,
should_log: bool,
) -> Generator:
"""Generator for working with Tasks/TaskSampler."""
task_sampler = make_sampler_fn(**sampler_fn_args)
current_task = task_sampler.next_task()
if current_task is None:
raise RuntimeError(
"Newly created task sampler had `None` as it's first task. This likely means that"
" it was not provided with any tasks to generate. This can happen if, e.g., during testing"
" you have started more processes than you had tasks to test. Currently this is not supported:"
" every task sampler must be able to generate at least one task."
)
try:
command, data = yield "started"
while command != CLOSE_COMMAND:
if command == STEP_COMMAND:
step_result: RLStepResult = current_task.step(data)
if current_task.is_done():
metrics = current_task.metrics()
if metrics is not None and len(metrics) != 0:
if step_result.info is None:
step_result = step_result.clone({"info": {}})
step_result.info[COMPLETE_TASK_METRICS_KEY] = metrics
if callback_sensor_suite is not None:
task_callback_data = callback_sensor_suite.get_observations(
env=current_task.env, task=current_task
)
if step_result.info is None:
step_result = step_result.clone({"info": {}})
step_result.info[
COMPLETE_TASK_CALLBACK_KEY
] = task_callback_data
if auto_resample_when_done:
current_task = task_sampler.next_task()
if current_task is None:
step_result = step_result.clone({"observation": None})
else:
step_result = step_result.clone(
{"observation": current_task.get_observations()}
)
command, data = yield step_result
elif command == NEXT_TASK_COMMAND:
if data is not None:
current_task = task_sampler.next_task(**data)
else:
current_task = task_sampler.next_task()
observations = current_task.get_observations()
command, data = yield observations
elif command == RENDER_COMMAND:
command, data = yield current_task.render(*data[0], **data[1])
elif (
command == OBSERVATION_SPACE_COMMAND
or command == ACTION_SPACE_COMMAND
):
res = getattr(current_task, command)
command, data = yield res
elif command == CALL_COMMAND:
function_name, function_args = data
if function_args is None or len(function_args) == 0:
result = getattr(current_task, function_name)()
else:
result = getattr(current_task, function_name)(*function_args)
command, data = yield result
elif command == SAMPLER_COMMAND:
function_name, function_args = data
if function_args is None or len(function_args) == 0:
result = getattr(task_sampler, function_name)()
else:
result = getattr(task_sampler, function_name)(*function_args)
command, data = yield result
elif command == ATTR_COMMAND:
property_name = data
result = getattr(current_task, property_name)
command, data = yield result
elif command == SAMPLER_ATTR_COMMAND:
property_name = data
result = getattr(task_sampler, property_name)
command, data = yield result
elif command == RESET_COMMAND:
task_sampler.reset()
current_task = task_sampler.next_task()
if current_task is None:
raise RuntimeError(
"After resetting the task sampler it seems to have"
" no new tasks (the `task_sampler.next_task()` call"
" returned `None` after the reset). This suggests that"
" the task sampler's reset method was not implemented"
f" correctly (task sampler type is {type(task_sampler)})."
)
command, data = yield "done"
elif command == SEED_COMMAND:
task_sampler.set_seed(data)
command, data = yield "done"
else:
raise NotImplementedError()
except KeyboardInterrupt:
if should_log:
get_logger().info(
"SingleProcessVectorSampledTask {} KeyboardInterrupt".format(
worker_id
)
)
except Exception as e:
get_logger().error(traceback.format_exc())
raise e
finally:
if should_log:
get_logger().info(
"SingleProcessVectorSampledTask {} closing.".format(worker_id)
)
task_sampler.close()
def _create_generators(
self,
make_sampler_fn: Callable[..., TaskSampler],
sampler_fn_args: Sequence[Dict[str, Any]],
callback_sensor_suite: Optional[SensorSuite],
) -> List[Generator]:
generators = []
for id, current_sampler_fn_args in enumerate(sampler_fn_args):
if self.should_log:
get_logger().info(
f"Starting {id}-th SingleProcessVectorSampledTasks generator with args {current_sampler_fn_args}."
)
generators.append(
self._task_sampling_loop_generator_fn(
worker_id=id,
make_sampler_fn=make_sampler_fn,
sampler_fn_args=current_sampler_fn_args,
callback_sensor_suite=callback_sensor_suite,
auto_resample_when_done=self._auto_resample_when_done,
should_log=self.should_log,
)
)
if next(generators[-1]) != "started":
raise RuntimeError("Generator failed to start.")
return generators
def next_task(self, **kwargs):
"""Move to the the next Task for all TaskSamplers.
# Parameters
kwargs : key word arguments passed to the `next_task` function of the samplers.
# Returns
List of initial observations for each of the new tasks.
"""
return [
g.send((NEXT_TASK_COMMAND, kwargs)) for g in self._vector_task_generators
]
def get_observations(self):
"""Get observations for all unpaused tasks.
# Returns
List of observations for each of the unpaused tasks.
"""
return self.call(["get_observations"] * self.num_unpaused_tasks,)
def next_task_at(self, index_process: int) -> List[RLStepResult]:
"""Move to the the next Task from the TaskSampler in index_process
process in the vector.
# Parameters
index_process : Index of the generator to be reset.
# Returns
List of length one containing the observations of the newly sampled task.
"""
return [
self._vector_task_generators[index_process].send((NEXT_TASK_COMMAND, None))
]
def step_at(self, index_process: int, action: int) -> List[RLStepResult]:
"""Step in the index_process task in the vector.
# Parameters
index_process : Index of the process to be reset.
action : The action to take.
# Returns
List containing the output of step method on the task in the indexed process.
"""
return self._vector_task_generators[index_process].send((STEP_COMMAND, action))
def step(self, actions: List[List[int]]):
"""Perform actions in the vectorized tasks.
# Parameters
actions: List of size _num_samplers containing action to be taken in each task.
# Returns
List of outputs from the step method of tasks.
"""
return [
g.send((STEP_COMMAND, action))
for g, action in zip(self._vector_task_generators, actions)
]
def reset_all(self):
"""Reset all task samplers to their initial state (except for the RNG
seed)."""
return [g.send((RESET_COMMAND, None)) for g in self._vector_task_generators]
def set_seeds(self, seeds: List[int]):
"""Sets new tasks' RNG seeds.
# Parameters
seeds: List of size _num_samplers containing new RNG seeds.
"""
return [
g.send((SEED_COMMAND, seed))
for g, seed in zip(self._vector_task_generators, seeds)
]
def close(self) -> None:
if self._is_closed:
return
for g in self._vector_task_generators:
try:
try:
g.send((CLOSE_COMMAND, None))
except StopIteration:
pass
except KeyboardInterrupt:
pass
self._is_closed = True
def pause_at(self, sampler_index: int) -> None:
"""Pauses computation on the Task in process `index` without destroying
the Task. This is useful for not needing to call steps on all Tasks
when only some are active (for example during the last samples of
running eval).
# Parameters
sampler_index : which process to pause. All indices after this
one will be shifted down by one.
"""
generator = self._vector_task_generators.pop(sampler_index)
self._paused.append((sampler_index, generator))
def resume_all(self) -> None:
"""Resumes any paused processes."""
for index, generator in reversed(self._paused):
self._vector_task_generators.insert(index, generator)
self._paused = []
def command_at(
self, sampler_index: int, command: str, data: Optional[Any] = None
) -> Any:
"""Calls a function (which is passed by name) on the selected task and
returns the result.
# Parameters
index : Which task to call the function on.
function_name : The name of the function to call on the task.
function_args : Optional function args.
# Returns
Result of calling the function.
"""
return self._vector_task_generators[sampler_index].send((command, data))
def command(
self, commands: Union[List[str], str], data_list: Optional[List]
) -> List[Any]:
""""""
if isinstance(commands, str):
commands = [commands] * self.num_unpaused_tasks
if data_list is None:
data_list = [None] * self.num_unpaused_tasks
return [
g.send((command, data))
for g, command, data in zip(
self._vector_task_generators, commands, data_list
)
]
def call_at(
self,
sampler_index: int,
function_name: str,
function_args: Optional[List[Any]] = None,
) -> Any:
"""Calls a function (which is passed by name) on the selected task and
returns the result.
# Parameters
index : Which task to call the function on.
function_name : The name of the function to call on the task.
function_args : Optional function args.
# Returns
Result of calling the function.
"""
return self._vector_task_generators[sampler_index].send(
(CALL_COMMAND, (function_name, function_args))
)
def call(
self,
function_names: Union[str, List[str]],
function_args_list: Optional[List[Any]] = None,
) -> List[Any]:
"""Calls a list of functions (which are passed by name) on the
corresponding task (by index).
# Parameters
function_names : The name of the functions to call on the tasks.
function_args_list : List of function args for each function.
If provided, len(function_args_list) should be as long as len(function_names).
# Returns
List of results of calling the functions.
"""
if isinstance(function_names, str):
function_names = [function_names] * self.num_unpaused_tasks
if function_args_list is None:
function_args_list = [None] * len(function_names)
assert len(function_names) == len(function_args_list)
return [
g.send((CALL_COMMAND, args))
for g, args in zip(
self._vector_task_generators, zip(function_names, function_args_list)
)
]
def attr_at(self, sampler_index: int, attr_name: str) -> Any:
"""Gets the attribute (specified by name) on the selected task and
returns it.
# Parameters
sampler_index : Which task to get the attribute from.
attr_name : The name of the attribute to get.
# Returns
The value of the attribute.
"""
return self._vector_task_generators[sampler_index].send(
(ATTR_COMMAND, attr_name)
)
def attr(self, attr_names: Union[List[str], str]) -> List[Any]:
"""Gets the attributes (specified by name) on the tasks.
# Parameters
attr_names : The names of the attributes to get from the tasks (a single
name is broadcast to all unpaused tasks).
# Returns
List of attribute values.
"""
if isinstance(attr_names, str):
attr_names = [attr_names] * self.num_unpaused_tasks
return [
g.send((ATTR_COMMAND, attr_name))
for g, attr_name in zip(self._vector_task_generators, attr_names)
]
def render(
self, mode: str = "human", *args, **kwargs
) -> Union[np.ndarray, None, List[np.ndarray]]:
"""Render observations from all Tasks in a tiled image or a list of
images."""
images = [
g.send((RENDER_COMMAND, (args, {"mode": "rgb", **kwargs})))
for g in self._vector_task_generators
]
if mode == "raw_rgb_list":
return images
for index, _ in reversed(self._paused):
images.insert(index, np.zeros_like(images[0]))
tile = tile_images(images)
if mode == "human":
import cv2
cv2.imshow("vectask", tile[:, :, ::-1])
cv2.waitKey(1)
return None
elif mode == "rgb_array":
return tile
else:
raise NotImplementedError
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
| allenact-main | allenact/algorithms/onpolicy_sync/vector_sampled_tasks.py |
# Original work Copyright (c) Facebook, Inc. and its affiliates.
# Modified work Copyright (c) Allen Institute for AI
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
import random
from typing import (
Union,
List,
Dict,
Tuple,
Sequence,
cast,
Optional,
Callable,
Any,
Generator,
)
import gym
import numpy as np
import torch
import allenact.utils.spaces_utils as su
from allenact.algorithms.onpolicy_sync.policy import (
FullMemorySpecType,
ObservationType,
ActionType,
)
from allenact.base_abstractions.misc import Memory
from allenact.utils.system import get_logger
class ExperienceStorage(abc.ABC):
@abc.abstractmethod
def initialize(self, *, observations: ObservationType, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def add(
self,
observations: ObservationType,
memory: Optional[Memory],
actions: torch.Tensor,
action_log_probs: torch.Tensor,
value_preds: torch.Tensor,
rewards: torch.Tensor,
masks: torch.Tensor,
):
"""
# Parameters
observations : Observations after taking `actions`
memory: Memory after having observed the last set of observations.
actions: Actions taken to reach the current state, i.e. taking these actions has led to a new state with
new `observations`.
action_log_probs : Log probs of `actions`
value_preds : Value predictions corresponding to the last observations
(i.e. the states before taking `actions`).
rewards : Rewards from taking `actions` in the last set of states.
masks : Masks corresponding to the current states, having 0 entries where `observations` correspond to
observations from the beginning of a new episode.
"""
raise NotImplementedError
def before_updates(self, **kwargs):
pass
def after_updates(self, **kwargs) -> int:
pass
@abc.abstractmethod
def to(self, device: torch.device):
pass
@abc.abstractmethod
def set_partition(self, index: int, num_parts: int):
raise NotImplementedError
@property
@abc.abstractmethod
def total_experiences(self) -> int:
raise NotImplementedError
class RolloutStorage(ExperienceStorage, abc.ABC):
# noinspection PyMethodOverriding
@abc.abstractmethod
def initialize(
self,
*,
observations: ObservationType,
num_samplers: int,
recurrent_memory_specification: FullMemorySpecType,
action_space: gym.Space,
**kwargs,
):
raise NotImplementedError
@abc.abstractmethod
def agent_input_for_next_step(self) -> Dict[str, Any]:
raise NotImplementedError
@abc.abstractmethod
def sampler_select(self, keep_list: Sequence[int]):
raise NotImplementedError
class StreamingStorageMixin(abc.ABC):
@abc.abstractmethod
def next_batch(self) -> Dict[str, Any]:
raise NotImplementedError
def reset_stream(self):
raise NotImplementedError
@abc.abstractmethod
def empty(self) -> bool:
raise NotImplementedError
class MiniBatchStorageMixin(abc.ABC):
@abc.abstractmethod
def batched_experience_generator(
self, num_mini_batch: int,
) -> Generator[Dict[str, Any], None, None]:
raise NotImplementedError
class RolloutBlockStorage(RolloutStorage, MiniBatchStorageMixin):
"""Class for storing rollout information for RL trainers."""
FLATTEN_SEPARATOR: str = "._AUTOFLATTEN_."
def __init__(self, init_size: int = 50):
self.full_size = init_size
self.flattened_to_unflattened: Dict[str, Dict[str, List[str]]] = {
"memory": dict(),
"observations": dict(),
}
self.unflattened_to_flattened: Dict[str, Dict[Tuple[str, ...], str]] = {
"memory": dict(),
"observations": dict(),
}
self.dim_names = ["step", "sampler", None]
self.memory_specification: Optional[FullMemorySpecType] = None
self.action_space: Optional[gym.Space] = None
self.memory_first_last: Optional[Memory] = None
self._observations_full: Memory = Memory()
self._value_preds_full: Optional[torch.Tensor] = None
self._returns_full: Optional[torch.Tensor] = None
self._rewards_full: Optional[torch.Tensor] = None
self._action_log_probs_full: Optional[torch.Tensor] = None
self.step = 0
self._total_steps = 0
self._before_update_called = False
self.device = torch.device("cpu")
# self._advantages and self._normalized_advantages are only computed
# when `before_updates` is called
self._advantages: Optional[torch.Tensor] = None
self._normalized_advantages: Optional[torch.Tensor] = None
self._masks_full: Optional[torch.Tensor] = None
self._actions_full: Optional[torch.Tensor] = None
self._prev_actions_full: Optional[torch.Tensor] = None
def initialize(
self,
*,
observations: ObservationType,
num_samplers: int,
recurrent_memory_specification: FullMemorySpecType,
action_space: gym.Space,
**kwargs,
):
if self.memory_specification is None:
self.memory_specification = recurrent_memory_specification or {}
self.action_space = action_space
self.memory_first_last: Memory = self.create_memory(
spec=self.memory_specification, num_samplers=num_samplers,
).to(self.device)
for key in self.memory_specification:
self.flattened_to_unflattened["memory"][key] = [key]
self.unflattened_to_flattened["memory"][(key,)] = key
self._masks_full = torch.zeros(
self.full_size + 1, num_samplers, 1, device=self.device
)
action_flat_dim = su.flatdim(self.action_space)
self._actions_full = torch.zeros(
self.full_size, num_samplers, action_flat_dim, device=self.device
)
self._prev_actions_full = torch.zeros(
self.full_size + 1, num_samplers, action_flat_dim, device=self.device
)
assert self.step == 0, "Must call `after_updates` before calling `initialize`"
self.insert_observations(observations=observations, time_step=0)
self.prev_actions[0].zero_() # Have to zero previous actions
self.masks[0].zero_() # Have to zero masks
@property
def total_experiences(self) -> int:
return self._total_steps
@total_experiences.setter
def total_experiences(self, value: int):
self._total_steps = value
def set_partition(self, index: int, num_parts: int):
pass
@property
def value_preds(self) -> torch.Tensor:
return self._value_preds_full[: self.step + 1]
@property
def rewards(self) -> torch.Tensor:
return self._rewards_full[: self.step]
@property
def returns(self) -> torch.Tensor:
return self._returns_full[: self.step + 1]
@property
def action_log_probs(self) -> torch.Tensor:
return self._action_log_probs_full[: self.step]
@property
def actions(self) -> torch.Tensor:
return self._actions_full[: self.step]
@property
def prev_actions(self) -> torch.Tensor:
return self._prev_actions_full[: self.step + 1]
@property
def masks(self) -> torch.Tensor:
return self._masks_full[: self.step + 1]
@property
def observations(self) -> Memory:
return self._observations_full.slice(dim=0, start=0, stop=self.step + 1)
@staticmethod
def create_memory(spec: Optional[FullMemorySpecType], num_samplers: int,) -> Memory:
if spec is None:
return Memory()
memory = Memory()
for key in spec:
dims_template, dtype = spec[key]
dim_names = ["step"] + [d[0] for d in dims_template]
sampler_dim = dim_names.index("sampler")
all_dims = [2] + [d[1] for d in dims_template]
all_dims[sampler_dim] = num_samplers
memory.check_append(
key=key,
tensor=torch.zeros(*all_dims, dtype=dtype),
sampler_dim=sampler_dim,
)
return memory
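    # A hedged example spec for `create_memory`: a single-layer GRU hidden
    # state of size 512. The "sampler" entry's size (None here) is replaced
    # by `num_samplers`, and a leading step dimension of 2 stores the
    # first/last memories:
    #
    #     spec = {
    #         "rnn": (
    #             (("layer", 1), ("sampler", None), ("hidden", 512)),
    #             torch.float32,
    #         ),
    #     }
    #     memory = RolloutBlockStorage.create_memory(spec=spec, num_samplers=8)
    #     # memory["rnn"][0].shape == torch.Size([2, 1, 8, 512])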
def to(self, device: torch.device):
for key in [
"_observations_full",
"memory_first_last",
"_actions_full",
"_prev_actions_full",
"_masks_full",
"_rewards_full",
"_value_preds_full",
"_returns_full",
"_action_log_probs_full",
]:
val = getattr(self, key)
if val is not None:
setattr(self, key, val.to(device))
self.device = device
def insert_observations(
self, observations: ObservationType, time_step: int,
):
self.insert_tensors(
storage=self._observations_full,
storage_name="observations",
unflattened=observations,
time_step=time_step,
)
def insert_memory(
self, memory: Optional[Memory], time_step: int,
):
if memory is None:
assert len(self.memory_first_last) == 0
return
# `min(time_step, 1)` as we only store the first and last memories:
# * first memory is used for loss computation when the agent model has to compute
# all its outputs again given the full batch.
# * last memory is used by the agent when collecting rollouts
self.insert_tensors(
storage=self.memory_first_last,
storage_name="memory",
unflattened=memory,
time_step=min(time_step, 1),
)
def insert_tensors(
self,
storage: Memory,
storage_name: str,
unflattened: Union[ObservationType, Memory],
prefix: str = "",
path: Sequence[str] = (),
time_step: int = 0,
):
path = list(path)
for name in unflattened:
current_data = unflattened[name]
if isinstance(current_data, Dict):
self.insert_tensors(
storage=storage,
storage_name=storage_name,
unflattened=cast(ObservationType, current_data),
prefix=prefix + name + self.FLATTEN_SEPARATOR,
path=path + [name],
time_step=time_step,
)
continue
sampler_dim = self.dim_names.index("sampler")
if isinstance(current_data, tuple):
sampler_dim = current_data[1]
current_data = current_data[0]
flatten_name = prefix + name
if flatten_name not in storage:
assert storage_name == "observations"
storage[flatten_name] = (
torch.zeros_like(current_data) # type:ignore
.repeat(
self.full_size + 1, # required for observations (and memory)
*(1 for _ in range(len(current_data.shape))),
)
.to(self.device),
sampler_dim,
)
assert (
flatten_name not in self.flattened_to_unflattened[storage_name]
), f"new flattened name {flatten_name} already existing in flattened spaces[{storage_name}]"
self.flattened_to_unflattened[storage_name][flatten_name] = path + [
name
]
self.unflattened_to_flattened[storage_name][
tuple(path + [name])
] = flatten_name
try:
if storage_name == "observations":
# current_data has a step dimension
assert time_step >= 0
storage[flatten_name][0][time_step : time_step + 1].copy_(
current_data
)
elif storage_name == "memory":
# current_data does not have a step dimension
storage[flatten_name][0][time_step].copy_(current_data)
else:
raise NotImplementedError
except:
get_logger().error(
f"Error while inserting data in storage for name {flatten_name}"
)
raise
def create_tensor_storage(
self, num_steps: int, template: torch.Tensor
) -> torch.Tensor:
return torch.cat([torch.zeros_like(template).to(self.device)] * num_steps)
def _double_storage_size(self):
def pad_tensor_with_zeros(old_t: Optional[torch.Tensor]):
if old_t is None:
return None
assert old_t.shape[0] in [self.full_size, self.full_size + 1]
padded_t = torch.zeros(
old_t.shape[0] + self.full_size,
*old_t.shape[1:],
dtype=old_t.dtype,
device=old_t.device,
)
padded_t[: old_t.shape[0]] = old_t
return padded_t
for key in list(self._observations_full.keys()):
obs_tensor, sampler_dim = self._observations_full[key]
self._observations_full[key] = (
pad_tensor_with_zeros(obs_tensor),
sampler_dim,
)
self._actions_full = pad_tensor_with_zeros(self._actions_full)
self._prev_actions_full = pad_tensor_with_zeros(self._prev_actions_full)
self._masks_full = pad_tensor_with_zeros(self._masks_full)
self._rewards_full = pad_tensor_with_zeros(self._rewards_full)
self._value_preds_full = pad_tensor_with_zeros(self._value_preds_full)
self._returns_full = pad_tensor_with_zeros(self._returns_full)
self._action_log_probs_full = pad_tensor_with_zeros(self._action_log_probs_full)
self.full_size *= 2
def add(
self,
observations: ObservationType,
memory: Optional[Memory],
actions: torch.Tensor,
action_log_probs: torch.Tensor,
value_preds: torch.Tensor,
rewards: torch.Tensor,
masks: torch.Tensor,
):
"""See `ExperienceStorage.add` documentation."""
assert (
len(masks.shape) == 2 and masks.shape[1] == 1
), f"Can only add a single step worth of data at a time (mask shape = {masks.shape})."
self.total_experiences += masks.shape[0]
if self.step == self.full_size:
self._double_storage_size()
elif self.step > self.full_size:
raise RuntimeError
self.insert_observations(observations, time_step=self.step + 1)
self.insert_memory(memory, time_step=self.step + 1)
assert actions.shape == self._actions_full.shape[1:]
self._actions_full[self.step].copy_(actions) # type:ignore
self._prev_actions_full[self.step + 1].copy_(actions) # type:ignore
self._masks_full[self.step + 1].copy_(masks) # type:ignore
if self._rewards_full is None:
# We delay the instantiation of storage for `rewards`, `value_preds`, `action_log_probs` and `returns`
# as we do not, a priori, know what shape these will be. For instance, if we are in a multi-agent setting
# then there may be many rewards (one for each agent).
self._rewards_full = self.create_tensor_storage(
self.full_size, rewards.unsqueeze(0)
) # add step
value_returns_template = value_preds.unsqueeze(0) # add step
self._value_preds_full = self.create_tensor_storage(
self.full_size + 1, value_returns_template
)
self._returns_full = self.create_tensor_storage(
self.full_size + 1, value_returns_template
)
self._action_log_probs_full = self.create_tensor_storage(
self.full_size, action_log_probs.unsqueeze(0)
)
self._value_preds_full[self.step].copy_(value_preds) # type:ignore
self._rewards_full[self.step].copy_(rewards) # type:ignore
self._action_log_probs_full[self.step].copy_( # type:ignore
action_log_probs
)
self.step += 1
self._before_update_called = False
# We set the below to be None just for extra safety.
self._advantages = None
self._normalized_advantages = None
def sampler_select(self, keep_list: Sequence[int]):
keep_list = list(keep_list)
if self._actions_full.shape[1] == len(keep_list): # samplers dim
return # we are keeping everything, no need to copy
self._observations_full = self._observations_full.sampler_select(keep_list)
self.memory_first_last = self.memory_first_last.sampler_select(keep_list)
self._actions_full = self._actions_full[:, keep_list]
self._prev_actions_full = self._prev_actions_full[:, keep_list]
self._action_log_probs_full = self._action_log_probs_full[:, keep_list]
self._masks_full = self._masks_full[:, keep_list]
if self._rewards_full is not None:
self._value_preds_full = self._value_preds_full[:, keep_list]
self._rewards_full = self._rewards_full[:, keep_list]
self._returns_full = self._returns_full[:, keep_list]
def before_updates(
self,
*,
next_value: torch.Tensor,
use_gae: bool,
gamma: float,
tau: float,
adv_stats_callback: Callable[[torch.Tensor], Dict[str, torch.Tensor]],
**kwargs,
):
assert len(kwargs) == 0
self.compute_returns(
next_value=next_value, use_gae=use_gae, gamma=gamma, tau=tau,
)
self._advantages = self.returns[:-1] - self.value_preds[:-1]
adv_stats = adv_stats_callback(self._advantages)
self._normalized_advantages = (self._advantages - adv_stats["mean"]) / (
adv_stats["std"] + 1e-5
)
self._before_update_called = True
def after_updates(self, **kwargs):
assert len(kwargs) == 0
for storage in [self.observations, self.memory_first_last]:
for key in storage:
storage[key][0][0].copy_(storage[key][0][-1])
if self._masks_full is not None:
self.masks[0].copy_(self.masks[-1])
if self._prev_actions_full is not None:
self.prev_actions[0].copy_(self.prev_actions[-1])
self._before_update_called = False
self._advantages = None
self._normalized_advantages = None
self.step = 0
@staticmethod
def _extend_tensor_with_ones(stored_tensor: torch.Tensor, desired_num_dims: int):
# Ensure broadcast to all flattened dimensions
extended_shape = stored_tensor.shape + (1,) * (
desired_num_dims - len(stored_tensor.shape)
)
return stored_tensor.view(*extended_shape)
def compute_returns(
self, next_value: torch.Tensor, use_gae: bool, gamma: float, tau: float
):
extended_mask = self._extend_tensor_with_ones(
self.masks, desired_num_dims=len(self.value_preds.shape)
)
extended_rewards = self._extend_tensor_with_ones(
self.rewards, desired_num_dims=len(self.value_preds.shape)
)
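        # With masks m, rewards r, and value predictions V, the GAE branch
        # below computes
        #     delta_t = r_t + gamma * V_{t+1} * m_{t+1} - V_t
        #     A_t     = delta_t + gamma * tau * m_{t+1} * A_{t+1}
        #     R_t     = A_t + V_t
        # while the non-GAE branch computes the masked discounted return
        #     R_t = r_t + gamma * m_{t+1} * R_{t+1}.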
if use_gae:
self.value_preds[-1] = next_value
gae = 0
for step in reversed(range(extended_rewards.shape[0])):
delta = (
extended_rewards[step]
+ gamma * self.value_preds[step + 1] * extended_mask[step + 1]
- self.value_preds[step]
)
gae = delta + gamma * tau * extended_mask[step + 1] * gae # type:ignore
self.returns[step] = gae + self.value_preds[step]
else:
self.returns[-1] = next_value
for step in reversed(range(extended_rewards.shape[0])):
self.returns[step] = (
self.returns[step + 1] * gamma * extended_mask[step + 1]
+ extended_rewards[step]
)
def batched_experience_generator(
self, num_mini_batch: int,
):
assert self._before_update_called, (
"self._before_update_called() must be called before"
" attempting to generated batched rollouts."
)
num_samplers = self.rewards.shape[1]
assert num_samplers >= num_mini_batch, (
f"The number of task samplers ({num_samplers}) "
f"must be greater than or equal to the number of "
f"mini batches ({num_mini_batch})."
)
inds = np.round(
np.linspace(0, num_samplers, num_mini_batch + 1, endpoint=True)
).astype(np.int32)
pairs = list(zip(inds[:-1], inds[1:]))
random.shuffle(pairs)
for start_ind, end_ind in pairs:
cur_samplers = list(range(start_ind, end_ind))
memory_batch = self.memory_first_last.step_squeeze(0).sampler_select(
cur_samplers
)
observations_batch = self.unflatten_observations(
self.observations.slice(dim=0, stop=-1).sampler_select(cur_samplers)
)
actions_batch = []
prev_actions_batch = []
value_preds_batch = []
return_batch = []
masks_batch = []
old_action_log_probs_batch = []
adv_targ = []
norm_adv_targ = []
for ind in cur_samplers:
actions_batch.append(self.actions[:, ind])
prev_actions_batch.append(self.prev_actions[:-1, ind])
value_preds_batch.append(self.value_preds[:-1, ind])
return_batch.append(self.returns[:-1, ind])
masks_batch.append(self.masks[:-1, ind])
old_action_log_probs_batch.append(self.action_log_probs[:, ind])
adv_targ.append(self._advantages[:, ind])
norm_adv_targ.append(self._normalized_advantages[:, ind])
actions_batch = torch.stack(actions_batch, 1) # type:ignore
prev_actions_batch = torch.stack(prev_actions_batch, 1) # type:ignore
value_preds_batch = torch.stack(value_preds_batch, 1) # type:ignore
return_batch = torch.stack(return_batch, 1) # type:ignore
masks_batch = torch.stack(masks_batch, 1) # type:ignore
old_action_log_probs_batch = torch.stack( # type:ignore
old_action_log_probs_batch, 1
)
adv_targ = torch.stack(adv_targ, 1) # type:ignore
norm_adv_targ = torch.stack(norm_adv_targ, 1) # type:ignore
yield {
"observations": observations_batch,
"memory": memory_batch,
"actions": su.unflatten(self.action_space, actions_batch),
"prev_actions": su.unflatten(self.action_space, prev_actions_batch),
"values": value_preds_batch,
"returns": return_batch,
"masks": masks_batch,
"old_action_log_probs": old_action_log_probs_batch,
"adv_targ": adv_targ,
"norm_adv_targ": norm_adv_targ,
"bsize": int(np.prod(masks_batch.shape[:2])),
}
def unflatten_observations(self, flattened_batch: Memory) -> ObservationType:
result: ObservationType = {}
for name in flattened_batch:
full_path = self.flattened_to_unflattened["observations"][name]
cur_dict = result
for part in full_path[:-1]:
if part not in cur_dict:
cur_dict[part] = {}
cur_dict = cast(ObservationType, cur_dict[part])
cur_dict[full_path[-1]] = flattened_batch[name][0]
return result
def pick_observation_step(self, step: int) -> ObservationType:
return self.unflatten_observations(self.observations.step_select(step))
def pick_memory_step(self, step: int) -> Memory:
assert step in [0, self.step, -1], "Can only access the first or last memory."
return self.memory_first_last.step_squeeze(min(step, 1))
def pick_prev_actions_step(self, step: int) -> ActionType:
return su.unflatten(self.action_space, self.prev_actions[step : step + 1])
def agent_input_for_next_step(self) -> Dict[str, Any]:
return {
"observations": self.pick_observation_step(self.step),
"memory": self.pick_memory_step(self.step),
"prev_actions": self.pick_prev_actions_step(self.step),
"masks": self.masks[self.step : self.step + 1],
}
| allenact-main | allenact/algorithms/onpolicy_sync/storage.py |
import functools
from typing import Dict, cast, Sequence, Set
import torch
from allenact.algorithms.onpolicy_sync.losses.abstract_loss import (
AbstractActorCriticLoss,
)
from allenact.algorithms.onpolicy_sync.policy import ObservationType
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import ActorCriticOutput
class GroupedActionImitation(AbstractActorCriticLoss):
def __init__(
self, nactions: int, action_groups: Sequence[Set[int]], *args, **kwargs
):
super().__init__(*args, **kwargs)
assert (
sum(len(ag) for ag in action_groups) == nactions
and len(functools.reduce(lambda x, y: x | y, action_groups)) == nactions
), f"`action_groups` (==`{action_groups}`) must be a partition of `[0, 1, 2, ..., nactions - 1]`"
self.nactions = nactions
self.action_groups_mask = torch.FloatTensor(
[
[i in action_group for i in range(nactions)]
for action_group in action_groups
]
+ [[1] * nactions] # type:ignore
)
def loss( # type: ignore
self,
step_count: int,
batch: ObservationType,
actor_critic_output: ActorCriticOutput[CategoricalDistr],
*args,
**kwargs,
):
observations = cast(Dict[str, torch.Tensor], batch["observations"])
assert "expert_group_action" in observations
expert_group_actions = observations["expert_group_action"]
# expert_group_actions = expert_group_actions + (expert_group_actions == -1).long() * (
# 1 + self.action_groups_mask.shape[0]
# )
if self.action_groups_mask.get_device() != expert_group_actions.get_device():
self.action_groups_mask = cast(
torch.FloatTensor,
self.action_groups_mask.cuda(expert_group_actions.get_device()),
)
expert_group_actions_reshaped = expert_group_actions.view(-1, 1)
expert_group_actions_mask = self.action_groups_mask[
expert_group_actions_reshaped
]
probs_tensor = actor_critic_output.distributions.probs_tensor
expert_group_actions_mask = expert_group_actions_mask.view(probs_tensor.shape)
total_loss = -(
torch.log((probs_tensor * expert_group_actions_mask).sum(-1))
).mean()
return total_loss, {"grouped_action_cross_entropy": total_loss.item(),}
| allenact-main | allenact/algorithms/onpolicy_sync/losses/grouped_action_imitation.py |
from .a2cacktr import A2C, ACKTR, A2CACKTR
from .ppo import PPO
| allenact-main | allenact/algorithms/onpolicy_sync/losses/__init__.py |
"""Defining imitation losses for actor critic type models."""
from collections import OrderedDict
from typing import Dict, cast, Optional, Union
import torch
import allenact.utils.spaces_utils as su
from allenact.algorithms.onpolicy_sync.losses.abstract_loss import (
AbstractActorCriticLoss,
ObservationType,
)
from allenact.base_abstractions.distributions import (
Distr,
CategoricalDistr,
SequentialDistr,
ConditionalDistr,
)
from allenact.base_abstractions.misc import ActorCriticOutput
from allenact.base_abstractions.sensor import AbstractExpertSensor
class Imitation(AbstractActorCriticLoss):
"""Expert imitation loss."""
def __init__(
self, expert_sensor: Optional[AbstractExpertSensor] = None, *args, **kwargs
):
super().__init__(*args, **kwargs)
self.expert_sensor = expert_sensor
@staticmethod
def group_loss(
distribution: Union[CategoricalDistr, ConditionalDistr],
expert_actions: torch.Tensor,
expert_actions_masks: torch.Tensor,
):
assert isinstance(distribution, CategoricalDistr) or (
isinstance(distribution, ConditionalDistr)
and isinstance(distribution.distr, CategoricalDistr)
), "This implementation only supports (groups of) `CategoricalDistr`"
expert_successes = expert_actions_masks.sum()
log_probs = distribution.log_prob(cast(torch.LongTensor, expert_actions))
assert (
log_probs.shape[: len(expert_actions_masks.shape)]
== expert_actions_masks.shape
)
# Add dimensions to `expert_actions_masks` on the right to allow for masking
# if necessary.
len_diff = len(log_probs.shape) - len(expert_actions_masks.shape)
assert len_diff >= 0
expert_actions_masks = expert_actions_masks.view(
*expert_actions_masks.shape, *((1,) * len_diff)
)
group_loss = -(expert_actions_masks * log_probs).sum() / torch.clamp(
expert_successes, min=1
)
return group_loss, expert_successes
def loss( # type: ignore
self,
step_count: int,
batch: ObservationType,
actor_critic_output: ActorCriticOutput[Distr],
*args,
**kwargs,
):
"""Computes the imitation loss.
# Parameters
batch : A batch of data corresponding to the information collected when rolling out (possibly many) agents
over a fixed number of steps. In particular this batch should have the same format as that returned by
`RolloutStorage.batched_experience_generator`.
Here `batch["observations"]` must contain `"expert_action"` observations
or `"expert_policy"` observations. See `ExpertActionSensor` (or `ExpertPolicySensor`) for an example of
a sensor producing such observations.
actor_critic_output : The output of calling an ActorCriticModel on the observations in `batch`.
args : Extra args. Ignored.
kwargs : Extra kwargs. Ignored.
# Returns
A (0-dimensional) torch.FloatTensor corresponding to the computed loss. `.backward()` will be called on this
tensor in order to compute a gradient update to the ActorCriticModel's parameters.
"""
observations = cast(Dict[str, torch.Tensor], batch["observations"])
losses = OrderedDict()
should_report_loss = False
if "expert_action" in observations:
if self.expert_sensor is None or not self.expert_sensor.use_groups:
expert_actions_and_mask = observations["expert_action"]
assert expert_actions_and_mask.shape[-1] == 2
expert_actions_and_mask_reshaped = expert_actions_and_mask.view(-1, 2)
expert_actions = expert_actions_and_mask_reshaped[:, 0].view(
*expert_actions_and_mask.shape[:-1], 1
)
expert_actions_masks = (
expert_actions_and_mask_reshaped[:, 1]
.float()
.view(*expert_actions_and_mask.shape[:-1], 1)
)
total_loss, expert_successes = self.group_loss(
cast(CategoricalDistr, actor_critic_output.distributions),
expert_actions,
expert_actions_masks,
)
should_report_loss = expert_successes.item() != 0
else:
expert_actions = su.unflatten(
self.expert_sensor.observation_space, observations["expert_action"]
)
total_loss = 0
ready_actions = OrderedDict()
for group_name, cd in zip(
self.expert_sensor.group_spaces,
cast(
SequentialDistr, actor_critic_output.distributions
).conditional_distrs,
):
assert group_name == cd.action_group_name
cd.reset()
cd.condition_on_input(**ready_actions)
expert_action = expert_actions[group_name][
AbstractExpertSensor.ACTION_POLICY_LABEL
]
expert_action_masks = expert_actions[group_name][
AbstractExpertSensor.EXPERT_SUCCESS_LABEL
]
ready_actions[group_name] = expert_action
current_loss, expert_successes = self.group_loss(
cd, expert_action, expert_action_masks,
)
should_report_loss = (
expert_successes.item() != 0 or should_report_loss
)
cd.reset()
if expert_successes.item() != 0:
losses[group_name + "_cross_entropy"] = current_loss.item()
total_loss = total_loss + current_loss
elif "expert_policy" in observations:
if self.expert_sensor is None or not self.expert_sensor.use_groups:
assert isinstance(
actor_critic_output.distributions, CategoricalDistr
), "This implementation currently only supports `CategoricalDistr`"
expert_policies = cast(Dict[str, torch.Tensor], batch["observations"])[
"expert_policy"
][..., :-1]
expert_actions_masks = cast(
Dict[str, torch.Tensor], batch["observations"]
)["expert_policy"][..., -1:]
expert_successes = expert_actions_masks.sum()
if expert_successes.item() > 0:
should_report_loss = True
log_probs = cast(
CategoricalDistr, actor_critic_output.distributions
).log_probs_tensor
# Add dimensions to `expert_actions_masks` on the right to allow for masking
# if necessary.
len_diff = len(log_probs.shape) - len(expert_actions_masks.shape)
assert len_diff >= 0
expert_actions_masks = expert_actions_masks.view(
*expert_actions_masks.shape, *((1,) * len_diff)
)
total_loss = (
-(log_probs * expert_policies) * expert_actions_masks
).sum() / torch.clamp(expert_successes, min=1)
else:
raise NotImplementedError(
"This implementation currently only supports `CategoricalDistr`"
)
else:
raise NotImplementedError(
"Imitation loss requires either `expert_action` or `expert_policy`"
" sensor to be active."
)
return (
total_loss,
{"expert_cross_entropy": total_loss.item(), **losses}
if should_report_loss
else {},
)
| allenact-main | allenact/algorithms/onpolicy_sync/losses/imitation.py |
"""Defining abstract loss classes for actor critic models."""
import abc
from typing import Dict, Tuple, Union
import torch
from allenact.algorithms.onpolicy_sync.policy import ObservationType
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import Loss, ActorCriticOutput
class AbstractActorCriticLoss(Loss):
"""Abstract class representing a loss function used to train an
ActorCriticModel."""
# noinspection PyMethodOverriding
@abc.abstractmethod
def loss( # type: ignore
self,
step_count: int,
batch: ObservationType,
actor_critic_output: ActorCriticOutput[CategoricalDistr],
*args,
**kwargs,
) -> Union[
Tuple[torch.FloatTensor, Dict[str, float]],
Tuple[torch.FloatTensor, Dict[str, float], Dict[str, float]],
]:
"""Computes the loss.
# Parameters
batch : A batch of data corresponding to the information collected when rolling out (possibly many) agents
over a fixed number of steps. In particular this batch should have the same format as that returned by
`RolloutStorage.batched_experience_generator`.
actor_critic_output : The output of calling an ActorCriticModel on the observations in `batch`.
args : Extra args.
kwargs : Extra kwargs.
# Returns
A (0-dimensional) torch.FloatTensor corresponding to the computed loss. `.backward()` will be called on this
tensor in order to compute a gradient update to the ActorCriticModel's parameters.
A Dict[str, float] with scalar values corresponding to sub-losses.
An optional Dict[str, float] with scalar values corresponding to extra info to be processed per epoch and
combined across epochs by the engine.
"""
# TODO: The above documentation is missing what the batch dimensions are.
raise NotImplementedError()
| allenact-main | allenact/algorithms/onpolicy_sync/losses/abstract_loss.py |
"""Defining the PPO loss for actor critic type models."""
from typing import Dict, Optional, Callable, cast, Tuple
import torch
from allenact.algorithms.onpolicy_sync.losses.abstract_loss import (
AbstractActorCriticLoss,
ObservationType,
)
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import ActorCriticOutput
class PPO(AbstractActorCriticLoss):
"""Implementation of the Proximal Policy Optimization loss.
# Attributes
clip_param : The clipping parameter to use.
value_loss_coef : Weight of the value loss.
entropy_coef : Weight of the entropy (encouraging) loss.
use_clipped_value_loss : Whether or not to also clip the value loss.
clip_decay : Callable for clip param decay factor (function of the current number of steps)
entropy_method_name : Name of the `Distr` entropy method to use. Default is `entropy`,
but we might use `conditional_entropy` for `SequentialDistr`
show_ratios : If True, adds tracking for the PPO ratio (linear, clamped, and used) in each
epoch to be logged by the engine.
normalize_advantage: Whether or not to use normalized advantage. Default is True.
"""
def __init__(
self,
clip_param: float,
value_loss_coef: float,
entropy_coef: float,
use_clipped_value_loss=True,
clip_decay: Optional[Callable[[int], float]] = None,
entropy_method_name: str = "entropy",
normalize_advantage: bool = True,
show_ratios: bool = False,
*args,
**kwargs
):
"""Initializer.
See the class documentation for parameter definitions.
"""
super().__init__(*args, **kwargs)
self.clip_param = clip_param
self.value_loss_coef = value_loss_coef
self.entropy_coef = entropy_coef
self.use_clipped_value_loss = use_clipped_value_loss
self.clip_decay = clip_decay if clip_decay is not None else (lambda x: 1.0)
self.entropy_method_name = entropy_method_name
self.show_ratios = show_ratios
if normalize_advantage:
self.adv_key = "norm_adv_targ"
else:
self.adv_key = "adv_targ"
def loss_per_step(
self,
step_count: int,
batch: ObservationType,
actor_critic_output: ActorCriticOutput[CategoricalDistr],
) -> Tuple[
Dict[str, Tuple[torch.Tensor, Optional[float]]], Dict[str, torch.Tensor]
]: # TODO tuple output
actions = cast(torch.LongTensor, batch["actions"])
values = actor_critic_output.values
action_log_probs = actor_critic_output.distributions.log_prob(actions)
dist_entropy: torch.FloatTensor = getattr(
actor_critic_output.distributions, self.entropy_method_name
)()
def add_trailing_dims(t: torch.Tensor):
assert len(t.shape) <= len(batch[self.adv_key].shape)
return t.view(
t.shape + ((1,) * (len(batch[self.adv_key].shape) - len(t.shape)))
)
dist_entropy = add_trailing_dims(dist_entropy)
clip_param = self.clip_param * self.clip_decay(step_count)
ratio = torch.exp(action_log_probs - batch["old_action_log_probs"])
ratio = add_trailing_dims(ratio)
clamped_ratio = torch.clamp(ratio, 1.0 - clip_param, 1.0 + clip_param)
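        # PPO's clipped surrogate objective: maximize
        #     min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t),
        # implemented below as the negated elementwise minimum of the two
        # surrogates.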
surr1 = ratio * batch[self.adv_key]
surr2 = clamped_ratio * batch[self.adv_key]
use_clamped = surr2 < surr1
action_loss = -torch.where(cast(torch.Tensor, use_clamped), surr2, surr1)
if self.use_clipped_value_loss:
value_pred_clipped = batch["values"] + (values - batch["values"]).clamp(
-clip_param, clip_param
)
value_losses = (values - batch["returns"]).pow(2)
value_losses_clipped = (value_pred_clipped - batch["returns"]).pow(2)
value_loss = 0.5 * torch.max(value_losses, value_losses_clipped)
else:
value_loss = 0.5 * (cast(torch.FloatTensor, batch["returns"]) - values).pow(
2
)
# noinspection PyUnresolvedReferences
return (
{
"value": (value_loss, self.value_loss_coef),
"action": (action_loss, None),
"entropy": (dist_entropy.mul_(-1.0), self.entropy_coef), # type: ignore
},
{
"ratio": ratio,
"ratio_clamped": clamped_ratio,
"ratio_used": torch.where(
cast(torch.Tensor, use_clamped), clamped_ratio, ratio
),
}
if self.show_ratios
else {},
)
def loss( # type: ignore
self,
step_count: int,
batch: ObservationType,
actor_critic_output: ActorCriticOutput[CategoricalDistr],
*args,
**kwargs
):
losses_per_step, ratio_info = self.loss_per_step(
step_count=step_count, batch=batch, actor_critic_output=actor_critic_output,
)
losses = {
key: (loss.mean(), weight)
for (key, (loss, weight)) in losses_per_step.items()
}
total_loss = sum(
loss * weight if weight is not None else loss
for loss, weight in losses.values()
)
result = (
total_loss,
{
"ppo_total": cast(torch.Tensor, total_loss).item(),
**{key: loss.item() for key, (loss, _) in losses.items()},
},
{key: float(value.mean().item()) for key, value in ratio_info.items()},
)
return result if self.show_ratios else result[:2]
class PPOValue(AbstractActorCriticLoss):
"""Implementation of the Proximal Policy Optimization loss.
# Attributes
clip_param : The clipping parameter to use.
use_clipped_value_loss : Whether or not to also clip the value loss.
"""
def __init__(
self,
clip_param: float,
use_clipped_value_loss=True,
clip_decay: Optional[Callable[[int], float]] = None,
*args,
**kwargs
):
"""Initializer.
See the class documentation for parameter definitions.
"""
super().__init__(*args, **kwargs)
self.clip_param = clip_param
self.use_clipped_value_loss = use_clipped_value_loss
self.clip_decay = clip_decay if clip_decay is not None else (lambda x: 1.0)
def loss( # type: ignore
self,
step_count: int,
batch: ObservationType,
actor_critic_output: ActorCriticOutput[CategoricalDistr],
*args,
**kwargs
):
values = actor_critic_output.values
clip_param = self.clip_param * self.clip_decay(step_count)
if self.use_clipped_value_loss:
value_pred_clipped = batch["values"] + (values - batch["values"]).clamp(
-clip_param, clip_param
)
value_losses = (values - batch["returns"]).pow(2)
value_losses_clipped = (value_pred_clipped - batch["returns"]).pow(2)
value_loss = 0.5 * torch.max(value_losses, value_losses_clipped).mean()
else:
value_loss = (
0.5 * (cast(torch.FloatTensor, batch["returns"]) - values).pow(2).mean()
)
return (
value_loss,
{"value": value_loss.item(),},
)
PPOConfig = dict(clip_param=0.1, value_loss_coef=0.5, entropy_coef=0.01)
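# A hedged usage sketch: `PPOConfig` holds default hyperparameters for this
# loss and can be unpacked into the constructor:
#
#     ppo_loss = PPO(**PPOConfig)
#     # or, e.g., without advantage normalization:
#     ppo_loss = PPO(**PPOConfig, normalize_advantage=False)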
| allenact-main | allenact/algorithms/onpolicy_sync/losses/ppo.py |
"""Implementation of A2C and ACKTR losses."""
from typing import cast, Tuple, Dict, Optional
import torch
from allenact.algorithms.onpolicy_sync.losses.abstract_loss import (
AbstractActorCriticLoss,
ObservationType,
)
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import ActorCriticOutput
from allenact.utils.system import get_logger
class A2CACKTR(AbstractActorCriticLoss):
"""Class implementing A2C and ACKTR losses.
# Attributes
acktr : `True` if should use ACKTR loss (currently not supported), otherwise uses A2C loss.
value_loss_coef : Weight of value loss.
entropy_coef : Weight of entropy (encouraging) loss.
entropy_method_name : Name of the `Distr` entropy method to use. Default is `entropy`,
but we might use `conditional_entropy` for `SequentialDistr`.
"""
def __init__(
self,
value_loss_coef,
entropy_coef,
acktr=False,
entropy_method_name: str = "entropy",
*args,
**kwargs,
):
"""Initializer.
See class documentation for parameter definitions.
"""
super().__init__(*args, **kwargs)
self.acktr = acktr
self.loss_key = "a2c_total" if not acktr else "aktr_total"
self.value_loss_coef = value_loss_coef
self.entropy_coef = entropy_coef
self.entropy_method_name = entropy_method_name
def loss_per_step( # type: ignore
self,
step_count: int,
batch: ObservationType,
actor_critic_output: ActorCriticOutput[CategoricalDistr],
) -> Dict[str, Tuple[torch.Tensor, Optional[float]]]:
actions = cast(torch.LongTensor, batch["actions"])
values = actor_critic_output.values
action_log_probs = actor_critic_output.distributions.log_prob(actions)
action_log_probs = action_log_probs.view(
action_log_probs.shape
+ (1,)
* (
len(cast(torch.Tensor, batch["adv_targ"]).shape)
- len(action_log_probs.shape)
)
)
dist_entropy: torch.FloatTensor = getattr(
actor_critic_output.distributions, self.entropy_method_name
)()
dist_entropy = dist_entropy.view(
dist_entropy.shape
+ ((1,) * (len(action_log_probs.shape) - len(dist_entropy.shape)))
)
value_loss = 0.5 * (cast(torch.FloatTensor, batch["returns"]) - values).pow(2)
# TODO: Decided not to use normalized advantages here,
# is this correct? (it's how it's done in Kostrikov's)
action_loss = -(
cast(torch.FloatTensor, batch["adv_targ"]).detach() * action_log_probs
)
if self.acktr:
# TODO: Currently acktr doesn't really work because of this natural gradient stuff
# that we should figure out how to integrate properly.
get_logger().warning("acktr is only partially supported.")
return {
"value": (value_loss, self.value_loss_coef),
"action": (action_loss, None),
"entropy": (dist_entropy.mul_(-1.0), self.entropy_coef), # type: ignore
}
def loss( # type: ignore
self,
step_count: int,
batch: ObservationType,
actor_critic_output: ActorCriticOutput[CategoricalDistr],
*args,
**kwargs,
):
losses_per_step = self.loss_per_step(
step_count=step_count, batch=batch, actor_critic_output=actor_critic_output,
)
losses = {
key: (loss.mean(), weight)
for (key, (loss, weight)) in losses_per_step.items()
}
total_loss = cast(
torch.Tensor,
sum(
loss * weight if weight is not None else loss
for loss, weight in losses.values()
),
)
return (
total_loss,
{
self.loss_key: total_loss.item(),
**{key: loss.item() for key, (loss, _) in losses.items()},
},
)
class A2C(A2CACKTR):
"""A2C Loss."""
def __init__(
self,
value_loss_coef,
entropy_coef,
entropy_method_name: str = "entropy",
*args,
**kwargs,
):
super().__init__(
value_loss_coef=value_loss_coef,
entropy_coef=entropy_coef,
acktr=False,
entropy_method_name=entropy_method_name,
*args,
**kwargs,
)
class ACKTR(A2CACKTR):
"""ACKTR Loss.
This code is not supported as it currently lacks an implementation
for recurrent models.
"""
def __init__(
self,
value_loss_coef,
entropy_coef,
entropy_method_name: str = "entropy",
*args,
**kwargs,
):
super().__init__(
value_loss_coef=value_loss_coef,
entropy_coef=entropy_coef,
acktr=True,
entropy_method_name=entropy_method_name,
*args,
**kwargs,
)
A2CConfig = dict(value_loss_coef=0.5, entropy_coef=0.01,)
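# A hedged usage sketch mirroring `PPOConfig` in the sibling ppo module:
#
#     a2c_loss = A2C(**A2CConfig)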
| allenact-main | allenact/algorithms/onpolicy_sync/losses/a2cacktr.py |
allenact-main | allenact/algorithms/offpolicy_sync/__init__.py |
|
"""Defining abstract loss classes for actor critic models."""
import abc
from typing import Dict, Tuple, TypeVar, Generic
import torch
from allenact.algorithms.onpolicy_sync.policy import ObservationType
from allenact.base_abstractions.misc import Loss, Memory
ModelType = TypeVar("ModelType")
class AbstractOffPolicyLoss(Generic[ModelType], Loss):
"""Abstract class representing an off-policy loss function used to train a
model."""
# noinspection PyMethodOverriding
@abc.abstractmethod
def loss( # type: ignore
self,
*, # No positional arguments
step_count: int,
model: ModelType,
batch: ObservationType,
memory: Memory,
**kwargs,
) -> Tuple[torch.FloatTensor, Dict[str, float], Memory, int]:
"""Computes the loss.
Loss after processing a batch of data with (part of) a model (possibly with memory).
# Parameters
model: model to run on data batch (both assumed to be on the same device)
batch: data to use as input for model (already on the same device as model)
memory: model memory before processing current data batch
# Returns
A tuple with:
current_loss: total loss
current_info: additional information about the current loss
memory: model memory after processing current data batch
bsize: batch size
"""
raise NotImplementedError()
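# A minimal sketch of a concrete subclass (illustrative only: the model is
# assumed to map observations to per-step action logits, and the batch is
# assumed to contain "observations" and integer "expert_actions" keys --
# both are assumptions, not part of this API):
#
#     class BehavioralCloningOffPolicyLoss(AbstractOffPolicyLoss[torch.nn.Module]):
#         def loss(self, *, step_count, model, batch, memory, **kwargs):
#             logits = model(batch["observations"])
#             bc_loss = torch.nn.functional.cross_entropy(
#                 logits.flatten(0, -2), batch["expert_actions"].flatten()
#             )
#             bsize = batch["expert_actions"].numel()
#             return bc_loss, {"bc_loss": bc_loss.item()}, memory, bsize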
| allenact-main | allenact/algorithms/offpolicy_sync/losses/abstract_offpolicy_loss.py |
allenact-main | allenact/algorithms/offpolicy_sync/losses/__init__.py |
|
"""Functions used to initialize and manipulate pytorch models."""
import hashlib
from typing import Callable, Sequence, Tuple, Union, Optional, Dict, Any
import numpy as np
import torch
import torch.nn as nn
from allenact.utils.misc_utils import md5_hash_str_as_int
def md5_hash_of_state_dict(state_dict: Dict[str, Any]):
hashables = []
for piece in sorted(state_dict.items()):
if isinstance(piece[1], (np.ndarray, torch.Tensor, nn.Parameter)):
hashables.append(piece[0])
if not isinstance(piece[1], np.ndarray):
p1 = piece[1].data.cpu().numpy()
else:
p1 = piece[1]
hashables.append(int(hashlib.md5(p1.tobytes()).hexdigest(), 16,))
else:
hashables.append(md5_hash_str_as_int(str(piece)))
return md5_hash_str_as_int(str(hashables))
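# E.g., to fingerprint a model's parameters (useful when checking that two
# workers hold identical weights):
#
#     fingerprint = md5_hash_of_state_dict(model.state_dict())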
class Flatten(nn.Module):
"""Flatten input tensor so that it is of shape (FLATTENED_BATCH x -1)."""
# noinspection PyMethodMayBeStatic
def forward(self, x):
"""Flatten input tensor.
# Parameters
x : Tensor of size (FLATTENED_BATCH x ...) to flatten to size (FLATTENED_BATCH x -1)
# Returns
Flattened tensor.
"""
return x.reshape(x.size(0), -1)
def init_linear_layer(
module: nn.Linear, weight_init: Callable, bias_init: Callable, gain=1
):
"""Initialize a torch.nn.Linear layer.
# Parameters
module : A torch linear layer.
weight_init : Function used to initialize the weight parameters of the linear layer. Should take the weight data
tensor and gain as input.
bias_init : Function used to initialize the bias parameters of the linear layer. Should take the bias data
tensor as input.
gain : The gain to apply.
# Returns
The initialized linear layer.
"""
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
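# A hedged usage sketch with standard torch initializers:
#
#     layer = init_linear_layer(
#         nn.Linear(512, 6),
#         weight_init=nn.init.orthogonal_,
#         bias_init=lambda x: nn.init.constant_(x, 0),
#         gain=0.01,
#     )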
def grad_norm(parameters, norm_type=2):
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
norm_type = float(norm_type)
if norm_type == "inf":
total_norm = max(p.grad.data.abs().max() for p in parameters)
else:
total_norm = 0
for p in parameters:
param_norm = p.grad.data.norm(norm_type)
total_norm += param_norm.item() ** norm_type
total_norm = total_norm ** (1.0 / norm_type)
return total_norm
def make_cnn(
input_channels: int,
layer_channels: Sequence[int],
kernel_sizes: Sequence[Union[int, Tuple[int, int]]],
strides: Sequence[Union[int, Tuple[int, int]]],
paddings: Sequence[Union[int, Tuple[int, int]]],
dilations: Sequence[Union[int, Tuple[int, int]]],
output_height: int,
output_width: int,
output_channels: int,
flatten: bool = True,
output_relu: bool = True,
) -> nn.Module:
assert (
len(layer_channels)
== len(kernel_sizes)
== len(strides)
== len(paddings)
== len(dilations)
), "Mismatched sizes: layers {} kernels {} strides {} paddings {} dilations {}".format(
layer_channels, kernel_sizes, strides, paddings, dilations
)
net = nn.Sequential()
input_channels_list = [input_channels] + list(layer_channels)
for it, current_channels in enumerate(layer_channels):
net.add_module(
"conv_{}".format(it),
nn.Conv2d(
in_channels=input_channels_list[it],
out_channels=current_channels,
kernel_size=kernel_sizes[it],
stride=strides[it],
padding=paddings[it],
dilation=dilations[it],
),
)
if it < len(layer_channels) - 1:
net.add_module("relu_{}".format(it), nn.ReLU(inplace=True))
if flatten:
net.add_module("flatten", Flatten())
net.add_module(
"fc",
nn.Linear(
layer_channels[-1] * output_width * output_height, output_channels
),
)
if output_relu:
net.add_module("out_relu", nn.ReLU(True))
return net
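# A hedged example: a two-conv-layer CNN mapping 3x84x84 inputs to a flat
# 256-dim embedding. `output_height`/`output_width` must equal the spatial
# size produced by the conv stack (here floor((84 - 8) / 4) + 1 = 20, then
# floor((20 - 4) / 2) + 1 = 9):
#
#     cnn = make_cnn(
#         input_channels=3,
#         layer_channels=[32, 64],
#         kernel_sizes=[8, 4],
#         strides=[4, 2],
#         paddings=[0, 0],
#         dilations=[1, 1],
#         output_height=9,
#         output_width=9,
#         output_channels=256,
#     )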
def compute_cnn_output(
cnn: nn.Module,
cnn_input: torch.Tensor,
permute_order: Optional[Tuple[int, ...]] = (
0, # FLAT_BATCH (flattening steps, samplers and agents)
3, # CHANNEL
1, # ROW
2, # COL
), # from [FLAT_BATCH x ROW x COL x CHANNEL] flattened input
):
"""Computes CNN outputs for given inputs.
# Parameters
cnn : A torch CNN.
cnn_input: A torch Tensor with inputs.
permute_order: A permutation Tuple to provide PyTorch dimension order, default (0, 3, 1, 2), where 0 corresponds to
the flattened batch dimensions (combining step, sampler and agent)
# Returns
CNN output with dimensions [STEP, SAMPLER, AGENT, CHANNEL, (HEIGHT, WIDTH)].
"""
nsteps: int
nsamplers: int
nagents: int
assert len(cnn_input.shape) in [
5,
6,
], "CNN input must have shape [STEP, SAMPLER, (AGENT,) dim1, dim2, dim3]"
nagents: Optional[int] = None
if len(cnn_input.shape) == 6:
nsteps, nsamplers, nagents = cnn_input.shape[:3]
else:
nsteps, nsamplers = cnn_input.shape[:2]
# Make FLAT_BATCH = nsteps * nsamplers (* nagents)
cnn_input = cnn_input.view((-1,) + cnn_input.shape[2 + int(nagents is not None) :])
if permute_order is not None:
cnn_input = cnn_input.permute(*permute_order)
cnn_output = cnn(cnn_input)
if nagents is not None:
cnn_output = cnn_output.reshape(
(nsteps, nsamplers, nagents,) + cnn_output.shape[1:]
)
else:
cnn_output = cnn_output.reshape((nsteps, nsamplers,) + cnn_output.shape[1:])
return cnn_output
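# A minimal usage sketch for `compute_cnn_output` (illustration only; the helper
# below is hypothetical). The input follows the documented
# [STEP x SAMPLER x ROW x COL x CHANNEL] layout and is permuted to channels-first
# before being passed through the CNN.
def _example_compute_cnn_output() -> torch.Tensor:
    cnn = nn.Sequential(
        nn.Conv2d(in_channels=1, out_channels=4, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        Flatten(),
        nn.Linear(4 * 8 * 8, 16),
    )
    cnn_input = torch.rand(2, 3, 8, 8, 1)  # [STEP=2, SAMPLER=3, ROW=8, COL=8, CHANNEL=1]
    return compute_cnn_output(cnn, cnn_input)  # shape [2, 3, 16]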
def simple_conv_and_linear_weights_init(m):
if type(m) in [
nn.Conv1d,
nn.Conv2d,
nn.Conv3d,
nn.ConvTranspose1d,
nn.ConvTranspose2d,
nn.ConvTranspose3d,
]:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
if m.bias is not None:
m.bias.data.fill_(0)
elif type(m) == nn.Linear:
simple_linear_weights_init(m)
def simple_linear_weights_init(m):
if type(m) == nn.Linear:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
if m.bias is not None:
m.bias.data.fill_(0)
class FeatureEmbedding(nn.Module):
"""A wrapper of nn.Embedding but support zero output Used for extracting
features for actions/rewards."""
def __init__(self, input_size, output_size):
super().__init__()
self.input_size = input_size
self.output_size = output_size
if self.output_size != 0:
self.fc = nn.Embedding(input_size, output_size)
        else:  # registered as a buffer so the null embedding is automatically moved to the correct device
self.null_embedding: torch.Tensor
self.register_buffer("null_embedding", torch.zeros(0,), persistent=False)
def forward(self, inputs):
if self.output_size != 0:
return self.fc(inputs)
else:
return self.null_embedding
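# A minimal usage sketch for `FeatureEmbedding` (illustration only; the helper below
# is hypothetical). With a nonzero `output_size` it behaves like `nn.Embedding`;
# with `output_size == 0` it returns an empty tensor instead.
def _example_feature_embedding() -> torch.Tensor:
    embedder = FeatureEmbedding(input_size=4, output_size=8)
    return embedder(torch.tensor([0, 3, 1]))  # shape (3, 8)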
| allenact-main | allenact/utils/model_utils.py |
"""Utility classes and functions for running and designing experiments."""
import abc
import collections.abc
import copy
import numbers
import random
from collections import OrderedDict, defaultdict
from typing import (
Callable,
NamedTuple,
Dict,
Any,
Union,
Iterator,
Optional,
List,
cast,
Sequence,
TypeVar,
Generic,
Tuple,
)
import attr
import numpy as np
import torch
import torch.optim as optim
from allenact.algorithms.offpolicy_sync.losses.abstract_offpolicy_loss import Memory
from allenact.algorithms.onpolicy_sync.losses.abstract_loss import (
AbstractActorCriticLoss,
)
from allenact.algorithms.onpolicy_sync.storage import (
ExperienceStorage,
RolloutStorage,
RolloutBlockStorage,
)
from allenact.base_abstractions.misc import Loss, GenericAbstractLoss
from allenact.utils.misc_utils import prepare_locals_for_super
from allenact.utils.system import get_logger
try:
# noinspection PyProtectedMember,PyUnresolvedReferences
from torch.optim.lr_scheduler import _LRScheduler
except (ImportError, ModuleNotFoundError):
raise ImportError("`_LRScheduler` was not found in `torch.optim.lr_scheduler`")
_DEFAULT_ONPOLICY_UUID = "onpolicy"
def evenly_distribute_count_into_bins(count: int, nbins: int) -> List[int]:
"""Distribute a count into a number of bins.
# Parameters
count: A positive integer to be distributed, should be `>= nbins`.
nbins: The number of bins.
# Returns
A list of positive integers which sum to `count`. These values will be
as close to equal as possible (may differ by at most 1).
"""
assert count >= nbins, f"count ({count}) < nbins ({nbins})"
res = [0] * nbins
for it in range(count):
res[it % nbins] += 1
return res
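# A minimal usage sketch for `evenly_distribute_count_into_bins` (illustration only;
# the helper below is hypothetical).
def _example_evenly_distribute_count_into_bins() -> List[int]:
    return evenly_distribute_count_into_bins(count=10, nbins=4)  # [3, 3, 2, 2]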
def recursive_update(
original: Union[Dict, collections.abc.MutableMapping],
update: Union[Dict, collections.abc.MutableMapping],
):
"""Recursively updates original dictionary with entries form update dict.
# Parameters
original : Original dictionary to be updated.
update : Dictionary with additional or replacement entries.
# Returns
Updated original dictionary.
"""
for k, v in update.items():
if isinstance(v, collections.abc.MutableMapping):
original[k] = recursive_update(original.get(k, {}), v)
else:
original[k] = v
return original
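# A minimal usage sketch for `recursive_update` (illustration only; the helper below
# is hypothetical). Nested mappings are merged rather than replaced wholesale.
def _example_recursive_update() -> Dict:
    original = {"a": 1, "b": {"c": 2}}
    return recursive_update(original, {"b": {"d": 3}})  # {"a": 1, "b": {"c": 2, "d": 3}}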
ToBuildType = TypeVar("ToBuildType")
class Builder(tuple, Generic[ToBuildType]):
"""Used to instantiate a given class with (default) parameters.
    Helper class that stores a class, default parameters for that
    class, and keyword arguments that (possibly) overwrite the defaults.
    When an object of the Builder class is called, it instantiates
    an object of type `class_type` with parameters specified by
    the attributes `default` and `kwargs` (and possibly additional, overwriting,
    keyword arguments).
# Attributes
class_type : The class to be instantiated when calling the object.
kwargs : Keyword arguments used to instantiate an object of type `class_type`.
default : Default parameters used when instantiating the class.
"""
class_type: ToBuildType
kwargs: Dict[str, Any]
default: Dict[str, Any]
# noinspection PyTypeChecker
def __new__(
cls,
class_type: ToBuildType,
kwargs: Optional[Dict[str, Any]] = None,
default: Optional[Dict[str, Any]] = None,
):
"""Create a new Builder.
For parameter descriptions see the class documentation. Note
that `kwargs` and `default` can be None in which case they are
set to be empty dictionaries.
"""
self = tuple.__new__(
cls,
(
class_type,
kwargs if kwargs is not None else {},
default if default is not None else {},
),
)
self.class_type = class_type
self.kwargs = self[1]
self.default = self[2]
return self
def __repr__(self) -> str:
return (
f"Group(class_type={self.class_type},"
f" kwargs={self.kwargs},"
f" default={self.default})"
)
def __call__(self, **kwargs) -> ToBuildType:
"""Build and return a new class.
# Parameters
kwargs : additional keyword arguments to use when instantiating
the object. These overwrite all arguments already in the `self.kwargs`
and `self.default` attributes.
# Returns
Class of type `self.class_type` with parameters
taken from `self.default`, `self.kwargs`, and
any keyword arguments additionally passed to `__call__`.
"""
allkwargs = copy.deepcopy(self.default)
recursive_update(allkwargs, self.kwargs)
recursive_update(allkwargs, kwargs)
return cast(Callable, self.class_type)(**allkwargs)
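# A minimal usage sketch for `Builder` (illustration only; the helper below is
# hypothetical). Keyword arguments passed at call time overwrite both `kwargs`
# and `default`.
def _example_builder() -> torch.nn.Module:
    linear_builder = Builder(
        torch.nn.Linear,
        kwargs=dict(bias=False),
        default=dict(in_features=4, out_features=2),
    )
    return linear_builder(out_features=8)  # nn.Linear(in_features=4, out_features=8, bias=False)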
class ScalarMeanTracker(object):
"""Track a collection `scalar key -> mean` pairs."""
def __init__(self) -> None:
self._sums: Dict[str, float] = OrderedDict()
self._counts: Dict[str, int] = OrderedDict()
def add_scalars(
self, scalars: Dict[str, Union[float, int]], n: Union[int, Dict[str, int]] = 1
) -> None:
"""Add additional scalars to track.
# Parameters
        scalars : A dictionary of `scalar key -> value` pairs.
        n : The number of observations each scalar value corresponds to, either a
            single count shared by all keys or a `scalar key -> count` dictionary.
        """
ndict = cast(
Dict[str, int], (n if isinstance(n, Dict) else defaultdict(lambda: n)) # type: ignore
)
for k in scalars:
if k not in self._sums:
self._sums[k] = ndict[k] * scalars[k]
self._counts[k] = ndict[k]
else:
self._sums[k] += ndict[k] * scalars[k]
self._counts[k] += ndict[k]
def pop_and_reset(self) -> Dict[str, float]:
"""Return tracked means and reset.
On resetting all previously tracked values are discarded.
# Returns
A dictionary of `scalar key -> current mean` pairs corresponding to those
values added with `add_scalars`.
"""
means = OrderedDict(
[(k, float(self._sums[k] / self._counts[k])) for k in self._sums]
)
self.reset()
return means
def reset(self):
self._sums = OrderedDict()
self._counts = OrderedDict()
def sums(self):
return copy.copy(self._sums)
def counts(self) -> Dict[str, int]:
return copy.copy(self._counts)
def means(self) -> Dict[str, float]:
return OrderedDict(
[(k, float(self._sums[k] / self._counts[k])) for k in self._sums]
)
@property
def empty(self):
assert len(self._sums) == len(
self._counts
), "Mismatched length of _sums {} and _counts {}".format(
len(self._sums), len(self._counts)
)
return len(self._sums) == 0
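# A minimal usage sketch for `ScalarMeanTracker` (illustration only; the helper
# below is hypothetical).
def _example_scalar_mean_tracker() -> Dict[str, float]:
    tracker = ScalarMeanTracker()
    tracker.add_scalars({"reward": 1.0})
    tracker.add_scalars({"reward": 3.0})
    return tracker.pop_and_reset()  # {"reward": 2.0}; the tracker is now empty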
class LoggingPackage:
"""Data package used for logging."""
def __init__(
self,
mode: str,
training_steps: Optional[int],
storage_uuid_to_total_experiences: Dict[str, int],
pipeline_stage: Optional[int] = None,
) -> None:
self.mode = mode
        self.training_steps: Optional[int] = training_steps
self.storage_uuid_to_total_experiences: Dict[
str, int
] = storage_uuid_to_total_experiences
self.pipeline_stage = pipeline_stage
self.metrics_tracker = ScalarMeanTracker()
self.info_trackers: Dict[Tuple[str, str], ScalarMeanTracker] = {}
self.metric_dicts: List[Any] = []
self.viz_data: Optional[Dict[str, List[Dict[str, Any]]]] = None
self.checkpoint_file_name: Optional[str] = None
self.task_callback_data: List[Any] = []
self.num_empty_metrics_dicts_added: int = 0
@property
def num_non_empty_metrics_dicts_added(self) -> int:
return len(self.metric_dicts)
@staticmethod
def _metrics_dict_is_empty(
single_task_metrics_dict: Dict[str, Union[float, int]]
) -> bool:
return (
len(single_task_metrics_dict) == 0
or (
len(single_task_metrics_dict) == 1
and "task_info" in single_task_metrics_dict
)
or (
"success" in single_task_metrics_dict
and single_task_metrics_dict["success"] is None
)
)
def add_metrics_dict(
self, single_task_metrics_dict: Dict[str, Union[float, int]]
) -> bool:
if self._metrics_dict_is_empty(single_task_metrics_dict):
self.num_empty_metrics_dicts_added += 1
return False
self.metric_dicts.append(single_task_metrics_dict)
self.metrics_tracker.add_scalars(
{k: v for k, v in single_task_metrics_dict.items() if k != "task_info"}
)
return True
def add_info_dict(
self,
info_dict: Dict[str, Union[int, float]],
n: int,
stage_component_uuid: str,
storage_uuid: str,
):
key = (stage_component_uuid, storage_uuid)
if key not in self.info_trackers:
self.info_trackers[key] = ScalarMeanTracker()
assert n >= 0
self.info_trackers[key].add_scalars(scalars=info_dict, n=n)
class LinearDecay(object):
"""Linearly decay between two values over some number of steps.
Obtain the value corresponding to the `i`-th step by calling
an instance of this class with the value `i`.
# Parameters
steps : The number of steps over which to decay.
startp : The starting value.
endp : The ending value.
"""
def __init__(self, steps: int, startp: float = 1.0, endp: float = 0.0) -> None:
"""Initializer.
See class documentation for parameter definitions.
"""
self.steps = steps
self.startp = startp
self.endp = endp
def __call__(self, epoch: int) -> float:
"""Get the decayed value for `epoch` number of steps.
# Parameters
epoch : The number of steps.
# Returns
Decayed value for `epoch` number of steps.
"""
epoch = max(min(epoch, self.steps), 0)
return self.startp + (self.endp - self.startp) * (epoch / float(self.steps))
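# A minimal usage sketch for `LinearDecay` (illustration only; the helper below is
# hypothetical). Inputs are clamped to the range [0, steps].
def _example_linear_decay() -> List[float]:
    decay = LinearDecay(steps=100, startp=1.0, endp=0.0)
    return [decay(0), decay(50), decay(200)]  # [1.0, 0.5, 0.0]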
class MultiLinearDecay(object):
"""Container for multiple stages of LinearDecay.
Obtain the value corresponding to the `i`-th step by calling
an instance of this class with the value `i`.
# Parameters
stages: List of `LinearDecay` objects to be sequentially applied
for the number of steps in each stage.
"""
def __init__(self, stages: Sequence[LinearDecay]) -> None:
"""Initializer.
See class documentation for parameter definitions.
"""
self.stages = stages
self.steps = np.cumsum([stage.steps for stage in self.stages])
self.total_steps = self.steps[-1]
self.stage_idx = -1
self.min_steps = 0
self.max_steps = 0
self.stage = None
def __call__(self, epoch: int) -> float:
"""Get the decayed value factor for `epoch` number of steps.
# Parameters
epoch : The number of steps.
# Returns
Decayed value for `epoch` number of steps.
"""
epoch = max(min(epoch, self.total_steps), 0)
while epoch >= self.max_steps and self.max_steps < self.total_steps:
self.stage_idx += 1
assert self.stage_idx < len(self.stages)
self.min_steps = self.max_steps
self.max_steps = self.steps[self.stage_idx]
self.stage = self.stages[self.stage_idx]
return self.stage(epoch - self.min_steps)
# noinspection PyTypeHints,PyUnresolvedReferences
def set_deterministic_cudnn() -> None:
"""Makes cudnn deterministic.
This may slow down computations.
"""
if torch.cuda.is_available():
torch.backends.cudnn.deterministic = True # type: ignore
torch.backends.cudnn.benchmark = False # type: ignore
def set_seed(seed: Optional[int] = None) -> None:
"""Set seeds for multiple (cpu) sources of randomness.
Sets seeds for (cpu) `pytorch`, base `random`, and `numpy`.
# Parameters
seed : The seed to set. If set to None, keep using the current seed.
"""
if seed is None:
return
torch.manual_seed(seed) # seeds the RNG for all devices (CPU and GPUs)
random.seed(seed)
np.random.seed(seed)
class EarlyStoppingCriterion(abc.ABC):
"""Abstract class for class who determines if training should stop early in
a particular pipeline stage."""
@abc.abstractmethod
def __call__(
self, stage_steps: int, total_steps: int, training_metrics: ScalarMeanTracker,
) -> bool:
"""Returns `True` if training should be stopped early.
# Parameters
stage_steps: Total number of steps taken in the current pipeline stage.
total_steps: Total number of steps taken during training so far (includes steps
taken in prior pipeline stages).
        training_metrics: Metrics accumulated over some fixed number of steps of
            training (see the `metric_accumulate_interval` attribute in the
            `TrainingPipeline` class).
"""
raise NotImplementedError
class NeverEarlyStoppingCriterion(EarlyStoppingCriterion):
"""Implementation of `EarlyStoppingCriterion` which never stops early."""
def __call__(
self, stage_steps: int, total_steps: int, training_metrics: ScalarMeanTracker,
) -> bool:
return False
class OffPolicyPipelineComponent(NamedTuple):
"""An off-policy component for a PipeLineStage.
# Attributes
data_iterator_builder: A function to instantiate a Data Iterator (with a __next__(self) method)
loss_names: list of unique names assigned to off-policy losses
updates: number of off-policy updates between on-policy rollout collections
loss_weights : A list of floating point numbers describing the relative weights
applied to the losses referenced by `loss_names`. Should be the same length
as `loss_names`. If this is `None`, all weights will be assumed to be one.
data_iterator_kwargs_generator: Optional generator of keyword arguments for data_iterator_builder (useful for
distributed training. It takes
a `cur_worker` int value,
a `rollouts_per_worker` list of number of samplers per training worker,
and an optional random `seed` shared by all workers, which can be None.
"""
data_iterator_builder: Callable[..., Iterator]
loss_names: List[str]
updates: int
loss_weights: Optional[Sequence[float]] = None
data_iterator_kwargs_generator: Callable[
[int, Sequence[int], Optional[int]], Dict
] = lambda cur_worker, rollouts_per_worker, seed: {}
class TrainingSettings:
"""Class defining parameters used for training (within a stage or the
entire pipeline).
# Attributes
num_mini_batch : The number of mini-batches to break a rollout into.
update_repeats : The number of times we will cycle through the mini-batches corresponding
to a single rollout doing gradient updates.
max_grad_norm : The maximum "inf" norm of any gradient step (gradients are clipped to not exceed this).
num_steps : Total number of steps a single agent takes in a rollout.
gamma : Discount factor applied to rewards (should be in [0, 1]).
use_gae : Whether or not to use generalized advantage estimation (GAE).
gae_lambda : The additional parameter used in GAE.
advance_scene_rollout_period: Optional number of rollouts before enforcing an advance scene in all samplers.
save_interval : The frequency with which to save (in total agent steps taken). If `None` then *no*
checkpoints will be saved. Otherwise, in addition to the checkpoints being saved every
`save_interval` steps, a checkpoint will *always* be saved at the end of each pipeline stage.
If `save_interval <= 0` then checkpoints will only be saved at the end of each pipeline stage.
metric_accumulate_interval : The frequency with which training/validation metrics are accumulated
(in total agent steps). Metrics accumulated in an interval are logged (if `should_log` is `True`)
and used by the stage's early stopping criterion (if any).
"""
num_mini_batch: Optional[int]
update_repeats: Optional[Union[int, Sequence[int]]]
max_grad_norm: Optional[float]
num_steps: Optional[int]
gamma: Optional[float]
use_gae: Optional[bool]
gae_lambda: Optional[float]
advance_scene_rollout_period: Optional[int]
save_interval: Optional[int]
metric_accumulate_interval: Optional[int]
# noinspection PyUnresolvedReferences
def __init__(
self,
num_mini_batch: Optional[int] = None,
update_repeats: Optional[int] = None,
max_grad_norm: Optional[float] = None,
num_steps: Optional[int] = None,
gamma: Optional[float] = None,
use_gae: Optional[bool] = None,
gae_lambda: Optional[float] = None,
advance_scene_rollout_period: Optional[int] = None,
save_interval: Optional[int] = None,
metric_accumulate_interval: Optional[int] = None,
):
self._key_to_setting = prepare_locals_for_super(locals(), ignore_kwargs=True)
self._training_setting_keys = tuple(sorted(self._key_to_setting.keys()))
self._defaults: Optional["TrainingSettings"] = None
def keys(self) -> Tuple[str, ...]:
return self._training_setting_keys
def has_key(self, key: str) -> bool:
return key in self._key_to_setting
def set_defaults(self, defaults: "TrainingSettings"):
assert self._defaults is None, "Defaults can only be set once."
self._defaults = defaults
def __getattr__(self, item: str):
if item in self._key_to_setting:
val = self._key_to_setting[item]
if val is None and self._defaults is not None:
val = getattr(self._defaults, item)
return val
else:
            return super(TrainingSettings, self).__getattribute__(item)
@attr.s(kw_only=True)
class StageComponent:
"""A custom component for a PipelineStage, possibly including overrides to
the `TrainingSettings` from the `TrainingPipeline` and `PipelineStage`.
# Attributes
uuid: the name of this component
storage_uuid: the name of the `ExperienceStorage` that will be used with this component.
loss_names: list of unique names assigned to off-policy losses
training_settings: Instance of `TrainingSettings`
loss_weights : A list of floating point numbers describing the relative weights
applied to the losses referenced by `loss_names`. Should be the same length
as `loss_names`. If this is `None`, all weights will be assumed to be one.
"""
uuid: str = attr.ib()
storage_uuid: str = attr.ib()
loss_names: Sequence[str] = attr.ib()
training_settings: TrainingSettings = attr.ib(
default=attr.Factory(TrainingSettings)
)
@training_settings.validator
def _validate_training_settings(self, attribute, value: TrainingSettings):
must_be_none = [
"num_steps",
"gamma",
"use_gae",
"gae_lambda",
"advance_scene_rollout_period",
"save_interval",
"metric_accumulate_interval",
]
for key in must_be_none:
assert getattr(value, key) is None, (
f"`{key}` must be `None` in `TrainingSettings` passed to"
f" `StageComponent` (as such values will be ignored). Pass such"
f" settings to the `PipelineStage` or `TrainingPipeline` objects instead.",
)
class PipelineStage:
"""A single stage in a training pipeline, possibly including overrides to
the global `TrainingSettings` in `TrainingPipeline`.
# Attributes
    loss_names : A collection of unique names assigned to losses. These will
        reference the `Loss` objects in a `TrainingPipeline` instance.
    max_stage_steps : Either the total number of steps agents should take in this stage or
        a Callable object (e.g. a function).
    loss_weights : A list of floating point numbers describing the relative weights
        applied to the losses referenced by `loss_names`. Should be the same length
        as `loss_names`. If this is `None`, all weights will be assumed to be one.
teacher_forcing : If applicable, defines the probability an agent will take the
expert action (as opposed to its own sampled action) at a given time point.
early_stopping_criterion: An `EarlyStoppingCriterion` object which determines if
training in this stage should be stopped early. If `None` then no early stopping
occurs. If `early_stopping_criterion` is not `None` then we do not guarantee
reproducibility when restarting a model from a checkpoint (as the
`EarlyStoppingCriterion` object may store internal state which is not
saved in the checkpoint). Currently, AllenAct only supports using early stopping
criterion when **not** using distributed training.
training_settings: Instance of `TrainingSettings`.
    training_settings_kwargs: For backwards compatibility: arguments used to instantiate `TrainingSettings` when
        `training_settings` is `None`.
"""
def __init__(
self,
*, # Disables positional arguments. Please provide arguments as keyword arguments.
max_stage_steps: Union[int, Callable],
loss_names: List[str],
loss_weights: Optional[Sequence[float]] = None,
teacher_forcing: Optional[Callable[[int], float]] = None,
stage_components: Optional[Sequence[StageComponent]] = None,
early_stopping_criterion: Optional[EarlyStoppingCriterion] = None,
training_settings: Optional[TrainingSettings] = None,
**training_settings_kwargs,
):
# Populate TrainingSettings members
# THIS MUST COME FIRST IN `__init__` as otherwise `__getattr__` will loop infinitely.
assert training_settings is None or len(training_settings_kwargs) == 0
if training_settings is None:
training_settings = TrainingSettings(**training_settings_kwargs)
self.training_settings = training_settings
assert self.training_settings.update_repeats is None or isinstance(
self.training_settings.update_repeats, numbers.Integral
), (
"`training_settings` passed to `PipelineStage` must have `training_settings.update_repeats`"
" equal to `None` or an integer. If you'd like to specify per-loss `update_repeats` then please"
" do so in the training settings of a `StageComponent`."
)
self.loss_names = loss_names
self.max_stage_steps = max_stage_steps
self.loss_weights = (
[1.0] * len(loss_names) if loss_weights is None else loss_weights
)
assert len(self.loss_weights) == len(self.loss_names)
self.teacher_forcing = teacher_forcing
self.early_stopping_criterion = early_stopping_criterion
self.steps_taken_in_stage: int = 0
self.rollout_count = 0
self.early_stopping_criterion_met = False
self.uuid_to_loss_weight: Dict[str, float] = {
loss_uuid: loss_weight
for loss_uuid, loss_weight in zip(loss_names, self.loss_weights)
}
self._stage_components: List[StageComponent] = []
self.uuid_to_stage_component: Dict[str, StageComponent] = {}
self.storage_uuid_to_steps_taken_in_stage: Dict[str, int] = {}
self.stage_component_uuid_to_stream_memory: Dict[str, Memory] = {}
if stage_components is not None:
for stage_component in stage_components:
self.add_stage_component(stage_component)
# Sanity check
for key in training_settings.keys():
assert not hasattr(
self, key
), f"`{key}` should be defined in `TrainingSettings`, not in `PipelineStage`."
def reset(self):
self.steps_taken_in_stage: int = 0
self.rollout_count = 0
self.early_stopping_criterion_met = False
for k in self.storage_uuid_to_steps_taken_in_stage:
self.storage_uuid_to_steps_taken_in_stage[k] = 0
for memory in self.stage_component_uuid_to_stream_memory.values():
memory.clear()
@property
def stage_components(self) -> Tuple[StageComponent]:
return tuple(self._stage_components)
def add_stage_component(self, stage_component: StageComponent):
assert stage_component.uuid not in self.uuid_to_stage_component
# Setting default training settings for the `stage_component`
sc_ts = stage_component.training_settings
sc_ts.set_defaults(self.training_settings)
# Handling the case where different losses should be updated different
# numbers of times
stage_update_repeats = self.training_settings.update_repeats
if stage_update_repeats is not None and sc_ts.update_repeats is None:
loss_to_update_repeats = dict(zip(self.loss_names, stage_update_repeats))
if isinstance(stage_update_repeats, Sequence):
sc_ts.update_repeats = [
loss_to_update_repeats[uuid] for uuid in stage_component.loss_names
]
else:
sc_ts.update_repeats = stage_update_repeats
self._stage_components.append(stage_component)
self.uuid_to_stage_component[stage_component.uuid] = stage_component
if (
stage_component.storage_uuid
not in self.storage_uuid_to_steps_taken_in_stage
):
self.storage_uuid_to_steps_taken_in_stage[stage_component.storage_uuid] = 0
else:
raise NotImplementedError(
"Cannot have multiple stage components which"
f" use the same storage (reused storage uuid: '{stage_component.storage_uuid}'."
)
self.stage_component_uuid_to_stream_memory[stage_component.uuid] = Memory()
def __setattr__(self, key: str, value: Any):
if key != "training_settings" and self.training_settings.has_key(key):
raise NotImplementedError(
f"Cannot set {key} in {self.__name__}, update the"
f" `training_settings` attribute of {self.__name__} instead."
)
else:
return super(PipelineStage, self).__setattr__(key, value)
@property
def is_complete(self):
return (
self.early_stopping_criterion_met
or self.steps_taken_in_stage >= self.max_stage_steps
)
class TrainingPipeline:
"""Class defining the stages (and global training settings) in a training
pipeline.
The training pipeline can be used as an iterator to go through the pipeline
stages in, for instance, a loop.
# Parameters
    named_losses : Dictionary mapping the name of a loss to either an instantiation
of that loss or a `Builder` that, when called, will return that loss.
pipeline_stages : A list of PipelineStages. Each of these define how the agent
will be trained and are executed sequentially.
optimizer_builder : Builder object to instantiate the optimizer to use during training.
named_storages: Map of storage names to corresponding `ExperienceStorage` instances or `Builder` objects.
If this is `None` (or does not contain a value of (sub)type `RolloutStorage`) then a new
`Builder[RolloutBlockStorage]` will be created and added by default.
    rollout_storage_uuid: Optional name of the `RolloutStorage`. If `None` is given, it will be set to the
        name of the unique `ExperienceStorage` of subclass `RolloutStorage` in `named_storages`. Note that this
        assumes that there is only a single `RolloutStorage` object among the values of `named_storages`.
should_log: `True` if metrics accumulated during training should be logged to the console as well
as to a tensorboard file.
lr_scheduler_builder : Optional builder object to instantiate the learning rate scheduler used
through the pipeline.
training_settings: Instance of `TrainingSettings`
    training_settings_kwargs: For backwards compatibility: arguments used to instantiate `TrainingSettings` when
        `training_settings` is `None`.
"""
# noinspection PyUnresolvedReferences
def __init__(
self,
*,
named_losses: Dict[str, Union[Loss, Builder[Loss]]],
pipeline_stages: List[PipelineStage],
optimizer_builder: Builder[optim.Optimizer], # type: ignore
named_storages: Optional[
Dict[str, Union[ExperienceStorage, Builder[ExperienceStorage]]]
] = None,
rollout_storage_uuid: Optional[str] = None,
should_log: bool = True,
lr_scheduler_builder: Optional[Builder[_LRScheduler]] = None, # type: ignore
training_settings: Optional[TrainingSettings] = None,
valid_pipeline_stage: Optional[PipelineStage] = None,
test_pipeline_stage: Optional[PipelineStage] = None,
**training_settings_kwargs,
):
"""Initializer.
See class docstring for parameter definitions.
"""
# Populate TrainingSettings members
assert training_settings is None or len(training_settings_kwargs) == 0
if training_settings is None:
training_settings = TrainingSettings(**training_settings_kwargs)
self.training_settings = training_settings
assert self.training_settings.update_repeats is None or isinstance(
self.training_settings.update_repeats, numbers.Integral
), (
"`training_settings` passed to `TrainingPipeline` must have `training_settings.update_repeats`"
" equal to `None` or an integer. If you'd like to specify per-loss `update_repeats` then please"
" do so in the training settings of a `StageComponent`."
)
self.optimizer_builder = optimizer_builder
self.lr_scheduler_builder = lr_scheduler_builder
self._named_losses = named_losses
self._named_storages = self._initialize_named_storages(
named_storages=named_storages
)
self.rollout_storage_uuid = self._initialize_rollout_storage_uuid(
rollout_storage_uuid
)
if self.rollout_storage_uuid is None:
get_logger().warning(
f"No rollout storage was specified in the TrainingPipeline. This need not be an issue"
f" if you are performing off-policy training but, otherwise, please ensure you have"
f" defined a rollout storage in the `named_storages` argument of the TrainingPipeline."
)
self.should_log = should_log
self.pipeline_stages = pipeline_stages
def if_none_then_empty_stage(stage: Optional[PipelineStage]) -> PipelineStage:
return (
stage
if stage is not None
else PipelineStage(max_stage_steps=-1, loss_names=[])
)
self.valid_pipeline_stage = if_none_then_empty_stage(valid_pipeline_stage)
self.test_pipeline_stage = if_none_then_empty_stage(test_pipeline_stage)
assert (
len(self.pipeline_stages) == len(set(id(ps) for ps in pipeline_stages))
and self.valid_pipeline_stage not in self.pipeline_stages
and self.test_pipeline_stage not in self.pipeline_stages
), (
"Duplicate `PipelineStage` object instances found in the pipeline stages input"
" to `TrainingPipeline`. `PipelineStage` objects are not immutable, if you'd"
" like to have multiple pipeline stages of the same type, please instantiate"
" multiple separate instances."
)
self._ensure_pipeline_stages_all_have_at_least_one_stage_component()
self._current_stage: Optional[PipelineStage] = None
self.rollout_count = 0
self._refresh_current_stage(force_stage_search_from_start=True)
def _initialize_rollout_storage_uuid(
self, rollout_storage_uuid: Optional[str]
) -> str:
if rollout_storage_uuid is None:
rollout_storage_uuids = self._get_uuids_of_rollout_storages(
self._named_storages
)
assert len(rollout_storage_uuids) <= 1, (
f"`rollout_storage_uuid` cannot be automatically inferred as there are multiple storages defined"
f" (ids: {rollout_storage_uuids}) of type `RolloutStorage`."
)
rollout_storage_uuid = next(iter(rollout_storage_uuids), None)
assert (
rollout_storage_uuid is None or rollout_storage_uuid in self._named_storages
)
return rollout_storage_uuid
def _ensure_pipeline_stages_all_have_at_least_one_stage_component(self):
rollout_storages_uuids = self._get_uuids_of_rollout_storages(
self._named_storages
)
named_pipeline_stages = {
f"{i}th": ps for i, ps in enumerate(self.pipeline_stages)
}
named_pipeline_stages["valid"] = self.valid_pipeline_stage
named_pipeline_stages["test"] = self.test_pipeline_stage
for stage_name, stage in named_pipeline_stages.items():
# Forward default `TrainingSettings` to all `PipelineStage`s settings:
stage.training_settings.set_defaults(defaults=self.training_settings)
if len(stage.stage_components) == 0:
assert len(rollout_storages_uuids) <= 1, (
f"In {stage_name} pipeline stage: you have several storages specified ({rollout_storages_uuids}) which"
f" are subclasses of `RolloutStorage`. This is only allowed when stage components are explicitly"
f" defined in every `PipelineStage` instance. You have `PipelineStage`s for which stage components"
f" are not specified."
)
if len(rollout_storages_uuids) > 0:
stage.add_stage_component(
StageComponent(
uuid=rollout_storages_uuids[0],
storage_uuid=rollout_storages_uuids[0],
loss_names=stage.loss_names,
training_settings=TrainingSettings(),
)
)
for sc in stage.stage_components:
assert sc.storage_uuid in self._named_storages, (
f"In {stage_name} pipeline stage: storage with name '{sc.storage_uuid}' not found in collection of"
f" defined storages names: {list(self._named_storages.keys())}"
)
if (
self.rollout_storage_uuid is not None
and self.rollout_storage_uuid
not in stage.storage_uuid_to_steps_taken_in_stage
):
stage.storage_uuid_to_steps_taken_in_stage[
self.rollout_storage_uuid
] = 0
@classmethod
def _get_uuids_of_rollout_storages(
cls,
named_storages: Dict[str, Union[Builder[ExperienceStorage], ExperienceStorage]],
) -> List[str]:
return [
uuid
for uuid, storage in named_storages.items()
if isinstance(storage, RolloutStorage)
or (
isinstance(storage, Builder)
and issubclass(storage.class_type, RolloutStorage)
)
]
@classmethod
def _initialize_named_storages(
cls,
named_storages: Optional[
Dict[str, Union[Builder[ExperienceStorage], ExperienceStorage]]
],
) -> Dict[str, Union[Builder[ExperienceStorage], ExperienceStorage]]:
named_storages = {} if named_storages is None else {**named_storages}
rollout_storages_uuids = cls._get_uuids_of_rollout_storages(named_storages)
if len(named_storages) == 0:
assert (
_DEFAULT_ONPOLICY_UUID not in named_storages
), f"Storage uuid '{_DEFAULT_ONPOLICY_UUID}' is reserved, please pick a different uuid."
named_storages[_DEFAULT_ONPOLICY_UUID] = Builder(RolloutBlockStorage)
rollout_storages_uuids.append(_DEFAULT_ONPOLICY_UUID)
return named_storages
def _refresh_current_stage(
self, force_stage_search_from_start: bool = False
) -> Optional[PipelineStage]:
if force_stage_search_from_start:
self._current_stage = None
if self._current_stage is None or self._current_stage.is_complete:
if self._current_stage is None:
start_index = 0
else:
start_index = self.pipeline_stages.index(self._current_stage) + 1
self._current_stage = None
for ps in self.pipeline_stages[start_index:]:
if not ps.is_complete:
self._current_stage = ps
break
return self._current_stage
@property
def total_steps(self) -> int:
return sum(ps.steps_taken_in_stage for ps in self.pipeline_stages)
@property
def storage_uuid_to_total_experiences(self) -> Dict[str, int]:
totals = {k: 0 for k in self._named_storages}
for ps in self.pipeline_stages:
for k in ps.storage_uuid_to_steps_taken_in_stage:
totals[k] += ps.storage_uuid_to_steps_taken_in_stage[k]
for k in totals:
split = k.split("__")
if len(split) == 2 and split[1] in ["valid", "test"]:
assert totals[k] == 0, (
"Total experiences should be 0 for validation/test storages, i.e."
" storages who have `__valid` or `__test` as their suffix. These storages"
" will copy their `total_experiences` from the corresponding training"
" storage i.e.:\n"
" 1. the storage without the above suffix if it exists, else\n"
" 2. the total number of steps."
)
totals[k] = totals.get(split[0], self.total_steps)
return totals
@property
def current_stage(self) -> Optional[PipelineStage]:
return self._current_stage
@property
def current_stage_index(self) -> Optional[int]:
if self.current_stage is None:
return None
return self.pipeline_stages.index(self.current_stage)
def before_rollout(self, train_metrics: Optional[ScalarMeanTracker] = None) -> bool:
if (
train_metrics is not None
and self.current_stage.early_stopping_criterion is not None
):
self.current_stage.early_stopping_criterion_met = self.current_stage.early_stopping_criterion(
stage_steps=self.current_stage.steps_taken_in_stage,
total_steps=self.total_steps,
training_metrics=train_metrics,
)
if self.current_stage.early_stopping_criterion_met:
get_logger().debug(
f"Early stopping criterion met after {self.total_steps} total steps "
f"({self.current_stage.steps_taken_in_stage} in current stage, stage index {self.current_stage_index})."
)
return self.current_stage is not self._refresh_current_stage(
force_stage_search_from_start=False
)
def restart_pipeline(self):
for ps in self.pipeline_stages:
ps.reset()
if self.valid_pipeline_stage:
self.valid_pipeline_stage.reset()
if self.test_pipeline_stage:
self.test_pipeline_stage.reset()
self._current_stage = None
self._refresh_current_stage(force_stage_search_from_start=True)
def state_dict(self):
return dict(
stage_info_list=[
{
"early_stopping_criterion_met": ps.early_stopping_criterion_met,
"steps_taken_in_stage": ps.steps_taken_in_stage,
"storage_uuid_to_steps_taken_in_stage": ps.storage_uuid_to_steps_taken_in_stage,
"rollout_count": ps.rollout_count,
}
for ps in self.pipeline_stages
],
rollout_count=self.rollout_count,
)
def load_state_dict(self, state_dict: Dict[str, Any]):
if "off_policy_epochs" in state_dict:
get_logger().warning(
"Loaded state dict was saved using an older version of AllenAct."
" If you are attempting to restart training for a model that had an off-policy component, be aware"
" that logging for the off-policy component will not behave as it previously did."
" Additionally, while the total step count will remain accurate, step counts"
" associated with losses will be reset to step 0."
)
for ps, stage_info in zip(self.pipeline_stages, state_dict["stage_info_list"]):
ps.early_stopping_criterion_met = stage_info["early_stopping_criterion_met"]
ps.steps_taken_in_stage = stage_info["steps_taken_in_stage"]
if "storage_uuid_to_steps_taken_in_stage" in stage_info:
ps.storage_uuid_to_steps_taken_in_stage = stage_info[
"storage_uuid_to_steps_taken_in_stage"
]
ps.rollout_count = stage_info["rollout_count"]
self.rollout_count = state_dict["rollout_count"]
self._refresh_current_stage(force_stage_search_from_start=True)
@property
def rollout_storage(self) -> Optional[RolloutStorage]:
if self.rollout_storage_uuid is None:
return None
rs = self._named_storages[self.rollout_storage_uuid]
if isinstance(rs, Builder):
rs = rs()
self._named_storages[self.rollout_storage_uuid] = rs
return cast(RolloutStorage, rs)
def get_stage_storage(
self, stage: PipelineStage
) -> "OrderedDict[str, ExperienceStorage]":
storage_uuids_for_current_stage_set = set(
sc.storage_uuid for sc in stage.stage_components
)
# Always include self.rollout_storage_uuid in the current stage storage (when the uuid is defined)
if self.rollout_storage_uuid is not None:
storage_uuids_for_current_stage_set.add(self.rollout_storage_uuid)
storage_uuids_for_current_stage = sorted(
list(storage_uuids_for_current_stage_set)
)
for storage_uuid in storage_uuids_for_current_stage:
if isinstance(self._named_storages[storage_uuid], Builder):
self._named_storages[storage_uuid] = cast(
Builder["ExperienceStorage"], self._named_storages[storage_uuid],
)()
return OrderedDict(
(k, self._named_storages[k]) for k in storage_uuids_for_current_stage
)
@property
def current_stage_storage(self) -> "OrderedDict[str, ExperienceStorage]":
return self.get_stage_storage(self.current_stage)
def get_loss(self, uuid: str):
if isinstance(self._named_losses[uuid], Builder):
self._named_losses[uuid] = cast(
Builder[Union["AbstractActorCriticLoss", "GenericAbstractLoss"]],
self._named_losses[uuid],
)()
return self._named_losses[uuid]
@property
def current_stage_losses(
self,
) -> Dict[str, Union[AbstractActorCriticLoss, GenericAbstractLoss]]:
for loss_name in self.current_stage.loss_names:
if isinstance(self._named_losses[loss_name], Builder):
self._named_losses[loss_name] = cast(
Builder[Union["AbstractActorCriticLoss", "GenericAbstractLoss"]],
self._named_losses[loss_name],
)()
return {
loss_name: cast(
Union[AbstractActorCriticLoss, GenericAbstractLoss],
self._named_losses[loss_name],
)
for loss_name in self.current_stage.loss_names
}
| allenact-main | allenact/utils/experiment_utils.py |
# Original work Copyright (c) 2016 OpenAI (https://openai.com).
# Modified work Copyright (c) Allen Institute for AI
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Union, Tuple, List, cast, Iterable, Callable
from collections import OrderedDict
import numpy as np
import torch
from gym import spaces as gym
ActionType = Union[torch.Tensor, OrderedDict, Tuple, int]
def flatdim(space):
"""Return the number of dimensions a flattened equivalent of this space
would have.
Accepts a space and returns an integer. Raises
``NotImplementedError`` if the space is not defined in
``gym.spaces``.
"""
if isinstance(space, gym.Box):
return int(np.prod(space.shape))
elif isinstance(space, gym.Discrete):
return 1 # we do not expand to one-hot
elif isinstance(space, gym.Tuple):
return int(sum([flatdim(s) for s in space.spaces]))
elif isinstance(space, gym.Dict):
return int(sum([flatdim(s) for s in space.spaces.values()]))
elif isinstance(space, gym.MultiBinary):
return int(space.n)
elif isinstance(space, gym.MultiDiscrete):
return int(np.prod(space.shape))
else:
raise NotImplementedError
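# A minimal usage sketch for `flatdim` (illustration only; the helper below is
# hypothetical). Note that `Discrete` contributes a single dimension since it is
# not expanded to a one-hot encoding.
def _example_flatdim() -> int:
    space = gym.Dict(
        [("pos", gym.Box(low=0.0, high=1.0, shape=(2,))), ("act", gym.Discrete(5))]
    )
    return flatdim(space)  # 2 + 1 == 3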
def flatten(space, torch_x):
"""Flatten data points from a space."""
if isinstance(space, gym.Box):
if len(space.shape) > 0:
return torch_x.view(torch_x.shape[: -len(space.shape)] + (-1,))
else:
return torch_x.view(torch_x.shape + (-1,))
elif isinstance(space, gym.Discrete):
# Assume tensor input does NOT contain a dimension for action
if isinstance(torch_x, torch.Tensor):
return torch_x.unsqueeze(-1)
else:
return torch.tensor(torch_x).view(1)
elif isinstance(space, gym.Tuple):
return torch.cat(
[flatten(s, x_part) for x_part, s in zip(torch_x, space.spaces)], dim=-1
)
elif isinstance(space, gym.Dict):
return torch.cat(
[flatten(s, torch_x[key]) for key, s in space.spaces.items()], dim=-1
)
elif isinstance(space, gym.MultiBinary):
return torch_x.view(torch_x.shape[: -len(space.shape)] + (-1,))
elif isinstance(space, gym.MultiDiscrete):
return torch_x.view(torch_x.shape[: -len(space.shape)] + (-1,))
else:
raise NotImplementedError
def unflatten(space, torch_x):
"""Unflatten a concatenated data points tensor from a space."""
if isinstance(space, gym.Box):
return torch_x.view(torch_x.shape[:-1] + space.shape).float()
elif isinstance(space, gym.Discrete):
res = torch_x.view(torch_x.shape[:-1] + space.shape).long()
return res if len(res.shape) > 0 else res.item()
elif isinstance(space, gym.Tuple):
dims = [flatdim(s) for s in space.spaces]
list_flattened = torch.split(torch_x, dims, dim=-1)
list_unflattened = [
unflatten(s, flattened)
for flattened, s in zip(list_flattened, space.spaces)
]
return tuple(list_unflattened)
elif isinstance(space, gym.Dict):
dims = [flatdim(s) for s in space.spaces.values()]
list_flattened = torch.split(torch_x, dims, dim=-1)
list_unflattened = [
(key, unflatten(s, flattened))
for flattened, (key, s) in zip(list_flattened, space.spaces.items())
]
return OrderedDict(list_unflattened)
elif isinstance(space, gym.MultiBinary):
return torch_x.view(torch_x.shape[:-1] + space.shape).byte()
elif isinstance(space, gym.MultiDiscrete):
return torch_x.view(torch_x.shape[:-1] + space.shape).long()
else:
raise NotImplementedError
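# A minimal round-trip sketch for `flatten`/`unflatten` (illustration only; the
# helper below is hypothetical).
def _example_flatten_roundtrip() -> torch.Tensor:
    space = gym.Box(low=0.0, high=1.0, shape=(2, 3))
    flat = flatten(space, torch.rand(2, 3))  # shape (6,)
    return unflatten(space, flat)  # back to shape (2, 3)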
def torch_point(space, np_x):
"""Convert numpy space point into torch."""
if isinstance(space, gym.Box):
return torch.from_numpy(np_x)
elif isinstance(space, gym.Discrete):
return np_x
elif isinstance(space, gym.Tuple):
return tuple([torch_point(s, x_part) for x_part, s in zip(np_x, space.spaces)])
elif isinstance(space, gym.Dict):
return OrderedDict(
[(key, torch_point(s, np_x[key])) for key, s in space.spaces.items()]
)
elif isinstance(space, gym.MultiBinary):
return torch.from_numpy(np_x)
elif isinstance(space, gym.MultiDiscrete):
return torch.from_numpy(np.asarray(np_x))
else:
raise NotImplementedError
def numpy_point(
space: gym.Space, torch_x: Union[int, torch.Tensor, OrderedDict, Tuple]
):
"""Convert torch space point into numpy."""
if isinstance(space, gym.Box):
return cast(torch.Tensor, torch_x).cpu().numpy()
elif isinstance(space, gym.Discrete):
return torch_x
elif isinstance(space, gym.Tuple):
return tuple(
[
numpy_point(s, x_part)
for x_part, s in zip(cast(Iterable, torch_x), space.spaces)
]
)
elif isinstance(space, gym.Dict):
return OrderedDict(
[
(key, numpy_point(s, cast(torch.Tensor, torch_x)[key]))
for key, s in space.spaces.items()
]
)
elif isinstance(space, gym.MultiBinary):
return cast(torch.Tensor, torch_x).cpu().numpy()
elif isinstance(space, gym.MultiDiscrete):
return cast(torch.Tensor, torch_x).cpu().numpy()
else:
raise NotImplementedError
def flatten_space(space: gym.Space):
if isinstance(space, gym.Box):
return gym.Box(space.low.flatten(), space.high.flatten())
if isinstance(space, gym.Discrete):
return gym.Box(low=0, high=space.n, shape=(1,))
if isinstance(space, gym.Tuple):
space = [flatten_space(s) for s in space.spaces]
return gym.Box(
low=np.concatenate([s.low for s in space]),
high=np.concatenate([s.high for s in space]),
)
if isinstance(space, gym.Dict):
space = [flatten_space(s) for s in space.spaces.values()]
return gym.Box(
low=np.concatenate([s.low for s in space]),
high=np.concatenate([s.high for s in space]),
)
if isinstance(space, gym.MultiBinary):
return gym.Box(low=0, high=1, shape=(space.n,))
if isinstance(space, gym.MultiDiscrete):
return gym.Box(low=np.zeros_like(space.nvec), high=space.nvec,)
raise NotImplementedError
def policy_space(
action_space: gym.Space, box_space_to_policy: Callable[[gym.Box], gym.Space] = None,
) -> gym.Space:
if isinstance(action_space, gym.Box):
if box_space_to_policy is None:
# policy = mean (default)
return action_space
else:
return box_space_to_policy(action_space)
if isinstance(action_space, gym.Discrete):
# policy = prob of each option
return gym.Box(
low=np.float32(0.0), high=np.float32(1.0), shape=(action_space.n,)
)
if isinstance(action_space, gym.Tuple):
# policy = tuple of sub-policies
spaces = [policy_space(s, box_space_to_policy) for s in action_space.spaces]
return gym.Tuple(spaces)
if isinstance(action_space, gym.Dict):
# policy = dict of sub-policies
spaces = [
(name, policy_space(s, box_space_to_policy),)
for name, s in action_space.spaces.items()
]
return gym.Dict(spaces)
if isinstance(action_space, gym.MultiBinary):
# policy = prob of 0, 1 in each entry
return gym.Box(
low=np.float32(0.0), high=np.float32(1.0), shape=(action_space.n, 2)
)
if isinstance(action_space, gym.MultiDiscrete):
# policy = Tuple of prob of each option for each discrete
return gym.Tuple(
[
gym.Box(low=np.float32(0.0), high=np.float32(1.0), shape=(n,))
for n in action_space.nvec
]
)
raise NotImplementedError
def action_list(
action_space: gym.Space, flat_actions: torch.Tensor
) -> List[ActionType]:
"""Convert flattened actions to list.
Assumes `flat_actions` are of shape `[step, sampler, flatdim]`.
"""
def tolist(action):
if isinstance(action, torch.Tensor):
return action.tolist()
if isinstance(action, Tuple):
actions = [tolist(ac) for ac in action]
return tuple(actions)
if isinstance(action, OrderedDict):
actions = [(key, tolist(action[key])) for key in action.keys()]
return OrderedDict(actions)
# else, it's a scalar
return action
return [tolist(unflatten(action_space, ac)) for ac in flat_actions[0]]
| allenact-main | allenact/utils/spaces_utils.py |
import io
import logging
import os
import socket
import sys
from contextlib import closing
from typing import cast, Optional, Tuple
from torch import multiprocessing as mp
from allenact._constants import ALLENACT_INSTALL_DIR
HUMAN_LOG_LEVELS: Tuple[str, ...] = ("debug", "info", "warning", "error", "none")
"""
Available log levels: "debug", "info", "warning", "error", "none"
"""
_LOGGER: Optional[logging.Logger] = None
class ColoredFormatter(logging.Formatter):
"""Format a log string with colors.
This implementation taken (with modifications) from
https://stackoverflow.com/a/384125.
"""
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
COLORS = {
"WARNING": YELLOW,
"INFO": GREEN,
"DEBUG": BLUE,
"ERROR": RED,
"CRITICAL": MAGENTA,
}
def __init__(self, fmt: str, datefmt: Optional[str] = None, use_color=True):
super().__init__(fmt=fmt, datefmt=datefmt)
self.use_color = use_color
def format(self, record: logging.LogRecord) -> str:
levelname = record.levelname
if self.use_color and levelname in self.COLORS:
levelname_with_color = (
self.COLOR_SEQ % (30 + self.COLORS[levelname])
+ levelname
+ self.RESET_SEQ
)
record.levelname = levelname_with_color
            formatted_record = logging.Formatter.format(self, record)
            record.levelname = (
                levelname  # Resetting levelname as `record` might be used elsewhere
            )
            return formatted_record
else:
return logging.Formatter.format(self, record)
def get_logger() -> logging.Logger:
"""Get a `logging.Logger` to stderr. It can be called whenever we wish to
log some message. Messages can get mixed-up
(https://docs.python.org/3.6/library/multiprocessing.html#logging), but it
works well in most cases.
# Returns
logger: the `logging.Logger` object
"""
if _new_logger():
if mp.current_process().name == "MainProcess":
_new_logger(logging.DEBUG)
_set_log_formatter()
return _LOGGER
def _human_log_level_to_int(human_log_level):
human_log_level = human_log_level.lower().strip()
assert human_log_level in HUMAN_LOG_LEVELS, "unknown human_log_level {}".format(
human_log_level
)
if human_log_level == "debug":
log_level = logging.DEBUG
elif human_log_level == "info":
log_level = logging.INFO
elif human_log_level == "warning":
log_level = logging.WARNING
elif human_log_level == "error":
log_level = logging.ERROR
elif human_log_level == "none":
log_level = logging.CRITICAL + 1
else:
raise NotImplementedError(f"Unknown log level {human_log_level}.")
return log_level
def init_logging(human_log_level: str = "info") -> None:
"""Init the `logging.Logger`.
It should be called only once in the app (e.g. in `main`). It sets
    the log_level to one of `HUMAN_LOG_LEVELS` and sets up a handler
    for stderr. The logging level is propagated to all subprocesses.
"""
_new_logger(_human_log_level_to_int(human_log_level))
_set_log_formatter()
def update_log_level(logger, human_log_level: str):
logger.setLevel(_human_log_level_to_int(human_log_level))
def find_free_port(address: str = "127.0.0.1") -> int:
"""Finds a free port for distributed training.
# Returns
port: port number that can be used to listen
"""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind((address, 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
port = s.getsockname()[1]
return port
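# A minimal usage sketch for `find_free_port` (illustration only; the helper below
# is hypothetical): construct a TCP init method string for `torch.distributed`.
def _example_find_free_port() -> str:
    return f"tcp://127.0.0.1:{find_free_port()}"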
def _new_logger(log_level: Optional[int] = None):
global _LOGGER
if _LOGGER is None:
_LOGGER = mp.get_logger()
if log_level is not None:
get_logger().setLevel(log_level)
return True
if log_level is not None:
get_logger().setLevel(log_level)
return False
def _set_log_formatter():
assert _LOGGER is not None
if _LOGGER.getEffectiveLevel() <= logging.CRITICAL:
add_style_to_logs = True # In case someone wants to turn this off manually.
if add_style_to_logs:
default_format = "$BOLD[%(asctime)s$RESET %(levelname)s$BOLD:]$RESET %(message)s\t[%(filename)s: %(lineno)d]"
default_format = default_format.replace(
"$BOLD", ColoredFormatter.BOLD_SEQ
).replace("$RESET", ColoredFormatter.RESET_SEQ)
else:
default_format = (
"%(asctime)s %(levelname)s: %(message)s\t[%(filename)s: %(lineno)d]"
)
short_date_format = "%m/%d %H:%M:%S"
log_format = "default"
if log_format == "default":
fmt = default_format
datefmt = short_date_format
elif log_format == "defaultMilliseconds":
fmt = default_format
datefmt = None
else:
fmt = log_format
datefmt = short_date_format
if add_style_to_logs:
formatter = ColoredFormatter(fmt=fmt, datefmt=datefmt,)
else:
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
ch = logging.StreamHandler()
ch.setFormatter(formatter)
ch.addFilter(cast(logging.Filter, _AllenActMessageFilter(os.getcwd())))
_LOGGER.addHandler(ch)
sys.excepthook = _excepthook
sys.stdout = cast(io.TextIOWrapper, _StreamToLogger())
return _LOGGER
class _StreamToLogger:
def __init__(self):
self.linebuf = ""
def write(self, buf):
temp_linebuf = self.linebuf + buf
self.linebuf = ""
for line in temp_linebuf.splitlines(True):
if line[-1] == "\n":
cast(logging.Logger, _LOGGER).info(line.rstrip())
else:
self.linebuf += line
def flush(self):
if self.linebuf != "":
cast(logging.Logger, _LOGGER).info(self.linebuf.rstrip())
self.linebuf = ""
def _excepthook(*args):
# noinspection PyTypeChecker
get_logger().error(msg="Uncaught exception:", exc_info=args)
class _AllenActMessageFilter:
def __init__(self, working_directory: str):
self.working_directory = working_directory
# noinspection PyMethodMayBeStatic
def filter(self, record):
# TODO: Does this work when pip-installing AllenAct?
return int(
self.working_directory in record.pathname
or ALLENACT_INSTALL_DIR in record.pathname
or "main" in record.pathname
)
class ImportChecker:
def __init__(self, msg=None):
self.msg = msg
def __enter__(self):
pass
def __exit__(self, exc_type, value, traceback):
if exc_type == ModuleNotFoundError and self.msg is not None:
value.msg += self.msg
return exc_type is None
| allenact-main | allenact/utils/system.py |
from typing import List, Any
import torch
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
from torchvision.models.detection.faster_rcnn import FasterRCNN
# noinspection PyProtectedMember
from torchvision.models.detection.faster_rcnn import model_urls
from torchvision.models.detection.rpn import AnchorGenerator
from torchvision.models.utils import load_state_dict_from_url
class CachelessAnchorGenerator(AnchorGenerator):
def forward(self, image_list: Any, feature_maps: Any):
grid_sizes = list([feature_map.shape[-2:] for feature_map in feature_maps])
image_size = image_list.tensors.shape[-2:]
strides = [
[int(image_size[0] / g[0]), int(image_size[1] / g[1])] for g in grid_sizes
]
dtype, device = feature_maps[0].dtype, feature_maps[0].device
self.set_cell_anchors(dtype, device)
anchors_over_all_feature_maps = self.grid_anchors(grid_sizes, strides)
anchors = torch.jit.annotate(List[List[torch.Tensor]], []) # type:ignore
for i, (image_height, image_width) in enumerate(image_list.image_sizes):
anchors_in_image = []
for anchors_per_feature_map in anchors_over_all_feature_maps:
anchors_in_image.append(anchors_per_feature_map)
anchors.append(anchors_in_image)
anchors = [torch.cat(anchors_per_image) for anchors_per_image in anchors]
return anchors
def fasterrcnn_resnet50_fpn(
pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, **kwargs
):
if pretrained:
# no need to download the backbone if pretrained is set
pretrained_backbone = False
backbone = resnet_fpn_backbone("resnet50", pretrained_backbone)
anchor_sizes = ((32,), (64,), (128,), (256,), (512,))
aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
rpn_anchor_generator = CachelessAnchorGenerator(anchor_sizes, aspect_ratios)
model = FasterRCNN(
backbone, num_classes, rpn_anchor_generator=rpn_anchor_generator, **kwargs
)
# min_size = 300
# max_size = 400
# anchor_sizes = ((12,), (24,), (48,), (96,), (192,))
# aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
# rpn_anchor_generator = CachelessAnchorGenerator(
# anchor_sizes, aspect_ratios
# )
# model = FasterRCNN(backbone, num_classes, rpn_anchor_generator=rpn_anchor_generator, min_size=min_size, max_size=max_size, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(
model_urls["fasterrcnn_resnet50_fpn_coco"], progress=progress
)
model.load_state_dict(state_dict)
return model
| allenact-main | allenact/utils/cacheless_frcnn.py |
allenact-main | allenact/utils/__init__.py |
|
import copy
import functools
import hashlib
import inspect
import json
import math
import os
import pdb
import random
import subprocess
import sys
import urllib
import urllib.request
from collections import Counter
from contextlib import contextmanager
from typing import Sequence, List, Optional, Tuple, Hashable
import filelock
import numpy as np
import torch
from scipy.special import comb
from allenact.utils.system import get_logger
TABLEAU10_RGB = (
(31, 119, 180),
(255, 127, 14),
(44, 160, 44),
(214, 39, 40),
(148, 103, 189),
(140, 86, 75),
(227, 119, 194),
(127, 127, 127),
(188, 189, 34),
(23, 190, 207),
)
def multiprocessing_safe_download_file_from_url(url: str, save_path: str):
with filelock.FileLock(save_path + ".lock"):
if not os.path.isfile(save_path):
get_logger().info(f"Downloading file from {url} to {save_path}.")
urllib.request.urlretrieve(
url, save_path,
)
else:
get_logger().debug(f"{save_path} exists - skipping download.")
def experimental_api(to_decorate):
"""Decorate a function to note that it is part of the experimental API."""
have_warned = [False]
name = f"{inspect.getmodule(to_decorate).__name__}.{to_decorate.__qualname__}"
if to_decorate.__name__ == "__init__":
name = name.replace(".__init__", "")
@functools.wraps(to_decorate)
def decorated(*args, **kwargs):
if not have_warned[0]:
get_logger().warning(
f"'{name}' is a part of AllenAct's experimental API."
f" This means: (1) there are likely bugs present and (2)"
f" we may remove/change this functionality without warning."
f" USE AT YOUR OWN RISK.",
)
have_warned[0] = True
return to_decorate(*args, **kwargs)
return decorated
def deprecated(to_decorate):
"""Decorate a function to note that it has been deprecated."""
have_warned = [False]
name = f"{inspect.getmodule(to_decorate).__name__}.{to_decorate.__qualname__}"
if to_decorate.__name__ == "__init__":
name = name.replace(".__init__", "")
@functools.wraps(to_decorate)
def decorated(*args, **kwargs):
if not have_warned[0]:
get_logger().warning(
f"'{name}' has been deprecated and will soon be removed from AllenAct's API."
f" Please discontinue your use of this function.",
)
have_warned[0] = True
return to_decorate(*args, **kwargs)
return decorated
class NumpyJSONEncoder(json.JSONEncoder):
"""JSON encoder for numpy objects.
Based off the stackoverflow answer by Jie Yang here: https://stackoverflow.com/a/57915246.
The license for this code is [BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/).
"""
def default(self, obj):
if isinstance(obj, np.void):
return None
elif isinstance(obj, np.bool_):
return bool(obj)
elif isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NumpyJSONEncoder, self).default(obj)
@contextmanager
def tensor_print_options(**print_opts):
torch_print_opts = copy.deepcopy(torch._tensor_str.PRINT_OPTS)
np_print_opts = np.get_printoptions()
try:
torch.set_printoptions(**print_opts)
np.set_printoptions(**print_opts)
yield None
finally:
torch.set_printoptions(**{k: getattr(torch_print_opts, k) for k in print_opts})
np.set_printoptions(**np_print_opts)
def md5_hash_str_as_int(to_hash: str):
return int(hashlib.md5(to_hash.encode()).hexdigest(), 16,)
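# Illustrative sketch (not part of the original module): unlike the built-in
# `hash`, the MD5-based hash above is stable across processes and interpreter
# runs, which makes it useful for reproducible seeding from string identifiers.
def _example_stable_seed(task_id: str) -> int:
    # Reduce the 128-bit digest to a 32-bit-friendly seed value.
    return md5_hash_str_as_int(task_id) % (2 ** 31)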
def get_git_diff_of_project() -> Tuple[str, str]:
short_sha = (
subprocess.check_output(["git", "describe", "--always"]).decode("utf-8").strip()
)
diff = subprocess.check_output(["git", "diff", short_sha]).decode("utf-8")
return short_sha, diff
class HashableDict(dict):
"""A dictionary which is hashable so long as all of its values are
hashable.
A HashableDict object will allow setting / deleting of items until
the first time that `__hash__()` is called on it after which
attempts to set or delete items will throw `RuntimeError`
exceptions.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._hash_has_been_called = False
def __key(self):
return tuple((k, self[k]) for k in sorted(self))
def __hash__(self):
self._hash_has_been_called = True
return hash(self.__key())
def __eq__(self, other):
return self.__key() == other.__key()
def __setitem__(self, *args, **kwargs):
if not self._hash_has_been_called:
return super(HashableDict, self).__setitem__(*args, **kwargs)
raise RuntimeError("Cannot set item in HashableDict after having called hash.")
def __delitem__(self, *args, **kwargs):
if not self._hash_has_been_called:
return super(HashableDict, self).__delitem__(*args, **kwargs)
raise RuntimeError(
"Cannot delete item in HashableDict after having called hash."
)
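# Illustrative sketch (not part of the original module): a HashableDict acts
# like a normal dict until it is first hashed (e.g. when used as a dict key),
# after which any mutation raises a RuntimeError.
def _example_hashable_dict():
    d = HashableDict(a=1, b=2)
    d["c"] = 3  # fine: `d` has not been hashed yet
    keyed = {d: "value"}  # hashing `d` freezes it
    try:
        d["d"] = 4
    except RuntimeError:
        pass  # expected: the dict is frozen once hashed
    return keyed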
def partition_sequence(seq: Sequence, parts: int) -> List:
    assert 0 < parts, f"parts [{parts}] must be > 0"
assert parts <= len(seq), f"parts [{parts}] > len(seq) [{len(seq)}]"
n = len(seq)
quotient = n // parts
remainder = n % parts
counts = [quotient + (i < remainder) for i in range(parts)]
inds = np.cumsum([0] + counts)
return [seq[ind0:ind1] for ind0, ind1 in zip(inds[:-1], inds[1:])]
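def _example_partition_sequence():
    # Illustrative sanity check (not part of the original module): 7 items
    # split into 3 parts yield chunk sizes 3, 2, 2 (remainders go up front).
    assert partition_sequence(list(range(7)), 3) == [[0, 1, 2], [3, 4], [5, 6]]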
def unzip(seq: Sequence[Tuple], n: Optional[int]):
"""Undoes a `zip` operation.
# Parameters
seq: The sequence of tuples that should be unzipped
    n: The number of items in each tuple. This value is optional but is necessary if
        `len(seq) == 0` (as there is no other way to infer how many empty lists were
        zipped together in this case); otherwise it is used for error checking.
    # Returns
    A list of `n` lists where the ith list contains all the ith elements from the
    tuples in the input `seq`.
"""
assert n is not None or len(seq) != 0
if n is None:
n = len(seq[0])
lists = [[] for _ in range(n)]
for t in seq:
assert len(t) == n
for i in range(n):
lists[i].append(t[i])
return lists
def uninterleave(seq: Sequence, parts: int) -> List:
assert 0 < parts <= len(seq)
n = len(seq)
quotient = n // parts
return [
[seq[i + j * parts] for j in range(quotient + 1) if i + j * parts < len(seq)]
for i in range(parts)
]
@functools.lru_cache(10000)
def cached_comb(n: int, m: int):
return comb(n, m)
def expected_max_of_subset_statistic(vals: List[float], m: int):
n = len(vals)
assert m <= n
vals_and_counts = list(Counter([round(val, 8) for val in vals]).items())
vals_and_counts.sort()
count_so_far = 0
logdenom = math.log(comb(n, m))
expected_max = 0.0
    for val, num_occurrences_of_val in vals_and_counts:
        count_so_far += num_occurrences_of_val
        if count_so_far < m:
            continue
        count_where_max = 0
        for i in range(1, min(num_occurrences_of_val, m) + 1):
            count_where_max += cached_comb(num_occurrences_of_val, i) * cached_comb(
                count_so_far - num_occurrences_of_val, m - i
            )
expected_max += val * math.exp(math.log(count_where_max) - logdenom)
return expected_max
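def _example_expected_max_check():
    # Illustrative sanity check (not part of the original module): the closed
    # form above should agree with brute-force enumeration of the maxima over
    # all C(4, 2) = 6 two-element subsets of a small list.
    import itertools

    vals = [1.0, 2.0, 3.0, 4.0]
    brute = sum(max(s) for s in itertools.combinations(vals, 2)) / comb(4, 2)
    assert abs(expected_max_of_subset_statistic(vals, m=2) - brute) < 1e-8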
def bootstrap_max_of_subset_statistic(
vals: List[float], m: int, reps=1000, seed: Optional[int] = None
):
rstate = None
if seed is not None:
rstate = random.getstate()
random.seed(seed)
results = []
for _ in range(reps):
results.append(
expected_max_of_subset_statistic(random.choices(vals, k=len(vals)), m)
)
if seed is not None:
random.setstate(rstate)
return results
def rand_float(low: float, high: float, shape):
assert low <= high
try:
return np.random.rand(*shape) * (high - low) + low
except TypeError as _:
return np.random.rand(shape) * (high - low) + low
def all_unique(seq: Sequence[Hashable]):
seen = set()
for s in seq:
if s in seen:
return False
seen.add(s)
return True
def all_equal(s: Sequence):
if len(s) <= 1:
return True
return all(s[0] == ss for ss in s[1:])
def prepare_locals_for_super(
local_vars, args_name="args", kwargs_name="kwargs", ignore_kwargs=False
):
assert (
args_name not in local_vars
), "`prepare_locals_for_super` does not support {}.".format(args_name)
new_locals = {k: v for k, v in local_vars.items() if k != "self" and "__" not in k}
if kwargs_name in new_locals:
if ignore_kwargs:
new_locals.pop(kwargs_name)
else:
kwargs = new_locals.pop(kwargs_name)
kwargs.update(new_locals)
new_locals = kwargs
return new_locals
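# Illustrative sketch (not part of the original module): the idiom supported by
# `prepare_locals_for_super` is forwarding all of an `__init__`'s arguments to
# `super().__init__` without restating them. Both classes are hypothetical.
class _ExampleBase:
    def __init__(self, a: int, b: int = 2, **kwargs):
        self.a, self.b, self.extra = a, b, kwargs


class _ExampleChild(_ExampleBase):
    def __init__(self, a: int, b: int = 2, **kwargs):
        # `locals()` here is {"self": ..., "a": ..., "b": ..., "kwargs": {...}};
        # the helper drops `self` and merges `kwargs` back into the mapping.
        super().__init__(**prepare_locals_for_super(locals()))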
def partition_limits(num_items: int, num_parts: int):
return (
np.round(np.linspace(0, num_items, num_parts + 1, endpoint=True))
.astype(np.int32)
.tolist()
)
def str2bool(v: str):
v = v.lower().strip()
if v in ("yes", "true", "t", "y", "1"):
return True
elif v in ("no", "false", "f", "n", "0"):
return False
else:
raise ValueError(f"{v} cannot be converted to a bool")
class ForkedPdb(pdb.Pdb):
"""A Pdb subclass that may be used from a forked multiprocessing child."""
def interaction(self, *args, **kwargs):
_stdin = sys.stdin
try:
sys.stdin = open("/dev/stdin")
pdb.Pdb.interaction(self, *args, **kwargs)
finally:
sys.stdin = _stdin
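# Illustrative usage sketch (not part of the original module): a plain
# `pdb.set_trace()` hangs inside forked workers because their stdin is
# detached; inside worker code one would instead write:
#
#     ForkedPdb().set_trace()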
| allenact-main | allenact/utils/misc_utils.py |
from typing import Sequence, Any
import numpy as np
from matplotlib import pyplot as plt, markers
from matplotlib.collections import LineCollection
from allenact.utils.viz_utils import TrajectoryViz
class MultiTrajectoryViz(TrajectoryViz):
def __init__(
self,
path_to_trajectory_prefix: Sequence[str] = ("task_info", "followed_path"),
agent_suffixes: Sequence[str] = ("1", "2"),
label: str = "trajectories",
trajectory_plt_colormaps: Sequence[str] = ("cool", "spring"),
marker_plt_colors: Sequence[Any] = ("blue", "orange"),
axes_equal: bool = True,
**other_base_kwargs,
):
super().__init__(label=label, **other_base_kwargs)
self.path_to_trajectory_prefix = list(path_to_trajectory_prefix)
self.agent_suffixes = list(agent_suffixes)
self.trajectory_plt_colormaps = list(trajectory_plt_colormaps)
self.marker_plt_colors = marker_plt_colors
self.axes_equal = axes_equal
def make_fig(self, episode, episode_id):
# From https://nbviewer.jupyter.org/github/dpsanders/matplotlib-examples/blob/master/colorline.ipynb
def colorline(
x,
y,
z=None,
cmap=plt.get_cmap("cool"),
norm=plt.Normalize(0.0, 1.0),
linewidth=2,
alpha=1.0,
zorder=1,
):
"""Plot a colored line with coordinates x and y.
Optionally specify colors in the array z
Optionally specify a colormap, a norm function and a line width.
"""
def make_segments(x, y):
"""Create list of line segments from x and y coordinates, in
the correct format for LineCollection:
an array of the form numlines x (points per line) x 2
(x and y) array
"""
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
return segments
# Default colors equally spaced on [0,1]:
if z is None:
z = np.linspace(0.0, 1.0, len(x))
# Special case if a single number:
if not hasattr(
z, "__iter__"
): # to check for numerical input -- this is a hack
z = np.array([z])
z = np.asarray(z)
segments = make_segments(x, y)
lc = LineCollection(
segments,
array=z,
cmap=cmap,
norm=norm,
linewidth=linewidth,
alpha=alpha,
zorder=zorder,
)
ax = plt.gca()
ax.add_collection(lc)
return lc
fig, ax = plt.subplots(figsize=self.figsize)
for agent, cmap, marker_color in zip(
self.agent_suffixes, self.trajectory_plt_colormaps, self.marker_plt_colors
):
path = self.path_to_trajectory_prefix[:]
path[-1] = path[-1] + agent
trajectory = self._access(episode, path)
x, y = [], []
for xy in trajectory:
x.append(float(self._access(xy, self.x)))
y.append(float(self._access(xy, self.y)))
colorline(x, y, zorder=1, cmap=cmap)
start_marker = markers.MarkerStyle(marker=self.start_marker_shape)
if self.path_to_rot_degrees is not None:
rot_degrees = float(
self._access(trajectory[0], self.path_to_rot_degrees)
)
if self.adapt_rotation is not None:
rot_degrees = self.adapt_rotation(rot_degrees)
start_marker._transform = start_marker.get_transform().rotate_deg(
rot_degrees
)
ax.scatter(
[x[0]],
[y[0]],
marker=start_marker,
zorder=2,
s=self.start_marker_scale,
color=marker_color,
)
ax.scatter(
[x[-1]], [y[-1]], marker="s", color=marker_color
) # stop (square)
if self.axes_equal:
ax.set_aspect("equal", "box")
ax.set_title(episode_id, fontsize=self.fontsize)
ax.tick_params(axis="x", labelsize=self.fontsize)
ax.tick_params(axis="y", labelsize=self.fontsize)
return fig
| allenact-main | allenact/utils/multi_agent_viz_utils.py |
import abc
import json
import os
import sys
from collections import defaultdict
from typing import (
Dict,
Any,
Union,
Optional,
List,
Tuple,
Sequence,
Callable,
cast,
Set,
)
import numpy as np
from allenact.utils.experiment_utils import Builder
from allenact.utils.tensor_utils import SummaryWriter, tile_images, process_video
try:
# Tensorflow not installed for testing
from tensorflow.core.util import event_pb2
from tensorflow.python.lib.io import tf_record
_TF_AVAILABLE = True
except ImportError as _:
event_pb2 = None
tf_record = None
_TF_AVAILABLE = False
import matplotlib
try:
# When debugging we don't want to use the interactive version of matplotlib
# as it causes all sorts of problems.
# noinspection PyPackageRequirements
import pydevd
matplotlib.use("agg")
except ImportError as _:
pass
import matplotlib.pyplot as plt
import matplotlib.markers as markers
import cv2
from allenact.utils.system import get_logger
class AbstractViz:
def __init__(
self,
label: Optional[str] = None,
vector_task_sources: Sequence[Tuple[str, Dict[str, Any]]] = (),
rollout_sources: Sequence[Union[str, Sequence[str]]] = (),
actor_critic_source: bool = False,
**kwargs, # accepts `max_episodes_in_group`
):
self.label = label
self.vector_task_sources = list(vector_task_sources)
self.rollout_sources = [
[entry] if isinstance(entry, str) else list(entry)
for entry in rollout_sources
]
self.actor_critic_source = actor_critic_source
self.mode: Optional[str] = None
self.path_to_id: Optional[Sequence[str]] = None
self.episode_ids: Optional[List[Sequence[str]]] = None
if "max_episodes_in_group" in kwargs:
self.max_episodes_in_group = kwargs["max_episodes_in_group"]
self.assigned_max_eps_in_group = True
else:
self.max_episodes_in_group = 8
self.assigned_max_eps_in_group = False
@staticmethod
def _source_to_str(source, is_vector_task):
source_type = "vector_task" if is_vector_task else "rollout_or_actor_critic"
return "{}__{}".format(
source_type,
"__{}_sep__".format(source_type).join(["{}".format(s) for s in source]),
)
@staticmethod
def _access(dictionary, path):
path = path[::-1]
while len(path) > 0:
dictionary = dictionary[path.pop()]
return dictionary
def _auto_viz_order(self, task_outputs):
if task_outputs is None:
return None, None
all_episodes = {
self._access(episode, self.path_to_id): episode for episode in task_outputs
}
if self.episode_ids is None:
all_episode_keys = list(all_episodes.keys())
viz_order = []
for page_start in range(
0, len(all_episode_keys), self.max_episodes_in_group
):
viz_order.append(
all_episode_keys[
page_start : page_start + self.max_episodes_in_group
]
)
get_logger().debug("visualizing with order {}".format(viz_order))
else:
viz_order = self.episode_ids
return viz_order, all_episodes
def _setup(
self,
mode: str,
path_to_id: Sequence[str],
episode_ids: Optional[Sequence[Union[Sequence[str], str]]],
max_episodes_in_group: int,
force: bool = False,
):
self.mode = mode
self.path_to_id = list(path_to_id)
if (self.episode_ids is None or force) and episode_ids is not None:
self.episode_ids = (
list(episode_ids)
if not isinstance(episode_ids[0], str)
else [list(cast(List[str], episode_ids))]
)
if not self.assigned_max_eps_in_group or force:
self.max_episodes_in_group = max_episodes_in_group
@abc.abstractmethod
def log(
self,
log_writer: SummaryWriter,
task_outputs: Optional[List[Any]],
render: Optional[Dict[str, List[Dict[str, Any]]]],
num_steps: int,
):
raise NotImplementedError()
class TrajectoryViz(AbstractViz):
def __init__(
self,
path_to_trajectory: Sequence[str] = ("task_info", "followed_path"),
path_to_target_location: Optional[Sequence[str]] = (
"task_info",
"target_position",
),
path_to_x: Sequence[str] = ("x",),
path_to_y: Sequence[str] = ("z",),
path_to_rot_degrees: Optional[Sequence[str]] = ("rotation", "y"),
adapt_rotation: Optional[Callable[[float], float]] = None,
label: str = "trajectory",
figsize: Tuple[float, float] = (2, 2),
fontsize: float = 5,
start_marker_shape: str = r"$\spadesuit$",
start_marker_scale: int = 100,
**other_base_kwargs,
):
super().__init__(label, **other_base_kwargs)
self.path_to_trajectory = list(path_to_trajectory)
self.path_to_target_location = (
list(path_to_target_location)
if path_to_target_location is not None
else None
)
self.adapt_rotation = adapt_rotation
self.x = list(path_to_x)
self.y = list(path_to_y)
self.path_to_rot_degrees = (
list(path_to_rot_degrees) if path_to_rot_degrees is not None else None
)
self.figsize = figsize
self.fontsize = fontsize
self.start_marker_shape = start_marker_shape
self.start_marker_scale = start_marker_scale
def log(
self,
log_writer: SummaryWriter,
task_outputs: Optional[List[Any]],
render: Optional[Dict[str, List[Dict[str, Any]]]],
num_steps: int,
):
viz_order, all_episodes = self._auto_viz_order(task_outputs)
if viz_order is None:
get_logger().debug("trajectory viz returning without visualizing")
return
for page, current_ids in enumerate(viz_order):
figs = []
for episode_id in current_ids:
# assert episode_id in all_episodes
if episode_id not in all_episodes:
get_logger().warning(
"skipping viz for missing episode {}".format(episode_id)
)
continue
figs.append(self.make_fig(all_episodes[episode_id], episode_id))
if len(figs) == 0:
continue
log_writer.add_figure(
"{}/{}_group{}".format(self.mode, self.label, page),
figs,
global_step=num_steps,
)
plt.close(
"all"
) # close all current figures (SummaryWriter already closes all figures we log)
def make_fig(self, episode, episode_id):
# From https://nbviewer.jupyter.org/github/dpsanders/matplotlib-examples/blob/master/colorline.ipynb
def colorline(
x,
y,
z=None,
cmap=plt.get_cmap("cool"),
norm=plt.Normalize(0.0, 1.0),
linewidth=2,
alpha=1.0,
zorder=1,
):
"""Plot a colored line with coordinates x and y.
Optionally specify colors in the array z
Optionally specify a colormap, a norm function and a line width.
"""
def make_segments(x, y):
"""Create list of line segments from x and y coordinates, in
the correct format for LineCollection:
an array of the form numlines x (points per line) x 2
(x and y) array
"""
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
return segments
# Default colors equally spaced on [0,1]:
if z is None:
z = np.linspace(0.0, 1.0, len(x))
# Special case if a single number:
if not hasattr(
z, "__iter__"
): # to check for numerical input -- this is a hack
z = np.array([z])
z = np.asarray(z)
segments = make_segments(x, y)
lc = matplotlib.collections.LineCollection(
segments,
array=z,
cmap=cmap,
norm=norm,
linewidth=linewidth,
alpha=alpha,
zorder=zorder,
)
ax = plt.gca()
ax.add_collection(lc)
return lc
trajectory = self._access(episode, self.path_to_trajectory)
x, y = [], []
for xy in trajectory:
x.append(float(self._access(xy, self.x)))
y.append(float(self._access(xy, self.y)))
fig, ax = plt.subplots(figsize=self.figsize)
colorline(x, y, zorder=1)
start_marker = markers.MarkerStyle(marker=self.start_marker_shape)
if self.path_to_rot_degrees is not None:
rot_degrees = float(self._access(trajectory[0], self.path_to_rot_degrees))
if self.adapt_rotation is not None:
rot_degrees = self.adapt_rotation(rot_degrees)
start_marker._transform = start_marker.get_transform().rotate_deg(
rot_degrees
)
ax.scatter(
[x[0]], [y[0]], marker=start_marker, zorder=2, s=self.start_marker_scale
)
ax.scatter([x[-1]], [y[-1]], marker="s") # stop
if self.path_to_target_location is not None:
target = self._access(episode, self.path_to_target_location)
ax.scatter(
[float(self._access(target, self.x))],
[float(self._access(target, self.y))],
marker="*",
)
ax.set_title(episode_id, fontsize=self.fontsize)
ax.tick_params(axis="x", labelsize=self.fontsize)
ax.tick_params(axis="y", labelsize=self.fontsize)
return fig
class AgentViewViz(AbstractViz):
def __init__(
self,
label: str = "agent_view",
max_clip_length: int = 100, # control memory used when converting groups of images into clips
max_video_length: int = -1, # no limit, if > 0, limit the maximum video length (discard last frames)
vector_task_source: Tuple[str, Dict[str, Any]] = (
"render",
{"mode": "raw_rgb_list"},
),
episode_ids: Optional[Sequence[Union[Sequence[str], str]]] = None,
fps: int = 4,
max_render_size: int = 400,
**other_base_kwargs,
):
super().__init__(
label, vector_task_sources=[vector_task_source], **other_base_kwargs,
)
self.max_clip_length = max_clip_length
self.max_video_length = max_video_length
self.fps = fps
self.max_render_size = max_render_size
self.episode_ids = (
(
list(episode_ids)
if not isinstance(episode_ids[0], str)
else [list(cast(List[str], episode_ids))]
)
if episode_ids is not None
else None
)
def log(
self,
log_writer: SummaryWriter,
task_outputs: Optional[List[Any]],
render: Optional[Dict[str, List[Dict[str, Any]]]],
num_steps: int,
):
if render is None:
return
datum_id = self._source_to_str(self.vector_task_sources[0], is_vector_task=True)
viz_order, _ = self._auto_viz_order(task_outputs)
if viz_order is None:
get_logger().debug("agent view viz returning without visualizing")
return
for page, current_ids in enumerate(viz_order):
images = [] # list of lists of rgb frames
for episode_id in current_ids:
# assert episode_id in render
if episode_id not in render:
get_logger().warning(
"skipping viz for missing episode {}".format(episode_id)
)
continue
images.append(
[
self._overlay_label(step[datum_id], episode_id)
for step in render[episode_id]
]
)
if len(images) == 0:
continue
vid = self.make_vid(images)
if vid is not None:
log_writer.add_vid(
f"{self.mode}/{self.label}_group{page}", vid, global_step=num_steps,
)
@staticmethod
def _overlay_label(
img,
text,
pos=(0, 0),
bg_color=(255, 255, 255),
fg_color=(0, 0, 0),
scale=0.4,
thickness=1,
margin=2,
font_face=cv2.FONT_HERSHEY_SIMPLEX,
):
txt_size = cv2.getTextSize(text, font_face, scale, thickness)
end_x = pos[0] + txt_size[0][0] + margin
end_y = pos[1]
pos = (pos[0], pos[1] + txt_size[0][1] + margin)
cv2.rectangle(img, pos, (end_x, end_y), bg_color, cv2.FILLED)
cv2.putText(
img=img,
text=text,
org=pos,
fontFace=font_face,
fontScale=scale,
color=fg_color,
thickness=thickness,
lineType=cv2.LINE_AA,
)
return img
def make_vid(self, images):
max_length = max([len(ep) for ep in images])
if max_length == 0:
return None
valid_im = None
for ep in images:
if len(ep) > 0:
valid_im = ep[0]
break
frames = []
for it in range(max_length):
current_images = []
for ep in images:
if it < len(ep):
current_images.append(ep[it])
else:
if it == 0:
current_images.append(np.zeros_like(valid_im))
else:
gray = ep[-1].copy()
gray[:, :, 0] = gray[:, :, 2] = gray[:, :, 1]
current_images.append(gray)
frames.append(tile_images(current_images))
return process_video(
frames, self.max_clip_length, self.max_video_length, fps=self.fps
)
class AbstractTensorViz(AbstractViz):
def __init__(
self,
rollout_source: Union[str, Sequence[str]],
label: Optional[str] = None,
figsize: Tuple[float, float] = (3, 3),
**other_base_kwargs,
):
if label is None:
if isinstance(rollout_source, str):
label = rollout_source[:]
else:
label = "/".join(rollout_source)
super().__init__(label, rollout_sources=[rollout_source], **other_base_kwargs)
self.figsize = figsize
self.datum_id = self._source_to_str(
self.rollout_sources[0], is_vector_task=False
)
def log(
self,
log_writer: SummaryWriter,
task_outputs: Optional[List[Any]],
render: Optional[Dict[str, List[Dict[str, Any]]]],
num_steps: int,
):
if render is None:
return
viz_order, _ = self._auto_viz_order(task_outputs)
if viz_order is None:
get_logger().debug("tensor viz returning without visualizing")
return
for page, current_ids in enumerate(viz_order):
figs = []
for episode_id in current_ids:
if episode_id not in render or len(render[episode_id]) == 0:
get_logger().warning(
"skipping viz for missing or 0-length episode {}".format(
episode_id
)
)
continue
episode_src = [
step[self.datum_id]
for step in render[episode_id]
if self.datum_id in step
]
if len(episode_src) > 0:
                    # If the last episode for an inference worker has length 1, there are no captured rollout sources
figs.append(self.make_fig(episode_src, episode_id))
if len(figs) == 0:
continue
log_writer.add_figure(
"{}/{}_group{}".format(self.mode, self.label, page),
figs,
global_step=num_steps,
)
plt.close(
"all"
) # close all current figures (SummaryWriter already closes all figures we log)
@abc.abstractmethod
def make_fig(
self, episode_src: Sequence[np.ndarray], episode_id: str
) -> matplotlib.figure.Figure:
raise NotImplementedError()
class TensorViz1D(AbstractTensorViz):
def __init__(
self,
rollout_source: Union[str, Sequence[str]] = "action_log_probs",
label: Optional[str] = None,
figsize: Tuple[float, float] = (3, 3),
**other_base_kwargs,
):
super().__init__(rollout_source, label, figsize, **other_base_kwargs)
def make_fig(self, episode_src, episode_id):
assert episode_src[0].size == 1
# Concatenate along step axis (0)
seq = np.concatenate(episode_src, axis=0).squeeze() # remove all singleton dims
fig, ax = plt.subplots(figsize=self.figsize)
ax.plot(seq)
ax.set_title(episode_id)
ax.set_aspect("auto")
plt.tight_layout()
return fig
class TensorViz2D(AbstractTensorViz):
def __init__(
self,
rollout_source: Union[str, Sequence[str]] = ("memory_first_last", "rnn"),
label: Optional[str] = None,
figsize: Tuple[float, float] = (10, 10),
fontsize: float = 5,
**other_base_kwargs,
):
super().__init__(rollout_source, label, figsize, **other_base_kwargs)
self.fontsize = fontsize
def make_fig(self, episode_src, episode_id):
# Concatenate along step axis (0)
        seq = np.concatenate(
            episode_src, axis=0
        ).squeeze()  # remove the num_layers dim (assumed to be exactly 1)
        assert len(seq.shape) == 2, "No support for higher dimensions"
# get_logger().debug("basic {} h render {}".format(episode_id, seq[:10, 0]))
fig, ax = plt.subplots(figsize=self.figsize)
ax.matshow(seq)
ax.set_xlabel(episode_id, fontsize=self.fontsize)
ax.tick_params(axis="x", labelsize=self.fontsize)
ax.tick_params(axis="y", labelsize=self.fontsize)
ax.tick_params(bottom=False)
ax.set_aspect("auto")
plt.tight_layout()
return fig
class ActorViz(AbstractViz):
def __init__(
self,
label: str = "action_probs",
action_names_path: Optional[Sequence[str]] = ("task_info", "action_names"),
figsize: Tuple[float, float] = (1, 5),
fontsize: float = 5,
**other_base_kwargs,
):
super().__init__(label, actor_critic_source=True, **other_base_kwargs)
self.action_names_path: Optional[Sequence[str]] = (
list(action_names_path) if action_names_path is not None else None
)
self.figsize = figsize
self.fontsize = fontsize
self.action_names: Optional[List[str]] = None
def log(
self,
log_writer: SummaryWriter,
task_outputs: Optional[List[Any]],
render: Optional[Dict[str, List[Dict[str, Any]]]],
num_steps: int,
):
if render is None:
return
if (
self.action_names is None
and task_outputs is not None
and len(task_outputs) > 0
and self.action_names_path is not None
):
self.action_names = list(
self._access(task_outputs[0], self.action_names_path)
)
viz_order, _ = self._auto_viz_order(task_outputs)
if viz_order is None:
get_logger().debug("actor viz returning without visualizing")
return
for page, current_ids in enumerate(viz_order):
figs = []
for episode_id in current_ids:
# assert episode_id in render
if episode_id not in render:
get_logger().warning(
"skipping viz for missing episode {}".format(episode_id)
)
continue
episode_src = [
step["actor_probs"]
for step in render[episode_id]
if "actor_probs" in step
]
assert len(episode_src) == len(render[episode_id])
figs.append(self.make_fig(episode_src, episode_id))
if len(figs) == 0:
continue
log_writer.add_figure(
"{}/{}_group{}".format(self.mode, self.label, page),
figs,
global_step=num_steps,
)
plt.close(
"all"
) # close all current figures (SummaryWriter already closes all figures we log)
def make_fig(self, episode_src, episode_id):
# Concatenate along step axis (0, reused from kept sampler axis)
mat = np.concatenate(episode_src, axis=0)
fig, ax = plt.subplots(figsize=self.figsize)
ax.matshow(mat)
if self.action_names is not None:
assert len(self.action_names) == mat.shape[-1]
ax.set_xticklabels([""] + self.action_names, rotation="vertical")
ax.set_xlabel(episode_id, fontsize=self.fontsize)
ax.tick_params(axis="x", labelsize=self.fontsize)
ax.tick_params(axis="y", labelsize=self.fontsize)
ax.tick_params(bottom=False)
# Gridlines based on minor ticks
ax.set_yticks(np.arange(-0.5, mat.shape[0], 1), minor=True)
ax.set_xticks(np.arange(-0.5, mat.shape[1], 1), minor=True)
ax.grid(which="minor", color="w", linestyle="-", linewidth=0.05)
ax.tick_params(
axis="both", which="minor", left=False, top=False, right=False, bottom=False
)
ax.set_aspect("auto")
plt.tight_layout()
return fig
class VizSuite(AbstractViz):
def __init__(
self,
episode_ids: Optional[Sequence[Union[Sequence[str], str]]] = None,
path_to_id: Sequence[str] = ("task_info", "id"),
mode: str = "valid",
force_episodes_and_max_episodes_in_group: bool = False,
max_episodes_in_group: int = 8,
*viz,
**kw_viz,
):
super().__init__(max_episodes_in_group=max_episodes_in_group)
self._setup(
mode=mode,
path_to_id=path_to_id,
episode_ids=episode_ids,
max_episodes_in_group=max_episodes_in_group,
)
self.force_episodes_and_max_episodes_in_group = (
force_episodes_and_max_episodes_in_group
)
self.all_episode_ids = self._episodes_set()
self.viz = [
v() if isinstance(v, Builder) else v
for v in viz
if isinstance(v, Builder) or isinstance(v, AbstractViz)
] + [
v() if isinstance(v, Builder) else v
for k, v in kw_viz.items()
if isinstance(v, Builder) or isinstance(v, AbstractViz)
]
self.max_render_size: Optional[int] = None
(
self.rollout_sources,
self.vector_task_sources,
self.actor_critic_source,
) = self._setup_sources()
self.data: Dict[
str, List[Dict]
] = {} # dict of episode id to list of dicts with collected data
self.last_it2epid: List[str] = []
def _setup_sources(self):
rollout_sources, vector_task_sources = [], []
labels = []
actor_critic_source = False
new_episodes = []
for v in self.viz:
labels.append(v.label)
rollout_sources += v.rollout_sources
vector_task_sources += v.vector_task_sources
actor_critic_source |= v.actor_critic_source
if (
v.episode_ids is not None
and not self.force_episodes_and_max_episodes_in_group
):
cur_episodes = self._episodes_set(v.episode_ids)
for ep in cur_episodes:
if (
self.all_episode_ids is not None
and ep not in self.all_episode_ids
):
new_episodes.append(ep)
get_logger().info(
"Added new episode {} from {}".format(ep, v.label)
)
v._setup(
mode=self.mode,
path_to_id=self.path_to_id,
episode_ids=self.episode_ids,
max_episodes_in_group=self.max_episodes_in_group,
force=self.force_episodes_and_max_episodes_in_group,
)
if isinstance(v, AgentViewViz):
self.max_render_size = v.max_render_size
get_logger().info("Logging labels {}".format(labels))
if len(new_episodes) > 0:
get_logger().info("Added new episodes {}".format(new_episodes))
self.episode_ids.append(new_episodes) # new group with all added episodes
self.all_episode_ids = self._episodes_set()
rol_flat = {json.dumps(src, sort_keys=True): src for src in rollout_sources}
vt_flat = {json.dumps(src, sort_keys=True): src for src in vector_task_sources}
rol_keys = list(set(rol_flat.keys()))
vt_keys = list(set(vt_flat.keys()))
return (
[rol_flat[k] for k in rol_keys],
[vt_flat[k] for k in vt_keys],
actor_critic_source,
)
def _episodes_set(self, episode_list=None) -> Optional[Set[str]]:
source = self.episode_ids if episode_list is None else episode_list
if source is None:
return None
all_episode_ids: List[str] = []
for group in source:
all_episode_ids += group
return set(all_episode_ids)
def empty(self):
return len(self.data) == 0
def _update(self, collected_data):
for epid in collected_data:
assert epid in self.data
self.data[epid][-1].update(collected_data[epid])
def _append(self, vector_task_data):
for epid in vector_task_data:
if epid in self.data:
self.data[epid].append(vector_task_data[epid])
else:
self.data[epid] = [vector_task_data[epid]]
def _collect_actor_critic(self, actor_critic):
actor_critic_data = {
epid: dict()
for epid in self.last_it2epid
if self.all_episode_ids is None or epid in self.all_episode_ids
}
if len(actor_critic_data) > 0 and actor_critic is not None:
if self.actor_critic_source:
# TODO this code only supports Discrete action spaces!
probs = (
actor_critic.distributions.probs
) # step (=1) x sampler x agent (=1) x action
values = actor_critic.values # step x sampler x agent x 1
for it, epid in enumerate(self.last_it2epid):
if epid in actor_critic_data:
# Select current episode (sampler axis will be reused as step axis)
prob = (
# probs.narrow(dim=0, start=it, length=1) # works for sampler x action
probs.narrow(
dim=1, start=it, length=1
) # step x sampler x agent x action -> step x 1 x agent x action
.squeeze(
0
) # step x 1 x agent x action -> 1 x agent x action
# .squeeze(-2) # 1 x agent x action -> 1 x action
.to("cpu")
.detach()
.numpy()
)
assert "actor_probs" not in actor_critic_data[epid]
actor_critic_data[epid]["actor_probs"] = prob
val = (
# values.narrow(dim=0, start=it, length=1) # works for sampler x 1
values.narrow(
dim=1, start=it, length=1
) # step x sampler x agent x 1 -> step x 1 x agent x 1
.squeeze(0) # step x 1 x agent x 1 -> 1 x agent x 1
# .squeeze(-2) # 1 x agent x 1 -> 1 x 1
.to("cpu")
.detach()
.numpy()
)
assert "critic_value" not in actor_critic_data[epid]
actor_critic_data[epid]["critic_value"] = val
self._update(actor_critic_data)
def _collect_rollout(self, rollout, alive):
alive_set = set(alive)
assert len(alive_set) == len(alive)
alive_it2epid = [
epid for it, epid in enumerate(self.last_it2epid) if it in alive_set
]
rollout_data = {
epid: dict()
for epid in alive_it2epid
if self.all_episode_ids is None or epid in self.all_episode_ids
}
if len(rollout_data) > 0 and rollout is not None:
for source in self.rollout_sources:
datum_id = self._source_to_str(source, is_vector_task=False)
storage, path = source[0], source[1:]
# Access storage
res = getattr(rollout, storage)
episode_dim = rollout.dim_names.index("sampler")
# Access sub-storage if path not empty
if len(path) > 0:
if storage == "memory_first_last":
storage = "memory"
flattened_name = rollout.unflattened_to_flattened[storage][
tuple(path)
]
# for path_step in path:
# res = res[path_step]
res = res[flattened_name]
res, episode_dim = res
if rollout.step > 0:
if rollout.step > res.shape[0]:
# e.g. rnn with only latest memory saved
rollout_step = res.shape[0] - 1
else:
rollout_step = rollout.step - 1
else:
if rollout.num_steps - 1 < res.shape[0]:
rollout_step = rollout.num_steps - 1
else:
# e.g. rnn with only latest memory saved
rollout_step = res.shape[0] - 1
# Select latest step
res = res.narrow(
dim=0, start=rollout_step, length=1, # step dimension
) # 1 x ... x sampler x ...
# get_logger().debug("basic collect h {}".format(res[..., 0]))
for it, epid in enumerate(alive_it2epid):
if epid in rollout_data:
# Select current episode and remove episode/sampler axis
datum = (
res.narrow(dim=episode_dim, start=it, length=1)
.squeeze(axis=episode_dim)
.to("cpu")
.detach()
.numpy()
) # 1 x ... (no sampler dim)
# get_logger().debug("basic collect ep {} h {}".format(epid, res[..., 0]))
assert datum_id not in rollout_data[epid]
rollout_data[epid][
datum_id
] = datum.copy() # copy needed when running on CPU!
self._update(rollout_data)
def _collect_vector_task(self, vector_task):
it2epid = [
self._access(info, self.path_to_id[1:])
for info in vector_task.attr("task_info")
]
# get_logger().debug("basic epids {}".format(it2epid))
def limit_spatial_res(data: np.ndarray, max_size=400):
if data.shape[0] <= max_size and data.shape[1] <= max_size:
return data
else:
f = float(max_size) / max(data.shape[0], data.shape[1])
size = (int(data.shape[1] * f), int(data.shape[0] * f))
                return cv2.resize(data, size, fx=0, fy=0, interpolation=cv2.INTER_AREA)
vector_task_data = {
epid: dict()
for epid in it2epid
if self.all_episode_ids is None or epid in self.all_episode_ids
}
if len(vector_task_data) > 0:
for (
source
) in self.vector_task_sources: # these are observations for next step!
datum_id = self._source_to_str(source, is_vector_task=True)
method, kwargs = source
res = getattr(vector_task, method)(**kwargs)
if not isinstance(res, Sequence):
assert len(it2epid) == 1
res = [res]
if method == "render":
res = [limit_spatial_res(r, self.max_render_size) for r in res]
assert len(res) == len(it2epid)
for datum, epid in zip(res, it2epid):
if epid in vector_task_data:
assert datum_id not in vector_task_data[epid]
vector_task_data[epid][datum_id] = datum
self._append(vector_task_data)
return it2epid
# to be called by engine
def collect(self, vector_task=None, alive=None, rollout=None, actor_critic=None):
if actor_critic is not None:
# in phase with last_it2epid
try:
self._collect_actor_critic(actor_critic)
except (AssertionError, RuntimeError):
get_logger().debug(
msg=f"Failed collect (actor_critic) for viz due to exception:",
exc_info=sys.exc_info(),
)
get_logger().error(f"Failed collect (actor_critic) for viz")
if alive is not None and rollout is not None:
# in phase with last_it2epid that stay alive
try:
self._collect_rollout(rollout=rollout, alive=alive)
except (AssertionError, RuntimeError):
get_logger().debug(
msg=f"Failed collect (rollout) for viz due to exception:",
exc_info=sys.exc_info(),
)
get_logger().error(f"Failed collect (rollout) for viz")
# Always call this one last!
if vector_task is not None:
# in phase with identifiers of current episodes from vector_task
try:
self.last_it2epid = self._collect_vector_task(vector_task)
except (AssertionError, RuntimeError):
get_logger().debug(
msg=f"Failed collect (vector_task) for viz due to exception:",
exc_info=sys.exc_info(),
)
get_logger().error(f"Failed collect (vector_task) for viz")
def read_and_reset(self) -> Dict[str, List[Dict[str, Any]]]:
res = self.data
self.data = {}
# get_logger().debug("Returning episodes {}".format(list(res.keys())))
return res
# to be called by logger
def log(
self,
log_writer: SummaryWriter,
task_outputs: Optional[List[Any]],
render: Optional[Dict[str, List[Dict[str, Any]]]],
num_steps: int,
):
for v in self.viz:
try:
v.log(log_writer, task_outputs, render, num_steps)
except (AssertionError, RuntimeError):
get_logger().debug(
msg=f"Dropped {v.label} viz due to exception:",
exc_info=sys.exc_info(),
)
get_logger().error(f"Dropped {v.label} viz")
class TensorboardSummarizer:
"""Assumption: tensorboard tags/labels include a valid/test/train substr indicating the data modality"""
def __init__(
self,
experiment_to_train_events_paths_map: Dict[str, Sequence[str]],
experiment_to_test_events_paths_map: Dict[str, Sequence[str]],
eval_min_mega_steps: Optional[Sequence[float]] = None,
tensorboard_tags_to_labels_map: Optional[Dict[str, str]] = None,
tensorboard_output_summary_folder: str = "tensorboard_plotter_output",
):
if not _TF_AVAILABLE:
raise ImportError(
"Please install tensorflow e.g. with `pip install tensorflow` to enable TensorboardSummarizer"
)
self.experiment_to_train_events_paths_map = experiment_to_train_events_paths_map
self.experiment_to_test_events_paths_map = experiment_to_test_events_paths_map
train_experiments = set(list(experiment_to_train_events_paths_map.keys()))
test_experiments = set(list(experiment_to_test_events_paths_map.keys()))
assert (train_experiments - test_experiments) in [set(), train_experiments,], (
f"`experiment_to_test_events_paths_map` must have identical keys (experiment names) to those"
f" in `experiment_to_train_events_paths_map`, or be empty."
f" Got {train_experiments} train keys and {test_experiments} test keys."
)
self.eval_min_mega_steps = eval_min_mega_steps
self.tensorboard_tags_to_labels_map = tensorboard_tags_to_labels_map
if self.tensorboard_tags_to_labels_map is not None:
for tag, label in self.tensorboard_tags_to_labels_map.items():
assert ("valid" in label) + ("train" in label) + (
"test" in label
) == 1, (
f"One (and only one) of {'train', 'valid', 'test'} must be part of the label for"
f" tag {tag} ({label} given)."
)
self.tensorboard_output_summary_folder = tensorboard_output_summary_folder
self.train_data = self._read_tensorflow_experiment_events(
self.experiment_to_train_events_paths_map
)
self.test_data = self._read_tensorflow_experiment_events(
self.experiment_to_test_events_paths_map
)
def _read_tensorflow_experiment_events(
self, experiment_to_events_paths_map, skip_map=False
):
def my_summary_iterator(path):
try:
for r in tf_record.tf_record_iterator(path):
yield event_pb2.Event.FromString(r)
except IOError:
get_logger().debug(f"IOError for path {path}")
return None
collected_data = {}
for experiment_name, path_list in experiment_to_events_paths_map.items():
experiment_data = defaultdict(list)
for filename_path in path_list:
for event in my_summary_iterator(filename_path):
if event is None:
break
for value in event.summary.value:
if self.tensorboard_tags_to_labels_map is None or skip_map:
label = value.tag
elif value.tag in self.tensorboard_tags_to_labels_map:
label = self.tensorboard_tags_to_labels_map[value.tag]
else:
continue
experiment_data[label].append(
dict(
score=value.simple_value,
time=event.wall_time,
steps=event.step,
)
)
collected_data[experiment_name] = experiment_data
return collected_data
def _eval_vs_train_time_steps(self, eval_data, train_data):
min_mega_steps = self.eval_min_mega_steps
if min_mega_steps is None:
min_mega_steps = [(item["steps"] - 1) / 1e6 for item in eval_data]
scores, times, steps = [], [], []
i, t, last_i = 0, 0, -1
while len(times) < len(min_mega_steps):
while eval_data[i]["steps"] / min_mega_steps[len(times)] / 1e6 < 1:
i += 1
while train_data[t]["steps"] / min_mega_steps[len(times)] / 1e6 < 1:
t += 1
# step might be missing in valid! (and would duplicate future value at previous steps!)
# solution: move forward last entry's time if no change in i (instead of new entry)
if i == last_i:
times[-1] = train_data[t]["time"]
else:
scores.append(eval_data[i]["score"])
times.append(train_data[t]["time"])
steps.append(eval_data[i]["steps"])
last_i = i
scores.insert(0, train_data[0]["score"])
times.insert(0, train_data[0]["time"])
steps.insert(0, 0)
return scores, times, steps
def _train_vs_time_steps(self, train_data):
last_eval_step = (
self.eval_min_mega_steps[-1] * 1e6
if self.eval_min_mega_steps is not None
else float("inf")
)
scores = [train_data[0]["score"]]
times = [train_data[0]["time"]]
steps = [train_data[0]["steps"]]
t = 1
while steps[-1] < last_eval_step and t < len(train_data):
scores.append(train_data[t]["score"])
times.append(train_data[t]["time"])
steps.append(train_data[t]["steps"])
t += 1
return scores, times, steps
def make_tensorboard_summary(self):
all_experiments = list(self.experiment_to_train_events_paths_map.keys())
for experiment_name in all_experiments:
summary_writer = SummaryWriter(
os.path.join(self.tensorboard_output_summary_folder, experiment_name)
)
test_labels = (
sorted(list(self.test_data[experiment_name].keys()))
if len(self.test_data) > 0
else []
)
for test_label in test_labels:
train_label = test_label.replace("valid", "test").replace(
"test", "train"
)
if train_label not in self.train_data[experiment_name]:
print(
f"Missing matching 'train' label {train_label} for eval label {test_label}. Skipping"
)
continue
train_data = self.train_data[experiment_name][train_label]
test_data = self.test_data[experiment_name][test_label]
scores, times, steps = self._eval_vs_train_time_steps(
test_data, train_data
)
for score, t, step in zip(scores, times, steps):
summary_writer.add_scalar(
test_label, score, global_step=step, walltime=t
)
valid_labels = sorted(
[
key
for key in list(self.train_data[experiment_name].keys())
if "valid" in key
]
)
for valid_label in valid_labels:
train_label = valid_label.replace("valid", "train")
assert (
train_label in self.train_data[experiment_name]
), f"Missing matching 'train' label {train_label} for valid label {valid_label}"
train_data = self.train_data[experiment_name][train_label]
valid_data = self.train_data[experiment_name][valid_label]
scores, times, steps = self._eval_vs_train_time_steps(
valid_data, train_data
)
for score, t, step in zip(scores, times, steps):
summary_writer.add_scalar(
valid_label, score, global_step=step, walltime=t
)
train_labels = sorted(
[
key
for key in list(self.train_data[experiment_name].keys())
if "train" in key
]
)
for train_label in train_labels:
scores, times, steps = self._train_vs_time_steps(
self.train_data[experiment_name][train_label]
)
for score, t, step in zip(scores, times, steps):
summary_writer.add_scalar(
train_label, score, global_step=step, walltime=t
)
summary_writer.close()
| allenact-main | allenact/utils/viz_utils.py |
"""Functions used to manipulate pytorch tensors and numpy arrays."""
import numbers
import os
import tempfile
from collections import defaultdict
from typing import List, Dict, Optional, DefaultDict, Union, Any, cast
import PIL
import numpy as np
import torch
from PIL import Image
from moviepy import editor as mpy
from moviepy.editor import concatenate_videoclips
from tensorboardX import SummaryWriter as TBXSummaryWriter, summary as tbxsummary
from tensorboardX.proto.summary_pb2 import Summary as TBXSummary
# noinspection PyProtectedMember
from tensorboardX.utils import _prepare_video as tbx_prepare_video
from tensorboardX.x2num import make_np as tbxmake_np
from allenact.utils.system import get_logger
def to_device_recursively(
input: Any, device: Union[str, torch.device, int], inplace: bool = True
):
"""Recursively places tensors on the appropriate device."""
if input is None:
return input
elif isinstance(input, torch.Tensor):
return input.to(device) # type: ignore
elif isinstance(input, tuple):
return tuple(
to_device_recursively(input=subinput, device=device, inplace=inplace)
for subinput in input
)
elif isinstance(input, list):
if inplace:
for i in range(len(input)):
input[i] = to_device_recursively(
input=input[i], device=device, inplace=inplace
)
return input
else:
return [
to_device_recursively(input=subpart, device=device, inplace=inplace)
for subpart in input
]
elif isinstance(input, dict):
if inplace:
for key in input:
input[key] = to_device_recursively(
input=input[key], device=device, inplace=inplace
)
return input
else:
return {
k: to_device_recursively(input=input[k], device=device, inplace=inplace)
for k in input
}
    elif isinstance(input, set):
        if inplace:
            for element in list(input):
                input.remove(element)
                input.add(
                    to_device_recursively(element, device=device, inplace=inplace)
                )
            return input  # the in-place branch must still return the (mutated) set
        else:
            return set(
                to_device_recursively(k, device=device, inplace=inplace) for k in input
            )
elif isinstance(input, np.ndarray) or np.isscalar(input) or isinstance(input, str):
return input
elif hasattr(input, "to"):
# noinspection PyCallingNonCallable
return input.to(device=device, inplace=inplace)
else:
raise NotImplementedError(
"Sorry, value of type {} is not supported.".format(type(input))
)
def detach_recursively(input: Any, inplace=True):
"""Recursively detaches tensors in some data structure from their
computation graph."""
if input is None:
return input
elif isinstance(input, torch.Tensor):
return input.detach()
elif isinstance(input, tuple):
return tuple(
detach_recursively(input=subinput, inplace=inplace) for subinput in input
)
elif isinstance(input, list):
if inplace:
for i in range(len(input)):
input[i] = detach_recursively(input[i], inplace=inplace)
return input
else:
return [
detach_recursively(input=subinput, inplace=inplace)
for subinput in input
]
elif isinstance(input, dict):
if inplace:
for key in input:
input[key] = detach_recursively(input[key], inplace=inplace)
return input
else:
return {k: detach_recursively(input[k], inplace=inplace) for k in input}
    elif isinstance(input, set):
        if inplace:
            for element in list(input):
                input.remove(element)
                input.add(detach_recursively(element, inplace=inplace))
            return input  # the in-place branch must still return the (mutated) set
        else:
            return set(detach_recursively(k, inplace=inplace) for k in input)
elif isinstance(input, np.ndarray) or np.isscalar(input) or isinstance(input, str):
return input
elif hasattr(input, "detach_recursively"):
# noinspection PyCallingNonCallable
return input.detach_recursively(inplace=inplace)
else:
raise NotImplementedError(
"Sorry, hidden state of type {} is not supported.".format(type(input))
)
def batch_observations(
observations: List[Dict], device: Optional[torch.device] = None
) -> Dict[str, Union[Dict, torch.Tensor]]:
"""Transpose a batch of observation dicts to a dict of batched
observations.
# Arguments
observations : List of dicts of observations.
device : The torch.device to put the resulting tensors on.
Will not move the tensors if None.
# Returns
Transposed dict of lists of observations.
"""
def dict_from_observation(
observation: Dict[str, Any]
) -> Dict[str, Union[Dict, List]]:
batch_dict: DefaultDict = defaultdict(list)
for sensor in observation:
if isinstance(observation[sensor], Dict):
batch_dict[sensor] = dict_from_observation(observation[sensor])
else:
batch_dict[sensor].append(to_tensor(observation[sensor]))
return batch_dict
def fill_dict_from_observations(
input_batch: Any, observation: Dict[str, Any]
) -> None:
for sensor in observation:
if isinstance(observation[sensor], Dict):
fill_dict_from_observations(input_batch[sensor], observation[sensor])
else:
input_batch[sensor].append(to_tensor(observation[sensor]))
def dict_to_batch(input_batch: Any) -> None:
for sensor in input_batch:
if isinstance(input_batch[sensor], Dict):
dict_to_batch(input_batch[sensor])
else:
input_batch[sensor] = torch.stack(
[batch.to(device=device) for batch in input_batch[sensor]], dim=0
)
if len(observations) == 0:
return cast(Dict[str, Union[Dict, torch.Tensor]], observations)
batch = dict_from_observation(observations[0])
for obs in observations[1:]:
fill_dict_from_observations(batch, obs)
dict_to_batch(batch)
return cast(Dict[str, Union[Dict, torch.Tensor]], batch)
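def _example_batch_observations():
    # Illustrative sketch (not part of the original module): two per-step
    # observation dicts are transposed into one dict of stacked tensors while
    # preserving nesting. The sensor names ("rgb", "goal") are hypothetical.
    obs0 = {"rgb": np.zeros((2, 2, 3), dtype=np.float32), "goal": {"idx": 1}}
    obs1 = {"rgb": np.ones((2, 2, 3), dtype=np.float32), "goal": {"idx": 3}}
    batch = batch_observations([obs0, obs1])
    assert batch["rgb"].shape == (2, 2, 2, 3)  # new batch dim is prepended
    assert batch["goal"]["idx"].tolist() == [1, 3]  # nested dicts preserved
    return batch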
def to_tensor(v) -> torch.Tensor:
"""Return a torch.Tensor version of the input.
# Parameters
v : Input values that can be coerced into being a tensor.
# Returns
A tensor version of the input.
"""
if torch.is_tensor(v):
return v
elif isinstance(v, np.ndarray):
return torch.from_numpy(v)
else:
return torch.tensor(
v, dtype=torch.int64 if isinstance(v, numbers.Integral) else torch.float
)
def tile_images(images: List[np.ndarray]) -> np.ndarray:
"""Tile multiple images into single image.
# Parameters
images : list of images where each image has dimension
(height x width x channels)
# Returns
Tiled image (new_height x width x channels).
"""
assert len(images) > 0, "empty list of images"
np_images = np.asarray(images)
n_images, height, width, n_channels = np_images.shape
new_height = int(np.ceil(np.sqrt(n_images)))
new_width = int(np.ceil(float(n_images) / new_height))
# pad with empty images to complete the rectangle
np_images = np.array(
images + [images[0] * 0 for _ in range(n_images, new_height * new_width)]
)
# img_HWhwc
out_image = np_images.reshape((new_height, new_width, height, width, n_channels))
# img_HhWwc
out_image = out_image.transpose(0, 2, 1, 3, 4)
# img_Hh_Ww_c
out_image = out_image.reshape((new_height * height, new_width * width, n_channels))
return out_image
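def _example_tile_images():
    # Illustrative sanity check (not part of the original module): 5 frames of
    # shape (4, 6, 3) are padded with blank frames to fill a 3 x 2 grid,
    # producing one (12, 12, 3) image.
    frames = [np.full((4, 6, 3), i, dtype=np.uint8) for i in range(5)]
    tiled = tile_images(frames)
    assert tiled.shape == (3 * 4, 2 * 6, 3)
    return tiled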
class SummaryWriter(TBXSummaryWriter):
@staticmethod
def _video(tag, vid):
# noinspection PyProtectedMember
tag = tbxsummary._clean_tag(tag)
return TBXSummary(value=[TBXSummary.Value(tag=tag, image=vid)])
def add_vid(self, tag, vid, global_step=None, walltime=None):
self._get_file_writer().add_summary(
self._video(tag, vid), global_step, walltime
)
def add_image(
self, tag, img_tensor, global_step=None, walltime=None, dataformats="CHW"
):
self._get_file_writer().add_summary(
image(tag, img_tensor, dataformats=dataformats), global_step, walltime
)
def image(tag, tensor, rescale=1, dataformats="CHW"):
"""Outputs a `Summary` protocol buffer with images. The summary has up to
`max_images` summary values containing images. The images are built from
`tensor` which must be 3-D with shape `[height, width, channels]` and where
`channels` can be:
* 1: `tensor` is interpreted as Grayscale.
* 3: `tensor` is interpreted as RGB.
* 4: `tensor` is interpreted as RGBA.
# Parameters
tag: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A 3-D `uint8` or `float32` `Tensor` of shape `[height, width,
channels]` where `channels` is 1, 3, or 4.
'tensor' can either have values in [0, 1] (float32) or [0, 255] (uint8).
The image() function will scale the image values to [0, 255] by applying
a scale factor of either 1 (uint8) or 255 (float32).
rescale: The scale.
dataformats: Input image shape format.
# Returns
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
# noinspection PyProtectedMember
tag = tbxsummary._clean_tag(tag)
tensor = tbxmake_np(tensor)
tensor = convert_to_HWC(tensor, dataformats)
# Do not assume that user passes in values in [0, 255], use data type to detect
if tensor.dtype != np.uint8:
tensor = (tensor * 255.0).astype(np.uint8)
img = tbxsummary.make_image(tensor, rescale=rescale)
return TBXSummary(value=[TBXSummary.Value(tag=tag, image=img)])
def convert_to_HWC(tensor, input_format): # tensor: numpy array
assert len(set(input_format)) == len(
input_format
), "You can not use the same dimension shordhand twice. \
input_format: {}".format(
input_format
)
assert len(tensor.shape) == len(
input_format
), "size of input tensor and input format are different. \
tensor shape: {}, input_format: {}".format(
tensor.shape, input_format
)
input_format = input_format.upper()
if len(input_format) == 4:
index = [input_format.find(c) for c in "NCHW"]
tensor_NCHW = tensor.transpose(index)
tensor_CHW = make_grid(tensor_NCHW)
# noinspection PyTypeChecker
return tensor_CHW.transpose(1, 2, 0)
if len(input_format) == 3:
index = [input_format.find(c) for c in "HWC"]
tensor_HWC = tensor.transpose(index)
if tensor_HWC.shape[2] == 1:
tensor_HWC = np.concatenate([tensor_HWC, tensor_HWC, tensor_HWC], 2)
return tensor_HWC
if len(input_format) == 2:
index = [input_format.find(c) for c in "HW"]
tensor = tensor.transpose(index)
tensor = np.stack([tensor, tensor, tensor], 2)
return tensor
def make_grid(I, ncols=8):
# I: N1HW or N3HW
assert isinstance(I, np.ndarray), "plugin error, should pass numpy array here"
if I.shape[1] == 1:
I = np.concatenate([I, I, I], 1)
    assert I.ndim == 4 and (I.shape[1] == 3 or I.shape[1] == 4)  # N3HW or N4HW
nimg = I.shape[0]
H = I.shape[2]
W = I.shape[3]
ncols = min(nimg, ncols)
nrows = int(np.ceil(float(nimg) / ncols))
canvas = np.zeros((I.shape[1], H * nrows, W * ncols), dtype=I.dtype)
i = 0
for y in range(nrows):
for x in range(ncols):
if i >= nimg:
break
canvas[:, y * H : (y + 1) * H, x * W : (x + 1) * W] = I[i]
i = i + 1
return canvas
def tensor_to_video(tensor, fps=4):
tensor = tbxmake_np(tensor)
tensor = tbx_prepare_video(tensor)
# If user passes in uint8, then we don't need to rescale by 255
if tensor.dtype != np.uint8:
tensor = (tensor * 255.0).astype(np.uint8)
return tbxsummary.make_video(tensor, fps)
def tensor_to_clip(tensor, fps=4):
tensor = tbxmake_np(tensor)
tensor = tbx_prepare_video(tensor)
# If user passes in uint8, then we don't need to rescale by 255
if tensor.dtype != np.uint8:
tensor = (tensor * 255.0).astype(np.uint8)
t, h, w, c = tensor.shape
clip = mpy.ImageSequenceClip(list(tensor), fps=fps)
return clip, (h, w, c)
def clips_to_video(clips, h, w, c):
# encode sequence of images into gif string
clip = concatenate_videoclips(clips)
filename = tempfile.NamedTemporaryFile(suffix=".gif", delete=False).name
    # For moviepy >= 1.0.0, pass logger=None to suppress console output.
try:
clip.write_gif(filename, verbose=False, logger=None)
except TypeError:
get_logger().warning(
"Upgrade to moviepy >= 1.0.0 to suppress the progress bar."
)
clip.write_gif(filename, verbose=False)
with open(filename, "rb") as f:
tensor_string = f.read()
try:
os.remove(filename)
except OSError:
get_logger().warning("The temporary file used by moviepy cannot be deleted.")
return TBXSummary.Image(
height=h, width=w, colorspace=c, encoded_image_string=tensor_string
)
def process_video(render, max_clip_len=500, max_video_len=-1, fps=4):
output = []
hwc = None
if len(render) > 0:
if len(render) > max_video_len > 0:
get_logger().warning(
"Clipping video to first {} frames out of {} original frames".format(
max_video_len, len(render)
)
)
render = render[:max_video_len]
for clipstart in range(0, len(render), max_clip_len):
clip = render[clipstart : clipstart + max_clip_len]
try:
current = np.stack(clip, axis=0) # T, H, W, C
current = current.transpose((0, 3, 1, 2)) # T, C, H, W
current = np.expand_dims(current, axis=0) # 1, T, C, H, W
current, cur_hwc = tensor_to_clip(current, fps=fps)
if hwc is None:
hwc = cur_hwc
else:
assert (
hwc == cur_hwc
), "Inconsistent clip shape: previous {} current {}".format(
hwc, cur_hwc
)
output.append(current)
except MemoryError:
get_logger().error(
"Skipping video due to memory error with clip of length {}".format(
len(clip)
)
)
return None
else:
get_logger().warning("Calling process_video with 0 frames")
return None
assert len(output) > 0, "No clips to concatenate"
assert hwc is not None, "No tensor dims assigned"
try:
result = clips_to_video(output, *hwc)
except MemoryError:
get_logger().error("Skipping video due to memory error calling clips_to_video")
result = None
return result
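def _example_process_video():
    # Illustrative sketch (not part of the original module): ten small RGB
    # frames are split into clips of at most 5 frames each and encoded into a
    # gif summary suitable for `SummaryWriter.add_vid`.
    frames = [np.zeros((8, 8, 3), dtype=np.uint8) for _ in range(10)]
    return process_video(frames, max_clip_len=5, fps=4)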
class ScaleBothSides(object):
"""Rescales the input PIL.Image to the given 'width' and `height`.
Attributes
width: new width
height: new height
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, width: int, height: int, interpolation=Image.BILINEAR):
self.width = width
self.height = height
self.interpolation = interpolation
def __call__(self, img: PIL.Image) -> PIL.Image:
return img.resize((self.width, self.height), self.interpolation)
| allenact-main | allenact/utils/tensor_utils.py |
import math
from typing import Dict, Any, Union, Callable, Optional
from allenact.utils.system import get_logger
def pos_to_str_for_cache(pos: Dict[str, float]) -> str:
return "_".join([str(pos["x"]), str(pos["y"]), str(pos["z"])])
def str_to_pos_for_cache(s: str) -> Dict[str, float]:
split = s.split("_")
return {"x": float(split[0]), "y": float(split[1]), "z": float(split[2])}
def get_distance(
cache: Dict[str, Any], pos: Dict[str, float], target: Dict[str, float]
) -> float:
pos = {
"x": 0.25 * math.ceil(pos["x"] / 0.25),
"y": pos["y"],
"z": 0.25 * math.ceil(pos["z"] / 0.25),
}
sp = _get_shortest_path_distance_from_cache(cache, pos, target)
if sp == -1.0:
pos = {
"x": 0.25 * math.floor(pos["x"] / 0.25),
"y": pos["y"],
"z": 0.25 * math.ceil(pos["z"] / 0.25),
}
sp = _get_shortest_path_distance_from_cache(cache, pos, target)
if sp == -1.0:
pos = {
"x": 0.25 * math.ceil(pos["x"] / 0.25),
"y": pos["y"],
"z": 0.25 * math.floor(pos["z"] / 0.25),
}
sp = _get_shortest_path_distance_from_cache(cache, pos, target)
if sp == -1.0:
pos = {
"x": 0.25 * math.floor(pos["x"] / 0.25),
"y": pos["y"],
"z": 0.25 * math.floor(pos["z"] / 0.25),
}
sp = _get_shortest_path_distance_from_cache(cache, pos, target)
if sp == -1.0:
pos = find_nearest_point_in_cache(cache, pos)
sp = _get_shortest_path_distance_from_cache(cache, pos, target)
if sp == -1.0:
target = find_nearest_point_in_cache(cache, target)
sp = _get_shortest_path_distance_from_cache(cache, pos, target)
if sp == -1.0:
print("Your cache is incomplete!")
exit()
return sp
def get_distance_to_object(
cache: Dict[str, Any], pos: Dict[str, float], target_class: str
) -> float:
dists = []
weights = []
for rounder_func_0 in [math.ceil, math.floor]:
for rounder_func_1 in [math.ceil, math.floor]:
rounded_pos = {
"x": 0.25 * rounder_func_0(pos["x"] / 0.25),
"y": pos["y"],
"z": 0.25 * rounder_func_1(pos["z"] / 0.25),
}
dist = _get_shortest_path_distance_to_object_from_cache(
cache, rounded_pos, target_class
)
if dist >= 0:
dists.append(dist)
                weights.append(
                    1.0
                    / (
                        math.sqrt(
                            (pos["x"] - rounded_pos["x"]) ** 2
                            + (pos["z"] - rounded_pos["z"]) ** 2
                        )
                        + 1e-6  # small epsilon to avoid division by zero on exact grid corners
                    )
                )
if len(dists) == 0:
raise RuntimeError("Your cache is incomplete!")
total_weight = sum(weights)
weights = [w / total_weight for w in weights]
return sum(d * w for d, w in zip(dists, weights))
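# Worked example for the weighting above (numbers are made up): corners at
# distances 0.05 and 0.20 from `pos` with cached values 2.0 and 4.0 get raw
# weights 1/0.05 = 20 and 1/0.20 = 5, normalized to 0.8 and 0.2, yielding
# 0.8 * 2.0 + 0.2 * 4.0 = 2.4.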
def _get_shortest_path_distance_from_cache(
cache: Dict[str, Any], position: Dict[str, float], target: Dict[str, float]
) -> float:
try:
return cache[pos_to_str_for_cache(position)][pos_to_str_for_cache(target)][
"distance"
]
except KeyError:
return -1.0
def _get_shortest_path_distance_to_object_from_cache(
cache: Dict[str, Any], position: Dict[str, float], target_class: str
) -> float:
try:
return cache[pos_to_str_for_cache(position)][target_class]["distance"]
except KeyError:
return -1.0
def find_nearest_point_in_cache(
cache: Dict[str, Any], point: Dict[str, float]
) -> Dict[str, float]:
best_delta = float("inf")
closest_point: Dict[str, float] = {}
for p in cache:
pos = str_to_pos_for_cache(p)
delta = (
abs(point["x"] - pos["x"])
+ abs(point["y"] - pos["y"])
+ abs(point["z"] - pos["z"])
)
if delta < best_delta:
best_delta = delta
closest_point = pos
return closest_point
class DynamicDistanceCache(object):
def __init__(self, rounding: Optional[int] = None):
self.cache: Dict[str, Any] = {}
self.rounding = rounding
self.hits = 0
self.misses = 0
self.num_accesses = 0
def find_distance(
self,
scene_name: str,
position: Dict[str, Any],
target: Union[Dict[str, Any], str],
native_distance_function: Callable[
[Dict[str, Any], Union[Dict[str, Any], str]], float
],
) -> float:
# Convert the position to its rounded string representation
position_str = scene_name + self._pos_to_str(position)
# If the target is also a position, convert it to its rounded string representation
if isinstance(target, str):
target_str = target
else:
target_str = self._pos_to_str(target)
if position_str not in self.cache:
self.cache[position_str] = {}
if target_str not in self.cache[position_str]:
self.cache[position_str][target_str] = native_distance_function(
position, target
)
self.misses += 1
else:
self.hits += 1
self.num_accesses += 1
        if self.num_accesses % 1000 == 0:
            get_logger().debug(
                "Cache Miss-Hit Ratio: %.4f" % (self.misses / max(self.hits, 1))
            )
return self.cache[position_str][target_str]
    def invalidate(self):
        self.cache = {}
def _pos_to_str(self, pos: Dict[str, Any]) -> str:
if self.rounding:
pos = {k: round(v, self.rounding) for k, v in pos.items()}
return str(pos)
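# Illustrative usage sketch (the scene name, target, and distance function are
# made up): the cache memoizes an expensive native distance query per scene
# and (rounded) position.
#
#   cache = DynamicDistanceCache(rounding=2)
#   dist = cache.find_distance(
#       scene_name="FloorPlan1",
#       position={"x": 1.234, "y": 0.9, "z": 2.001},
#       target="Television",
#       native_distance_function=lambda p, t: 3.7,  # stand-in for a simulator call
#   )
#   # The first call is a miss (invokes the function); identical later calls are hits.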
| allenact-main | allenact/utils/cache_utils.py |
from typing import Optional, cast, Tuple, Any, Dict
import attr
import torch
from allenact.algorithms.onpolicy_sync.policy import ActorCriticModel
from allenact.algorithms.onpolicy_sync.storage import RolloutStorage
from allenact.base_abstractions.experiment_config import ExperimentConfig, MachineParams
from allenact.base_abstractions.misc import (
Memory,
ObservationType,
ActorCriticOutput,
DistributionType,
)
from allenact.base_abstractions.preprocessor import SensorPreprocessorGraph
from allenact.utils import spaces_utils as su
from allenact.utils.tensor_utils import batch_observations
@attr.s(kw_only=True)
class InferenceAgent:
actor_critic: ActorCriticModel = attr.ib()
rollout_storage: RolloutStorage = attr.ib()
device: torch.device = attr.ib()
sensor_preprocessor_graph: Optional[SensorPreprocessorGraph] = attr.ib()
steps_before_rollout_refresh: int = attr.ib(default=128)
memory: Optional[Memory] = attr.ib(default=None)
steps_taken_in_task: int = attr.ib(default=0)
    last_action_flat: Optional[torch.Tensor] = attr.ib(default=None)
    has_initialized: bool = attr.ib(default=False)
def __attrs_post_init__(self):
self.actor_critic.eval()
self.actor_critic.to(device=self.device)
if self.memory is not None:
self.memory.to(device=self.device)
if self.sensor_preprocessor_graph is not None:
self.sensor_preprocessor_graph.to(self.device)
self.rollout_storage.to(self.device)
self.rollout_storage.set_partition(index=0, num_parts=1)
@classmethod
def from_experiment_config(
cls,
exp_config: ExperimentConfig,
device: torch.device,
checkpoint_path: Optional[str] = None,
model_state_dict: Optional[Dict[str, Any]] = None,
mode: str = "test",
):
assert (
checkpoint_path is None or model_state_dict is None
), "Cannot have `checkpoint_path` and `model_state_dict` both non-None."
rollout_storage = exp_config.training_pipeline().rollout_storage
machine_params = exp_config.machine_params(mode)
if not isinstance(machine_params, MachineParams):
machine_params = MachineParams(**machine_params)
sensor_preprocessor_graph = machine_params.sensor_preprocessor_graph
actor_critic = cast(
ActorCriticModel,
exp_config.create_model(
sensor_preprocessor_graph=sensor_preprocessor_graph
),
)
if checkpoint_path is not None:
actor_critic.load_state_dict(
torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
)
elif model_state_dict is not None:
actor_critic.load_state_dict(
model_state_dict
if "model_state_dict" not in model_state_dict
else model_state_dict["model_state_dict"]
)
return cls(
actor_critic=actor_critic,
rollout_storage=rollout_storage,
device=device,
sensor_preprocessor_graph=sensor_preprocessor_graph,
)
def reset(self):
if self.has_initialized:
self.rollout_storage.after_updates()
self.steps_taken_in_task = 0
self.memory = None
def act(self, observations: ObservationType):
# Batch of size 1
obs_batch = batch_observations([observations], device=self.device)
if self.sensor_preprocessor_graph is not None:
obs_batch = self.sensor_preprocessor_graph.get_observations(obs_batch)
if self.steps_taken_in_task == 0:
self.has_initialized = True
self.rollout_storage.initialize(
observations=obs_batch,
num_samplers=1,
recurrent_memory_specification=self.actor_critic.recurrent_memory_specification,
action_space=self.actor_critic.action_space,
)
self.rollout_storage.after_updates()
else:
dummy_val = torch.zeros((1, 1), device=self.device) # Unused dummy value
self.rollout_storage.add(
observations=obs_batch,
memory=self.memory,
actions=self.last_action_flat[0],
action_log_probs=dummy_val,
value_preds=dummy_val,
rewards=dummy_val,
masks=torch.ones(
(1, 1), device=self.device
), # Always == 1 as we're in a single task until `reset`
)
agent_input = self.rollout_storage.agent_input_for_next_step()
actor_critic_output, self.memory = cast(
Tuple[ActorCriticOutput[DistributionType], Optional[Memory]],
self.actor_critic(**agent_input),
)
action = actor_critic_output.distributions.sample()
self.last_action_flat = su.flatten(self.actor_critic.action_space, action)
self.steps_taken_in_task += 1
if self.steps_taken_in_task % self.steps_before_rollout_refresh == 0:
self.rollout_storage.after_updates()
return su.action_list(self.actor_critic.action_space, self.last_action_flat)[0]
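# Illustrative usage sketch (the config class, checkpoint path, and observation
# source below are hypothetical):
#
#   agent = InferenceAgent.from_experiment_config(
#       exp_config=MyExperimentConfig(),  # hypothetical ExperimentConfig subclass
#       device=torch.device("cpu"),
#       checkpoint_path="path/to/checkpoint.pt",  # hypothetical path
#   )
#   agent.reset()  # call at the start of every new task
#   action = agent.act(observations)  # `observations`: a dict from your env/sensors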
| allenact-main | allenact/utils/inference.py |
import os
import sys
from pathlib import Path
from subprocess import getoutput
def make_package(name, verbose=False):
"""Prepares sdist for allenact or allenact_plugins."""
orig_dir = os.getcwd()
base_dir = os.path.join(os.path.abspath(os.path.dirname(Path(__file__))), "..")
os.chdir(base_dir)
with open(".VERSION", "r") as f:
__version__ = f.readline().strip()
# generate sdist via setuptools
output = getoutput(f"{sys.executable} {name}/setup.py sdist")
if verbose:
print(output)
os.chdir(os.path.join(base_dir, "dist"))
# uncompress the tar.gz sdist
output = getoutput(f"tar zxvf {name}-{__version__}.tar.gz")
if verbose:
print(output)
# copy setup.py to the top level of the package (required by pip install)
output = getoutput(
f"cp {name}-{__version__}/{name}/setup.py {name}-{__version__}/setup.py"
)
if verbose:
print(output)
# create new source file with version
getoutput(
f"printf '__version__ = \"{__version__}\"\n' >> {name}-{__version__}/{name}/_version.py"
)
# include it in sources
getoutput(
f'printf "\n{name}/_version.py" >> {name}-{__version__}/{name}.egg-info/SOURCES.txt'
)
# recompress tar.gz
output = getoutput(f"tar zcvf {name}-{__version__}.tar.gz {name}-{__version__}/")
if verbose:
print(output)
# remove temporary directory
output = getoutput(f"rm -r {name}-{__version__}")
if verbose:
print(output)
os.chdir(orig_dir)
if __name__ == "__main__":
verbose = False
make_package("allenact", verbose)
make_package("allenact_plugins", verbose)
| allenact-main | scripts/release.py |
#!/usr/bin/env python3
"""Tool to run command on multiple nodes through SSH."""
import argparse
import glob
import os
def get_argument_parser():
"""Creates the argument parser."""
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
description="dcommand", formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--runs_on",
required=False,
type=str,
default=None,
help="Comma-separated IP addresses of machines. If empty, the tool will scan for lists of IP addresses"
" in `screen_ids_file`s in the `~/.allenact` directory.",
)
parser.add_argument(
"--ssh_cmd",
required=False,
type=str,
default="ssh {addr}",
help="SSH command. Useful to utilize a pre-shared key with 'ssh -i path/to/mykey.pem ubuntu@{addr}'.",
)
parser.add_argument(
"--command",
required=False,
default="nvidia-smi | head -n 35",
type=str,
help="Command to be run through ssh onto each machine",
)
return parser
def get_args():
"""Creates the argument parser and parses any input arguments."""
parser = get_argument_parser()
args = parser.parse_args()
return args
def wrap_double(text):
return f'"{text}"'
def wrap_single(text):
return f"'{text}'"
def wrap_single_nested(text, quote=r"'\''"):
return f"{quote}{text}{quote}"
if __name__ == "__main__":
args = get_args()
all_addresses = []
if args.runs_on is not None:
all_addresses = args.runs_on.split(",")
else:
all_files = sorted(
glob.glob(os.path.join(os.path.expanduser("~"), ".allenact", "*.killfile")),
reverse=True,
)
if len(all_files) == 0:
print(
f"No screen_ids_file found under {os.path.join(os.path.expanduser('~'), '.allenact')}"
)
for killfile in all_files:
with open(killfile, "r") as f:
# Each line contains 'IP_address screen_ID'
nodes = [tuple(line[:-1].split(" ")) for line in f.readlines()]
all_addresses.extend(node[0] for node in nodes)
use_addresses = ""
while use_addresses not in ["y", "n"]:
use_addresses = input(
f"Run on {all_addresses} from {killfile}? [Y/n] "
).lower()
if use_addresses == "":
use_addresses = "y"
if use_addresses == "n":
all_addresses.clear()
else:
break
print(f"Running on IP addresses {all_addresses}")
for it, addr in enumerate(all_addresses):
ssh_command = f"{args.ssh_cmd.format(addr=addr)} {wrap_single(args.command)}"
print(f"{it} {addr} SSH command {ssh_command}")
os.system(ssh_command)
print("DONE")
| allenact-main | scripts/dcommand.py |
import glob
import os
import shutil
import sys
from pathlib import Path
from subprocess import check_output
from threading import Thread
from typing import Dict, Union, Optional, Set, List, Sequence, Mapping
from git import Git
from ruamel.yaml import YAML # type: ignore
from constants import ABS_PATH_OF_TOP_LEVEL_DIR
# TODO: the scripts directory shouldn't be a module (as it conflicts with
# some local development workflows) but we do want to import scripts/literate.py.
# The temporary solution is just to modify sys.path when this script is run.
sys.path.append(os.path.abspath(os.path.dirname(Path(__file__))))
from literate import literate_python_to_markdown
class StringColors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
exclude_files = [
".DS_Store",
"__init__.py",
"__init__.pyc",
"README.md",
"version.py",
"run.py",
"setup.py",
"main.py",
]
def render_file(
relative_src_path: str, src_file: str, to_file: str, modifier=""
) -> None:
"""Shells out to pydocmd, which creates a .md file from the docstrings of
python functions and classes in the file we specify.
The modifer specifies the depth at which to generate docs for
classes and functions in the file. More information here:
https://pypi.org/project/pydoc-markdown/
"""
# First try literate
was_literate = False
try:
was_literate = literate_python_to_markdown(
path=os.path.join(relative_src_path, src_file)
)
except Exception as _:
pass
if was_literate:
return
# Now do standard pydocmd
relative_src_namespace = relative_src_path.replace("/", ".")
src_base = src_file.replace(".py", "")
if relative_src_namespace == "":
namespace = f"{src_base}{modifier}"
else:
namespace = f"{relative_src_namespace}.{src_base}{modifier}"
pydoc_config = """'{
renderer: {
type: markdown,
code_headers: true,
descriptive_class_title: false,
add_method_class_prefix: true,
source_linker: {type: github, repo: allenai/allenact},
header_level_by_type: {
Module: 1,
Class: 2,
Method: 3,
Function: 3,
Data: 3,
}
}
}'"""
pydoc_config = " ".join(pydoc_config.split())
args = ["pydoc-markdown", "-m", namespace, pydoc_config]
try:
call_result = check_output([" ".join(args)], shell=True, env=os.environ).decode(
"utf-8"
)
# noinspection PyShadowingNames
with open(to_file, "w") as f:
doc_split = call_result.split("\n")
# github_path = "https://github.com/allenai/allenact/tree/master/"
# path = (
# github_path + namespace.replace(".", "/") + ".py"
# )
# mdlink = "[[source]]({})".format(path)
mdlink = "" # Removing the above source link for now.
call_result = "\n".join([doc_split[0] + " " + mdlink] + doc_split[1:])
call_result = call_result.replace("_DOC_COLON_", ":")
f.write(call_result)
print(
f"{StringColors.OKGREEN}[SUCCESS]{StringColors.ENDC} built docs for {src_file} -> {to_file}."
)
except Exception as _:
cmd = " ".join(args)
print(
f"{StringColors.WARNING}[SKIPPING]{StringColors.ENDC} could not"
f" build docs for {src_file} (missing an import?). CMD: '{cmd}'"
)
# noinspection PyShadowingNames
def build_docs_for_file(
relative_path: str, file_name: str, docs_dir: str, threads: List
) -> Dict[str, str]:
"""Build docs for an individual python file."""
clean_filename = file_name.replace(".py", "")
markdown_filename = f"{clean_filename}.md"
output_path = os.path.join(docs_dir, relative_path, markdown_filename)
nav_path = os.path.join("api", relative_path, markdown_filename)
thread = Thread(target=render_file, args=(relative_path, file_name, output_path))
thread.start()
threads.append(thread)
return {os.path.basename(clean_filename): nav_path}
# noinspection PyShadowingNames
def build_docs(
base_dir: Union[Path, str],
root_path: Union[Path, str],
docs_dir: Union[Path, str],
threads: List,
allowed_dirs: Optional[Set[str]] = None,
):
base_dir, root_path, docs_dir = str(base_dir), str(root_path), str(docs_dir)
nav_root = []
for child in os.listdir(root_path):
relative_path = os.path.join(root_path, child)
if (
(allowed_dirs is not None)
and (os.path.isdir(relative_path))
and (os.path.abspath(relative_path) not in allowed_dirs)
# or ".git" in relative_path
# or ".idea" in relative_path
# or "__pycache__" in relative_path
# or "tests" in relative_path
# or "mypy_cache" in relative_path
):
print("SKIPPING {}".format(relative_path))
continue
# without_allenact = str(root_path).replace("allenact/", "")
new_path = os.path.relpath(root_path, base_dir).replace(".", "")
target_dir = os.path.join(docs_dir, new_path)
if not os.path.exists(target_dir):
os.mkdir(target_dir)
if os.path.isdir(relative_path):
nav_subsection = build_docs(
base_dir,
relative_path,
docs_dir,
threads=threads,
allowed_dirs=allowed_dirs,
)
if not nav_subsection:
continue
nav_root.append({child: nav_subsection})
else:
if child in exclude_files or not child.endswith(".py"):
continue
nav = build_docs_for_file(new_path, child, docs_dir, threads=threads)
nav_root.append(nav)
return nav_root
def project_readme_paths_to_nav_structure(project_readmes):
nested_dict = {}
for fp in project_readmes:
has_seen_project_dir = False
sub_nested_dict = nested_dict
split_fp = os.path.dirname(fp).split("/")
for i, yar in enumerate(split_fp):
has_seen_project_dir = has_seen_project_dir or yar == "projects"
if not has_seen_project_dir or yar == "projects":
continue
if yar not in sub_nested_dict:
if i == len(split_fp) - 1:
sub_nested_dict[yar] = fp.replace("docs/", "")
break
else:
sub_nested_dict[yar] = {}
sub_nested_dict = sub_nested_dict[yar]
def recursively_create_nav_structure(nested_dict):
if isinstance(nested_dict, str):
return nested_dict
to_return = []
for key in nested_dict:
to_return.append({key: recursively_create_nav_structure(nested_dict[key])})
return to_return
return recursively_create_nav_structure(nested_dict)
def pruned_nav_entries(nav_entries):
if isinstance(nav_entries, str):
if os.path.exists(os.path.join("docs", nav_entries)):
return nav_entries
else:
return None
elif isinstance(nav_entries, Sequence):
new_entries = []
for entry in nav_entries:
entry = pruned_nav_entries(entry)
if entry:
new_entries.append(entry)
return new_entries
elif isinstance(nav_entries, Mapping):
new_entries = {}
for k, entry in nav_entries.items():
entry = pruned_nav_entries(entry)
if entry:
new_entries[k] = entry
return new_entries
else:
raise NotImplementedError()
def main():
os.chdir(ABS_PATH_OF_TOP_LEVEL_DIR)
print("Copying all README.md files to docs.")
with open("README.md") as f:
readme_content = f.readlines()
readme_content = [x.replace("docs/", "") for x in readme_content]
with open("docs/index.md", "w") as f:
f.writelines(readme_content)
project_readmes = []
for readme_file_path in glob.glob("projects/**/README.md", recursive=True):
if "docs/" not in readme_file_path:
new_path = os.path.join("docs", readme_file_path)
os.makedirs(os.path.dirname(new_path), exist_ok=True)
shutil.copy(readme_file_path, new_path)
project_readmes.append(new_path)
print("Copying LICENSE file to docs.")
shutil.copy("LICENSE", "docs/LICENSE.md")
print("Copying CONTRIBUTING.md file to docs.")
shutil.copy("CONTRIBUTING.md", "docs/CONTRIBUTING.md")
# print("Copying CNAME file to docs.")
# shutil.copy("CNAME", "docs/CNAME")
print("Building the docs.")
parent_folder_path = Path(__file__).parent.parent
yaml_path = parent_folder_path / "mkdocs.yml"
source_path = parent_folder_path
docs_dir = str(parent_folder_path / "docs" / "api")
if not os.path.exists(docs_dir):
os.mkdir(docs_dir)
# Adding project readmes to the yaml
yaml = YAML()
mkdocs_yaml = yaml.load(yaml_path)
site_nav = mkdocs_yaml["nav"]
# TODO Find a way to do the following in a way that results in nice titles.
# projects_key = "Projects using allenact"
# nav_obj = None
# for obj in site_nav:
# if projects_key in obj:
# nav_obj = obj
# break
# nav_obj[projects_key] = project_readme_paths_to_nav_structure(project_readmes)
with open(yaml_path, "w") as f:
yaml.dump(mkdocs_yaml, f)
# Get directories to ignore
git_dirs = set(
os.path.abspath(os.path.split(p)[0]) for p in Git(".").ls_files().split("\n")
)
ignore_rel_dirs = [
"docs",
"scripts",
"experiments",
"src",
".pip_src",
"dist",
"build",
]
ignore_abs_dirs = set(
os.path.abspath(os.path.join(str(parent_folder_path), rel_dir))
for rel_dir in ignore_rel_dirs
)
for d in ignore_abs_dirs:
if d in git_dirs:
git_dirs.remove(d)
threads: List = []
nav_entries = build_docs(
parent_folder_path,
source_path,
docs_dir,
threads=threads,
allowed_dirs=git_dirs,
)
nav_entries.sort(key=lambda x: list(x)[0], reverse=False)
for thread in threads:
thread.join()
nav_entries = pruned_nav_entries(nav_entries)
docs_key = "API"
# Find the yaml corresponding to the API
nav_obj = None
for obj in site_nav:
if docs_key in obj:
nav_obj = obj
break
nav_obj[docs_key] = nav_entries
with open(yaml_path, "w") as f:
yaml.dump(mkdocs_yaml, f)
if __name__ == "__main__":
main()
| allenact-main | scripts/build_docs.py |
#!/usr/bin/env python3
import os
import argparse
def get_argument_parser():
"""Creates the argument parser."""
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
description="dconfig", formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--runs_on",
required=True,
type=str,
help="Comma-separated IP addresses of machines",
)
parser.add_argument(
"--config_script",
required=True,
type=str,
help="Path to bash script with configuration",
)
parser.add_argument(
"--ssh_cmd",
required=False,
type=str,
default="ssh -f {addr}",
help="SSH command. Useful to utilize a pre-shared key with 'ssh -i path/to/mykey.pem -f ubuntu@{addr}'. "
"The option `-f` should be used, since we want a non-interactive session",
)
parser.add_argument(
"--distribute_public_rsa_key",
dest="distribute_public_rsa_key",
action="store_true",
required=False,
help="if you pass the `--distribute_public_rsa_key` flag, the manager node's public key will be added to the "
"authorized keys of all workers (this is necessary in default-configured EC2 instances to use "
"`scripts/dmain.py`)",
)
parser.set_defaults(distribute_public_rsa_key=False)
return parser
def get_args():
"""Creates the argument parser and parses any input arguments."""
parser = get_argument_parser()
args = parser.parse_args()
return args
def wrap_double(text):
return f'"{text}"'
def wrap_single(text):
return f"'{text}'"
def wrap_single_nested(text, quote=r"'\''"):
return f"{quote}{text}{quote}"
if __name__ == "__main__":
args = get_args()
all_addresses = args.runs_on.split(",")
print(f"Running on addresses {all_addresses}")
remote_config_script = f"{args.config_script}.distributed"
for it, addr in enumerate(all_addresses):
if args.distribute_public_rsa_key:
key_command = (
f"{args.ssh_cmd.format(addr=addr)} "
f"{wrap_double('echo $(cat ~/.ssh/id_rsa.pub) >> ~/.ssh/authorized_keys')}"
)
print(f"Key command {key_command}")
os.system(f"{key_command}")
scp_cmd = (
args.ssh_cmd.replace("ssh ", "scp ")
.replace("-f", args.config_script)
.format(addr=addr)
)
print(f"SCP command {scp_cmd}:{remote_config_script}")
os.system(f"{scp_cmd}:{remote_config_script}")
screen_name = f"allenact_config_machine{it}"
bash_command = wrap_single_nested(
f"source {remote_config_script} &>> log_allenact_distributed_config"
)
screen_command = wrap_single(
f"screen -S {screen_name} -dm bash -c {bash_command}"
)
ssh_command = f"{args.ssh_cmd.format(addr=addr)} {screen_command}"
print(f"SSH command {ssh_command}")
os.system(ssh_command)
print(f"{addr} {screen_name}")
print("DONE")
| allenact-main | scripts/dconfig.py |
#!/usr/bin/env python3
"""Tool to terminate multi-node (distributed) training."""
import os
import argparse
import glob
def get_argument_parser():
"""Creates the argument parser."""
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
description="dkill", formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--screen_ids_file",
required=False,
type=str,
default=None,
help="Path to file generated by dmain.py with IPs and screen ids for nodes running process."
" If empty, the tool will scan the `~/.allenact` directory for `screen_ids_file`s.",
)
parser.add_argument(
"--ssh_cmd",
required=False,
type=str,
default="ssh {addr}",
help="SSH command. Useful to utilize a pre-shared key with 'ssh -i mykey.pem ubuntu@{addr}'. ",
)
return parser
def get_args():
"""Creates the argument parser and parses any input arguments."""
parser = get_argument_parser()
args = parser.parse_args()
return args
if __name__ == "__main__":
args = get_args()
all_files = (
[args.screen_ids_file]
if args.screen_ids_file is not None
else sorted(
glob.glob(os.path.join(os.path.expanduser("~"), ".allenact", "*.killfile")),
reverse=True,
)
)
if len(all_files) == 0:
print(
f"No screen_ids_file found under {os.path.join(os.path.expanduser('~'), '.allenact')}"
)
for killfile in all_files:
with open(killfile, "r") as f:
nodes = [tuple(line[:-1].split(" ")) for line in f.readlines()]
do_kill = ""
while do_kill not in ["y", "n"]:
do_kill = input(
f"Stopping processes on {nodes} from {killfile}? [y/N] "
).lower()
if do_kill == "":
do_kill = "n"
if do_kill == "y":
for it, node in enumerate(nodes):
addr, screen_name = node
print(f"Killing screen {screen_name} on {addr}")
ssh_command = (
f"{args.ssh_cmd.format(addr=addr)} '"
f"screen -S {screen_name} -p 0 -X quit ; "
f"sleep 1 ; "
f"echo Master processes left running: ; "
f"ps aux | grep Master: | grep -v grep ; "
f"echo ; "
f"'"
)
# print(f"SSH command {ssh_command}")
os.system(ssh_command)
do_delete = ""
while do_delete not in ["y", "n"]:
do_delete = input(f"Delete file {killfile}? [y/N] ").lower()
if do_delete == "":
do_delete = "n"
if do_delete == "y":
os.system(f"rm {killfile}")
print(f"Deleted {killfile}")
print("DONE")
| allenact-main | scripts/dkill.py |
"""Helper functions used to create literate documentation from python files."""
import importlib
import inspect
import os
from typing import Optional, Sequence, List, cast, TextIO
from constants import ABS_PATH_OF_DOCS_DIR, ABS_PATH_OF_TOP_LEVEL_DIR
def get_literate_output_path(file: TextIO) -> Optional[str]:
for l in file:
l = l.strip()
if l != "":
if l.lower().startswith(("# literate", "#literate")):
parts = l.split(":")
if len(parts) == 1:
assert (
file.name[-3:].lower() == ".py"
), "Can only run literate on python (*.py) files."
return file.name[:-3] + ".md"
elif len(parts) == 2:
rel_outpath = parts[1].strip()
outpath = os.path.abspath(
os.path.join(ABS_PATH_OF_DOCS_DIR, rel_outpath)
)
assert outpath.startswith(
ABS_PATH_OF_DOCS_DIR
), f"Path {outpath} is not allowed, must be within {ABS_PATH_OF_DOCS_DIR}."
return outpath
else:
raise NotImplementedError(
f"Line '{l}' is not of the correct format."
)
else:
return None
return None
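# For reference, the header convention parsed above (reconstructed from the
# logic in this function, not from separate documentation): the first
# non-empty line of a literate python file is either
#   # literate
# (markdown is written next to the source as <file>.md), or
#   # literate: some/relative/path.md
# (markdown is written under ABS_PATH_OF_DOCS_DIR at that relative path).
# Any other first line means the file is not treated as literate.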
def source_to_markdown(dot_path: str, summarize: bool = False):
importlib.invalidate_caches()
module_path, obj_name = ".".join(dot_path.split(".")[:-1]), dot_path.split(".")[-1]
module = importlib.import_module(module_path)
obj = getattr(module, obj_name)
source = inspect.getsource(obj)
if not summarize:
return source
elif inspect.isclass(obj):
lines = source.split("\n")
newlines = [lines[0]]
whitespace_len = float("inf")
k = 1
started = False
while k < len(lines):
l = lines[k]
lstripped = l.lstrip()
if started:
newlines.append(l)
started = "):" not in l and "->" not in l
if not started:
newlines.append(l[: cast(int, whitespace_len)] + " ...\n")
if (
l.lstrip().startswith("def ")
and len(l) - len(lstripped) <= whitespace_len
):
whitespace_len = len(l) - len(lstripped)
newlines.append(l)
started = "):" not in l and "->" not in l
if not started:
newlines.append(l[:whitespace_len] + " ...\n")
k += 1
return "\n".join(newlines).strip()
elif inspect.isfunction(obj):
return source.split("\n")[0] + "\n ..."
else:
return
def _strip_empty_lines(lines: Sequence[str]) -> List[str]:
lines = list(lines)
if len(lines) == 0:
return lines
for i in range(len(lines)):
if lines[i].strip() != "":
lines = lines[i:]
break
for i in reversed(list(range(len(lines)))):
if lines[i].strip() != "":
lines = lines[: i + 1]
break
return lines
def literate_python_to_markdown(path: str) -> bool:
assert path[-3:].lower() == ".py", "Can only run literate on python (*.py) files."
with open(path, "r") as file:
output_path = get_literate_output_path(file)
if output_path is None:
return False
output_lines = [
f"<!-- DO NOT EDIT THIS FILE. --> ",
f"<!-- THIS FILE WAS AUTOGENERATED FROM"
f" 'ALLENACT_BASE_DIR/{os.path.relpath(path, ABS_PATH_OF_TOP_LEVEL_DIR)}', EDIT IT INSTEAD. -->\n",
]
md_lines: List[str] = []
code_lines = md_lines
lines = file.readlines()
mode = None
for line in lines:
line = line.rstrip()
stripped_line = line.strip()
if (mode is None or mode == "change") and line.strip() == "":
continue
if mode == "markdown":
if stripped_line in ['"""', "'''"]:
output_lines.extend(_strip_empty_lines(md_lines) + [""])
md_lines.clear()
mode = None
elif stripped_line.endswith(('"""', "'''")):
output_lines.extend(
_strip_empty_lines(md_lines) + [stripped_line[:-3]]
)
md_lines.clear()
mode = None
# TODO: Does not account for the case where a string is ended with a comment.
else:
md_lines.append(line.strip())
elif stripped_line.startswith(("# %%", "#%%")):
last_mode = mode
mode = "change"
if last_mode == "code":
output_lines.extend(
["```python"] + _strip_empty_lines(code_lines) + ["```"]
)
code_lines.clear()
if " import " in stripped_line:
path = stripped_line.split(" import ")[-1].strip()
output_lines.append(
"```python\n" + source_to_markdown(path) + "\n```"
)
elif " import_summary " in stripped_line:
path = stripped_line.split(" import_summary ")[-1].strip()
output_lines.append(
"```python\n"
+ source_to_markdown(path, summarize=True)
+ "\n```"
)
elif " hide" in stripped_line:
mode = "hide"
elif mode == "hide":
continue
elif mode == "change":
if stripped_line.startswith(('"""', "'''")):
mode = "markdown"
if len(stripped_line) != 3:
if stripped_line.endswith(('"""', "'''")):
output_lines.append(stripped_line[3:-3])
mode = "change"
else:
output_lines.append(stripped_line[3:])
else:
mode = "code"
code_lines.append(line)
elif mode == "code":
code_lines.append(line)
else:
raise NotImplementedError(
f"mode {mode} is not implemented. Last 5 lines: "
+ "\n".join(output_lines[-5:])
)
if mode == "code" and len(code_lines) != 0:
output_lines.extend(
["```python"] + _strip_empty_lines(code_lines) + ["```"]
)
with open(output_path, "w") as f:
f.writelines([l + "\n" for l in output_lines])
return True
if __name__ == "__main__":
# print(
# source_to_markdown(
# "allenact_plugins.minigrid_plugin.minigrid_offpolicy.ExpertTrajectoryIterator",
# True
# )
# )
literate_python_to_markdown(
os.path.join(
ABS_PATH_OF_TOP_LEVEL_DIR,
"projects/tutorials/training_a_pointnav_model.py",
)
)
| allenact-main | scripts/literate.py |
import atexit
import os
import platform
import re
import shlex
import subprocess
import tempfile
# Turning off automatic black formatting for this script as it breaks quotes.
# fmt: off
def pci_records():
records = []
command = shlex.split("lspci -vmm")
output = subprocess.check_output(command).decode()
for devices in output.strip().split("\n\n"):
record = {}
records.append(record)
for row in devices.split("\n"):
key, value = row.split("\t")
record[key.split(":")[0]] = value
return records
def generate_xorg_conf(devices):
xorg_conf = []
device_section = """
Section "Device"
Identifier "Device{device_id}"
Driver "nvidia"
VendorName "NVIDIA Corporation"
BusID "{bus_id}"
EndSection
"""
server_layout_section = """
Section "ServerLayout"
Identifier "Layout0"
{screen_records}
EndSection
"""
screen_section = """
Section "Screen"
Identifier "Screen{screen_id}"
Device "Device{device_id}"
DefaultDepth 24
Option "AllowEmptyInitialConfiguration" "True"
SubSection "Display"
Depth 24
Virtual 1024 768
EndSubSection
EndSection
"""
screen_records = []
for i, bus_id in enumerate(devices):
xorg_conf.append(device_section.format(device_id=i, bus_id=bus_id))
xorg_conf.append(screen_section.format(device_id=i, screen_id=i))
screen_records.append('Screen {screen_id} "Screen{screen_id}" 0 0'.format(screen_id=i))
xorg_conf.append(server_layout_section.format(screen_records="\n ".join(screen_records)))
output = "\n".join(xorg_conf)
return output
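# Illustrative note (the bus ID is made up): for devices == ["PCI:0:30:0"] the
# generated config contains one "Device0"/"Screen0" section pair plus a
# ServerLayout referencing 'Screen 0 "Screen0" 0 0', which is what lets Xorg
# drive one virtual display per GPU.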
def startx(display=0):
if platform.system() != "Linux":
raise Exception("Can only run startx on linux")
devices = []
for r in pci_records():
if r.get("Vendor", "") == "NVIDIA Corporation"\
and r["Class"] in ["VGA compatible controller", "3D controller"]:
bus_id = "PCI:" + ":".join(map(lambda x: str(int(x, 16)), re.split(r"[:\.]", r["Slot"])))
devices.append(bus_id)
if not devices:
raise Exception("no nvidia cards found")
fd = None
path = None
try:
fd, path = tempfile.mkstemp()
with open(path, "w") as f:
f.write(generate_xorg_conf(devices))
command = shlex.split("Xorg -noreset +extension GLX +extension RANDR +extension RENDER -config %s :%s" % (path, display))
proc = subprocess.Popen(command)
atexit.register(lambda: proc.poll() is None and proc.kill())
proc.wait()
finally:
if fd is not None:
os.close(fd)
os.unlink(path)
# fmt: on
if __name__ == "__main__":
startx()
| allenact-main | scripts/startx.py |
#!/usr/bin/env python3
"""Entry point to multi-node (distributed) training for a user given experiment
name."""
import os
import random
import string
import subprocess
import sys
import time
from pathlib import Path
from typing import Optional
# Add to PYTHONPATH the path of the parent directory of the current file's directory
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(Path(__file__)))))
from allenact.main import get_argument_parser as get_main_arg_parser
from allenact.utils.system import init_logging, get_logger
from constants import ABS_PATH_OF_TOP_LEVEL_DIR
def get_argument_parser():
"""Creates the argument parser."""
parser = get_main_arg_parser()
parser.description = f"distributed {parser.description}"
parser.add_argument(
"--runs_on",
required=True,
type=str,
help="Comma-separated IP addresses of machines",
)
parser.add_argument(
"--ssh_cmd",
required=False,
type=str,
default="ssh -f {addr}",
help="SSH command. Useful to utilize a pre-shared key with 'ssh -i mykey.pem -f ubuntu@{addr}'. "
"The option `-f` should be used for non-interactive session",
)
parser.add_argument(
"--env_activate_path",
required=True,
type=str,
help="Path to the virtual environment's `activate` script. It must be the same across all machines",
)
parser.add_argument(
"--allenact_path",
required=False,
type=str,
default="allenact",
help="Path to allenact top directory. It must be the same across all machines",
)
# Required distributed_ip_and_port
idx = [a.dest for a in parser._actions].index("distributed_ip_and_port")
parser._actions[idx].required = True
return parser
def get_args():
"""Creates the argument parser and parses any input arguments."""
parser = get_argument_parser()
args = parser.parse_args()
return args
def get_raw_args():
raw_args = sys.argv[1:]
filtered_args = []
remove: Optional[str] = None
enclose_in_quotes: Optional[str] = None
for arg in raw_args:
if remove is not None:
remove = None
elif enclose_in_quotes is not None:
# Within backslash expansion: close former single, open double, create single, close double, reopen single
inner_quote = r"\'\"\'\"\'"
# Convert double quotes into backslash double for later expansion
filtered_args.append(
inner_quote + arg.replace('"', r"\"").replace("'", r"\"") + inner_quote
)
enclose_in_quotes = None
elif arg in [
"--runs_on",
"--ssh_cmd",
"--env_activate_path",
"--allenact_path",
"--extra_tag",
"--machine_id",
]:
remove = arg
elif arg == "--config_kwargs":
enclose_in_quotes = arg
filtered_args.append(arg)
else:
filtered_args.append(arg)
return filtered_args
def wrap_single(text):
return f"'{text}'"
def wrap_single_nested(text):
# Close former single, start backslash expansion (via $), create new single quote for expansion:
quote_enter = r"'$'\'"
# New closing single quote for expansion, close backslash expansion, reopen former single:
quote_leave = r"\'''"
return f"{quote_enter}{text}{quote_leave}"
def wrap_double(text):
return f'"{text}"'
def id_generator(size=4, chars=string.ascii_uppercase + string.digits):
return "".join(random.choice(chars) for _ in range(size))
# Assume we can ssh into each of the `runs_on` machines through port 22
if __name__ == "__main__":
# Tool must be called from AllenAct project's root directory
cwd = os.path.abspath(os.getcwd())
assert cwd == ABS_PATH_OF_TOP_LEVEL_DIR, (
f"`dmain.py` called from {cwd}."
f"\nIt should be called from AllenAct's top level directory {ABS_PATH_OF_TOP_LEVEL_DIR}."
)
args = get_args()
init_logging(args.log_level)
raw_args = get_raw_args()
if args.seed is None:
seed = random.randint(0, 2 ** 31 - 1)
raw_args.extend(["-s", f"{seed}"])
get_logger().info(f"Using random seed {seed} in all workers (none was given)")
all_addresses = args.runs_on.split(",")
get_logger().info(f"Running on IP addresses {all_addresses}")
assert args.distributed_ip_and_port.split(":")[0] in all_addresses, (
f"Missing listener IP address {args.distributed_ip_and_port.split(':')[0]}"
f" in list of worker addresses {all_addresses}"
)
time_str = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime(time.time()))
global_job_id = id_generator()
killfilename = os.path.join(
os.path.expanduser("~"), ".allenact", f"{time_str}_{global_job_id}.killfile"
)
os.makedirs(os.path.dirname(killfilename), exist_ok=True)
code_src = "."
with open(killfilename, "w") as killfile:
for it, addr in enumerate(all_addresses):
code_tget = f"{addr}:{args.allenact_path}/"
get_logger().info(f"rsync {code_src} to {code_tget}")
os.system(f"rsync -rz {code_src} {code_tget}")
job_id = id_generator()
command = " ".join(
["python", "main.py"]
+ raw_args
+ [
"--extra_tag",
f"{args.extra_tag}{'__' if len(args.extra_tag) > 0 else ''}machine{it}",
]
+ ["--machine_id", f"{it}"]
)
logfile = (
f"{args.output_dir}/log_{time_str}_{global_job_id}_{job_id}_machine{it}"
)
env_and_command = wrap_single_nested(
f"for NCCL_SOCKET_IFNAME in $(route | grep default) ; do : ; done && export NCCL_SOCKET_IFNAME"
f" && cd {args.allenact_path}"
f" && mkdir -p {args.output_dir}"
f" && source {args.env_activate_path} &>> {logfile}"
f" && echo pwd=$(pwd) &>> {logfile}"
f" && echo output_dir={args.output_dir} &>> {logfile}"
f" && echo python_version=$(python --version) &>> {logfile}"
f" && echo python_path=$(which python) &>> {logfile}"
f" && set | grep NCCL_SOCKET_IFNAME &>> {logfile}"
f" && echo &>> {logfile}"
f" && {command} &>> {logfile}"
)
screen_name = f"allenact_{time_str}_{global_job_id}_{job_id}_machine{it}"
screen_command = wrap_single(
f"screen -S {screen_name} -dm bash -c {env_and_command}"
)
ssh_command = f"{args.ssh_cmd.format(addr=addr)} {screen_command}"
get_logger().debug(f"SSH command {ssh_command}")
subprocess.run(ssh_command, shell=True, executable="/bin/bash")
get_logger().info(f"{addr} {screen_name}")
killfile.write(f"{addr} {screen_name}\n")
get_logger().info("")
get_logger().info(f"Running screen ids saved to {killfilename}")
get_logger().info("")
get_logger().info("DONE")
| allenact-main | scripts/dmain.py |
try:
# noinspection PyProtectedMember,PyUnresolvedReferences
from allenact_plugins._version import __version__
except ModuleNotFoundError:
__version__ = None
| allenact-main | allenact_plugins/__init__.py |
import glob
import os
from pathlib import Path
from setuptools import find_packages, setup
def parse_req_file(fname, initial=None):
"""Reads requires.txt file generated by setuptools and outputs a
new/updated dict of extras as keys and corresponding lists of dependencies
as values.
The input file's contents are similar to a `ConfigParser` file, e.g.
pkg_1
pkg_2
pkg_3
[extras1]
pkg_4
pkg_5
[extras2]
pkg_6
pkg_7
"""
reqs = {} if initial is None else initial
cline = None
with open(fname, "r") as f:
for line in f.readlines():
line = line[:-1].strip()
if len(line) == 0:
continue
if line[0] == "[":
# Add new key for current extras (if missing in dict)
cline = line[1:-1].strip()
if cline not in reqs:
reqs[cline] = []
else:
# Only keep dependencies from extras
if cline is not None:
reqs[cline].append(line)
return reqs
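# Illustrative sketch (file contents are made up): for a requires.txt containing
#
#   pkg_1
#   [extras1]
#   pkg_4
#   [extras2]
#   pkg_6
#
# `parse_req_file` returns {"extras1": ["pkg_4"], "extras2": ["pkg_6"]}; note
# that top-level dependencies before the first [section] are intentionally
# skipped.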
def get_version(fname):
"""Reads PKG-INFO file generated by setuptools and extracts the Version
number."""
res = "UNK"
with open(fname, "r") as f:
for line in f.readlines():
line = line[:-1]
if line.startswith("Version:"):
res = line.replace("Version:", "").strip()
break
if res in ["UNK", ""]:
raise ValueError(f"Missing Version number in {fname}")
return res
def run_setup():
base_dir = os.path.abspath(os.path.dirname(Path(__file__)))
if not os.path.exists(
os.path.join(base_dir, "allenact_plugins.egg-info/dependency_links.txt")
):
# Build mode for sdist
# Extra dependencies required for various plugins
extras = {}
for plugin_path in glob.glob(os.path.join(base_dir, "*_plugin")):
plugin_name = os.path.basename(plugin_path).replace("_plugin", "")
extra_reqs_path = os.path.join(plugin_path, "extra_requirements.txt")
if os.path.exists(extra_reqs_path):
with open(extra_reqs_path, "r") as f:
# Filter out non-PyPI dependencies
extras[plugin_name] = [
clean_dep
for clean_dep in (dep.strip() for dep in f.readlines())
if clean_dep != ""
and not clean_dep.startswith("#")
and "@ git+https://github.com/" not in clean_dep
]
extras["all"] = sum(extras.values(), [])
os.chdir(os.path.join(base_dir, ".."))
with open(".VERSION", "r") as f:
__version__ = f.readline().strip()
else:
# Install mode from sdist
__version__ = get_version(
os.path.join(base_dir, "allenact_plugins.egg-info/PKG-INFO")
)
extras = parse_req_file(
os.path.join(base_dir, "allenact_plugins.egg-info/requires.txt")
)
setup(
name="allenact_plugins",
version=__version__,
description="Plugins for the AllenAct framework",
long_description=(
"A collection of plugins/extensions for use within the AllenAct framework."
),
classifiers=[
"Intended Audience :: Science/Research",
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
keywords=["reinforcement learning", "embodied-AI", "AI", "RL", "SLAM"],
url="https://github.com/allenai/allenact",
author="Allen Institute for Artificial Intelligence",
author_email="[email protected]",
license="MIT",
packages=find_packages(include=["allenact_plugins", "allenact_plugins.*"]),
install_requires=[
"gym>=0.17.0,<0.20.0",
"torch>=1.6.0,!=1.8.0,<2.0.0",
"torchvision>=0.7.0",
"numpy>=1.19.1",
"wheel>=0.36.2",
f"allenact=={__version__}",
],
setup_requires=["pytest-runner"],
tests_require=["pytest", "pytest-cov"],
extras_require=extras,
)
if __name__ == "__main__":
run_setup()
| allenact-main | allenact_plugins/setup.py |
import os
HABITAT_BASE = os.getenv(
"HABITAT_BASE_DIR",
default=os.path.join(os.getcwd(), "external_projects", "habitat-lab"),
)
HABITAT_DATA_BASE = os.path.join(os.getcwd(), "data")
if (not os.path.exists(HABITAT_BASE)) or (not os.path.exists(HABITAT_DATA_BASE)):
raise ImportError(
"In order to run properly the Habitat environment makes several assumptions about the file structure of"
" the local system. The file structure of the current environment does not seem to respect this required"
" file structure. Please see https://allenact.org/installation/installation-framework/#installation-of-habitat"
" for details as to how to set up your local environment to make it possible to use the habitat plugin of"
" AllenAct."
)
HABITAT_DATASETS_DIR = os.path.join(HABITAT_DATA_BASE, "datasets")
HABITAT_SCENE_DATASETS_DIR = os.path.join(HABITAT_DATA_BASE, "scene_datasets")
HABITAT_CONFIGS_DIR = os.path.join(HABITAT_BASE, "configs")
TESTED_HABITAT_COMMIT = "33654923dc733f5fcea23aea6391034c3f694a67"
MOVE_AHEAD = "MOVE_FORWARD"
ROTATE_LEFT = "TURN_LEFT"
ROTATE_RIGHT = "TURN_RIGHT"
LOOK_DOWN = "LOOK_DOWN"
LOOK_UP = "LOOK_UP"
END = "STOP"
| allenact-main | allenact_plugins/habitat_plugin/habitat_constants.py |
from abc import ABC
from typing import Tuple, List, Dict, Any, Optional, Union, Sequence, cast
import gym
import numpy as np
from habitat.sims.habitat_simulator.actions import HabitatSimActions
from habitat.sims.habitat_simulator.habitat_simulator import HabitatSim
from habitat.tasks.nav.shortest_path_follower import ShortestPathFollower
from allenact.base_abstractions.misc import RLStepResult
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import Task
from allenact.utils.system import get_logger
from allenact_plugins.habitat_plugin.habitat_constants import (
MOVE_AHEAD,
ROTATE_LEFT,
ROTATE_RIGHT,
END,
LOOK_UP,
LOOK_DOWN,
)
from allenact_plugins.habitat_plugin.habitat_environment import HabitatEnvironment
from allenact_plugins.habitat_plugin.habitat_sensors import (
AgentCoordinatesSensorHabitat,
)
class HabitatTask(Task[HabitatEnvironment], ABC):
def __init__(
self,
env: HabitatEnvironment,
sensors: List[Sensor],
task_info: Dict[str, Any],
max_steps: int,
**kwargs,
) -> None:
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self._last_action: Optional[str] = None
self._last_action_ind: Optional[int] = None
self._last_action_success: Optional[bool] = None
self._actions_taken: List[str] = []
self._positions = []
pos = self.get_agent_position_and_rotation()
self._positions.append(
{"x": pos[0], "y": pos[1], "z": pos[2], "rotation": pos[3]}
)
ep = self.env.get_current_episode()
# Extract the scene name from the scene path and append the episode id to generate
# a globally unique episode_id
self._episode_id = ep.scene_id.split("/")[-1][:-4] + "_" + ep.episode_id
def get_agent_position_and_rotation(self):
return AgentCoordinatesSensorHabitat.get_observation(self.env, self)
@property
def last_action(self):
return self._last_action
@last_action.setter
def last_action(self, value: str):
self._last_action = value
@property
def last_action_success(self):
return self._last_action_success
@last_action_success.setter
def last_action_success(self, value: Optional[bool]):
self._last_action_success = value
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
if mode == "rgb":
return self.env.current_frame["rgb"]
elif mode == "depth":
return self.env.current_frame["depth"]
else:
raise NotImplementedError()
class PointNavTask(Task[HabitatEnvironment]):
_actions = (MOVE_AHEAD, ROTATE_LEFT, ROTATE_RIGHT, END)
def __init__(
self,
env: HabitatEnvironment,
sensors: List[Sensor],
task_info: Dict[str, Any],
max_steps: int,
failed_end_reward: float = 0.0,
**kwargs,
) -> None:
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self._took_end_action: bool = False
self._success: Optional[bool] = False
self._subsampled_locations_from_which_obj_visible = None
# Get the geodesic distance to target from the environment and make sure it is
# a valid value
self.last_geodesic_distance = self.current_geodesic_dist_to_target()
self.start_distance = self.last_geodesic_distance
assert self.last_geodesic_distance is not None
# noinspection PyProtectedMember
self._shortest_path_follower = ShortestPathFollower(
cast(HabitatSim, env.env.sim), env.env._config.TASK.SUCCESS_DISTANCE, False
)
self._shortest_path_follower.mode = "geodesic_path"
self._rewards: List[float] = []
self._metrics = None
self.failed_end_reward = failed_end_reward
def current_geodesic_dist_to_target(self) -> Optional[float]:
metrics = self.env.env.get_metrics()
if metrics["distance_to_goal"] is None:
habitat_env = self.env.env
habitat_env.task.measurements.update_measures(
episode=habitat_env.current_episode, action=None, task=habitat_env.task
)
metrics = self.env.env.get_metrics()
return metrics["distance_to_goal"]
@property
def action_space(self):
return gym.spaces.Discrete(len(self._actions))
def reached_terminal_state(self) -> bool:
return self.env.env.episode_over
@classmethod
def class_action_names(cls, **kwargs) -> Tuple[str, ...]:
return cls._actions
def close(self) -> None:
self.env.stop()
def _step(self, action: Union[int, Sequence[int]]) -> RLStepResult:
assert isinstance(action, int)
action = cast(int, action)
action_str = self.class_action_names()[action]
self.env.step({"action": action_str})
if action_str == END:
self._took_end_action = True
self._success = self._is_goal_in_range()
self.last_action_success = self._success
else:
self.last_action_success = self.env.last_action_success
step_result = RLStepResult(
observation=self.get_observations(),
reward=self.judge(),
done=self.is_done(),
info={"last_action_success": self.last_action_success},
)
return step_result
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
assert mode in ["rgb", "depth"], "only rgb and depth rendering is implemented"
return self.env.current_frame["rgb"]
def _is_goal_in_range(self) -> bool:
return (
self.current_geodesic_dist_to_target() <= self.task_info["distance_to_goal"]
)
def judge(self) -> float:
reward = -0.01
new_geodesic_distance = self.current_geodesic_dist_to_target()
if self.last_geodesic_distance is None:
self.last_geodesic_distance = new_geodesic_distance
if self.last_geodesic_distance is not None:
if (
new_geodesic_distance is None
or new_geodesic_distance in [float("-inf"), float("inf")]
or np.isnan(new_geodesic_distance)
):
new_geodesic_distance = self.last_geodesic_distance
delta_distance_reward = self.last_geodesic_distance - new_geodesic_distance
reward += delta_distance_reward
self.last_geodesic_distance = new_geodesic_distance
if self.is_done():
reward += 10.0 if self._success else self.failed_end_reward
else:
get_logger().warning("Could not get geodesic distance from habitat env.")
self._rewards.append(float(reward))
return float(reward)
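    # Reward shaping summary for `judge` above (illustrative numbers): each step
    # costs 0.01, and decreasing the geodesic distance to the goal by 0.3m adds
    # 0.3, so such a step nets 0.3 - 0.01 = 0.29; a successful END adds 10.0,
    # while a failed END adds `failed_end_reward`.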
def metrics(self) -> Dict[str, Any]:
if not self.is_done():
return {}
_metrics = self.env.env.get_metrics()
metrics = {
**super(PointNavTask, self).metrics(),
"success": 1 * self._success,
"ep_length": self.num_steps_taken(),
"reward": np.sum(self._rewards),
"spl": _metrics["spl"] if _metrics["spl"] is not None else 0.0,
"dist_to_target": self.current_geodesic_dist_to_target(),
}
self._rewards = []
return metrics
def query_expert(self, **kwargs) -> Tuple[int, bool]:
if self._is_goal_in_range():
return self.class_action_names().index(END), True
target = self.task_info["target"]
habitat_action = self._shortest_path_follower.get_next_action(target)
if habitat_action == HabitatSimActions.MOVE_FORWARD:
return self.class_action_names().index(MOVE_AHEAD), True
elif habitat_action == HabitatSimActions.TURN_LEFT:
return self.class_action_names().index(ROTATE_LEFT), True
elif habitat_action == HabitatSimActions.TURN_RIGHT:
return self.class_action_names().index(ROTATE_RIGHT), True
else:
return 0, False
class ObjectNavTask(HabitatTask):
_actions = (MOVE_AHEAD, ROTATE_LEFT, ROTATE_RIGHT, END, LOOK_UP, LOOK_DOWN)
def __init__(
self,
env: HabitatEnvironment,
sensors: List[Sensor],
task_info: Dict[str, Any],
max_steps: int,
look_constraints: Optional[Tuple[int, int]] = None,
**kwargs,
) -> None:
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self.look_constraints = look_constraints
self._look_state = 0
self._took_end_action: bool = False
self._success: Optional[bool] = False
self._subsampled_locations_from_which_obj_visible = None
        # Get the geodesic distance to target from the environment and make sure it is
        # a valid value
self.last_geodesic_distance = self.current_geodesic_dist_to_target()
assert not (
self.last_geodesic_distance is None
or self.last_geodesic_distance in [float("-inf"), float("inf")]
or np.isnan(self.last_geodesic_distance)
), "Bad geodesic distance"
self._min_distance_to_goal = self.last_geodesic_distance
self._num_invalid_actions = 0
# noinspection PyProtectedMember
self._shortest_path_follower = ShortestPathFollower(
env.env.sim, env.env._config.TASK.SUCCESS.SUCCESS_DISTANCE, False
)
self._shortest_path_follower.mode = "geodesic_path"
self._rewards: List[float] = []
self._metrics = None
self.task_info["episode_id"] = self._episode_id
@property
def action_space(self):
return gym.spaces.Discrete(len(self._actions))
def reached_terminal_state(self) -> bool:
return self.env.env.episode_over
@classmethod
def class_action_names(cls, **kwargs) -> Tuple[str, ...]:
return cls._actions
def action_names(self, **kwargs) -> Tuple[str, ...]:
return self._actions
def close(self) -> None:
self.env.stop()
def current_geodesic_dist_to_target(self) -> Optional[float]:
metrics = self.env.env.get_metrics()
if metrics["distance_to_goal"] is None:
habitat_env = self.env.env
habitat_env.task.measurements.update_measures(
episode=habitat_env.current_episode, action=None, task=habitat_env.task
)
metrics = self.env.env.get_metrics()
return metrics["distance_to_goal"]
def _step(self, action: Union[int, Sequence[int]]) -> RLStepResult:
assert isinstance(action, int)
action = cast(int, action)
old_pos = self.get_agent_position_and_rotation()
action_str = self.action_names()[action]
self._actions_taken.append(action_str)
skip_action = False
if self.look_constraints is not None:
max_look_up, max_look_down = self.look_constraints
if action_str == LOOK_UP:
num_look_ups = self._look_state
# assert num_look_ups <= max_look_up
skip_action = num_look_ups >= max_look_up
self._look_state += 1
if action_str == LOOK_DOWN:
num_look_downs = -self._look_state
# assert num_look_downs <= max_look_down
skip_action = num_look_downs >= max_look_down
self._look_state -= 1
self._look_state = min(max(self._look_state, -max_look_down), max_look_up)
if not skip_action:
self.env.step({"action": action_str})
if action_str == END:
self._took_end_action = True
self._success = self._is_goal_in_range()
self.last_action_success = self._success
else:
self.last_action_success = self.env.last_action_success
step_result = RLStepResult(
observation=self.get_observations(),
reward=self.judge(),
done=self.is_done(),
info={"last_action_success": self.last_action_success},
)
new_pos = self.get_agent_position_and_rotation()
if np.all(old_pos == new_pos):
self._num_invalid_actions += 1
pos = self.get_agent_position_and_rotation()
self._positions.append(
{"x": pos[0], "y": pos[1], "z": pos[2], "rotation": pos[3]}
)
return step_result
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
assert mode in ["rgb", "depth"], "only rgb and depth rendering is implemented"
return self.env.current_frame["rgb"]
def _is_goal_in_range(self) -> bool:
# The habitat simulator will return an SPL value of 0.0 whenever the goal is not in range
return bool(self.env.env.get_metrics()["spl"])
def judge(self) -> float:
# Set default reward
reward = -0.01
# Get geodesic distance reward
new_geodesic_distance = self.current_geodesic_dist_to_target()
self._min_distance_to_goal = min(
new_geodesic_distance, self._min_distance_to_goal
)
if (
new_geodesic_distance is None
or new_geodesic_distance in [float("-inf"), float("inf")]
or np.isnan(new_geodesic_distance)
):
new_geodesic_distance = self.last_geodesic_distance
delta_distance_reward = self.last_geodesic_distance - new_geodesic_distance
reward += delta_distance_reward
if self._took_end_action:
reward += 10.0 if self._success else 0.0
# Get success reward
self._rewards.append(float(reward))
self.last_geodesic_distance = new_geodesic_distance
return float(reward)
def metrics(self) -> Dict[str, Any]:
self.task_info["taken_actions"] = self._actions_taken
self.task_info["action_names"] = self.action_names()
self.task_info["followed_path"] = self._positions
if not self.is_done():
return {}
else:
_metrics = self.env.env.get_metrics()
metrics = {
"success": self._success,
"ep_length": self.num_steps_taken(),
"total_reward": np.sum(self._rewards),
"spl": _metrics["spl"] if _metrics["spl"] is not None else 0.0,
"min_distance_to_target": self._min_distance_to_goal,
"num_invalid_actions": self._num_invalid_actions,
"task_info": self.task_info,
}
self._rewards = []
return metrics
def query_expert(self, **kwargs) -> Tuple[int, bool]:
if self._is_goal_in_range():
return self.class_action_names().index(END), True
target = self.task_info["target"]
action = self._shortest_path_follower.get_next_action(target)
return action, action is not None
| allenact-main | allenact_plugins/habitat_plugin/habitat_tasks.py |
from allenact.utils.system import ImportChecker
with ImportChecker(
"\n\nPlease install habitat following\n\n"
"https://allenact.org/installation/installation-framework/#installation-of-habitat\n\n"
):
import habitat
import habitat_sim
| allenact-main | allenact_plugins/habitat_plugin/__init__.py |
from typing import Any, Optional, Tuple, TYPE_CHECKING
import gym
import numpy as np
from pyquaternion import Quaternion
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import Task
from allenact.embodiedai.sensors.vision_sensors import RGBSensor, DepthSensor
from allenact.utils.misc_utils import prepare_locals_for_super
from allenact_plugins.habitat_plugin.habitat_environment import HabitatEnvironment
if TYPE_CHECKING:
from allenact_plugins.habitat_plugin.habitat_tasks import PointNavTask, ObjectNavTask # type: ignore
class RGBSensorHabitat(RGBSensor[HabitatEnvironment, Task[HabitatEnvironment]]):
# For backwards compatibility
def __init__(
self,
use_resnet_normalization: bool = False,
mean: Optional[np.ndarray] = np.array(
[[[0.485, 0.456, 0.406]]], dtype=np.float32
),
stdev: Optional[np.ndarray] = np.array(
[[[0.229, 0.224, 0.225]]], dtype=np.float32
),
height: Optional[int] = None,
width: Optional[int] = None,
uuid: str = "rgb",
output_shape: Optional[Tuple[int, ...]] = None,
output_channels: int = 3,
unnormalized_infimum: float = 0.0,
unnormalized_supremum: float = 1.0,
scale_first: bool = True,
**kwargs: Any
):
super().__init__(**prepare_locals_for_super(locals()))
def frame_from_env(
self, env: HabitatEnvironment, task: Optional[Task[HabitatEnvironment]]
) -> np.ndarray:
return env.current_frame["rgb"].copy()
class DepthSensorHabitat(DepthSensor[HabitatEnvironment, Task[HabitatEnvironment]]):
# For backwards compatibility
def __init__(
self,
use_resnet_normalization: Optional[bool] = None,
use_normalization: Optional[bool] = None,
mean: Optional[np.ndarray] = np.array([[0.5]], dtype=np.float32),
stdev: Optional[np.ndarray] = np.array([[0.25]], dtype=np.float32),
height: Optional[int] = None,
width: Optional[int] = None,
uuid: str = "depth",
output_shape: Optional[Tuple[int, ...]] = None,
output_channels: int = 1,
unnormalized_infimum: float = 0.0,
unnormalized_supremum: float = 5.0,
scale_first: bool = False,
**kwargs: Any
):
# Give priority to `use_normalization`; fall back to `use_resnet_normalization` for backward compatibility if it is unset
if use_resnet_normalization is not None and use_normalization is None:
use_normalization = use_resnet_normalization
elif use_normalization is None:
use_normalization = False
super().__init__(**prepare_locals_for_super(locals()))
def frame_from_env(
self, env: HabitatEnvironment, task: Optional[Task[HabitatEnvironment]]
) -> np.ndarray:
return env.current_frame["depth"].copy()
class TargetCoordinatesSensorHabitat(Sensor[HabitatEnvironment, "PointNavTask"]):
def __init__(
self, coordinate_dims: int, uuid: str = "target_coordinates_ind", **kwargs: Any
):
self.coordinate_dims = coordinate_dims
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self):
# Distance is a non-negative real and angle is normalized to the range (-Pi, Pi] or [-Pi, Pi)
return gym.spaces.Box(
np.float32(-3.15), np.float32(1000), shape=(self.coordinate_dims,)
)
def get_observation(
self,
env: HabitatEnvironment,
task: Optional["PointNavTask"],
*args: Any,
**kwargs: Any
) -> Any:
frame = env.current_frame
goal = frame["pointgoal_with_gps_compass"]
return goal
class TargetObjectSensorHabitat(Sensor[HabitatEnvironment, "ObjectNavTask"]):
def __init__(self, num_objects: int, uuid: str = "target_object_id", **kwargs: Any):
observation_space = self._get_observation_space(num_objects)
super().__init__(**prepare_locals_for_super(locals()))
@staticmethod
def _get_observation_space(num_objects: int):
return gym.spaces.Discrete(num_objects)
def get_observation(
self,
env: HabitatEnvironment,
task: Optional["ObjectNavTask"],
*args: Any,
**kwargs: Any
) -> Any:
frame = env.current_frame
goal = frame["objectgoal"][0]
return goal
class AgentCoordinatesSensorHabitat(Sensor[HabitatEnvironment, "PointNavTask"]):
def __init__(self, uuid: str = "agent_position_and_rotation", **kwargs: Any):
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
@staticmethod
def _get_observation_space():
return gym.spaces.Box(np.float32(-1000), np.float32(1000), shape=(4,))
@staticmethod
def get_observation(
env: HabitatEnvironment,
task: Optional["PointNavTask"],
*args: Any,
**kwargs: Any
) -> Any:
position = env.env.sim.get_agent_state().position
quaternion = Quaternion(env.env.sim.get_agent_state().rotation.components)
return np.array([position[0], position[1], position[2], quaternion.radians])
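# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream file): constructing the
# sensors above for a PointNav setup. The 224x224 resolution below is a
# placeholder value, not a default of this module.
#
#   sensors = [
#       RGBSensorHabitat(height=224, width=224, use_resnet_normalization=True),
#       DepthSensorHabitat(height=224, width=224, use_normalization=True),
#       TargetCoordinatesSensorHabitat(coordinate_dims=2),
#   ]
#
# Each sensor reads its observation out of `env.current_frame`, so these only
# work inside a task whose environment has already been reset/stepped.
# ---------------------------------------------------------------------------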
| allenact-main | allenact_plugins/habitat_plugin/habitat_sensors.py |
"""A wrapper for interacting with the Habitat environment."""
import os
from typing import Dict, Union, List, Optional
import numpy as np
import habitat
from allenact.utils.cache_utils import DynamicDistanceCache
from allenact.utils.system import get_logger
from habitat.config import Config
from habitat.core.dataset import Dataset
from habitat.core.simulator import Observations, AgentState, ShortestPathPoint
from habitat.tasks.nav.nav import NavigationEpisode as HabitatNavigationEpisode
class HabitatEnvironment:
def __init__(self, config: Config, dataset: Dataset, verbose: bool = False) -> None:
self.env = habitat.Env(config=config, dataset=dataset)
if not verbose:
os.environ["GLOG_minloglevel"] = "2"
os.environ["MAGNUM_LOG"] = "quiet"
# Track which goal from the episode's goal list is currently targeted
self.goal_index = 0
self.last_geodesic_distance = None
self.distance_cache = DynamicDistanceCache(rounding=1)
self._current_frame: Optional[np.ndarray] = None
@property
def scene_name(self) -> str:
return self.env.current_episode.scene_id
@property
def current_frame(self) -> np.ndarray:
assert self._current_frame is not None
return self._current_frame
def step(self, action_dict: Dict[str, Union[str, int]]) -> Observations:
obs = self.env.step(action_dict["action"])
self._current_frame = obs
return obs
def get_location(self) -> Optional[np.ndarray]:
return self.env.sim.get_agent_state().position
def get_rotation(self) -> Optional[List[float]]:
return self.env.sim.get_agent_state().rotation
def get_shortest_path(
self, source_state: AgentState, target_state: AgentState,
) -> List[ShortestPathPoint]:
return self.env.sim.action_space_shortest_path(source_state, [target_state])
def get_current_episode(self) -> HabitatNavigationEpisode:
return self.env.current_episode # type: ignore
# noinspection PyMethodMayBeStatic
def start(self):
get_logger().debug("No need to start a habitat_plugin env")
def stop(self):
self.env.close()
def reset(self):
self._current_frame = self.env.reset()
@property
def last_action_success(self) -> bool:
# For now, actions cannot fail
return True
@property
def num_episodes(self) -> int:
ep_iterator = self.env.episode_iterator
assert isinstance(ep_iterator, habitat.core.dataset.EpisodeIterator)
return len(ep_iterator.episodes)
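# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream file): the expected lifecycle
# of `HabitatEnvironment`, assuming `config` and `dataset` were built via
# `habitat.get_config` / `habitat.make_dataset`. The action value passed to
# `step` is a placeholder; valid values depend on the habitat task config.
#
#   env = HabitatEnvironment(config=config, dataset=dataset)
#   env.reset()                                   # populates current_frame
#   obs = env.step({"action": "MOVE_FORWARD"})    # placeholder action name
#   print(env.get_location(), env.num_episodes)
#   env.stop()
# ---------------------------------------------------------------------------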
| allenact-main | allenact_plugins/habitat_plugin/habitat_environment.py |
allenact-main | allenact_plugins/habitat_plugin/habitat_preprocessors.py |
|
from typing import List, Optional, Union, Callable, Any, Dict, Type
import gym
import habitat
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import TaskSampler
from allenact.utils.experiment_utils import Builder
from allenact_plugins.habitat_plugin.habitat_environment import HabitatEnvironment
from allenact_plugins.habitat_plugin.habitat_tasks import PointNavTask, ObjectNavTask # type: ignore
from habitat.config import Config
class PointNavTaskSampler(TaskSampler):
def __init__(
self,
env_config: Config,
sensors: List[Sensor],
max_steps: int,
action_space: gym.Space,
distance_to_goal: float,
filter_dataset_func: Optional[
Callable[[habitat.Dataset], habitat.Dataset]
] = None,
**task_init_kwargs,
) -> None:
self.grid_size = 0.25
self.env: Optional[HabitatEnvironment] = None
self.max_tasks: Optional[int] = None
self.reset_tasks: Optional[int] = None
self.sensors = sensors
self.max_steps = max_steps
self._action_space = action_space
self.env_config = env_config
self.distance_to_goal = distance_to_goal
self.seed: Optional[int] = None
self.filter_dataset_func = filter_dataset_func
self._last_sampled_task: Optional[PointNavTask] = None
self.task_init_kwargs = task_init_kwargs
def _create_environment(self) -> HabitatEnvironment:
dataset = habitat.make_dataset(
self.env_config.DATASET.TYPE, config=self.env_config.DATASET
)
if len(dataset.episodes) == 0:
raise RuntimeError("Empty input dataset.")
if self.filter_dataset_func is not None:
dataset = self.filter_dataset_func(dataset)
if len(dataset.episodes) == 0:
raise RuntimeError("Empty dataset after filtering.")
env = HabitatEnvironment(config=self.env_config, dataset=dataset)
self.max_tasks = None if self.env_config.MODE == "train" else env.num_episodes
self.reset_tasks = self.max_tasks
return env
@property
def length(self) -> Union[int, float]:
"""
@return: Number of total tasks remaining that can be sampled. Can be float('inf').
"""
return float("inf") if self.max_tasks is None else self.max_tasks
@property
def total_unique(self) -> Union[int, float, None]:
return self.env.num_episodes
@property
def last_sampled_task(self) -> Optional[PointNavTask]:
return self._last_sampled_task
def close(self) -> None:
if self.env is not None:
self.env.stop()
@property
def all_observation_spaces_equal(self) -> bool:
"""
@return: True if all Tasks that can be sampled by this sampler have the
same observation space. Otherwise False.
"""
return True
def next_task(self, force_advance_scene=False) -> Optional[PointNavTask]:
if self.max_tasks is not None and self.max_tasks <= 0:
return None
if self.env is not None:
self.env.reset()
else:
self.env = self._create_environment()
self.env.reset()
ep_info = self.env.get_current_episode()
assert len(ep_info.goals) == 1
target = ep_info.goals[0].position
task_info = {
"target": target,
"distance_to_goal": self.distance_to_goal,
"episode_id": ep_info.episode_id,
"scene_id": ep_info.scene_id.split("/")[-1],
**ep_info.info,
}
self._last_sampled_task = PointNavTask(
env=self.env,
sensors=self.sensors,
task_info=task_info,
max_steps=self.max_steps,
action_space=self._action_space,
**self.task_init_kwargs,
)
if self.max_tasks is not None:
self.max_tasks -= 1
return self._last_sampled_task
def reset(self):
self.max_tasks = self.reset_tasks
def set_seed(self, seed: int):
self.seed = seed
if seed is not None:
self.env.env.seed(seed)
class ObjectNavTaskSampler(TaskSampler):
def __init__(
self,
env_config: Config,
sensors: List[Sensor],
max_steps: int,
action_space: gym.Space,
filter_dataset_func: Optional[
Callable[[habitat.Dataset], habitat.Dataset]
] = None,
task_kwargs: Dict[str, Any] = None,
objectnav_task_type: Union[
Type[ObjectNavTask], Builder[ObjectNavTask]
] = ObjectNavTask,
**kwargs,
) -> None:
self.grid_size = 0.25
self.env: Optional[HabitatEnvironment] = None
self.max_tasks: Optional[int] = None
self.reset_tasks: Optional[int] = None
self.sensors = sensors
self.max_steps = max_steps
self._action_space = action_space
self.env_config = env_config
self.seed: Optional[int] = None
self.filter_dataset_func = filter_dataset_func
self.objectnav_task_type = objectnav_task_type
self.task_kwargs = {} if task_kwargs is None else task_kwargs
self._last_sampled_task: Optional[ObjectNavTask] = None
def _create_environment(self) -> HabitatEnvironment:
dataset = habitat.make_dataset(
self.env_config.DATASET.TYPE, config=self.env_config.DATASET
)
if self.filter_dataset_func is not None:
dataset = self.filter_dataset_func(dataset)
if len(dataset.episodes) == 0:
raise RuntimeError("Empty dataset after filtering.")
env = HabitatEnvironment(config=self.env_config, dataset=dataset)
self.max_tasks = (
None if self.env_config.MODE == "train" else env.num_episodes
) # mp3d objectnav val -> 2184
self.reset_tasks = self.max_tasks
return env
@property
def length(self) -> Union[int, float]:
"""
@return: Number of total tasks remaining that can be sampled. Can be float('inf').
"""
return float("inf") if self.max_tasks is None else self.max_tasks
@property
def total_unique(self) -> Union[int, float, None]:
return self.env.num_episodes
@property
def last_sampled_task(self) -> Optional[ObjectNavTask]:
return self._last_sampled_task
def close(self) -> None:
if self.env is not None:
self.env.stop()
@property
def all_observation_spaces_equal(self) -> bool:
"""
@return: True if all Tasks that can be sampled by this sampler have the
same observation space. Otherwise False.
"""
return True
def next_task(self, force_advance_scene=False) -> Optional[ObjectNavTask]:
if self.max_tasks is not None and self.max_tasks <= 0:
return None
if self.env is not None:
if force_advance_scene:
self.env.env._episode_iterator._forced_scene_switch()
self.env.env._episode_iterator._set_shuffle_intervals()
self.env.reset()
else:
self.env = self._create_environment()
self.env.reset()
ep_info = self.env.get_current_episode()
target_categories = {g.object_category for g in ep_info.goals}
assert len(target_categories) == 1
target_category = list(target_categories)[0]
task_info = {
"target_category": target_category,
"episode_id": ep_info.episode_id,
"scene_id": ep_info.scene_id.split("/")[-1],
**ep_info.info,
}
self._last_sampled_task = self.objectnav_task_type(
env=self.env,
sensors=self.sensors,
task_info=task_info,
max_steps=self.max_steps,
action_space=self._action_space,
**self.task_kwargs,
)
if self.max_tasks is not None:
self.max_tasks -= 1
return self._last_sampled_task
def reset(self):
self.max_tasks = self.reset_tasks
def set_seed(self, seed: int):
self.seed = seed
if seed is not None:
self.env.env.seed(seed)
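# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream file): a typical evaluation
# loop over a sampler. In validation/test mode `max_tasks` is finite and
# `next_task` eventually returns None, so iteration terminates; in train
# mode it samples forever.
#
#   sampler = PointNavTaskSampler(
#       env_config=config, sensors=sensors, max_steps=500,
#       action_space=action_space, distance_to_goal=0.2,
#   )
#   while (task := sampler.next_task()) is not None:
#       while not task.is_done():
#           task.step(action=int(action_space.sample()))
#   sampler.close()
# ---------------------------------------------------------------------------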
| allenact-main | allenact_plugins/habitat_plugin/habitat_task_samplers.py |
import os
from typing import List
import habitat
from allenact_plugins.habitat_plugin.habitat_constants import (
HABITAT_BASE,
HABITAT_CONFIGS_DIR,
)
from habitat import Config
def construct_env_configs(
config: Config, allow_scene_repeat: bool = False,
) -> List[Config]:
"""Create list of Habitat Configs for training on multiple processes To
allow better performance, dataset are split into small ones for each
individual env, grouped by scenes.
# Parameters
config : configs that contain num_processes as well as information
necessary to create individual environments.
allow_scene_repeat: if `True` and the number of distinct scenes
in the dataset is less than the total number of processes this will
result in scenes being repeated across processes. If `False`, then
if the total number of processes is greater than the number of scenes,
this will result in a RuntimeError exception being raised.
# Returns
List of Configs, one for each process.
"""
config.freeze()
num_processes = config.NUM_PROCESSES
configs = []
dataset = habitat.make_dataset(config.DATASET.TYPE)
scenes = dataset.get_scenes_to_load(config.DATASET)
if len(scenes) > 0:
if len(scenes) < num_processes:
if not allow_scene_repeat:
raise RuntimeError(
"reduce the number of processes as there aren't enough number of scenes."
)
else:
scenes = (scenes * (1 + (num_processes // len(scenes))))[:num_processes]
scene_splits: List[List] = [[] for _ in range(num_processes)]
for idx, scene in enumerate(scenes):
scene_splits[idx % len(scene_splits)].append(scene)
assert sum(map(len, scene_splits)) == len(scenes)
for i in range(num_processes):
task_config = config.clone()
task_config.defrost()
if len(scenes) > 0:
task_config.DATASET.CONTENT_SCENES = scene_splits[i]
if len(config.SIMULATOR_GPU_IDS) == 0:
task_config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = -1
else:
task_config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = config.SIMULATOR_GPU_IDS[
i % len(config.SIMULATOR_GPU_IDS)
]
task_config.freeze()
configs.append(task_config.clone())
return configs
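# Illustrative worked example (not part of the upstream file): with 5 scenes
# ["A", "B", "C", "D", "E"] and num_processes == 2, the round-robin split
# above yields scene_splits == [["A", "C", "E"], ["B", "D"]].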
def construct_env_configs_mp3d(config: Config) -> List[Config]:
r"""Create list of Habitat Configs for training on multiple processes
To allow better performance, dataset are split into small ones for
each individual env, grouped by scenes.
Args:
config: configs that contain num_processes as well as information
necessary to create individual environments.
Returns:
List of Configs, one for each process
"""
config.freeze()
num_processes = config.NUM_PROCESSES
configs = []
# dataset = habitat.make_dataset(config.DATASET.TYPE)
# scenes = dataset.get_scenes_to_load(config.DATASET)
if num_processes == 1:
scene_splits = [["pRbA3pwrgk9"]]
else:
small = [
"rPc6DW4iMge",
"e9zR4mvMWw7",
"uNb9QFRL6hY",
"qoiz87JEwZ2",
"sKLMLpTHeUy",
"s8pcmisQ38h",
"759xd9YjKW5",
"XcA2TqTSSAj",
"SN83YJsR3w2",
"8WUmhLawc2A",
"JeFG25nYj2p",
"17DRP5sb8fy",
"Uxmj2M2itWa",
"XcA2TqTSSAj",
"SN83YJsR3w2",
"8WUmhLawc2A",
"JeFG25nYj2p",
"17DRP5sb8fy",
"Uxmj2M2itWa",
"D7N2EKCX4Sj",
"b8cTxDM8gDG",
"sT4fr6TAbpF",
"S9hNv5qa7GM",
"82sE5b5pLXE",
"pRbA3pwrgk9",
"aayBHfsNo7d",
"cV4RVeZvu5T",
"i5noydFURQK",
"YmJkqBEsHnH",
"jh4fc5c5qoQ",
"VVfe2KiqLaN",
"29hnd4uzFmX",
"Pm6F8kyY3z2",
"JF19kD82Mey",
"GdvgFV5R1Z5",
"HxpKQynjfin",
"vyrNrziPKCB",
]
med = [
"V2XKFyX4ASd",
"VFuaQ6m2Qom",
"ZMojNkEp431",
"5LpN3gDmAk7",
"r47D5H71a5s",
"ULsKaCPVFJR",
"E9uDoFAP3SH",
"kEZ7cmS4wCh",
"ac26ZMwG7aT",
"dhjEzFoUFzH",
"mJXqzFtmKg4",
"p5wJjkQkbXX",
"Vvot9Ly1tCj",
"EDJbREhghzL",
"VzqfbhrpDEA",
"7y3sRwLe3Va",
]
scene_splits = [[] for _ in range(config.NUM_PROCESSES)]
distribute(
small,
scene_splits,
num_gpus=8,
procs_per_gpu=3,
proc_offset=1,
scenes_per_process=2,
)
distribute(
med,
scene_splits,
num_gpus=8,
procs_per_gpu=3,
proc_offset=0,
scenes_per_process=1,
)
# gpu0 = [['pRbA3pwrgk9', '82sE5b5pLXE', 'S9hNv5qa7GM'],
# ['Uxmj2M2itWa', '17DRP5sb8fy', 'JeFG25nYj2p'],
# ['5q7pvUzZiYa', '759xd9YjKW5', 's8pcmisQ38h'],
# ['e9zR4mvMWw7', 'rPc6DW4iMge', 'vyrNrziPKCB']]
# gpu1 = [['sT4fr6TAbpF', 'b8cTxDM8gDG', 'D7N2EKCX4Sj'],
# ['8WUmhLawc2A', 'SN83YJsR3w2', 'XcA2TqTSSAj'],
# ['sKLMLpTHeUy', 'qoiz87JEwZ2', 'uNb9QFRL6hY'],
# ['V2XKFyX4ASd', 'VFuaQ6m2Qom', 'ZMojNkEp431']]
# gpu2 = [['5LpN3gDmAk7', 'r47D5H71a5s', 'ULsKaCPVFJR', 'E9uDoFAP3SH'],
# ['VVfe2KiqLaN', 'jh4fc5c5qoQ', 'YmJkqBEsHnH'], # small
# ['i5noydFURQK', 'cV4RVeZvu5T', 'aayBHfsNo7d']] # small
# gpu3 = [['kEZ7cmS4wCh', 'ac26ZMwG7aT', 'dhjEzFoUFzH'],
# ['mJXqzFtmKg4', 'p5wJjkQkbXX', 'Vvot9Ly1tCj']]
# gpu4 = [['EDJbREhghzL', 'VzqfbhrpDEA', '7y3sRwLe3Va'],
# ['ur6pFq6Qu1A', 'PX4nDJXEHrG', 'PuKPg4mmafe']]
# gpu5 = [['r1Q1Z4BcV1o', 'gTV8FGcVJC9', '1pXnuDYAj8r'],
# ['JF19kD82Mey', 'Pm6F8kyY3z2', '29hnd4uzFmX']] # small
# gpu6 = [['VLzqgDo317F', '1LXtFkjw3qL'],
# ['HxpKQynjfin', 'gZ6f7yhEvPG', 'GdvgFV5R1Z5']] # small
# gpu7 = [['D7G3Y4RVNrH', 'B6ByNegPMKs']]
#
# scene_splits = gpu0 + gpu1 + gpu2 + gpu3 + gpu4 + gpu5 + gpu6 + gpu7
for i in range(num_processes):
task_config = config.clone()
task_config.defrost()
task_config.DATASET.CONTENT_SCENES = scene_splits[i]
task_config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = config.SIMULATOR_GPU_IDS[
i % len(config.SIMULATOR_GPU_IDS)
]
task_config.freeze()
configs.append(task_config.clone())
return configs
def distribute(
data: List[str],
scene_splits: List[List],
num_gpus=8,
procs_per_gpu=4,
proc_offset=0,
scenes_per_process=1,
) -> None:
for idx, scene in enumerate(data):
i = (idx // num_gpus) % scenes_per_process
j = idx % num_gpus
scene_splits[j * procs_per_gpu + i + proc_offset].append(scene)
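# Illustrative worked example (not part of the upstream file): with
# num_gpus=8, procs_per_gpu=3, proc_offset=1, scenes_per_process=2, the
# scene at idx=9 lands in slot j * procs_per_gpu + i + proc_offset where
# j = 9 % 8 = 1 and i = (9 // 8) % 2 = 1, i.e. scene_splits[1 * 3 + 1 + 1]
# == scene_splits[5].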
def get_habitat_config(path: str):
assert (
path[-4:].lower() == ".yml" or path[-5:].lower() == ".yaml"
), f"path ({path}) must be a .yml or .yaml file."
if not os.path.isabs(path):
candidate_paths = [
os.path.join(d, path)
for d in [os.getcwd(), HABITAT_BASE, HABITAT_CONFIGS_DIR]
]
success = False
for candidate_path in candidate_paths:
if os.path.exists(candidate_path):
success = True
path = candidate_path
break
if not success:
raise FileNotFoundError(
f"Could not find config file with given relative path {path}. Tried the following possible absolute"
f" paths {candidate_paths}."
)
elif not os.path.exists(path):
raise FileNotFoundError(f"Could not find config file with given path {path}.")
return habitat.get_config(path)
| allenact-main | allenact_plugins/habitat_plugin/habitat_utils.py |
import os
import cv2
import habitat
from pyquaternion import Quaternion
from allenact_plugins.habitat_plugin.habitat_constants import (
HABITAT_CONFIGS_DIR,
HABITAT_DATASETS_DIR,
HABITAT_SCENE_DATASETS_DIR,
)
from allenact_plugins.habitat_plugin.habitat_utils import get_habitat_config
FORWARD_KEY = "w"
LEFT_KEY = "a"
RIGHT_KEY = "d"
FINISH = "f"
def transform_rgb_bgr(image):
return image[:, :, [2, 1, 0]]
def agent_demo():
config = get_habitat_config(
os.path.join(HABITAT_CONFIGS_DIR, "tasks/pointnav.yaml")
)
config.defrost()
config.DATASET.DATA_PATH = os.path.join(
HABITAT_DATASETS_DIR, "pointnav/gibson/v1/train/train.json.gz"
)
config.DATASET.SCENES_DIR = HABITAT_SCENE_DATASETS_DIR
config.DATASET.CONTENT_SCENES = ["Adrian"]
config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = 0
config.freeze()
env = habitat.Env(config=config)
print("Environment creation successful")
observations = env.reset()
cv2.imshow("RGB", transform_rgb_bgr(observations["rgb"]))
print("Agent stepping around inside environment.")
count_steps = 0
action = None
while not env.episode_over:
keystroke = cv2.waitKey(0)
if keystroke == ord(FORWARD_KEY):
action = 1
print("action: FORWARD")
elif keystroke == ord(LEFT_KEY):
action = 2
print("action: LEFT")
elif keystroke == ord(RIGHT_KEY):
action = 3
print("action: RIGHT")
elif keystroke == ord(FINISH):
action = 0
print("action: FINISH")
else:
print("INVALID KEY")
continue
observations = env.step(action)
count_steps += 1
print("Position:", env.sim.get_agent_state().position)
print("Quaternions:", env.sim.get_agent_state().rotation)
quat = Quaternion(env.sim.get_agent_state().rotation.components)
print(quat.radians)
cv2.imshow("RGB", transform_rgb_bgr(observations["rgb"]))
print("Episode finished after {} steps.".format(count_steps))
if action == habitat.SimulatorActions.STOP and observations["pointgoal"][0] < 0.2:
print("you successfully navigated to destination point")
else:
print("your navigation was unsuccessful")
if __name__ == "__main__":
agent_demo()
| allenact-main | allenact_plugins/habitat_plugin/scripts/agent_demo.py |
allenact-main | allenact_plugins/habitat_plugin/scripts/__init__.py |
|
import os
import habitat
import numpy as np
from tqdm import tqdm
from allenact_plugins.habitat_plugin.habitat_constants import (
HABITAT_CONFIGS_DIR,
HABITAT_DATA_BASE,
HABITAT_SCENE_DATASETS_DIR,
HABITAT_DATASETS_DIR,
)
from allenact_plugins.habitat_plugin.habitat_utils import get_habitat_config
map_resolution = 0.05
map_size = 960
def make_map(env, scene):
vacancy_map = np.zeros([map_size, map_size], dtype=bool)
for i in tqdm(range(map_size)):
for j in range(map_size):
x = (i - map_size // 2) * map_resolution
z = (j - map_size // 2) * map_resolution
vacancy_map[j, i] = env.sim.is_navigable([x, 0.0, z])
np.save(
os.path.join(HABITAT_DATA_BASE, "map_data/pointnav/v1/gibson/data/" + scene),
vacancy_map,
)
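# Illustrative note (not part of the upstream file): with map_size == 960 and
# map_resolution == 0.05, grid index i maps to world coordinate
# x = (i - 480) * 0.05, so the vacancy map spans roughly [-24.0 m, +24.0 m)
# on each axis at 5 cm resolution.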
def generate_maps():
config = get_habitat_config(
os.path.join(HABITAT_CONFIGS_DIR, "tasks/pointnav.yaml")
)
config.defrost()
config.DATASET.DATA_PATH = os.path.join(
HABITAT_DATASETS_DIR, "pointnav/gibson/v1/train/train.json.gz"
)
config.DATASET.SCENES_DIR = HABITAT_SCENE_DATASETS_DIR
config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = 0
config.freeze()
dataset = habitat.make_dataset(config.DATASET.TYPE)
scenes = dataset.get_scenes_to_load(config.DATASET)
for scene in scenes:
print("Making environment for:", scene)
config.defrost()
config.DATASET.CONTENT_SCENES = [scene]
config.freeze()
env = habitat.Env(config=config)
make_map(env, scene)
env.close()
if __name__ == "__main__":
generate_maps()
| allenact-main | allenact_plugins/habitat_plugin/scripts/make_map.py |
allenact-main | allenact_plugins/habitat_plugin/data/__init__.py |
|
from typing import Optional, Tuple, cast
import gym
import torch
import torch.nn as nn
from gym.spaces.dict import Dict as SpaceDict
from allenact.algorithms.onpolicy_sync.policy import (
ActorCriticModel,
Memory,
ObservationType,
)
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import ActorCriticOutput, DistributionType
class LinearAdvisorActorCritic(ActorCriticModel[CategoricalDistr]):
def __init__(
self,
input_uuid: str,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
ensure_same_init_aux_weights: bool = True,
):
super().__init__(action_space=action_space, observation_space=observation_space)
assert (
input_uuid in observation_space.spaces
), "LinearActorCritic expects only a single observational input."
self.input_uuid = input_uuid
box_space: gym.spaces.Box = observation_space[self.input_uuid]
assert isinstance(box_space, gym.spaces.Box), (
"LinearActorCritic requires that"
"observation space corresponding to the input key is a Box space."
)
assert len(box_space.shape) == 1
self.in_dim = box_space.shape[0]
self.num_actions = action_space.n
self.linear = nn.Linear(self.in_dim, 2 * self.num_actions + 1)
nn.init.orthogonal_(self.linear.weight)
if ensure_same_init_aux_weights:
# Ensure main actor / auxiliary actor start with the same weights
self.linear.weight.data[self.num_actions : -1, :] = self.linear.weight[
: self.num_actions, :
]
nn.init.constant_(self.linear.bias, 0)
# noinspection PyMethodMayBeStatic
def _recurrent_memory_specification(self):
return None
def forward( # type:ignore
self,
observations: ObservationType,
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
out = self.linear(cast(torch.Tensor, observations[self.input_uuid]))
main_logits = out[..., : self.num_actions]
aux_logits = out[..., self.num_actions : -1]
values = out[..., -1:]
# noinspection PyArgumentList
return (
ActorCriticOutput(
distributions=cast(
DistributionType, CategoricalDistr(logits=main_logits)
), # step x sampler x ...
values=cast(
torch.FloatTensor, values.view(values.shape[:2] + (-1,))
), # step x sampler x flattened
extras={"auxiliary_distributions": CategoricalDistr(logits=aux_logits)},
),
None,
)
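# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream file): the single linear
# layer above packs [main logits | auxiliary logits | value] into one output
# of width 2 * num_actions + 1. For num_actions == 4:
#
#   out = self.linear(x)        # (..., 9)
#   main_logits = out[..., :4]  # main policy head
#   aux_logits  = out[..., 4:8] # auxiliary (advisor) policy head
#   values      = out[..., 8:]  # state-value estimate
# ---------------------------------------------------------------------------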
| allenact-main | allenact_plugins/lighthouse_plugin/lighthouse_models.py |
import copy
import curses
import itertools
import time
from functools import lru_cache
from typing import Optional, Tuple, Any, List, Union, cast
import numpy as np
from gym.utils import seeding
from gym_minigrid import minigrid
EMPTY = 0
GOAL = 1
WRONG_CORNER = 2
WALL = 3
@lru_cache(1000)
def _get_world_corners(world_dim: int, world_radius: int):
if world_radius == 0:
return ((0,) * world_dim,)
def combination_to_vec(comb) -> Tuple[int, ...]:
vec = [world_radius] * world_dim
for k in comb:
vec[k] *= -1
return tuple(vec)
return tuple(
sorted(
combination_to_vec(comb)
for i in range(world_dim + 1)
for comb in itertools.combinations(list(range(world_dim)), i)
)
)
@lru_cache(1000)
def _base_world_tensor(world_dim: int, world_radius: int):
tensor = np.full((2 * world_radius + 1,) * world_dim, fill_value=EMPTY)
slices: List[Union[slice, int]] = [slice(0, 2 * world_radius + 1)] * world_dim
for i in range(world_dim):
tmp_slices = [*slices]
tmp_slices[i] = 0
tensor[tuple(tmp_slices)] = WALL
tmp_slices[i] = 2 * world_radius
tensor[tuple(tmp_slices)] = WALL
for corner in _get_world_corners(world_dim=world_dim, world_radius=world_radius):
tensor[tuple([loc + world_radius for loc in corner])] = WRONG_CORNER
return tensor
class LightHouseEnvironment(object):
EMPTY = 0
GOAL = 1
WRONG_CORNER = 2
WALL = 3
SPACE_LEVELS = [EMPTY, GOAL, WRONG_CORNER, WALL]
def __init__(self, world_dim: int, world_radius: int, **kwargs):
self.world_dim = world_dim
self.world_radius = world_radius
self.world_corners = np.array(
_get_world_corners(world_dim=world_dim, world_radius=world_radius),
dtype=int,
)
self.curses_screen: Optional[Any] = None
self.world_tensor: np.ndarray = copy.deepcopy(
_base_world_tensor(world_radius=world_radius, world_dim=world_dim)
)
self.current_position = np.zeros(world_dim, dtype=int)
self.closest_distance_to_corners = np.full(
2 ** world_dim, fill_value=world_radius, dtype=int
)
self.positions: List[Tuple[int, ...]] = [tuple(self.current_position)]
self.goal_position: Optional[np.ndarray] = None
self.last_action: Optional[int] = None
self.seed: Optional[int] = None
self.np_seeded_random_gen: Optional[np.random.RandomState] = None
self.set_seed(seed=int(kwargs.get("seed", np.random.randint(0, 2 ** 31 - 1))))
self.random_reset()
def set_seed(self, seed: int):
# More information about why `np_seeded_random_gen` is used rather than just `np.random.seed`
# can be found at gym/utils/seeding.py
# There's literature indicating that having linear correlations between seeds of multiple
# PRNG's can correlate the outputs
self.seed = seed
self.np_seeded_random_gen, _ = cast(
Tuple[np.random.RandomState, Any], seeding.np_random(self.seed)
)
def random_reset(self, goal_position: Optional[bool] = None):
self.last_action = None
self.world_tensor = copy.deepcopy(
_base_world_tensor(world_radius=self.world_radius, world_dim=self.world_dim)
)
if goal_position is None:
self.goal_position = self.world_corners[
self.np_seeded_random_gen.randint(low=0, high=len(self.world_corners))
]
self.world_tensor[
tuple(cast(np.ndarray, self.world_radius + self.goal_position))
] = GOAL
if self.curses_screen is not None:
curses.nocbreak()
self.curses_screen.keypad(False)
curses.echo()
curses.endwin()
self.curses_screen = None
self.current_position = np.zeros(self.world_dim, dtype=int)
self.closest_distance_to_corners = np.abs(
(self.world_corners - self.current_position.reshape(1, -1))
).max(1)
self.positions = [tuple(self.current_position)]
def step(self, action: int) -> bool:
assert 0 <= action < 2 * self.world_dim
self.last_action = action
delta = -1 if action >= self.world_dim else 1
ind = action % self.world_dim
old = self.current_position[ind]
new = min(max(delta + old, -self.world_radius), self.world_radius)
if new == old:
self.positions.append(self.positions[-1])
return False
else:
self.current_position[ind] = new
self.closest_distance_to_corners = np.minimum(
np.abs((self.world_corners - self.current_position.reshape(1, -1))).max(
1
),
self.closest_distance_to_corners,
)
self.positions.append(tuple(self.current_position))
return True
def render(self, mode="array", **kwargs):
if mode == "array":
arr = copy.deepcopy(self.world_tensor)
arr[tuple(self.world_radius + self.current_position)] = 9
return arr
elif mode == "curses":
if self.world_dim == 1:
space_list = ["_"] * (1 + 2 * self.world_radius)
goal_ind = self.goal_position[0] + self.world_radius
space_list[goal_ind] = "G"
space_list[2 * self.world_radius - goal_ind] = "W"
space_list[self.current_position[0] + self.world_radius] = "X"
to_print = " ".join(space_list)
if self.curses_screen is None:
self.curses_screen = curses.initscr()
self.curses_screen.addstr(0, 0, to_print)
if "extra_text" in kwargs:
self.curses_screen.addstr(1, 0, kwargs["extra_text"])
self.curses_screen.refresh()
elif self.world_dim == 2:
space_list = [
["_"] * (1 + 2 * self.world_radius)
for _ in range(1 + 2 * self.world_radius)
]
for row_ind in range(1 + 2 * self.world_radius):
for col_ind in range(1 + 2 * self.world_radius):
if self.world_tensor[row_ind][col_ind] == self.GOAL:
space_list[row_ind][col_ind] = "G"
if self.world_tensor[row_ind][col_ind] == self.WRONG_CORNER:
space_list[row_ind][col_ind] = "C"
if self.world_tensor[row_ind][col_ind] == self.WALL:
space_list[row_ind][col_ind] = "W"
if (
(row_ind, col_ind)
== self.world_radius + self.current_position
).all():
space_list[row_ind][col_ind] = "X"
if self.curses_screen is None:
self.curses_screen = curses.initscr()
for i, sl in enumerate(space_list):
self.curses_screen.addstr(i, 0, " ".join(sl))
self.curses_screen.addstr(len(space_list), 0, str(self.state()))
if "extra_text" in kwargs:
self.curses_screen.addstr(
len(space_list) + 1, 0, kwargs["extra_text"]
)
self.curses_screen.refresh()
else:
raise NotImplementedError("Cannot render worlds of > 2 dimensions.")
elif mode == "minigrid":
height = width = 2 * self.world_radius + 2
grid = minigrid.Grid(width, height)
# Generate the surrounding walls
grid.horz_wall(0, 0)
grid.horz_wall(0, height - 1)
grid.vert_wall(0, 0)
grid.vert_wall(width - 1, 0)
# Place fake agent at the center
agent_pos = np.array(self.positions[-1]) + 1 + self.world_radius
# grid.set(*agent_pos, None)
agent = minigrid.Goal()
agent.color = "red"
grid.set(agent_pos[0], agent_pos[1], agent)
agent.init_pos = tuple(agent_pos)
agent.cur_pos = tuple(agent_pos)
goal_pos = self.goal_position + self.world_radius
goal = minigrid.Goal()
grid.set(goal_pos[0], goal_pos[1], goal)
goal.init_pos = tuple(goal_pos)
goal.cur_pos = tuple(goal_pos)
highlight_mask = np.zeros((height, width), dtype=bool)
minx, maxx = max(1, agent_pos[0] - 5), min(height - 1, agent_pos[0] + 5)
miny, maxy = max(1, agent_pos[1] - 5), min(height - 1, agent_pos[1] + 5)
highlight_mask[minx : (maxx + 1), miny : (maxy + 1)] = True
img = grid.render(
minigrid.TILE_PIXELS, agent_pos, None, highlight_mask=highlight_mask
)
return img
else:
raise NotImplementedError("Unknown render mode {}.".format(mode))
time.sleep(0.0 if "sleep_time" not in kwargs else kwargs["sleep_time"])
def close(self):
if self.curses_screen is not None:
curses.nocbreak()
self.curses_screen.keypad(False)
curses.echo()
curses.endwin()
@staticmethod
def optimal_ave_ep_length(world_dim: int, world_radius: int, view_radius: int):
if world_dim == 1:
max_steps_wrong_dir = max(world_radius - view_radius, 0)
return max_steps_wrong_dir + world_radius
elif world_dim == 2:
tau = 2 * (world_radius - view_radius)
average_steps_needed = 0.25 * (4 * 2 * view_radius + 10 * tau)
return average_steps_needed
else:
raise NotImplementedError(
"`optimal_average_ep_length` is only implemented"
" for when the `world_dim` is 1 or 2 ({} given).".format(world_dim)
)
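# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream file): stepping the gridworld
# directly. Actions 0..world_dim-1 move +1 along an axis; actions
# world_dim..2*world_dim-1 move -1 along it.
#
#   env = LightHouseEnvironment(world_dim=2, world_radius=5, seed=1)
#   moved = env.step(0)          # +1 along the first axis; False if blocked
#   print(env.render("array"))   # 9 marks the agent, 1 the goal corner
#   env.close()
# ---------------------------------------------------------------------------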
| allenact-main | allenact_plugins/lighthouse_plugin/lighthouse_environment.py |
import abc
import string
from typing import List, Dict, Any, Optional, Tuple, Union, Sequence, cast
import gym
import numpy as np
from gym.utils import seeding
from allenact.base_abstractions.misc import RLStepResult
from allenact.base_abstractions.sensor import Sensor, SensorSuite
from allenact.base_abstractions.task import Task, TaskSampler
from allenact.utils.experiment_utils import set_seed
from allenact.utils.system import get_logger
from allenact_plugins.lighthouse_plugin.lighthouse_environment import (
LightHouseEnvironment,
)
from allenact_plugins.lighthouse_plugin.lighthouse_sensors import get_corner_observation
DISCOUNT_FACTOR = 0.99
STEP_PENALTY = -0.01
FOUND_TARGET_REWARD = 1.0
class LightHouseTask(Task[LightHouseEnvironment], abc.ABC):
"""Defines an abstract embodied task in the light house gridworld.
# Attributes
env : The light house environment.
sensor_suite: Collection of sensors formed from the `sensors` argument in the initializer.
task_info : Dictionary of (k, v) pairs defining task goals and other task information.
max_steps : The maximum number of steps an agent can take an in the task before it is considered failed.
observation_space: The observation space returned on each step from the sensors.
"""
def __init__(
self,
env: LightHouseEnvironment,
sensors: Union[SensorSuite, List[Sensor]],
task_info: Dict[str, Any],
max_steps: int,
**kwargs,
) -> None:
"""Initializer.
See class documentation for parameter definitions.
"""
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self._last_action: Optional[int] = None
@property
def last_action(self) -> int:
return self._last_action
@last_action.setter
def last_action(self, value: int):
self._last_action = value
def step(self, action: Union[int, Sequence[int]]) -> RLStepResult:
assert isinstance(action, int)
action = cast(int, action)
self.last_action = action
return super(LightHouseTask, self).step(action=action)
def render(self, mode: str = "array", *args, **kwargs) -> np.ndarray:
if mode == "array":
return self.env.render(mode, **kwargs)
elif mode in ["rgb", "rgb_array", "human"]:
arr = self.env.render("array", **kwargs)
colors = np.array(
[
(31, 119, 180),
(255, 127, 14),
(44, 160, 44),
(214, 39, 40),
(148, 103, 189),
(140, 86, 75),
(227, 119, 194),
(127, 127, 127),
(188, 189, 34),
(23, 190, 207),
],
dtype=np.uint8,
)
return colors[arr]
else:
raise NotImplementedError("Render mode '{}' is not supported.".format(mode))
class FindGoalLightHouseTask(LightHouseTask):
_CACHED_ACTION_NAMES: Dict[int, Tuple[str, ...]] = {}
def __init__(
self,
env: LightHouseEnvironment,
sensors: Union[SensorSuite, List[Sensor]],
task_info: Dict[str, Any],
max_steps: int,
**kwargs,
):
super().__init__(env, sensors, task_info, max_steps, **kwargs)
self._found_target = False
@property
def action_space(self) -> gym.spaces.Discrete:
return gym.spaces.Discrete(2 * self.env.world_dim)
def _step(self, action: Union[int, Sequence[int]]) -> RLStepResult:
assert isinstance(action, int)
action = cast(int, action)
self.env.step(action)
reward = STEP_PENALTY
if np.all(self.env.current_position == self.env.goal_position):
self._found_target = True
reward += FOUND_TARGET_REWARD
elif self.num_steps_taken() == self.max_steps - 1:
reward = STEP_PENALTY / (1 - DISCOUNT_FACTOR)
return RLStepResult(
observation=self.get_observations(),
reward=reward,
done=self.is_done(),
info=None,
)
def reached_terminal_state(self) -> bool:
return self._found_target
@classmethod
def class_action_names(cls, world_dim: int = 2, **kwargs) -> Tuple[str, ...]:
assert 1 <= world_dim <= 26, "Too many dimensions."
if world_dim not in cls._CACHED_ACTION_NAMES:
action_names = [
"{}(+1)".format(string.ascii_lowercase[i]) for i in range(world_dim)
]
action_names.extend(
"{}(-1)".format(string.ascii_lowercase[i]) for i in range(world_dim)
)
cls._CACHED_ACTION_NAMES[world_dim] = tuple(action_names)
return cls._CACHED_ACTION_NAMES[world_dim]
def action_names(self) -> Tuple[str, ...]:
return self.class_action_names(world_dim=self.env.world_dim)
def close(self) -> None:
pass
def query_expert(
self,
expert_view_radius: int,
return_policy: bool = False,
deterministic: bool = False,
**kwargs,
) -> Tuple[Any, bool]:
view_tuple = get_corner_observation(
env=self.env, view_radius=expert_view_radius, view_corner_offsets=None,
)
goal = self.env.GOAL
wrong = self.env.WRONG_CORNER
if self.env.world_dim == 1:
left_view, right_view, hitting, last_action = view_tuple
left = 1
right = 0
expert_action: Optional[int] = None
policy: Optional[np.ndarray] = None
if left_view == goal:
expert_action = left
elif right_view == goal:
expert_action = right
elif hitting != 2 * self.env.world_dim:
expert_action = left if last_action == right else right
elif left_view == wrong:
expert_action = right
elif right_view == wrong:
expert_action = left
elif last_action == 2 * self.env.world_dim:
policy = np.array([0.5, 0.5])
else:
expert_action = last_action
if policy is None:
policy = np.array([expert_action == right, expert_action == left])
elif self.env.world_dim == 2:
tl, tr, bl, br, hitting, last_action = view_tuple
wall = self.env.WALL
d, r, u, l, none = 0, 1, 2, 3, 4
if tr == goal:
if hitting != r:
expert_action = r
else:
expert_action = u
elif br == goal:
if hitting != d:
expert_action = d
else:
expert_action = r
elif bl == goal:
if hitting != l:
expert_action = l
else:
expert_action = d
elif tl == goal:
if hitting != u:
expert_action = u
else:
expert_action = l
elif tr == wrong and not any(x == wrong for x in [br, bl, tl]):
expert_action = l
elif br == wrong and not any(x == wrong for x in [bl, tl, tr]):
expert_action = u
elif bl == wrong and not any(x == wrong for x in [tl, tr, br]):
expert_action = r
elif tl == wrong and not any(x == wrong for x in [tr, br, bl]):
expert_action = d
elif all(x == wrong for x in [tr, br]) and not any(
x == wrong for x in [bl, tl]
):
expert_action = l
elif all(x == wrong for x in [br, bl]) and not any(
x == wrong for x in [tl, tr]
):
expert_action = u
elif all(x == wrong for x in [bl, tl]) and not any(
x == wrong for x in [tr, br]
):
expert_action = r
elif all(x == wrong for x in [tl, tr]) and not any(
x == wrong for x in [br, bl]
):
expert_action = d
elif hitting != none and tr == br == bl == tl:
# Only possible if in 0 vis setting
if tr == self.env.WRONG_CORNER or last_action == hitting:
if last_action == r:
expert_action = u
elif last_action == u:
expert_action = l
elif last_action == l:
expert_action = d
elif last_action == d:
expert_action = r
else:
raise NotImplementedError()
else:
expert_action = last_action
elif last_action == r and tr == wall:
expert_action = u
elif last_action == u and tl == wall:
expert_action = l
elif last_action == l and bl == wall:
expert_action = d
elif last_action == d and br == wall:
expert_action = r
elif last_action == none:
expert_action = r
else:
expert_action = last_action
policy = np.array(
[
expert_action == d,
expert_action == r,
expert_action == u,
expert_action == l,
]
)
else:
raise NotImplementedError("Can only query expert for world dims of 1 or 2.")
if return_policy:
return policy, True
elif deterministic:
return int(np.argmax(policy)), True
else:
return (
int(np.argmax(np.random.multinomial(1, policy / (1.0 * policy.sum())))),
True,
)
class FindGoalLightHouseTaskSampler(TaskSampler):
def __init__(
self,
world_dim: int,
world_radius: int,
sensors: Union[SensorSuite, List[Sensor]],
max_steps: int,
max_tasks: Optional[int] = None,
num_unique_seeds: Optional[int] = None,
task_seeds_list: Optional[List[int]] = None,
deterministic_sampling: bool = False,
seed: Optional[int] = None,
**kwargs,
):
self.env = LightHouseEnvironment(world_dim=world_dim, world_radius=world_radius)
self._last_sampled_task: Optional[FindGoalLightHouseTask] = None
self.sensors = (
SensorSuite(sensors) if not isinstance(sensors, SensorSuite) else sensors
)
self.max_steps = max_steps
self.max_tasks = max_tasks
self.num_tasks_generated = 0
self.deterministic_sampling = deterministic_sampling
self.num_unique_seeds = num_unique_seeds
self.task_seeds_list = task_seeds_list
assert (self.num_unique_seeds is None) or (
0 < self.num_unique_seeds
), "`num_unique_seeds` must be a positive integer."
if self.task_seeds_list is not None:
if self.num_unique_seeds is not None:
assert self.num_unique_seeds == len(
self.task_seeds_list
), "`num_unique_seeds` must equal the length of `task_seeds_list` if both specified."
self.num_unique_seeds = len(self.task_seeds_list)
elif self.num_unique_seeds is not None:
self.task_seeds_list = list(range(self.num_unique_seeds))
assert (not deterministic_sampling) or (
self.num_unique_seeds is not None
), "Cannot use deterministic sampling when `num_unique_seeds` is `None`."
if (not deterministic_sampling) and self.max_tasks:
get_logger().warning(
"`deterministic_sampling` is `False` but you have specified `max_tasks < inf`,"
" this might be a mistake when running testing."
)
self.seed: int = int(
seed if seed is not None else np.random.randint(0, 2 ** 31 - 1)
)
self.np_seeded_random_gen: Optional[np.random.RandomState] = None
self.set_seed(self.seed)
@property
def world_dim(self):
return self.env.world_dim
@property
def world_radius(self):
return self.env.world_radius
@property
def length(self) -> Union[int, float]:
return (
float("inf")
if self.max_tasks is None
else self.max_tasks - self.num_tasks_generated
)
@property
def total_unique(self) -> Optional[Union[int, float]]:
n = 2 ** self.world_dim
return n if self.num_unique_seeds is None else min(n, self.num_unique_seeds)
@property
def last_sampled_task(self) -> Optional[Task]:
return self._last_sampled_task
def next_task(self, force_advance_scene: bool = False) -> Optional[Task]:
if self.length <= 0:
return None
if self.num_unique_seeds is not None:
if self.deterministic_sampling:
seed = self.task_seeds_list[
self.num_tasks_generated % len(self.task_seeds_list)
]
else:
seed = self.np_seeded_random_gen.choice(self.task_seeds_list)
else:
seed = self.np_seeded_random_gen.randint(0, 2 ** 31 - 1)
self.num_tasks_generated += 1
self.env.set_seed(seed)
self.env.random_reset()
return FindGoalLightHouseTask(
env=self.env, sensors=self.sensors, task_info={}, max_steps=self.max_steps
)
def close(self) -> None:
pass
@property
def all_observation_spaces_equal(self) -> bool:
return True
def reset(self) -> None:
self.num_tasks_generated = 0
self.set_seed(seed=self.seed)
def set_seed(self, seed: int) -> None:
set_seed(seed)
self.np_seeded_random_gen, _ = seeding.np_random(seed)
self.seed = seed
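# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream file): sampling lighthouse
# tasks with the corner sensor defined in `lighthouse_sensors.py`.
#
#   from allenact_plugins.lighthouse_plugin.lighthouse_sensors import CornerSensor
#
#   sampler = FindGoalLightHouseTaskSampler(
#       world_dim=2, world_radius=10,
#       sensors=[CornerSensor(view_radius=1, world_dim=2)],
#       max_steps=100, max_tasks=5, num_unique_seeds=5,
#       deterministic_sampling=True, seed=0,
#   )
#   task = sampler.next_task()
#   obs = task.get_observations()   # dict keyed by sensor uuid
# ---------------------------------------------------------------------------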
| allenact-main | allenact_plugins/lighthouse_plugin/lighthouse_tasks.py |
allenact-main | allenact_plugins/lighthouse_plugin/__init__.py |
|
import itertools
from typing import Any, Dict, Optional, Tuple, Sequence
import gym
import numpy as np
import pandas as pd
import patsy
from allenact.base_abstractions.sensor import Sensor, prepare_locals_for_super
from allenact.base_abstractions.task import Task
from allenact_plugins.lighthouse_plugin.lighthouse_environment import (
LightHouseEnvironment,
)
def get_corner_observation(
env: LightHouseEnvironment,
view_radius: int,
view_corner_offsets: Optional[np.ndarray],
):
if view_corner_offsets is None:
view_corner_offsets = view_radius * (2 * (env.world_corners > 0) - 1)
world_corners_offset = env.world_corners + env.world_radius
multidim_view_corner_indices = np.clip(
np.reshape(env.current_position, (1, -1))
+ view_corner_offsets
+ env.world_radius,
a_min=0,
a_max=2 * env.world_radius,
)
flat_view_corner_indices = np.ravel_multi_index(
np.transpose(multidim_view_corner_indices), env.world_tensor.shape
)
view_values = env.world_tensor.reshape(-1)[flat_view_corner_indices]
last_action = 2 * env.world_dim if env.last_action is None else env.last_action
on_border_bools = np.concatenate(
(
env.current_position == env.world_radius,
env.current_position == -env.world_radius,
),
axis=0,
)
if last_action == 2 * env.world_dim or on_border_bools[last_action]:
on_border_value = last_action
elif on_border_bools.any():
on_border_value = np.argwhere(on_border_bools).reshape(-1)[0]
else:
on_border_value = 2 * env.world_dim
seen_mask = np.array(env.closest_distance_to_corners <= view_radius, dtype=int)
seen_corner_values = (
env.world_tensor.reshape(-1)[
np.ravel_multi_index(
np.transpose(world_corners_offset), env.world_tensor.shape
)
]
* seen_mask
)
return np.concatenate(
(
seen_corner_values + view_values * (1 - seen_mask),
[on_border_value, last_action],
),
axis=0,
out=np.zeros((seen_corner_values.shape[0] + 2,), dtype=np.float32,),
)
class CornerSensor(Sensor[LightHouseEnvironment, Any]):
def __init__(
self,
view_radius: int,
world_dim: int,
uuid: str = "corner_fixed_radius",
**kwargs: Any
):
self.view_radius = view_radius
self.world_dim = world_dim
self.view_corner_offsets: Optional[np.ndarray] = None
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self):
return gym.spaces.Box(
low=min(LightHouseEnvironment.SPACE_LEVELS),
high=max(LightHouseEnvironment.SPACE_LEVELS),
shape=(2 ** self.world_dim + 2,),
dtype=int,
)
def get_observation(
self,
env: LightHouseEnvironment,
task: Optional[Task],
*args: Any,
**kwargs: Any
) -> Any:
if self.view_corner_offsets is None:
self.view_corner_offsets = self.view_radius * (
2 * (env.world_corners > 0) - 1
)
return get_corner_observation(
env=env,
view_radius=self.view_radius,
view_corner_offsets=self.view_corner_offsets,
)
class FactorialDesignCornerSensor(Sensor[LightHouseEnvironment, Any]):
_DESIGN_MAT_CACHE: Dict[Tuple, Any] = {}
def __init__(
self,
view_radius: int,
world_dim: int,
degree: int,
uuid: str = "corner_fixed_radius_categorical",
**kwargs: Any
):
self.view_radius = view_radius
self.world_dim = world_dim
self.degree = degree
if self.world_dim > 2:
raise NotImplementedError(
"When using the `FactorialDesignCornerSensor`,"
"`world_dim` must be <= 2 due to memory constraints."
"In the current implementation, creating the design"
"matrix in the `world_dim == 3` case would require"
"instantiating a matrix of size ~ 3Mx3M (9 trillion entries)."
)
self.view_corner_offsets: Optional[np.ndarray] = None
# self.world_corners_offset: Optional[List[typing.Tuple[int, ...]]] = None
self.corner_sensor = CornerSensor(self.view_radius, self.world_dim)
self.variables_and_levels = self._get_variables_and_levels(
world_dim=self.world_dim
)
self._design_mat_formula = self._create_formula(
variables_and_levels=self._get_variables_and_levels(
world_dim=self.world_dim
),
degree=self.degree,
)
self.single_row_df = pd.DataFrame(
data=[[0] * len(self.variables_and_levels)],
columns=[x[0] for x in self.variables_and_levels],
)
self._view_tuple_to_design_array: Dict[Tuple[int, ...], np.ndarray] = {}
(
design_matrix,
tuple_to_ind,
) = self._create_full_design_matrix_and_tuple_to_ind_dict(
variables_and_levels=tuple(self.variables_and_levels), degree=self.degree
)
self.design_matrix = design_matrix
self.tuple_to_ind = tuple_to_ind
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self):
return gym.spaces.Box(
low=min(LightHouseEnvironment.SPACE_LEVELS),
high=max(LightHouseEnvironment.SPACE_LEVELS),
shape=(
len(
self.view_tuple_to_design_array(
(0,) * len(self.variables_and_levels)
)
),
),
dtype=int,
)
def view_tuple_to_design_array(self, view_tuple: Tuple):
return np.array(
self.design_matrix[self.tuple_to_ind[view_tuple], :], dtype=np.float32
)
@classmethod
def output_dim(cls, world_dim: int):
return ((3 if world_dim == 1 else 4) ** (2 ** world_dim)) * (
2 * world_dim + 1
) ** 2
@classmethod
def _create_full_design_matrix_and_tuple_to_ind_dict(
cls, variables_and_levels: Sequence[Tuple[str, Sequence[int]]], degree: int
):
variables_and_levels = tuple((x, tuple(y)) for x, y in variables_and_levels)
key = (variables_and_levels, degree)
if key not in cls._DESIGN_MAT_CACHE:
all_tuples = [
tuple(x)
for x in itertools.product(
*[levels for _, levels in variables_and_levels]
)
]
tuple_to_ind = {}
for i, t in enumerate(all_tuples):
tuple_to_ind[t] = i
df = pd.DataFrame(
data=all_tuples,
columns=[var_name for var_name, _ in variables_and_levels],
)
cls._DESIGN_MAT_CACHE[key] = (
np.array(
1.0
* patsy.dmatrix(
cls._create_formula(
variables_and_levels=variables_and_levels, degree=degree
),
data=df,
),
dtype=bool,
),
tuple_to_ind,
)
return cls._DESIGN_MAT_CACHE[key]
@staticmethod
def _get_variables_and_levels(world_dim: int):
return (
[
("s{}".format(i), list(range(3 if world_dim == 1 else 4)))
for i in range(2 ** world_dim)
]
+ [("b{}".format(i), list(range(2 * world_dim + 1))) for i in range(1)]
+ [("a{}".format(i), list(range(2 * world_dim + 1))) for i in range(1)]
)
@classmethod
def _create_formula(
cls, variables_and_levels: Sequence[Tuple[str, Sequence[int]]], degree: int
):
def make_categorial(var_name, levels):
return "C({}, levels={})".format(var_name, levels)
if degree == -1:
return ":".join(
make_categorial(var_name, levels)
for var_name, levels in variables_and_levels
)
else:
return "({})**{}".format(
"+".join(
make_categorial(var_name, levels)
for var_name, levels in variables_and_levels
),
degree,
)
def get_observation(
self,
env: LightHouseEnvironment,
task: Optional[Task],
*args: Any,
**kwargs: Any
) -> Any:
kwargs["as_tuple"] = True
view_array = self.corner_sensor.get_observation(env, task, *args, **kwargs)
return self.view_tuple_to_design_array(tuple(view_array))
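# Illustrative worked example (not part of the upstream file): for
# world_dim == 1, `FactorialDesignCornerSensor.output_dim` gives
# (3 ** (2 ** 1)) * (2 * 1 + 1) ** 2 = 9 * 9 = 81 distinct view tuples:
# two corner variables with 3 levels each, plus one border and one
# last-action variable with 2 * world_dim + 1 = 3 levels each.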
| allenact-main | allenact_plugins/lighthouse_plugin/lighthouse_sensors.py |
import numpy as np
from allenact.utils.experiment_utils import EarlyStoppingCriterion, ScalarMeanTracker
class StopIfNearOptimal(EarlyStoppingCriterion):
def __init__(self, optimal: float, deviation: float, min_memory_size: int = 100):
self.optimal = optimal
self.deviation = deviation
self.current_pos = 0
self.has_filled = False
self.memory: np.ndarray = np.zeros(min_memory_size)
def __call__(
self, stage_steps: int, total_steps: int, training_metrics: ScalarMeanTracker,
) -> bool:
sums = training_metrics.sums()
counts = training_metrics.counts()
k = "ep_length"
if k in sums:
count = counts[k]
ep_length_ave = sums[k] / count
n = self.memory.shape[0]
if count >= n:
if count > n:
# Increase memory size to fit all of the new values
self.memory = np.full(count, fill_value=ep_length_ave)
else:
# We have exactly as many values as the memory size,
# simply set the whole memory to be equal to the new
# average ep length.
self.memory[:] = ep_length_ave
self.current_pos = 0
self.has_filled = True
else:
self.memory[
self.current_pos : (self.current_pos + count)
] = ep_length_ave
if self.current_pos + count > n:
self.has_filled = True
self.current_pos = (self.current_pos + count) % n
self.memory[: self.current_pos] = ep_length_ave
if not self.has_filled:
return False
return self.memory.mean() < self.optimal + self.deviation
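# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream file): the criterion stops
# training once the running mean episode length over at least
# `min_memory_size` recent episodes falls below `optimal + deviation`.
#
#   criterion = StopIfNearOptimal(optimal=12.0, deviation=1.0)
#   should_stop = criterion(
#       stage_steps=steps_in_stage,        # placeholder variables
#       total_steps=total_steps_so_far,
#       training_metrics=tracker,          # a ScalarMeanTracker
#   )
# ---------------------------------------------------------------------------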
| allenact-main | allenact_plugins/lighthouse_plugin/lighthouse_util.py |
allenact-main | allenact_plugins/lighthouse_plugin/configs/__init__.py |
|
allenact-main | allenact_plugins/lighthouse_plugin/scripts/__init__.py |