Dataset columns: python_code (string, 0 to 187k chars), repo_name (string, 8 to 46 chars), file_path (string, 6 to 135 chars).
allenact-main
projects/objectnav_baselines/experiments/robothor/clip/__init__.py
from typing import Sequence, Union

import torch.nn as nn

from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.utils.experiment_utils import Builder, TrainingPipeline
from allenact_plugins.clip_plugin.clip_preprocessors import ClipResNetPreprocessor
from allenact_plugins.ithor_plugin.ithor_sensors import (
    GoalObjectTypeThorSensor,
    RGBSensorThor,
)
from projects.objectnav_baselines.experiments.clip.mixins import (
    ClipResNetPreprocessGRUActorCriticMixin,
)
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
    ObjectNavRoboThorBaseConfig,
)
from projects.objectnav_baselines.mixins import ObjectNavPPOMixin


class ObjectNavRoboThorClipRGBPPOExperimentConfig(ObjectNavRoboThorBaseConfig):
    """An Object Navigation experiment configuration in RoboTHOR with RGB
    input, using a CLIP ResNet-50 visual encoder."""

    CLIP_MODEL_TYPE = "RN50"

    SENSORS = [
        RGBSensorThor(
            height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
            width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
            use_resnet_normalization=True,
            uuid="rgb_lowres",
            mean=ClipResNetPreprocessor.CLIP_RGB_MEANS,
            stdev=ClipResNetPreprocessor.CLIP_RGB_STDS,
        ),
        GoalObjectTypeThorSensor(
            object_types=ObjectNavRoboThorBaseConfig.TARGET_TYPES,
        ),
    ]

    def __init__(self, add_prev_actions: bool = False, **kwargs):
        super().__init__(**kwargs)

        self.preprocessing_and_model = ClipResNetPreprocessGRUActorCriticMixin(
            sensors=self.SENSORS,
            clip_model_type=self.CLIP_MODEL_TYPE,
            screen_size=self.SCREEN_SIZE,
            goal_sensor_type=GoalObjectTypeThorSensor,
        )
        self.add_prev_actions = add_prev_actions

    def training_pipeline(self, **kwargs) -> TrainingPipeline:
        return ObjectNavPPOMixin.training_pipeline(
            auxiliary_uuids=[],
            multiple_beliefs=False,
            advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
        )

    def preprocessors(self) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
        return self.preprocessing_and_model.preprocessors()

    def create_model(self, **kwargs) -> nn.Module:
        return self.preprocessing_and_model.create_model(
            num_actions=self.ACTION_SPACE.n,
            add_prev_actions=self.add_prev_actions,
            **kwargs,
        )

    @classmethod
    def tag(cls):
        return "ObjectNav-RoboTHOR-RGB-ClipResNet50GRU-DDPPO"
allenact-main
projects/objectnav_baselines/experiments/robothor/clip/objectnav_robothor_rgb_clipresnet50gru_ddppo.py
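As an aside, here is a minimal interactive sketch (not repo code) of how such an experiment config can be inspected. It assumes the base config's __init__ requires no arguments; constructing preprocessors() may load CLIP weights, so only the cheap accessors are exercised here, and the launch command in the trailing comment is a from-memory sketch with a hypothetical output directory.

# Minimal interactive sketch; assumptions noted in the lead-in above.
from projects.objectnav_baselines.experiments.robothor.clip.objectnav_robothor_rgb_clipresnet50gru_ddppo import (
    ObjectNavRoboThorClipRGBPPOExperimentConfig,
)

config = ObjectNavRoboThorClipRGBPPOExperimentConfig()
print(config.tag())  # "ObjectNav-RoboTHOR-RGB-ClipResNet50GRU-DDPPO"
pipeline = config.training_pipeline()  # the DD-PPO pipeline from ObjectNavPPOMixin

# Training is normally launched through the allenact runner; from memory,
# something like the following (hypothetical output dir), details may differ:
#   allenact -o storage/objectnav-robothor-clip \
#       projects/objectnav_baselines/experiments/robothor/clip/objectnav_robothor_rgb_clipresnet50gru_ddppo.py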
from typing import Sequence, Union

import torch.nn as nn

from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.utils.experiment_utils import Builder, TrainingPipeline
from allenact_plugins.clip_plugin.clip_preprocessors import ClipResNetPreprocessor
from allenact_plugins.ithor_plugin.ithor_sensors import (
    GoalObjectTypeThorSensor,
    RGBSensorThor,
)
from projects.objectnav_baselines.experiments.clip.mixins import (
    ClipResNetPreprocessGRUActorCriticMixin,
)
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
    ObjectNavRoboThorBaseConfig,
)
from projects.objectnav_baselines.mixins import ObjectNavPPOMixin


class ObjectNavRoboThorRGBPPOExperimentConfig(ObjectNavRoboThorBaseConfig):
    """An Object Navigation experiment configuration in RoboTHOR with RGB
    input, using a CLIP ResNet-50x16 visual encoder."""

    CLIP_MODEL_TYPE = "RN50x16"

    SENSORS = [
        RGBSensorThor(
            height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
            width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
            use_resnet_normalization=True,
            uuid="rgb_lowres",
            mean=ClipResNetPreprocessor.CLIP_RGB_MEANS,
            stdev=ClipResNetPreprocessor.CLIP_RGB_STDS,
        ),
        GoalObjectTypeThorSensor(
            object_types=ObjectNavRoboThorBaseConfig.TARGET_TYPES,
        ),
    ]

    def __init__(self, add_prev_actions: bool = False, **kwargs):
        super().__init__(**kwargs)

        self.preprocessing_and_model = ClipResNetPreprocessGRUActorCriticMixin(
            sensors=self.SENSORS,
            clip_model_type=self.CLIP_MODEL_TYPE,
            screen_size=self.SCREEN_SIZE,
            goal_sensor_type=GoalObjectTypeThorSensor,
        )
        self.add_prev_actions = add_prev_actions

    def training_pipeline(self, **kwargs) -> TrainingPipeline:
        return ObjectNavPPOMixin.training_pipeline(
            auxiliary_uuids=[],
            multiple_beliefs=False,
            advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
        )

    def preprocessors(self) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
        return self.preprocessing_and_model.preprocessors()

    def create_model(self, **kwargs) -> nn.Module:
        return self.preprocessing_and_model.create_model(
            num_actions=self.ACTION_SPACE.n,
            add_prev_actions=self.add_prev_actions,
            **kwargs,
        )

    @classmethod
    def tag(cls):
        return "ObjectNav-RoboTHOR-RGB-ClipResNet50x16GRU-DDPPO"
allenact-main
projects/objectnav_baselines/experiments/robothor/clip/objectnav_robothor_rgb_clipresnet50x16gru_ddppo.py
allenact-main
projects/objectnav_baselines/experiments/habitat/__init__.py
import glob
import math
import os
import warnings
from abc import ABC
from typing import Dict, Any, List, Optional, Sequence, Union, Tuple

import gym
import numpy as np
import torch
from torch.distributions.utils import lazy_property

# noinspection PyUnresolvedReferences
import habitat
from allenact.base_abstractions.experiment_config import MachineParams
from allenact.base_abstractions.preprocessor import (
    SensorPreprocessorGraph,
    Preprocessor,
)
from allenact.base_abstractions.sensor import SensorSuite
from allenact.base_abstractions.task import TaskSampler
from allenact.embodiedai.sensors.vision_sensors import RGBSensor, DepthSensor
from allenact.utils.experiment_utils import evenly_distribute_count_into_bins, Builder
from allenact.utils.system import get_logger
from allenact_plugins.habitat_plugin.habitat_constants import (
    HABITAT_DATASETS_DIR,
    HABITAT_CONFIGS_DIR,
    HABITAT_SCENE_DATASETS_DIR,
)
from allenact_plugins.habitat_plugin.habitat_task_samplers import ObjectNavTaskSampler
from allenact_plugins.habitat_plugin.habitat_tasks import ObjectNavTask
from allenact_plugins.habitat_plugin.habitat_utils import (
    get_habitat_config,
    construct_env_configs,
)
from projects.objectnav_baselines.experiments.objectnav_base import ObjectNavBaseConfig


def create_objectnav_config(
    config_yaml_path: str,
    mode: str,
    scenes_path: str,
    simulator_gpu_ids: Sequence[int],
    rotation_degrees: float,
    step_size: float,
    max_steps: int,
    num_processes: int,
    camera_width: int,
    camera_height: int,
    using_rgb: bool,
    using_depth: bool,
    training: bool,
    num_episode_sample: int,
    horizontal_fov: Optional[int] = None,
) -> habitat.Config:
    config = get_habitat_config(config_yaml_path)

    config.defrost()

    config.NUM_PROCESSES = num_processes
    config.SIMULATOR_GPU_IDS = simulator_gpu_ids
    config.DATASET.SCENES_DIR = HABITAT_SCENE_DATASETS_DIR

    config.DATASET.DATA_PATH = scenes_path

    config.SIMULATOR.AGENT_0.SENSORS = []
    if using_rgb:
        config.SIMULATOR.AGENT_0.SENSORS.append("RGB_SENSOR")
    if using_depth:
        config.SIMULATOR.AGENT_0.SENSORS.append("DEPTH_SENSOR")

    config.SIMULATOR.RGB_SENSOR.WIDTH = camera_width
    config.SIMULATOR.RGB_SENSOR.HEIGHT = camera_height
    config.SIMULATOR.DEPTH_SENSOR.WIDTH = camera_width
    config.SIMULATOR.DEPTH_SENSOR.HEIGHT = camera_height
    config.SIMULATOR.SEMANTIC_SENSOR.WIDTH = camera_width
    config.SIMULATOR.SEMANTIC_SENSOR.HEIGHT = camera_height

    if horizontal_fov is not None:
        config.SIMULATOR.RGB_SENSOR.HFOV = horizontal_fov
        config.SIMULATOR.DEPTH_SENSOR.HFOV = horizontal_fov
        config.SIMULATOR.SEMANTIC_SENSOR.HFOV = horizontal_fov

    assert rotation_degrees == config.SIMULATOR.TURN_ANGLE
    assert step_size == config.SIMULATOR.FORWARD_STEP_SIZE
    assert max_steps == config.ENVIRONMENT.MAX_EPISODE_STEPS
    config.SIMULATOR.MAX_EPISODE_STEPS = max_steps

    assert config.TASK.TYPE == "ObjectNav-v1"
    assert config.TASK.SUCCESS.SUCCESS_DISTANCE == 0.1
    assert config.TASK.DISTANCE_TO_GOAL.DISTANCE_TO == "VIEW_POINTS"

    config.TASK.SENSORS = ["OBJECTGOAL_SENSOR", "COMPASS_SENSOR", "GPS_SENSOR"]
    config.TASK.GOAL_SENSOR_UUID = "objectgoal"
    config.TASK.MEASUREMENTS = ["DISTANCE_TO_GOAL", "SUCCESS", "SPL", "SOFT_SPL"]

    if not training:
        config.SEED = 0
        config.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False

    if num_episode_sample > 0:
        config.ENVIRONMENT.ITERATOR_OPTIONS.NUM_EPISODE_SAMPLE = num_episode_sample

    config.MODE = mode

    config.freeze()

    return config


class ObjectNavHabitatBaseConfig(ObjectNavBaseConfig, ABC):
    """The base config for all Habitat ObjectNav experiments."""

    # Selected auxiliary task uuids; with every entry commented out, this is
    # vanilla DD-PPO.
    _AUXILIARY_UUIDS = [
        # InverseDynamicsLoss.UUID,
        # TemporalDistanceLoss.UUID,
        # CPCA1Loss.UUID,
        # CPCA4Loss.UUID,
        # CPCA8Loss.UUID,
        # CPCA16Loss.UUID,
    ]

    MULTIPLE_BELIEFS = False
    BELIEF_FUSION = (  # choose one
        None
        # AttentiveFusion
        # AverageFusion
        # SoftmaxFusion
    )

    FAILED_END_REWARD = -1.0

    ACTION_SPACE = gym.spaces.Discrete(len(ObjectNavTask.class_action_names()))

    DEFAULT_NUM_TRAIN_PROCESSES = (
        5 * torch.cuda.device_count() if torch.cuda.is_available() else 1
    )
    DEFAULT_NUM_TEST_PROCESSES = 11

    DEFAULT_TRAIN_GPU_IDS = tuple(range(torch.cuda.device_count()))
    DEFAULT_VALID_GPU_IDS = [torch.cuda.device_count() - 1]
    DEFAULT_TEST_GPU_IDS = tuple(range(torch.cuda.device_count()))

    def __init__(
        self,
        scene_dataset: str,  # Should be "mp3d" or "hm3d"
        debug: bool = False,
        num_train_processes: Optional[int] = None,
        num_test_processes: Optional[int] = None,
        test_on_validation: bool = False,
        run_valid: bool = True,
        train_gpu_ids: Optional[Sequence[int]] = None,
        val_gpu_ids: Optional[Sequence[int]] = None,
        test_gpu_ids: Optional[Sequence[int]] = None,
        add_prev_actions: bool = False,
        look_constraints: Optional[Tuple[int, int]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.scene_dataset = scene_dataset
        self.debug = debug

        assert look_constraints is None or all(
            lc in [0, 1, 2, 3] for lc in look_constraints
        ), "Look constraints limit the number of times agents can look up/down when starting from the horizon line."
        assert (
            look_constraints is None or look_constraints[1] > 0
        ), "The agent must be allowed to look down from the horizon at least once."
        self.look_constraints = look_constraints

        def v_or_default(v, default):
            return v if v is not None else default

        self.num_train_processes = v_or_default(
            num_train_processes, self.DEFAULT_NUM_TRAIN_PROCESSES
        )
        self.num_test_processes = v_or_default(
            num_test_processes, (10 if torch.cuda.is_available() else 1)
        )
        self.test_on_validation = test_on_validation
        self.run_valid = run_valid
        self.train_gpu_ids = v_or_default(train_gpu_ids, self.DEFAULT_TRAIN_GPU_IDS)
        self.val_gpu_ids = v_or_default(
            val_gpu_ids, self.DEFAULT_VALID_GPU_IDS if run_valid else []
        )
        self.test_gpu_ids = v_or_default(test_gpu_ids, self.DEFAULT_TEST_GPU_IDS)

        self.add_prev_actions = add_prev_actions

        self.auxiliary_uuids = self._AUXILIARY_UUIDS

    def _create_config(
        self,
        mode: str,
        scenes_path: str,
        num_processes: int,
        simulator_gpu_ids: Sequence[int],
        training: bool = True,
        num_episode_sample: int = -1,
    ):
        return create_objectnav_config(
            config_yaml_path=self.BASE_CONFIG_YAML_PATH,
            mode=mode,
            scenes_path=scenes_path,
            simulator_gpu_ids=simulator_gpu_ids,
            rotation_degrees=self.ROTATION_DEGREES,
            step_size=self.STEP_SIZE,
            max_steps=self.MAX_STEPS,
            num_processes=num_processes,
            camera_width=self.CAMERA_WIDTH,
            camera_height=self.CAMERA_HEIGHT,
            horizontal_fov=self.HORIZONTAL_FIELD_OF_VIEW,
            using_rgb=any(isinstance(s, RGBSensor) for s in self.SENSORS),
            using_depth=any(isinstance(s, DepthSensor) for s in self.SENSORS),
            training=training,
            num_episode_sample=num_episode_sample,
        )

    @lazy_property
    def DEFAULT_OBJECT_CATEGORIES_TO_IND(self):
        if self.scene_dataset == "mp3d":
            return {
                "chair": 0,
                "table": 1,
                "picture": 2,
                "cabinet": 3,
                "cushion": 4,
                "sofa": 5,
                "bed": 6,
                "chest_of_drawers": 7,
                "plant": 8,
                "sink": 9,
                "toilet": 10,
                "stool": 11,
                "towel": 12,
                "tv_monitor": 13,
                "shower": 14,
                "bathtub": 15,
                "counter": 16,
                "fireplace": 17,
                "gym_equipment": 18,
                "seating": 19,
                "clothes": 20,
            }
        elif self.scene_dataset == "hm3d":
            return {
                "chair": 0,
                "bed": 1,
                "plant": 2,
                "toilet": 3,
                "tv_monitor": 4,
                "sofa": 5,
            }
        else:
            raise NotImplementedError

    @lazy_property
    def TASK_DATA_DIR_TEMPLATE(self):
        return os.path.join(
            HABITAT_DATASETS_DIR, f"objectnav/{self.scene_dataset}/v1/{{}}/{{}}.json.gz"
        )

    @lazy_property
    def BASE_CONFIG_YAML_PATH(self):
        return os.path.join(
            HABITAT_CONFIGS_DIR, f"tasks/objectnav_{self.scene_dataset}.yaml"
        )

    @lazy_property
    def TRAIN_CONFIG(self):
        return self._create_config(
            mode="train",
            scenes_path=self.train_scenes_path(),
            num_processes=self.num_train_processes,
            simulator_gpu_ids=self.train_gpu_ids,
            training=True,
        )

    @lazy_property
    def VALID_CONFIG(self):
        return self._create_config(
            mode="validate",
            scenes_path=self.valid_scenes_path(),
            num_processes=1,
            simulator_gpu_ids=self.val_gpu_ids,
            training=False,
            num_episode_sample=200,
        )

    @lazy_property
    def TEST_CONFIG(self):
        return self._create_config(
            mode="validate",
            scenes_path=self.test_scenes_path(),
            num_processes=self.num_test_processes,
            simulator_gpu_ids=self.test_gpu_ids,
            training=False,
        )

    @lazy_property
    def TRAIN_CONFIGS_PER_PROCESS(self):
        configs = construct_env_configs(self.TRAIN_CONFIG, allow_scene_repeat=True)

        if len(self.train_gpu_ids) >= 2:
            scenes_dir = configs[0].DATASET.SCENES_DIR
            memory_use_per_config = []
            for config in configs:
                assert (
                    len(config.DATASET.CONTENT_SCENES) == 1
                ), config.DATASET.CONTENT_SCENES
                scene_name = config.DATASET.CONTENT_SCENES[0]
                paths = glob.glob(
                    os.path.join(
                        scenes_dir, self.scene_dataset, "**", f"{scene_name}.*"
                    ),
                    recursive=True,
                )
                if self.scene_dataset == "mp3d":
                    assert len(paths) == 4
                else:
                    assert len(paths) == 2

                memory_use_per_config.append(sum(os.path.getsize(p) for p in paths))

            max_configs_per_device = math.ceil(len(configs) / len(self.train_gpu_ids))
            mem_per_device = np.array([0.0 for _ in range(len(self.train_gpu_ids))])
            configs_per_device = [[] for _ in range(len(mem_per_device))]
            for mem, config in sorted(
                list(zip(memory_use_per_config, configs)), key=lambda x: x[0]
            ):
                ind = int(np.argmin(mem_per_device))
                config.defrost()
                config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = self.train_gpu_ids[ind]
                config.freeze()

                configs_per_device[ind].append(config)
                mem_per_device[ind] += mem
                if len(configs_per_device[ind]) >= max_configs_per_device:
                    mem_per_device[ind] = float("inf")

            configs_per_device.sort(key=lambda x: len(x))
            configs = sum(configs_per_device, [])

        if self.debug:
            warnings.warn(
                "IN DEBUG MODE, WILL ONLY USE `1LXtFkjw3qL` SCENE IN MP3D OR `1S7LAXRdDqK` scene in HM3D!!!"
            )
            for config in configs:
                config.defrost()
                if self.scene_dataset == "mp3d":
                    config.DATASET.CONTENT_SCENES = ["1LXtFkjw3qL"]
                elif self.scene_dataset == "hm3d":
                    config.DATASET.CONTENT_SCENES = ["1S7LAXRdDqK"]
                else:
                    raise NotImplementedError
                config.freeze()

        return configs

    @lazy_property
    def TEST_CONFIG_PER_PROCESS(self):
        return construct_env_configs(self.TEST_CONFIG, allow_scene_repeat=False)

    def train_scenes_path(self):
        return self.TASK_DATA_DIR_TEMPLATE.format(*(["train"] * 2))

    def valid_scenes_path(self):
        return self.TASK_DATA_DIR_TEMPLATE.format(*(["val"] * 2))

    def test_scenes_path(self):
        get_logger().warning("Running tests on the validation set!")
        return self.TASK_DATA_DIR_TEMPLATE.format(*(["val"] * 2))
        # return self.TASK_DATA_DIR_TEMPLATE.format(*(["test"] * 2))

    def tag(self):
        t = f"ObjectNav-Habitat-{self.scene_dataset.upper()}"
        if self.add_prev_actions:
            t = f"{t}-PrevActions"
        if self.look_constraints is not None:
            t = f"{t}-Look{','.join(map(str, self.look_constraints))}"
        return t

    def preprocessors(self) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
        return tuple()

    def machine_params(self, mode="train", **kwargs):
        has_gpus = torch.cuda.is_available()
        if not has_gpus:
            gpu_ids = []
            nprocesses = 1
        elif mode == "train":
            gpu_ids = self.train_gpu_ids
            nprocesses = self.num_train_processes
        elif mode == "valid":
            gpu_ids = self.val_gpu_ids
            nprocesses = 1 if self.run_valid else 0
        elif mode == "test":
            gpu_ids = self.test_gpu_ids
            nprocesses = self.num_test_processes
        else:
            raise NotImplementedError("mode must be 'train', 'valid', or 'test'.")

        if has_gpus:
            nprocesses = evenly_distribute_count_into_bins(nprocesses, len(gpu_ids))

        sensor_preprocessor_graph = (
            SensorPreprocessorGraph(
                source_observation_spaces=SensorSuite(self.SENSORS).observation_spaces,
                preprocessors=self.preprocessors(),
            )
            if mode == "train"
            or (
                (isinstance(nprocesses, int) and nprocesses > 0)
                or (isinstance(nprocesses, Sequence) and sum(nprocesses) > 0)
            )
            else None
        )

        return MachineParams(
            nprocesses=nprocesses,
            devices=gpu_ids,
            sensor_preprocessor_graph=sensor_preprocessor_graph,
        )

    def make_sampler_fn(self, **kwargs) -> TaskSampler:
        return ObjectNavTaskSampler(
            task_kwargs={"look_constraints": self.look_constraints},
            **{"failed_end_reward": self.FAILED_END_REWARD, **kwargs},  # type: ignore
        )

    def train_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        config = self.TRAIN_CONFIGS_PER_PROCESS[process_ind]
        return {
            "env_config": config,
            "max_steps": self.MAX_STEPS,
            "sensors": self.SENSORS,
            "action_space": self.ACTION_SPACE,
        }

    def valid_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        if total_processes != 1:
            raise NotImplementedError(
                "In validation, `total_processes` must equal 1 for habitat tasks"
            )
        return {
            "env_config": self.VALID_CONFIG,
            "max_steps": self.MAX_STEPS,
            "sensors": self.SENSORS,
            "action_space": gym.spaces.Discrete(
                len(ObjectNavTask.class_action_names())
            ),
        }

    def test_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        config = self.TEST_CONFIG_PER_PROCESS[process_ind]
        return {
            "env_config": config,
            "max_steps": self.MAX_STEPS,
            "sensors": self.SENSORS,
            "action_space": gym.spaces.Discrete(
                len(ObjectNavTask.class_action_names())
            ),
        }
allenact-main
projects/objectnav_baselines/experiments/habitat/objectnav_habitat_base.py
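The scene-to-GPU assignment in TRAIN_CONFIGS_PER_PROCESS above is a greedy bin-packing by scene asset size: smallest scenes first, each to the currently least-loaded device, with full devices retired. The self-contained sketch below isolates just that logic; the scene names and byte sizes are made up for illustration.

import math

import numpy as np

# Illustrative replica of the greedy balancing in TRAIN_CONFIGS_PER_PROCESS;
# scene names and sizes are invented.
scene_sizes = {"sceneA": 900, "sceneB": 750, "sceneC": 400, "sceneD": 300, "sceneE": 100}
gpu_ids = [0, 1]

max_per_device = math.ceil(len(scene_sizes) / len(gpu_ids))
mem_per_device = np.zeros(len(gpu_ids))
scenes_per_device = [[] for _ in gpu_ids]

# Smallest first; each scene goes to the least-loaded device, and a device
# holding max_per_device scenes is removed from contention via an inf load.
for size, scene in sorted((v, k) for k, v in scene_sizes.items()):
    ind = int(np.argmin(mem_per_device))
    scenes_per_device[ind].append(scene)
    mem_per_device[ind] += size
    if len(scenes_per_device[ind]) >= max_per_device:
        mem_per_device[ind] = float("inf")

print(scenes_per_device)  # [['sceneE', 'sceneC', 'sceneA'], ['sceneD', 'sceneB']]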
allenact-main
projects/objectnav_baselines/experiments/habitat/clip/__init__.py
import torch
import torch.optim as optim

from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.utils.experiment_utils import (
    Builder,
    TrainingPipeline,
    PipelineStage,
    TrainingSettings,
)
from projects.objectnav_baselines.experiments.habitat.clip.objectnav_habitat_rgb_clipresnet50gru_ddppo import (
    ObjectNavHabitatRGBClipResNet50GRUDDPPOExperimentConfig,
)
from projects.objectnav_baselines.mixins import update_with_auxiliary_losses


class ObjectNavHabitatRGBClipResNet50GRUDDPPOIncreasingLengthExpConfig(
    ObjectNavHabitatRGBClipResNet50GRUDDPPOExperimentConfig
):
    def __init__(self, lr=1e-4, **kwargs):
        super().__init__(lr, **kwargs)
        self.lr = lr

    def training_pipeline(self, **kwargs) -> TrainingPipeline:
        auxiliary_uuids = self.auxiliary_uuids
        multiple_beliefs = False
        normalize_advantage = False
        advance_scene_rollout_period = self.ADVANCE_SCENE_ROLLOUT_PERIOD

        log_interval_small = (
            self.num_train_processes * 32 * 10 if torch.cuda.is_available() else 1
        )
        log_interval_med = (
            self.num_train_processes * 64 * 5 if torch.cuda.is_available() else 1
        )
        log_interval_large = (
            self.num_train_processes * 128 * 5 if torch.cuda.is_available() else 1
        )

        batch_steps_0 = int(10e6)
        batch_steps_1 = int(10e6)
        batch_steps_2 = int(1e9) - batch_steps_0 - batch_steps_1

        lr = self.lr
        num_mini_batch = 1
        update_repeats = 4
        save_interval = 5000000
        gamma = 0.99
        use_gae = True
        gae_lambda = 0.95
        max_grad_norm = 0.5

        named_losses = {
            "ppo_loss": (PPO(**PPOConfig, normalize_advantage=normalize_advantage), 1.0)
        }
        named_losses = update_with_auxiliary_losses(
            named_losses=named_losses,
            auxiliary_uuids=auxiliary_uuids,
            multiple_beliefs=multiple_beliefs,
        )

        return TrainingPipeline(
            save_interval=save_interval,
            optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
            num_mini_batch=num_mini_batch,
            update_repeats=update_repeats,
            max_grad_norm=max_grad_norm,
            named_losses={key: val[0] for key, val in named_losses.items()},
            gamma=gamma,
            use_gae=use_gae,
            gae_lambda=gae_lambda,
            advance_scene_rollout_period=advance_scene_rollout_period,
            pipeline_stages=[
                PipelineStage(
                    loss_names=list(named_losses.keys()),
                    max_stage_steps=batch_steps_0,
                    training_settings=TrainingSettings(
                        num_steps=32, metric_accumulate_interval=log_interval_small
                    ),
                ),
                PipelineStage(
                    loss_names=list(named_losses.keys()),
                    max_stage_steps=batch_steps_1,
                    training_settings=TrainingSettings(
                        num_steps=64, metric_accumulate_interval=log_interval_med
                    ),
                ),
                PipelineStage(
                    loss_names=list(named_losses.keys()),
                    max_stage_steps=batch_steps_2,
                    training_settings=TrainingSettings(
                        num_steps=128, metric_accumulate_interval=log_interval_large
                    ),
                ),
            ],
            lr_scheduler_builder=None,
        )

    def tag(self):
        return (
            super(
                ObjectNavHabitatRGBClipResNet50GRUDDPPOIncreasingLengthExpConfig, self
            )
            .tag()
            .replace("-DDPPO-lr", "-DDPPO-IncRollouts-lr")
        )
allenact-main
projects/objectnav_baselines/experiments/habitat/clip/objectnav_habitat_rgb_clipresnet50gru_ddppo_increasingrollouts.py
from typing import Sequence, Union

import torch.nn as nn
from torch.distributions.utils import lazy_property

from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.utils.experiment_utils import Builder, TrainingPipeline
from allenact_plugins.clip_plugin.clip_preprocessors import ClipResNetPreprocessor
from allenact_plugins.habitat_plugin.habitat_sensors import (
    RGBSensorHabitat,
    TargetObjectSensorHabitat,
)
from projects.objectnav_baselines.experiments.clip.mixins import (
    ClipResNetPreprocessGRUActorCriticMixin,
)
from projects.objectnav_baselines.experiments.habitat.objectnav_habitat_base import (
    ObjectNavHabitatBaseConfig,
)
from projects.objectnav_baselines.mixins import ObjectNavPPOMixin


class ObjectNavHabitatRGBClipResNet50GRUDDPPOExperimentConfig(
    ObjectNavHabitatBaseConfig
):
    """An Object Navigation experiment configuration in Habitat, using a CLIP
    ResNet-50 visual encoder."""

    CLIP_MODEL_TYPE = "RN50"

    def __init__(self, lr: float, **kwargs):
        super().__init__(**kwargs)

        self.lr = lr

        self.preprocessing_and_model = ClipResNetPreprocessGRUActorCriticMixin(
            sensors=self.SENSORS,
            clip_model_type=self.CLIP_MODEL_TYPE,
            screen_size=self.SCREEN_SIZE,
            goal_sensor_type=TargetObjectSensorHabitat,
        )

    @lazy_property
    def SENSORS(self):
        return [
            RGBSensorHabitat(
                height=ObjectNavHabitatBaseConfig.SCREEN_SIZE,
                width=ObjectNavHabitatBaseConfig.SCREEN_SIZE,
                use_resnet_normalization=True,
                mean=ClipResNetPreprocessor.CLIP_RGB_MEANS,
                stdev=ClipResNetPreprocessor.CLIP_RGB_STDS,
            ),
            TargetObjectSensorHabitat(len(self.DEFAULT_OBJECT_CATEGORIES_TO_IND)),
        ]

    def training_pipeline(self, **kwargs) -> TrainingPipeline:
        return ObjectNavPPOMixin.training_pipeline(
            lr=self.lr,
            auxiliary_uuids=self.auxiliary_uuids,
            multiple_beliefs=False,
            advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
        )

    def preprocessors(self) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
        return self.preprocessing_and_model.preprocessors()

    def create_model(self, **kwargs) -> nn.Module:
        return self.preprocessing_and_model.create_model(
            num_actions=self.ACTION_SPACE.n,
            add_prev_actions=self.add_prev_actions,
            auxiliary_uuids=self.auxiliary_uuids,
            **kwargs,
        )

    def tag(self):
        return (
            f"{super(ObjectNavHabitatRGBClipResNet50GRUDDPPOExperimentConfig, self).tag()}"
            f"-RGB-ClipResNet50GRU-DDPPO-lr{self.lr}"
        )
allenact-main
projects/objectnav_baselines/experiments/habitat/clip/objectnav_habitat_rgb_clipresnet50gru_ddppo.py
from typing import Sequence, Union, Type, Tuple, Optional, Dict, Any

import attr
import gym
import numpy as np
import torch
import torch.nn as nn

from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import (
    ObservationType,
    Memory,
    ActorCriticOutput,
    DistributionType,
)
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.base_abstractions.sensor import Sensor
from allenact.embodiedai.sensors.vision_sensors import RGBSensor, DepthSensor
from allenact.utils.experiment_utils import Builder
from allenact.utils.misc_utils import prepare_locals_for_super
from allenact_plugins.clip_plugin.clip_preprocessors import ClipResNetPreprocessor
from allenact_plugins.navigation_plugin.objectnav.models import (
    ResnetTensorNavActorCritic,
)


class LookDownFirstResnetTensorNavActorCritic(ResnetTensorNavActorCritic):
    def __init__(self, look_down_action_index: int, **kwargs):
        super().__init__(**kwargs)
        self.look_down_action_index = look_down_action_index

        self.register_buffer(
            "look_down_delta", torch.zeros(1, 1, self.action_space.n), persistent=False
        )
        self.look_down_delta[0, 0, self.look_down_action_index] = 99999

    def forward(  # type:ignore
        self,
        observations: ObservationType,
        memory: Memory,
        prev_actions: torch.Tensor,
        masks: torch.FloatTensor,
    ) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
        ac_out, memory = super(LookDownFirstResnetTensorNavActorCritic, self).forward(
            **prepare_locals_for_super(locals())
        )

        # At episode starts (masks == 0), overwrite the logits so the
        # look-down action is (effectively) forced.
        logits = ac_out.distributions.logits * masks + self.look_down_delta * (
            1 - masks
        )
        ac_out = ActorCriticOutput(
            distributions=CategoricalDistr(logits=logits),
            values=ac_out.values,
            extras=ac_out.extras,
        )

        return ac_out, memory


@attr.s(kw_only=True)
class ClipResNetPreprocessGRUActorCriticMixin:
    sensors: Sequence[Sensor] = attr.ib()
    clip_model_type: str = attr.ib()
    screen_size: int = attr.ib()
    goal_sensor_type: Type[Optional[Sensor]] = attr.ib()
    pool: bool = attr.ib(default=False)

    def preprocessors(self) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
        preprocessors = []

        rgb_sensor = next((s for s in self.sensors if isinstance(s, RGBSensor)), None)

        if rgb_sensor is not None:
            # The RGB sensor must already normalize with CLIP's statistics.
            # (These checks are guarded by the None test above so that
            # depth-only sensor suites do not crash.)
            assert (
                np.linalg.norm(
                    np.array(rgb_sensor._norm_means)
                    - np.array(ClipResNetPreprocessor.CLIP_RGB_MEANS)
                )
                < 1e-5
            )
            assert (
                np.linalg.norm(
                    np.array(rgb_sensor._norm_sds)
                    - np.array(ClipResNetPreprocessor.CLIP_RGB_STDS)
                )
                < 1e-5
            )
            preprocessors.append(
                ClipResNetPreprocessor(
                    rgb_input_uuid=rgb_sensor.uuid,
                    clip_model_type=self.clip_model_type,
                    pool=self.pool,
                    output_uuid="rgb_clip_resnet",
                    input_img_height_width=(rgb_sensor.height, rgb_sensor.width),
                )
            )

        depth_sensor = next(
            (s for s in self.sensors if isinstance(s, DepthSensor)), None
        )
        if depth_sensor is not None:
            preprocessors.append(
                ClipResNetPreprocessor(
                    rgb_input_uuid=depth_sensor.uuid,
                    clip_model_type=self.clip_model_type,
                    pool=self.pool,
                    output_uuid="depth_clip_resnet",
                    input_img_height_width=(depth_sensor.height, depth_sensor.width),
                )
            )

        return preprocessors

    def create_model(
        self,
        num_actions: int,
        add_prev_actions: bool,
        look_down_first: bool = False,
        look_down_action_index: Optional[int] = None,
        hidden_size: int = 512,
        rnn_type="GRU",
        model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> nn.Module:
        has_rgb = any(isinstance(s, RGBSensor) for s in self.sensors)
        has_depth = any(isinstance(s, DepthSensor) for s in self.sensors)

        goal_sensor_uuid = next(
            (s.uuid for s in self.sensors if isinstance(s, self.goal_sensor_type)),
            None,
        )

        if model_kwargs is None:
            model_kwargs = {}

        model_kwargs = dict(
            action_space=gym.spaces.Discrete(num_actions),
            observation_space=kwargs["sensor_preprocessor_graph"].observation_spaces,
            goal_sensor_uuid=goal_sensor_uuid,
            rgb_resnet_preprocessor_uuid="rgb_clip_resnet" if has_rgb else None,
            depth_resnet_preprocessor_uuid="depth_clip_resnet" if has_depth else None,
            hidden_size=hidden_size,
            goal_dims=32,
            add_prev_actions=add_prev_actions,
            rnn_type=rnn_type,
            **model_kwargs,
        )

        if not look_down_first:
            return ResnetTensorNavActorCritic(**model_kwargs)
        else:
            return LookDownFirstResnetTensorNavActorCritic(
                look_down_action_index=look_down_action_index, **model_kwargs
            )
allenact-main
projects/objectnav_baselines/experiments/clip/mixins.py
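LookDownFirstResnetTensorNavActorCritic above forces the first action of every episode (where masks is 0) to be the look-down action by overwriting the policy logits with a large constant. The standalone snippet below, with made-up shapes and an assumed look-down index, demonstrates just that masking arithmetic.

import torch

# Standalone illustration of the logit masking in
# LookDownFirstResnetTensorNavActorCritic.forward; values are made up.
num_actions, look_down_action_index = 4, 2

look_down_delta = torch.zeros(1, 1, num_actions)
look_down_delta[0, 0, look_down_action_index] = 99999

logits = torch.randn(1, 1, num_actions)  # stand-in for the base model's logits

masks = torch.zeros(1, 1, 1)  # 0 at the first step of an episode
forced = logits * masks + look_down_delta * (1 - masks)
assert forced.argmax(-1).item() == look_down_action_index  # look-down dominates

masks = torch.ones(1, 1, 1)  # 1 afterwards: logits pass through unchanged
assert torch.equal(logits * masks + look_down_delta * (1 - masks), logits)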
allenact-main
projects/objectnav_baselines/experiments/clip/__init__.py
from typing import Sequence, Union

import torch.nn as nn

from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.utils.experiment_utils import Builder, TrainingPipeline
from allenact_plugins.ithor_plugin.ithor_sensors import GoalObjectTypeThorSensor
from allenact_plugins.robothor_plugin.robothor_sensors import DepthSensorThor
from projects.objectnav_baselines.experiments.ithor.objectnav_ithor_base import (
    ObjectNaviThorBaseConfig,
)
from projects.objectnav_baselines.mixins import (
    ResNetPreprocessGRUActorCriticMixin,
    ObjectNavPPOMixin,
)


class ObjectNaviThorDepthPPOExperimentConfig(ObjectNaviThorBaseConfig):
    """An Object Navigation experiment configuration in iTHOR with Depth
    input."""

    SENSORS = (
        DepthSensorThor(
            height=ObjectNaviThorBaseConfig.SCREEN_SIZE,
            width=ObjectNaviThorBaseConfig.SCREEN_SIZE,
            use_normalization=True,
            uuid="depth_lowres",
        ),
        GoalObjectTypeThorSensor(object_types=ObjectNaviThorBaseConfig.TARGET_TYPES),
    )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.preprocessing_and_model = ResNetPreprocessGRUActorCriticMixin(
            sensors=self.SENSORS,
            resnet_type="RN18",
            screen_size=self.SCREEN_SIZE,
            goal_sensor_type=GoalObjectTypeThorSensor,
        )

    def training_pipeline(self, **kwargs) -> TrainingPipeline:
        return ObjectNavPPOMixin.training_pipeline(
            auxiliary_uuids=[],
            multiple_beliefs=False,
            advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
        )

    def preprocessors(self) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
        return self.preprocessing_and_model.preprocessors()

    def create_model(self, **kwargs) -> nn.Module:
        return self.preprocessing_and_model.create_model(
            num_actions=self.ACTION_SPACE.n, **kwargs
        )

    def tag(self):
        return "ObjectNav-iTHOR-Depth-ResNet18GRU-DDPPO"
allenact-main
projects/objectnav_baselines/experiments/ithor/objectnav_ithor_depth_resnet18gru_ddppo.py
allenact-main
projects/objectnav_baselines/experiments/ithor/__init__.py
from typing import Sequence, Union

import torch.nn as nn

from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.utils.experiment_utils import Builder, TrainingPipeline
from allenact_plugins.ithor_plugin.ithor_sensors import (
    RGBSensorThor,
    GoalObjectTypeThorSensor,
)
from allenact_plugins.robothor_plugin.robothor_sensors import DepthSensorThor
from projects.objectnav_baselines.experiments.ithor.objectnav_ithor_base import (
    ObjectNaviThorBaseConfig,
)
from projects.objectnav_baselines.mixins import (
    ResNetPreprocessGRUActorCriticMixin,
    ObjectNavPPOMixin,
)


class ObjectNaviThorRGBDPPOExperimentConfig(ObjectNaviThorBaseConfig):
    """An Object Navigation experiment configuration in iTHOR with RGBD
    input."""

    SENSORS = [
        RGBSensorThor(
            height=ObjectNaviThorBaseConfig.SCREEN_SIZE,
            width=ObjectNaviThorBaseConfig.SCREEN_SIZE,
            use_resnet_normalization=True,
            uuid="rgb_lowres",
        ),
        DepthSensorThor(
            height=ObjectNaviThorBaseConfig.SCREEN_SIZE,
            width=ObjectNaviThorBaseConfig.SCREEN_SIZE,
            use_normalization=True,
            uuid="depth_lowres",
        ),
        GoalObjectTypeThorSensor(object_types=ObjectNaviThorBaseConfig.TARGET_TYPES),
    ]

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.preprocessing_and_model = ResNetPreprocessGRUActorCriticMixin(
            sensors=self.SENSORS,
            resnet_type="RN18",
            screen_size=self.SCREEN_SIZE,
            goal_sensor_type=GoalObjectTypeThorSensor,
        )

    def training_pipeline(self, **kwargs) -> TrainingPipeline:
        return ObjectNavPPOMixin.training_pipeline(
            auxiliary_uuids=[],
            multiple_beliefs=False,
            advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
        )

    def preprocessors(self) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
        return self.preprocessing_and_model.preprocessors()

    def create_model(self, **kwargs) -> nn.Module:
        return self.preprocessing_and_model.create_model(
            num_actions=self.ACTION_SPACE.n, **kwargs
        )

    def tag(self):
        return "ObjectNav-iTHOR-RGBD-ResNet18GRU-DDPPO"
allenact-main
projects/objectnav_baselines/experiments/ithor/objectnav_ithor_rgbd_resnet18gru_ddppo.py
from typing import Sequence, Union

import torch.nn as nn

from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.utils.experiment_utils import Builder, TrainingPipeline
from allenact_plugins.ithor_plugin.ithor_sensors import (
    GoalObjectTypeThorSensor,
    RGBSensorThor,
)
from projects.objectnav_baselines.experiments.ithor.objectnav_ithor_base import (
    ObjectNaviThorBaseConfig,
)
from projects.objectnav_baselines.mixins import (
    ResNetPreprocessGRUActorCriticMixin,
    ObjectNavPPOMixin,
)


class ObjectNaviThorRGBPPOExperimentConfig(ObjectNaviThorBaseConfig):
    """An Object Navigation experiment configuration in iTHOR with RGB
    input."""

    SENSORS = [
        RGBSensorThor(
            height=ObjectNaviThorBaseConfig.SCREEN_SIZE,
            width=ObjectNaviThorBaseConfig.SCREEN_SIZE,
            use_resnet_normalization=True,
            uuid="rgb_lowres",
        ),
        GoalObjectTypeThorSensor(object_types=ObjectNaviThorBaseConfig.TARGET_TYPES),
    ]

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.preprocessing_and_model = ResNetPreprocessGRUActorCriticMixin(
            sensors=self.SENSORS,
            resnet_type="RN18",
            screen_size=self.SCREEN_SIZE,
            goal_sensor_type=GoalObjectTypeThorSensor,
        )

    def training_pipeline(self, **kwargs) -> TrainingPipeline:
        return ObjectNavPPOMixin.training_pipeline(
            auxiliary_uuids=[],
            multiple_beliefs=False,
            advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
        )

    def preprocessors(self) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
        return self.preprocessing_and_model.preprocessors()

    def create_model(self, **kwargs) -> nn.Module:
        return self.preprocessing_and_model.create_model(
            num_actions=self.ACTION_SPACE.n, **kwargs
        )

    @classmethod
    def tag(cls):
        return "ObjectNav-iTHOR-RGB-ResNet18GRU-DDPPO"
allenact-main
projects/objectnav_baselines/experiments/ithor/objectnav_ithor_rgb_resnet18gru_ddppo.py
import os
from abc import ABC

import torch

from projects.objectnav_baselines.experiments.objectnav_thor_base import (
    ObjectNavThorBaseConfig,
)


class ObjectNaviThorBaseConfig(ObjectNavThorBaseConfig, ABC):
    """The base config for all iTHOR ObjectNav experiments."""

    THOR_COMMIT_ID = "9549791ce2e7f472063a10abb1fb7664159fec23"
    AGENT_MODE = "default"

    DEFAULT_NUM_TRAIN_PROCESSES = 40 if torch.cuda.is_available() else 1

    TRAIN_DATASET_DIR = os.path.join(os.getcwd(), "datasets/ithor-objectnav/train")
    VAL_DATASET_DIR = os.path.join(os.getcwd(), "datasets/ithor-objectnav/val")

    TARGET_TYPES = tuple(
        sorted(
            [
                "AlarmClock",
                "Apple",
                "Book",
                "Bowl",
                "Box",
                "Candle",
                "GarbageCan",
                "HousePlant",
                "Laptop",
                "SoapBottle",
                "Television",
                "Toaster",
            ],
        )
    )
allenact-main
projects/objectnav_baselines/experiments/ithor/objectnav_ithor_base.py
allenact-main
projects/babyai_baselines/__init__.py
allenact-main
projects/babyai_baselines/experiments/__init__.py
from abc import ABC
from typing import Dict, Any, List, Optional, Union, Sequence, cast

import gym
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR

from allenact.algorithms.onpolicy_sync.losses import PPO, A2C
from allenact.algorithms.onpolicy_sync.losses.a2cacktr import A2CConfig
from allenact.algorithms.onpolicy_sync.losses.imitation import Imitation
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.base_abstractions.experiment_config import ExperimentConfig, MachineParams
from allenact.base_abstractions.misc import Loss
from allenact.base_abstractions.sensor import SensorSuite, Sensor, ExpertActionSensor
from allenact.base_abstractions.task import TaskSampler
from allenact.utils.experiment_utils import (
    Builder,
    LinearDecay,
    PipelineStage,
    TrainingPipeline,
)
from allenact_plugins.babyai_plugin.babyai_models import BabyAIRecurrentACModel
from allenact_plugins.babyai_plugin.babyai_tasks import BabyAITask, BabyAITaskSampler
from allenact_plugins.minigrid_plugin.minigrid_sensors import (
    EgocentricMiniGridSensor,
    MiniGridMissionSensor,
)


class BaseBabyAIExperimentConfig(ExperimentConfig, ABC):
    """Base experimental config."""

    LEVEL: Optional[str] = None
    TOTAL_RL_TRAIN_STEPS: Optional[int] = None
    AGENT_VIEW_SIZE: int = 7
    ROLLOUT_STEPS: Optional[int] = None
    NUM_TRAIN_SAMPLERS: Optional[int] = None
    NUM_TEST_TASKS: Optional[int] = None
    INSTR_LEN: Optional[int] = None
    USE_INSTR: Optional[bool] = None
    GPU_ID: Optional[int] = None
    USE_EXPERT = False
    SHOULD_LOG = True
    PPO_NUM_MINI_BATCH = 2
    ARCH: Optional[str] = None
    NUM_CKPTS_TO_SAVE = 50

    TEST_SEED_OFFSET = 0

    DEFAULT_LR = 1e-3

    @classmethod
    def METRIC_ACCUMULATE_INTERVAL(cls):
        return cls.NUM_TRAIN_SAMPLERS * 1000

    @classmethod
    def get_sensors(cls) -> Sequence[Sensor]:
        assert cls.USE_INSTR is not None

        return (
            [
                EgocentricMiniGridSensor(
                    agent_view_size=cls.AGENT_VIEW_SIZE, view_channels=3
                ),
            ]
            + (
                [MiniGridMissionSensor(instr_len=cls.INSTR_LEN)]  # type:ignore
                if cls.USE_INSTR
                else []
            )
            + (
                [
                    ExpertActionSensor(  # type: ignore
                        nactions=len(BabyAITask.class_action_names())
                    )
                ]
                if cls.USE_EXPERT
                else []
            )
        )

    @classmethod
    def rl_loss_default(cls, alg: str, steps: Optional[int] = None):
        if alg == "ppo":
            assert steps is not None
            return {
                "loss": Builder(
                    PPO, kwargs={"clip_decay": LinearDecay(steps)}, default=PPOConfig,
                ),
                "num_mini_batch": cls.PPO_NUM_MINI_BATCH,
                "update_repeats": 4,
            }
        elif alg == "a2c":
            return {
                "loss": A2C(**A2CConfig),
                "num_mini_batch": 1,
                "update_repeats": 1,
            }
        elif alg == "imitation":
            return {
                "loss": Imitation(),
                "num_mini_batch": cls.PPO_NUM_MINI_BATCH,
                "update_repeats": 4,
            }
        else:
            raise NotImplementedError

    @classmethod
    def _training_pipeline(
        cls,
        named_losses: Dict[str, Union[Loss, Builder]],
        pipeline_stages: List[PipelineStage],
        num_mini_batch: int,
        update_repeats: int,
        total_train_steps: int,
        lr: Optional[float] = None,
    ):
        lr = cls.DEFAULT_LR if lr is None else lr

        num_steps = cls.ROLLOUT_STEPS
        metric_accumulate_interval = (
            cls.METRIC_ACCUMULATE_INTERVAL()
        )  # Log every 10 max length tasks
        save_interval = int(cls.TOTAL_RL_TRAIN_STEPS / cls.NUM_CKPTS_TO_SAVE)
        gamma = 0.99

        use_gae = "reinforce_loss" not in named_losses
        gae_lambda = 0.99
        max_grad_norm = 0.5

        return TrainingPipeline(
            save_interval=save_interval,
            metric_accumulate_interval=metric_accumulate_interval,
            optimizer_builder=Builder(cast(optim.Optimizer, optim.Adam), dict(lr=lr)),
            num_mini_batch=num_mini_batch,
            update_repeats=update_repeats,
            max_grad_norm=max_grad_norm,
            num_steps=num_steps,
            named_losses=named_losses,
            gamma=gamma,
            use_gae=use_gae,
            gae_lambda=gae_lambda,
            advance_scene_rollout_period=None,
            should_log=cls.SHOULD_LOG,
            pipeline_stages=pipeline_stages,
            lr_scheduler_builder=Builder(
                LambdaLR, {"lr_lambda": LinearDecay(steps=cls.TOTAL_RL_TRAIN_STEPS)}  # type: ignore
            ),
        )

    @classmethod
    def machine_params(
        cls, mode="train", gpu_id="default", n_train_processes="default", **kwargs
    ):
        if mode == "train":
            if n_train_processes == "default":
                nprocesses = cls.NUM_TRAIN_SAMPLERS
            else:
                nprocesses = n_train_processes
        elif mode == "valid":
            nprocesses = 0
        elif mode == "test":
            nprocesses = min(
                100 if torch.cuda.is_available() else 8, cls.NUM_TEST_TASKS
            )
        else:
            raise NotImplementedError("mode must be 'train', 'valid', or 'test'.")

        if gpu_id == "default":
            devices = [] if cls.GPU_ID is None else [cls.GPU_ID]
        else:
            devices = [gpu_id]

        return MachineParams(nprocesses=nprocesses, devices=devices)

    @classmethod
    def create_model(cls, **kwargs) -> nn.Module:
        sensors = cls.get_sensors()
        return BabyAIRecurrentACModel(
            action_space=gym.spaces.Discrete(len(BabyAITask.class_action_names())),
            observation_space=SensorSuite(sensors).observation_spaces,
            use_instr=cls.USE_INSTR,
            use_memory=True,
            arch=cls.ARCH,
        )

    @classmethod
    def make_sampler_fn(cls, **kwargs) -> TaskSampler:
        return BabyAITaskSampler(**kwargs)

    def train_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        return {
            "env_builder": self.LEVEL,
            "sensors": self.get_sensors(),
            "seed": seeds[process_ind] if seeds is not None else None,
        }

    def valid_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        raise RuntimeError

    def test_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        max_tasks = self.NUM_TEST_TASKS // total_processes + (
            process_ind < (self.NUM_TEST_TASKS % total_processes)
        )
        task_seeds_list = [
            2 ** 31 - 1 + self.TEST_SEED_OFFSET + process_ind + total_processes * i
            for i in range(max_tasks)
        ]
        # print(max_tasks, process_ind, total_processes, task_seeds_list)

        assert len(task_seeds_list) == 0 or (
            min(task_seeds_list) >= 0 and max(task_seeds_list) <= 2 ** 32 - 1
        )

        train_sampler_args = self.train_task_sampler_args(
            process_ind=process_ind,
            total_processes=total_processes,
            devices=devices,
            seeds=seeds,
            deterministic_cudnn=deterministic_cudnn,
        )
        return {
            **train_sampler_args,
            "task_seeds_list": task_seeds_list,
            "max_tasks": max_tasks,
            "deterministic_sampling": True,
            "sensors": [
                s for s in train_sampler_args["sensors"] if "Expert" not in str(type(s))
            ],
        }
allenact-main
projects/babyai_baselines/experiments/base.py
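test_task_sampler_args above shards NUM_TEST_TASKS deterministic seeds across test processes, interleaving seeds so each process gets a disjoint slice. The pure-Python check below replicates that arithmetic with made-up counts and verifies the shards are disjoint and cover exactly NUM_TEST_TASKS seeds.

# Pure-Python replica of the seed sharding in test_task_sampler_args above;
# NUM_TEST_TASKS and total_processes are made-up example values.
NUM_TEST_TASKS, TEST_SEED_OFFSET, total_processes = 10, 0, 3

all_seeds = []
for process_ind in range(total_processes):
    max_tasks = NUM_TEST_TASKS // total_processes + (
        process_ind < (NUM_TEST_TASKS % total_processes)
    )
    task_seeds_list = [
        2 ** 31 - 1 + TEST_SEED_OFFSET + process_ind + total_processes * i
        for i in range(max_tasks)
    ]
    all_seeds.extend(task_seeds_list)

# Every process gets a disjoint, interleaved slice; together the slices are
# exactly NUM_TEST_TASKS consecutive seeds starting at 2**31 - 1.
assert len(all_seeds) == NUM_TEST_TASKS == len(set(all_seeds))
assert sorted(all_seeds) == [2 ** 31 - 1 + i for i in range(NUM_TEST_TASKS)]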
from allenact.utils.experiment_utils import PipelineStage
from projects.babyai_baselines.experiments.go_to_local.base import (
    BaseBabyAIGoToLocalExperimentConfig,
)


class BCBabyAIGoToLocalExperimentConfig(BaseBabyAIGoToLocalExperimentConfig):
    """Behavior cloning."""

    USE_EXPERT = True

    @classmethod
    def tag(cls):
        return "BabyAIGoToLocalBC"

    @classmethod
    def training_pipeline(cls, **kwargs):
        total_train_steps = cls.TOTAL_IL_TRAIN_STEPS

        ppo_info = cls.rl_loss_default("ppo", steps=-1)
        imitation_info = cls.rl_loss_default("imitation")

        return cls._training_pipeline(
            named_losses={"imitation_loss": imitation_info["loss"]},
            pipeline_stages=[
                PipelineStage(
                    loss_names=["imitation_loss"], max_stage_steps=total_train_steps,
                ),
            ],
            num_mini_batch=min(
                info["num_mini_batch"] for info in [ppo_info, imitation_info]
            ),
            update_repeats=min(
                info["update_repeats"] for info in [ppo_info, imitation_info]
            ),
            total_train_steps=total_train_steps,
        )
allenact-main
projects/babyai_baselines/experiments/go_to_local/bc.py
import os
from typing import Optional, Sequence

import torch

from allenact.algorithms.onpolicy_sync.storage import RolloutBlockStorage
from allenact.utils.experiment_utils import (
    PipelineStage,
    StageComponent,
    TrainingSettings,
)
from allenact_plugins.babyai_plugin.babyai_constants import (
    BABYAI_EXPERT_TRAJECTORIES_DIR,
)
from allenact_plugins.minigrid_plugin.minigrid_offpolicy import (
    MiniGridOffPolicyExpertCELoss,
    MiniGridExpertTrajectoryStorage,
)
from projects.tutorials.minigrid_offpolicy_tutorial import (
    BCOffPolicyBabyAIGoToLocalExperimentConfig,
)


class DistributedBCOffPolicyBabyAIGoToLocalExperimentConfig(
    BCOffPolicyBabyAIGoToLocalExperimentConfig
):
    """Distributed off-policy imitation."""

    @classmethod
    def tag(cls):
        return "DistributedBabyAIGoToLocalBCOffPolicy"

    @classmethod
    def machine_params(
        cls, mode="train", gpu_id="default", n_train_processes="default", **kwargs
    ):
        res = super().machine_params(mode, gpu_id, n_train_processes, **kwargs)

        if res["nprocesses"] > 0 and torch.cuda.is_available():
            ngpu_to_use = min(torch.cuda.device_count(), 2)
            res["nprocesses"] = [res["nprocesses"] // ngpu_to_use] * ngpu_to_use
            res["gpu_ids"] = list(range(ngpu_to_use))

        return res

    @classmethod
    def expert_ce_loss_kwargs_generator(
        cls, worker_id: int, rollouts_per_worker: Sequence[int], seed: Optional[int]
    ):
        return dict(num_workers=len(rollouts_per_worker), current_worker=worker_id)

    @classmethod
    def training_pipeline(cls, **kwargs):
        total_train_steps = cls.TOTAL_IL_TRAIN_STEPS
        ppo_info = cls.rl_loss_default("ppo", steps=-1)

        num_mini_batch = ppo_info["num_mini_batch"]
        update_repeats = ppo_info["update_repeats"]

        return cls._training_pipeline(
            named_losses={
                "offpolicy_expert_ce_loss": MiniGridOffPolicyExpertCELoss(
                    total_episodes_in_epoch=int(1e6)
                ),
            },
            named_storages={
                "onpolicy": RolloutBlockStorage(),
                "minigrid_offpolicy_expert": MiniGridExpertTrajectoryStorage(
                    data_path=os.path.join(
                        BABYAI_EXPERT_TRAJECTORIES_DIR,
                        "BabyAI-GoToLocal-v0{}.pkl".format(
                            "" if torch.cuda.is_available() else "-small"
                        ),
                    ),
                    num_samplers=cls.NUM_TRAIN_SAMPLERS,
                    rollout_len=cls.ROLLOUT_STEPS,
                    instr_len=cls.INSTR_LEN,
                ),
            },
            pipeline_stages=[
                PipelineStage(
                    loss_names=["offpolicy_expert_ce_loss"],
                    max_stage_steps=total_train_steps,
                    stage_components=[
                        StageComponent(
                            uuid="offpolicy",
                            storage_uuid="minigrid_offpolicy_expert",
                            loss_names=["offpolicy_expert_ce_loss"],
                            training_settings=TrainingSettings(
                                update_repeats=num_mini_batch * update_repeats,
                                num_mini_batch=1,
                            ),
                        )
                    ],
                ),
            ],
            num_mini_batch=0,
            update_repeats=0,
            total_train_steps=total_train_steps,
        )
allenact-main
projects/babyai_baselines/experiments/go_to_local/distributed_bc_offpolicy.py
from allenact.utils.experiment_utils import PipelineStage, LinearDecay
from projects.babyai_baselines.experiments.go_to_local.base import (
    BaseBabyAIGoToLocalExperimentConfig,
)


class DaggerBabyAIGoToLocalExperimentConfig(BaseBabyAIGoToLocalExperimentConfig):
    """Find the goal in the BabyAI GoToLocal env using imitation learning.

    Training with DAgger.
    """

    USE_EXPERT = True

    @classmethod
    def tag(cls):
        return "BabyAIGoToLocalDagger"

    @classmethod
    def training_pipeline(cls, **kwargs):
        total_train_steps = cls.TOTAL_IL_TRAIN_STEPS
        loss_info = cls.rl_loss_default("imitation")
        return cls._training_pipeline(
            named_losses={"imitation_loss": loss_info["loss"]},
            pipeline_stages=[
                PipelineStage(
                    loss_names=["imitation_loss"],
                    teacher_forcing=LinearDecay(
                        startp=1.0, endp=0.0, steps=total_train_steps // 2,
                    ),
                    max_stage_steps=total_train_steps,
                )
            ],
            num_mini_batch=loss_info["num_mini_batch"],
            update_repeats=loss_info["update_repeats"],
            total_train_steps=total_train_steps,
        )
allenact-main
projects/babyai_baselines/experiments/go_to_local/dagger.py
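The DAgger config above anneals teacher forcing from 1.0 to 0.0 over the first half of training via LinearDecay(startp=1.0, endp=0.0, steps=total_train_steps // 2). Below is a pure-Python stand-in sketching the intended schedule; it is an approximation written for illustration, not allenact's actual LinearDecay implementation.

# Approximate stand-in for the LinearDecay teacher-forcing schedule used
# above; this sketch is not allenact's implementation.
def linear_decay(step: int, steps: int, startp: float = 1.0, endp: float = 0.0) -> float:
    frac = min(max(step / steps, 0.0), 1.0)  # clamp past the end of the decay
    return startp + frac * (endp - startp)

total_train_steps = 100_000  # made-up value
half = total_train_steps // 2

print(linear_decay(0, half))          # 1.0: always follow the expert at first
print(linear_decay(half // 2, half))  # 0.5: expert actions half the time
print(linear_decay(half, half))       # 0.0: pure student actions thereafter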
allenact-main
projects/babyai_baselines/experiments/go_to_local/__init__.py
import torch

from allenact.utils.experiment_utils import PipelineStage, LinearDecay
from projects.babyai_baselines.experiments.go_to_local.base import (
    BaseBabyAIGoToLocalExperimentConfig,
)


class BCTeacherForcingBabyAIGoToLocalExperimentConfig(
    BaseBabyAIGoToLocalExperimentConfig
):
    """Behavior clone with teacher forcing."""

    USE_EXPERT = True

    GPU_ID = 0 if torch.cuda.is_available() else None

    @classmethod
    def METRIC_ACCUMULATE_INTERVAL(cls):
        return 1

    @classmethod
    def tag(cls):
        return "BabyAIGoToLocalBCTeacherForcing"

    @classmethod
    def training_pipeline(cls, **kwargs):
        total_train_steps = cls.TOTAL_IL_TRAIN_STEPS

        ppo_info = cls.rl_loss_default("ppo", steps=-1)
        imitation_info = cls.rl_loss_default("imitation")

        return cls._training_pipeline(
            named_losses={"imitation_loss": imitation_info["loss"]},
            pipeline_stages=[
                PipelineStage(
                    loss_names=["imitation_loss"],
                    teacher_forcing=LinearDecay(
                        startp=1.0, endp=1.0, steps=total_train_steps,
                    ),
                    max_stage_steps=total_train_steps,
                ),
            ],
            num_mini_batch=min(
                info["num_mini_batch"] for info in [ppo_info, imitation_info]
            ),
            update_repeats=min(
                info["update_repeats"] for info in [ppo_info, imitation_info]
            ),
            total_train_steps=total_train_steps,
        )
allenact-main
projects/babyai_baselines/experiments/go_to_local/bc_teacher_forcing.py
import torch

from .bc_teacher_forcing import BCTeacherForcingBabyAIGoToLocalExperimentConfig


class DistributedBCTeacherForcingBabyAIGoToLocalExperimentConfig(
    BCTeacherForcingBabyAIGoToLocalExperimentConfig
):
    """Distributed behavior clone with teacher forcing."""

    USE_EXPERT = True

    GPU_ID = 0 if torch.cuda.is_available() else None

    @classmethod
    def METRIC_ACCUMULATE_INTERVAL(cls):
        return 1

    @classmethod
    def tag(cls):
        return "BabyAIGoToLocalBCTeacherForcingDistributed"

    @classmethod
    def machine_params(
        cls, mode="train", gpu_id="default", n_train_processes="default", **kwargs
    ):
        res = super().machine_params(mode, gpu_id, n_train_processes, **kwargs)

        if res["nprocesses"] > 0 and torch.cuda.is_available():
            ngpu_to_use = min(torch.cuda.device_count(), 2)
            res["nprocesses"] = [res["nprocesses"] // ngpu_to_use] * ngpu_to_use
            res["gpu_ids"] = list(range(ngpu_to_use))

        return res
allenact-main
projects/babyai_baselines/experiments/go_to_local/distributed_bc_teacher_forcing.py
import torch

from allenact.utils.experiment_utils import PipelineStage
from projects.babyai_baselines.experiments.go_to_local.base import (
    BaseBabyAIGoToLocalExperimentConfig,
)


class PPOBabyAIGoToLocalExperimentConfig(BaseBabyAIGoToLocalExperimentConfig):
    """PPO only."""

    NUM_TRAIN_SAMPLERS: int = (
        128 * 12
        if torch.cuda.is_available()
        else BaseBabyAIGoToLocalExperimentConfig.NUM_TRAIN_SAMPLERS
    )
    ROLLOUT_STEPS: int = 32
    USE_LR_DECAY = False
    DEFAULT_LR = 1e-4

    @classmethod
    def tag(cls):
        return "BabyAIGoToLocalPPO"

    @classmethod
    def training_pipeline(cls, **kwargs):
        total_train_steps = cls.TOTAL_RL_TRAIN_STEPS
        ppo_info = cls.rl_loss_default("ppo", steps=total_train_steps)

        return cls._training_pipeline(
            named_losses={"ppo_loss": ppo_info["loss"]},
            pipeline_stages=[
                PipelineStage(
                    loss_names=["ppo_loss"], max_stage_steps=total_train_steps,
                ),
            ],
            num_mini_batch=ppo_info["num_mini_batch"],
            update_repeats=ppo_info["update_repeats"],
            total_train_steps=total_train_steps,
        )
allenact-main
projects/babyai_baselines/experiments/go_to_local/ppo.py
import torch

from allenact.utils.experiment_utils import PipelineStage
from projects.babyai_baselines.experiments.go_to_local.base import (
    BaseBabyAIGoToLocalExperimentConfig,
)


class A2CBabyAIGoToLocalExperimentConfig(BaseBabyAIGoToLocalExperimentConfig):
    """A2C only."""

    NUM_TRAIN_SAMPLERS: int = (
        128 * 6
        if torch.cuda.is_available()
        else BaseBabyAIGoToLocalExperimentConfig.NUM_TRAIN_SAMPLERS
    )
    ROLLOUT_STEPS: int = 16
    USE_LR_DECAY = False
    DEFAULT_LR = 1e-4

    @classmethod
    def tag(cls):
        return "BabyAIGoToLocalA2C"

    @classmethod
    def training_pipeline(cls, **kwargs):
        total_training_steps = cls.TOTAL_RL_TRAIN_STEPS
        a2c_info = cls.rl_loss_default("a2c", steps=total_training_steps)

        return cls._training_pipeline(
            named_losses={"a2c_loss": a2c_info["loss"]},
            pipeline_stages=[
                PipelineStage(
                    loss_names=["a2c_loss"], max_stage_steps=total_training_steps,
                ),
            ],
            num_mini_batch=a2c_info["num_mini_batch"],
            update_repeats=a2c_info["update_repeats"],
            total_train_steps=total_training_steps,
        )
allenact-main
projects/babyai_baselines/experiments/go_to_local/a2c.py
from abc import ABC
from typing import Dict, List, Optional, Union, Any, cast

import gym
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR

from allenact.algorithms.onpolicy_sync.storage import ExperienceStorage
from allenact.base_abstractions.misc import Loss
from allenact.base_abstractions.sensor import SensorSuite
from allenact.utils.experiment_utils import (
    Builder,
    LinearDecay,
    PipelineStage,
    TrainingPipeline,
)
from allenact_plugins.babyai_plugin.babyai_models import BabyAIRecurrentACModel
from allenact_plugins.babyai_plugin.babyai_tasks import BabyAITask
from projects.babyai_baselines.experiments.base import BaseBabyAIExperimentConfig


class BaseBabyAIGoToLocalExperimentConfig(BaseBabyAIExperimentConfig, ABC):
    """Base experimental config."""

    LEVEL: Optional[str] = "BabyAI-GoToLocal-v0"
    TOTAL_RL_TRAIN_STEPS = int(15e6)
    TOTAL_IL_TRAIN_STEPS = int(7.5e6)
    ROLLOUT_STEPS: int = 128
    NUM_TRAIN_SAMPLERS: int = 128 if torch.cuda.is_available() else 4
    PPO_NUM_MINI_BATCH = 4
    NUM_CKPTS_TO_SAVE = 20
    NUM_TEST_TASKS: int = 1000
    USE_LR_DECAY: bool = True

    # ARCH = "cnn1"
    # ARCH = "cnn2"
    ARCH = "expert_filmcnn"

    USE_INSTR = True
    INSTR_LEN: int = 5

    INCLUDE_AUXILIARY_HEAD = False

    @classmethod
    def METRIC_ACCUMULATE_INTERVAL(cls):
        return cls.NUM_TRAIN_SAMPLERS * 64

    @classmethod
    def _training_pipeline(  # type:ignore
        cls,
        named_losses: Dict[str, Union[Loss, Builder]],
        pipeline_stages: List[PipelineStage],
        num_mini_batch: int,
        update_repeats: int,
        total_train_steps: int,
        lr: Optional[float] = None,
        named_storages: Optional[Dict[str, Union[ExperienceStorage, Builder]]] = None,
    ):
        # Honor an explicitly passed learning rate, falling back to the
        # class default (the original ignored the `lr` argument).
        lr = cls.DEFAULT_LR if lr is None else lr

        num_steps = cls.ROLLOUT_STEPS
        metric_accumulate_interval = (
            cls.METRIC_ACCUMULATE_INTERVAL()
        )  # Log every 10 max length tasks
        save_interval = int(total_train_steps / cls.NUM_CKPTS_TO_SAVE)
        gamma = 0.99

        use_gae = "reinforce_loss" not in named_losses
        gae_lambda = 0.99
        max_grad_norm = 0.5

        return TrainingPipeline(
            save_interval=save_interval,
            metric_accumulate_interval=metric_accumulate_interval,
            optimizer_builder=Builder(cast(optim.Optimizer, optim.Adam), dict(lr=lr)),
            num_mini_batch=num_mini_batch,
            update_repeats=update_repeats,
            max_grad_norm=max_grad_norm,
            num_steps=num_steps,
            named_losses=named_losses,
            gamma=gamma,
            use_gae=use_gae,
            gae_lambda=gae_lambda,
            advance_scene_rollout_period=None,
            should_log=cls.SHOULD_LOG,
            pipeline_stages=pipeline_stages,
            named_storages=named_storages,
            lr_scheduler_builder=Builder(
                LambdaLR, {"lr_lambda": LinearDecay(steps=total_train_steps)}  # type: ignore
            )
            if cls.USE_LR_DECAY
            else None,
        )

    @classmethod
    def create_model(cls, **kwargs) -> nn.Module:
        sensors = cls.get_sensors()
        return BabyAIRecurrentACModel(
            action_space=gym.spaces.Discrete(len(BabyAITask.class_action_names())),
            observation_space=SensorSuite(sensors).observation_spaces,
            use_instr=cls.USE_INSTR,
            use_memory=True,
            arch=cls.ARCH,
            instr_dim=256,
            lang_model="attgru",
            memory_dim=2048,
            include_auxiliary_head=cls.INCLUDE_AUXILIARY_HEAD,
        )

    def valid_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        raise RuntimeError("No validation processes for these tasks")
allenact-main
projects/babyai_baselines/experiments/go_to_local/base.py
from allenact.utils.experiment_utils import PipelineStage
from projects.babyai_baselines.experiments.go_to_obj.base import (
    BaseBabyAIGoToObjExperimentConfig,
)


class BCBabyAIGoToObjExperimentConfig(BaseBabyAIGoToObjExperimentConfig):
    """Behavior cloning."""

    USE_EXPERT = True

    @classmethod
    def tag(cls):
        return "BabyAIGoToObjBC"

    @classmethod
    def training_pipeline(cls, **kwargs):
        total_train_steps = cls.TOTAL_IL_TRAIN_STEPS

        ppo_info = cls.rl_loss_default("ppo", steps=-1)
        imitation_info = cls.rl_loss_default("imitation")

        return cls._training_pipeline(
            named_losses={"imitation_loss": imitation_info["loss"]},
            pipeline_stages=[
                PipelineStage(
                    loss_names=["imitation_loss"], max_stage_steps=total_train_steps,
                ),
            ],
            num_mini_batch=min(
                info["num_mini_batch"] for info in [ppo_info, imitation_info]
            ),
            update_repeats=min(
                info["update_repeats"] for info in [ppo_info, imitation_info]
            ),
            total_train_steps=total_train_steps,
        )
allenact-main
projects/babyai_baselines/experiments/go_to_obj/bc.py
from allenact.utils.experiment_utils import PipelineStage, LinearDecay
from projects.babyai_baselines.experiments.go_to_obj.base import (
    BaseBabyAIGoToObjExperimentConfig,
)


class DaggerBabyAIGoToObjExperimentConfig(BaseBabyAIGoToObjExperimentConfig):
    """Find the goal in the BabyAI GoToObj env using imitation learning.

    Training with DAgger.
    """

    USE_EXPERT = True

    @classmethod
    def tag(cls):
        return "BabyAIGoToObjDagger"

    @classmethod
    def training_pipeline(cls, **kwargs):
        total_train_steps = cls.TOTAL_IL_TRAIN_STEPS
        loss_info = cls.rl_loss_default("imitation")
        return cls._training_pipeline(
            named_losses={"imitation_loss": loss_info["loss"]},
            pipeline_stages=[
                PipelineStage(
                    loss_names=["imitation_loss"],
                    teacher_forcing=LinearDecay(
                        startp=1.0, endp=0.0, steps=total_train_steps // 2,
                    ),
                    max_stage_steps=total_train_steps,
                )
            ],
            num_mini_batch=loss_info["num_mini_batch"],
            update_repeats=loss_info["update_repeats"],
            total_train_steps=total_train_steps,
        )
allenact-main
projects/babyai_baselines/experiments/go_to_obj/dagger.py
allenact-main
projects/babyai_baselines/experiments/go_to_obj/__init__.py
from allenact.utils.experiment_utils import PipelineStage, LinearDecay
from projects.babyai_baselines.experiments.go_to_obj.base import (
    BaseBabyAIGoToObjExperimentConfig,
)


class BCTeacherForcingBabyAIGoToObjExperimentConfig(BaseBabyAIGoToObjExperimentConfig):
    """Behavior cloning with teacher forcing."""

    USE_EXPERT = True

    @classmethod
    def tag(cls):
        return "BabyAIGoToObjBCTeacherForcing"

    @classmethod
    def training_pipeline(cls, **kwargs):
        total_train_steps = cls.TOTAL_IL_TRAIN_STEPS

        ppo_info = cls.rl_loss_default("ppo", steps=-1)
        imitation_info = cls.rl_loss_default("imitation")

        return cls._training_pipeline(
            named_losses={"imitation_loss": imitation_info["loss"]},
            pipeline_stages=[
                PipelineStage(
                    loss_names=["imitation_loss"],
                    teacher_forcing=LinearDecay(
                        startp=1.0, endp=1.0, steps=total_train_steps,
                    ),
                    max_stage_steps=total_train_steps,
                ),
            ],
            num_mini_batch=min(
                info["num_mini_batch"] for info in [ppo_info, imitation_info]
            ),
            update_repeats=min(
                info["update_repeats"] for info in [ppo_info, imitation_info]
            ),
            total_train_steps=total_train_steps,
        )
allenact-main
projects/babyai_baselines/experiments/go_to_obj/bc_teacher_forcing.py
from allenact.utils.experiment_utils import PipelineStage
from projects.babyai_baselines.experiments.go_to_obj.base import (
    BaseBabyAIGoToObjExperimentConfig,
)


class PPOBabyAIGoToObjExperimentConfig(BaseBabyAIGoToObjExperimentConfig):
    """PPO only."""

    @classmethod
    def tag(cls):
        return "BabyAIGoToObjPPO"

    @classmethod
    def training_pipeline(cls, **kwargs):
        total_train_steps = cls.TOTAL_RL_TRAIN_STEPS
        ppo_info = cls.rl_loss_default("ppo", steps=total_train_steps)

        return cls._training_pipeline(
            named_losses={"ppo_loss": ppo_info["loss"]},
            pipeline_stages=[
                PipelineStage(
                    loss_names=["ppo_loss"], max_stage_steps=total_train_steps,
                ),
            ],
            num_mini_batch=ppo_info["num_mini_batch"],
            update_repeats=ppo_info["update_repeats"],
            total_train_steps=total_train_steps,
        )
allenact-main
projects/babyai_baselines/experiments/go_to_obj/ppo.py
from allenact.utils.experiment_utils import PipelineStage
from projects.babyai_baselines.experiments.go_to_obj.base import (
    BaseBabyAIGoToObjExperimentConfig,
)


class A2CBabyAIGoToObjExperimentConfig(BaseBabyAIGoToObjExperimentConfig):
    """A2C only."""

    TOTAL_RL_TRAIN_STEPS = int(1e5)

    @classmethod
    def tag(cls):
        return "BabyAIGoToObjA2C"

    @classmethod
    def training_pipeline(cls, **kwargs):
        total_training_steps = cls.TOTAL_RL_TRAIN_STEPS
        a2c_info = cls.rl_loss_default("a2c", steps=total_training_steps)

        return cls._training_pipeline(
            named_losses={"a2c_loss": a2c_info["loss"]},
            pipeline_stages=[
                PipelineStage(
                    loss_names=["a2c_loss"],
                    max_stage_steps=total_training_steps,
                ),
            ],
            num_mini_batch=a2c_info["num_mini_batch"],
            update_repeats=a2c_info["update_repeats"],
            total_train_steps=total_training_steps,
        )
allenact-main
projects/babyai_baselines/experiments/go_to_obj/a2c.py
from abc import ABC
from typing import Dict, List, Optional, Union, cast

import gym
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR

from allenact.base_abstractions.misc import Loss
from allenact.base_abstractions.sensor import SensorSuite
from allenact.utils.experiment_utils import (
    Builder,
    LinearDecay,
    PipelineStage,
    TrainingPipeline,
)
from allenact_plugins.babyai_plugin.babyai_models import BabyAIRecurrentACModel
from allenact_plugins.babyai_plugin.babyai_tasks import BabyAITask
from projects.babyai_baselines.experiments.base import BaseBabyAIExperimentConfig


class BaseBabyAIGoToObjExperimentConfig(BaseBabyAIExperimentConfig, ABC):
    """Base experimental config."""

    LEVEL: Optional[str] = "BabyAI-GoToObj-v0"
    TOTAL_RL_TRAIN_STEPS = int(5e4)
    TOTAL_IL_TRAIN_STEPS = int(2e4)
    ROLLOUT_STEPS: int = 32
    NUM_TRAIN_SAMPLERS: int = 16
    PPO_NUM_MINI_BATCH = 2
    NUM_TEST_TASKS: int = 50
    USE_LR_DECAY: bool = False
    DEFAULT_LR = 1e-3
    ARCH = "cnn1"
    # ARCH = "cnn2"
    # ARCH = "expert_filmcnn"
    USE_INSTR = False
    INSTR_LEN: int = -1

    @classmethod
    def METRIC_ACCUMULATE_INTERVAL(cls):
        return cls.NUM_TRAIN_SAMPLERS * 128

    @classmethod
    def _training_pipeline(  # type:ignore
        cls,
        named_losses: Dict[str, Union[Loss, Builder]],
        pipeline_stages: List[PipelineStage],
        num_mini_batch: int,
        update_repeats: int,
        total_train_steps: int,
        lr: Optional[float] = None,
        **kwargs,
    ):
        lr = cls.DEFAULT_LR if lr is None else lr

        num_steps = cls.ROLLOUT_STEPS
        metric_accumulate_interval = (
            cls.METRIC_ACCUMULATE_INTERVAL()
        )  # Log every 10 max length tasks
        save_interval = 2 ** 31
        gamma = 0.99

        use_gae = "reinforce_loss" not in named_losses
        gae_lambda = 0.99
        max_grad_norm = 0.5

        return TrainingPipeline(
            save_interval=save_interval,
            metric_accumulate_interval=metric_accumulate_interval,
            optimizer_builder=Builder(cast(optim.Optimizer, optim.Adam), dict(lr=lr)),
            num_mini_batch=num_mini_batch,
            update_repeats=update_repeats,
            max_grad_norm=max_grad_norm,
            num_steps=num_steps,
            named_losses=named_losses,
            gamma=gamma,
            use_gae=use_gae,
            gae_lambda=gae_lambda,
            advance_scene_rollout_period=None,
            should_log=cls.SHOULD_LOG,
            pipeline_stages=pipeline_stages,
            lr_scheduler_builder=Builder(
                LambdaLR, {"lr_lambda": LinearDecay(steps=total_train_steps)}  # type: ignore
            )
            if cls.USE_LR_DECAY
            else None,
            **kwargs,
        )

    @classmethod
    def create_model(cls, **kwargs) -> nn.Module:
        sensors = cls.get_sensors()
        return BabyAIRecurrentACModel(
            action_space=gym.spaces.Discrete(len(BabyAITask.class_action_names())),
            observation_space=SensorSuite(sensors).observation_spaces,
            use_instr=cls.USE_INSTR,
            use_memory=True,
            arch=cls.ARCH,
            instr_dim=8,
            lang_model="gru",
            memory_dim=128,
        )
allenact-main
projects/babyai_baselines/experiments/go_to_obj/base.py
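The commented-out `ARCH` alternatives above are the supported BabyAI backbones; switching architectures is a one-line subclass override. A minimal, hypothetical sketch building on the BC config from this directory (the class name and tag string are made up for illustration):

```python
from projects.babyai_baselines.experiments.go_to_obj.bc import (
    BCBabyAIGoToObjExperimentConfig,
)


class FiLMBCGoToObjConfig(BCBabyAIGoToObjExperimentConfig):
    """Hypothetical variant: behavior cloning with the FiLM backbone."""

    # Swap the default "cnn1" backbone for the FiLM-conditioned CNN listed
    # among the commented alternatives in the base config above.
    ARCH = "expert_filmcnn"
    USE_INSTR = True  # the FiLM architecture conditions on the instruction

    @classmethod
    def tag(cls):
        return "BabyAIGoToObjBCFiLM"
```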
from typing import Optional
from typing import Sequence

import attr
import gym
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR

from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.base_abstractions.sensor import Sensor
from allenact.embodiedai.sensors.vision_sensors import RGBSensor, DepthSensor
from allenact.utils.experiment_utils import (
    Builder,
    TrainingPipeline,
    PipelineStage,
    LinearDecay,
)
from projects.objectnav_baselines.mixins import update_with_auxiliary_losses

# fmt: off
try:
    # Habitat may not be installed, just create a fake class here in that case
    from allenact_plugins.habitat_plugin.habitat_sensors import TargetCoordinatesSensorHabitat
except ImportError:
    class TargetCoordinatesSensorHabitat:  # type:ignore
        pass
# fmt: on

from allenact_plugins.robothor_plugin.robothor_sensors import GPSCompassSensorRoboThor
from allenact_plugins.robothor_plugin.robothor_tasks import PointNavTask
from allenact_plugins.navigation_plugin.pointnav.models import PointNavActorCritic


@attr.s(kw_only=True)
class PointNavUnfrozenResNetWithGRUActorCriticMixin:
    backbone: str = attr.ib()
    sensors: Sequence[Sensor] = attr.ib()
    auxiliary_uuids: Sequence[str] = attr.ib()
    add_prev_actions: bool = attr.ib()
    multiple_beliefs: bool = attr.ib()
    belief_fusion: Optional[str] = attr.ib()

    def create_model(self, **kwargs) -> nn.Module:
        rgb_uuid = next(
            (s.uuid for s in self.sensors if isinstance(s, RGBSensor)), None
        )
        depth_uuid = next(
            (s.uuid for s in self.sensors if isinstance(s, DepthSensor)), None
        )
        goal_sensor_uuid = next(
            (
                s.uuid
                for s in self.sensors
                if isinstance(
                    s, (GPSCompassSensorRoboThor, TargetCoordinatesSensorHabitat)
                )
            )
        )

        return PointNavActorCritic(
            # Env and Task
            action_space=gym.spaces.Discrete(len(PointNavTask.class_action_names())),
            observation_space=kwargs["sensor_preprocessor_graph"].observation_spaces,
            rgb_uuid=rgb_uuid,
            depth_uuid=depth_uuid,
            goal_sensor_uuid=goal_sensor_uuid,
            # RNN
            hidden_size=228
            if self.multiple_beliefs and len(self.auxiliary_uuids) > 1
            else 512,
            num_rnn_layers=1,
            rnn_type="GRU",
            add_prev_actions=self.add_prev_actions,
            action_embed_size=4,
            # CNN
            backbone=self.backbone,
            resnet_baseplanes=32,
            embed_coordinates=False,
            coordinate_dims=2,
            # Aux
            auxiliary_uuids=self.auxiliary_uuids,
            multiple_beliefs=self.multiple_beliefs,
            beliefs_fusion=self.belief_fusion,
        )


class PointNavPPOMixin:
    @staticmethod
    def training_pipeline(
        auxiliary_uuids: Sequence[str],
        multiple_beliefs: bool,
        normalize_advantage: bool,
        advance_scene_rollout_period: Optional[int] = None,
    ) -> TrainingPipeline:
        ppo_steps = int(75000000)
        lr = 3e-4
        num_mini_batch = 1
        update_repeats = 4
        num_steps = 128
        save_interval = 5000000
        log_interval = 10000 if torch.cuda.is_available() else 1
        gamma = 0.99
        use_gae = True
        gae_lambda = 0.95
        max_grad_norm = 0.5

        named_losses = {
            "ppo_loss": (PPO(**PPOConfig, normalize_advantage=normalize_advantage), 1.0)
        }
        named_losses = update_with_auxiliary_losses(
            named_losses=named_losses,
            auxiliary_uuids=auxiliary_uuids,
            multiple_beliefs=multiple_beliefs,
        )

        return TrainingPipeline(
            save_interval=save_interval,
            metric_accumulate_interval=log_interval,
            optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
            num_mini_batch=num_mini_batch,
            update_repeats=update_repeats,
            max_grad_norm=max_grad_norm,
            num_steps=num_steps,
            named_losses={key: val[0] for key, val in named_losses.items()},
            gamma=gamma,
            use_gae=use_gae,
            gae_lambda=gae_lambda,
            advance_scene_rollout_period=advance_scene_rollout_period,
            pipeline_stages=[
                PipelineStage(
                    loss_names=list(named_losses.keys()),
                    max_stage_steps=ppo_steps,
                    loss_weights=[val[1] for val in named_losses.values()],
                )
            ],
            lr_scheduler_builder=Builder(
                LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
            ),
        )
allenact-main
projects/pointnav_baselines/mixins.py
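`update_with_auxiliary_losses` is what turns the `auxiliary_uuids` argument into extra self-supervised loss terms alongside PPO. A hedged sketch of enabling auxiliary tasks through this mixin; the specific UUID constants are assumed to live in `allenact.embodiedai.aux_losses.losses`, matching the commented-out references in the Habitat base config later in this dump:

```python
from allenact.embodiedai.aux_losses.losses import CPCA4Loss, InverseDynamicsLoss

from projects.pointnav_baselines.mixins import PointNavPPOMixin

# Assumed usage: passing auxiliary-loss UUIDs should make
# `update_with_auxiliary_losses` append those losses to the PPO stage.
pipeline = PointNavPPOMixin.training_pipeline(
    auxiliary_uuids=[InverseDynamicsLoss.UUID, CPCA4Loss.UUID],
    multiple_beliefs=False,
    normalize_advantage=True,
)
```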
allenact-main
projects/pointnav_baselines/__init__.py
from abc import ABC
from typing import Optional, Sequence

from allenact.base_abstractions.experiment_config import ExperimentConfig
from allenact.base_abstractions.sensor import Sensor


class PointNavBaseConfig(ExperimentConfig, ABC):
    """A Point Navigation base experiment configuration."""

    ADVANCE_SCENE_ROLLOUT_PERIOD: Optional[int] = None
    SENSORS: Optional[Sequence[Sensor]] = None

    STEP_SIZE = 0.25
    ROTATION_DEGREES = 30.0
    DISTANCE_TO_GOAL = 0.2
    STOCHASTIC = True

    CAMERA_WIDTH = 400
    CAMERA_HEIGHT = 300
    SCREEN_SIZE = 224
    MAX_STEPS = 500

    def __init__(self):
        self.REWARD_CONFIG = {
            "step_penalty": -0.01,
            "goal_success_reward": 10.0,
            "failed_stop_reward": 0.0,
            "reached_max_steps_reward": 0.0,
            "shaping_weight": 1.0,
        }
allenact-main
projects/pointnav_baselines/experiments/pointnav_base.py
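The `REWARD_CONFIG` above parameterizes the PointNav reward. As a rough, illustrative sketch of how such a config is typically consumed (the exact shaping logic lives in the task implementations, e.g. `PointNavTask`; this approximates the usual recipe rather than copying it): each step costs `step_penalty`, success adds `goal_success_reward`, and the change in distance to the goal is added, scaled by `shaping_weight`.

```python
def sketch_step_reward(
    reward_config: dict,
    prev_distance: float,
    new_distance: float,
    reached_goal: bool,
) -> float:
    """Illustrative only: approximates a shaped PointNav step reward."""
    reward = reward_config["step_penalty"]
    # Distance-based shaping: positive when the agent moves closer to the goal.
    reward += reward_config["shaping_weight"] * (prev_distance - new_distance)
    if reached_goal:
        reward += reward_config["goal_success_reward"]
    return reward
```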
allenact-main
projects/pointnav_baselines/experiments/__init__.py
import glob
import os
import platform
from abc import ABC
from math import ceil
from typing import Dict, Any, List, Optional, Sequence

import ai2thor
import gym
import numpy as np
import torch
from packaging import version

from allenact.base_abstractions.experiment_config import MachineParams
from allenact.base_abstractions.preprocessor import SensorPreprocessorGraph
from allenact.base_abstractions.sensor import SensorSuite, ExpertActionSensor
from allenact.base_abstractions.task import TaskSampler
from allenact.utils.experiment_utils import evenly_distribute_count_into_bins
from allenact.utils.system import get_logger
from allenact_plugins.ithor_plugin.ithor_util import get_open_x_displays
from allenact_plugins.robothor_plugin.robothor_sensors import DepthSensorThor
from allenact_plugins.robothor_plugin.robothor_task_samplers import (
    PointNavDatasetTaskSampler,
)
from allenact_plugins.robothor_plugin.robothor_tasks import PointNavTask
from projects.pointnav_baselines.experiments.pointnav_base import PointNavBaseConfig

if ai2thor.__version__ not in ["0.0.1", None] and version.parse(
    ai2thor.__version__
) < version.parse("2.7.2"):
    raise ImportError(
        "To run the PointNav baseline experiments you must use"
        " ai2thor version 2.7.2 or higher."
    )


class PointNavThorBaseConfig(PointNavBaseConfig, ABC):
    """The base config for all THOR PointNav experiments."""

    NUM_PROCESSES: Optional[int] = None
    TRAIN_GPU_IDS = list(range(torch.cuda.device_count()))
    VALID_GPU_IDS = [torch.cuda.device_count() - 1]
    TEST_GPU_IDS = [torch.cuda.device_count() - 1]

    TRAIN_DATASET_DIR: Optional[str] = None
    VAL_DATASET_DIR: Optional[str] = None

    TARGET_TYPES: Optional[Sequence[str]] = None

    ACTION_SPACE = gym.spaces.Discrete(len(PointNavTask.class_action_names()))

    def __init__(self):
        super().__init__()

        self.ENV_ARGS = dict(
            width=self.CAMERA_WIDTH,
            height=self.CAMERA_HEIGHT,
            continuousMode=True,
            applyActionNoise=self.STOCHASTIC,
            rotateStepDegrees=self.ROTATION_DEGREES,
            gridSize=self.STEP_SIZE,
            snapToGrid=False,
            agentMode="bot",
            include_private_scenes=False,
            renderDepthImage=any(isinstance(s, DepthSensorThor) for s in self.SENSORS),
        )

    def preprocessors(self):
        return tuple()

    def machine_params(self, mode="train", **kwargs):
        sampler_devices: Sequence[int] = []
        if mode == "train":
            workers_per_device = 1
            gpu_ids = (
                []
                if not torch.cuda.is_available()
                else self.TRAIN_GPU_IDS * workers_per_device
            )
            nprocesses = (
                1
                if not torch.cuda.is_available()
                else evenly_distribute_count_into_bins(self.NUM_PROCESSES, len(gpu_ids))
            )
            sampler_devices = self.TRAIN_GPU_IDS
        elif mode == "valid":
            nprocesses = 1 if torch.cuda.is_available() else 0
            gpu_ids = [] if not torch.cuda.is_available() else self.VALID_GPU_IDS
        elif mode == "test":
            nprocesses = 10
            gpu_ids = [] if not torch.cuda.is_available() else self.TEST_GPU_IDS
        else:
            raise NotImplementedError("mode must be 'train', 'valid', or 'test'.")

        sensor_preprocessor_graph = (
            SensorPreprocessorGraph(
                source_observation_spaces=SensorSuite(self.SENSORS).observation_spaces,
                preprocessors=self.preprocessors(),
            )
            if mode == "train"
            or (
                (isinstance(nprocesses, int) and nprocesses > 0)
                or (isinstance(nprocesses, Sequence) and sum(nprocesses) > 0)
            )
            else None
        )

        return MachineParams(
            nprocesses=nprocesses,
            devices=gpu_ids,
            sampler_devices=sampler_devices
            if mode == "train"
            else gpu_ids,  # ignored with > 1 gpu_ids
            sensor_preprocessor_graph=sensor_preprocessor_graph,
        )

    @classmethod
    def make_sampler_fn(cls, **kwargs) -> TaskSampler:
        return PointNavDatasetTaskSampler(**kwargs)

    @staticmethod
    def _partition_inds(n: int, num_parts: int):
        return np.round(np.linspace(0, n, num_parts + 1, endpoint=True)).astype(
            np.int32
        )

    def _get_sampler_args_for_scene_split(
        self,
        scenes_dir: str,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]],
        seeds: Optional[List[int]],
        deterministic_cudnn: bool,
        include_expert_sensor: bool = True,
    ) -> Dict[str, Any]:
        path = os.path.join(scenes_dir, "*.json.gz")
        scenes = [scene.split("/")[-1].split(".")[0] for scene in glob.glob(path)]
        if len(scenes) == 0:
            raise RuntimeError(
                (
                    "Could not find any scene dataset information in directory {}."
                    " Are you sure you've downloaded it?"
                    " If not, see https://allenact.org/installation/download-datasets/"
                    " for information on how this can be done."
                ).format(scenes_dir)
            )

        oversample_warning = (
            f"Warning: oversampling some of the scenes ({scenes}) to feed all processes ({total_processes})."
            " You can avoid this by setting a number of workers divisible by the number of scenes"
        )
        if total_processes > len(scenes):  # oversample some scenes -> bias
            if total_processes % len(scenes) != 0:
                get_logger().warning(oversample_warning)
            scenes = scenes * int(ceil(total_processes / len(scenes)))
            scenes = scenes[: total_processes * (len(scenes) // total_processes)]
        elif len(scenes) % total_processes != 0:
            get_logger().warning(oversample_warning)

        inds = self._partition_inds(len(scenes), total_processes)

        x_display: Optional[str] = None
        if platform.system() == "Linux":
            x_displays = get_open_x_displays(throw_error_if_empty=True)

            if len([d for d in devices if d != torch.device("cpu")]) > len(x_displays):
                get_logger().warning(
                    f"More GPU devices found than X-displays (devices: `{devices}`, x_displays: `{x_displays}`)."
                    f" This is not necessarily a bad thing but may mean that you're not using GPU memory as"
                    f" efficiently as possible. Consider following the instructions here:"
                    f" https://allenact.org/installation/installation-framework/#installation-of-ithor-ithor-plugin"
                    f" describing how to start an X-display on every GPU."
                )
            x_display = x_displays[process_ind % len(x_displays)]

        return {
            "scenes": scenes[inds[process_ind] : inds[process_ind + 1]],
            "object_types": self.TARGET_TYPES,
            "max_steps": self.MAX_STEPS,
            "sensors": [
                s
                for s in self.SENSORS
                if (include_expert_sensor or not isinstance(s, ExpertActionSensor))
            ],
            "action_space": self.ACTION_SPACE,
            "seed": seeds[process_ind] if seeds is not None else None,
            "deterministic_cudnn": deterministic_cudnn,
            "rewards_config": self.REWARD_CONFIG,
            "env_args": {**self.ENV_ARGS, "x_display": x_display},
        }

    def train_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        res = self._get_sampler_args_for_scene_split(
            os.path.join(self.TRAIN_DATASET_DIR, "episodes"),
            process_ind,
            total_processes,
            devices=devices,
            seeds=seeds,
            deterministic_cudnn=deterministic_cudnn,
        )
        res["scene_directory"] = self.TRAIN_DATASET_DIR
        res["loop_dataset"] = True
        res["allow_flipping"] = True
        return res

    def valid_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        res = self._get_sampler_args_for_scene_split(
            os.path.join(self.VAL_DATASET_DIR, "episodes"),
            process_ind,
            total_processes,
            devices=devices,
            seeds=seeds,
            deterministic_cudnn=deterministic_cudnn,
            include_expert_sensor=False,
        )
        res["scene_directory"] = self.VAL_DATASET_DIR
        res["loop_dataset"] = False
        return res

    def test_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        return self.valid_task_sampler_args(
            process_ind=process_ind,
            total_processes=total_processes,
            devices=devices,
            seeds=seeds,
            deterministic_cudnn=deterministic_cudnn,
        )
allenact-main
projects/pointnav_baselines/experiments/pointnav_thor_base.py
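`_partition_inds` above is the piece that splits scenes across sampler processes; a quick standalone check of its behavior:

```python
import numpy as np


def partition_inds(n: int, num_parts: int):
    # Same computation as PointNavThorBaseConfig._partition_inds above.
    return np.round(np.linspace(0, n, num_parts + 1, endpoint=True)).astype(np.int32)


# 10 scenes split across 4 processes -> boundaries [0, 2, 5, 8, 10]:
# process i receives scenes[inds[i] : inds[i + 1]], so sizes are 2, 3, 3, 2.
inds = partition_inds(10, 4)
print(inds)  # [ 0  2  5  8 10]
```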
from allenact.utils.experiment_utils import TrainingPipeline
from allenact_plugins.ithor_plugin.ithor_sensors import RGBSensorThor
from allenact_plugins.robothor_plugin.robothor_sensors import DepthSensorThor
from allenact_plugins.robothor_plugin.robothor_sensors import GPSCompassSensorRoboThor
from projects.pointnav_baselines.experiments.robothor.pointnav_robothor_base import (
    PointNavRoboThorBaseConfig,
)
from projects.pointnav_baselines.mixins import (
    PointNavPPOMixin,
    PointNavUnfrozenResNetWithGRUActorCriticMixin,
)


class PointNavRoboThorRGBDPPOExperimentConfig(PointNavRoboThorBaseConfig):
    """A Point Navigation experiment configuration in RoboTHOR with RGBD
    input."""

    SENSORS = [
        RGBSensorThor(
            height=PointNavRoboThorBaseConfig.SCREEN_SIZE,
            width=PointNavRoboThorBaseConfig.SCREEN_SIZE,
            use_resnet_normalization=True,
            uuid="rgb_lowres",
        ),
        DepthSensorThor(
            height=PointNavRoboThorBaseConfig.SCREEN_SIZE,
            width=PointNavRoboThorBaseConfig.SCREEN_SIZE,
            use_normalization=True,
            uuid="depth_lowres",
        ),
        GPSCompassSensorRoboThor(),
    ]

    def __init__(self):
        super().__init__()

        self.model_creation_handler = PointNavUnfrozenResNetWithGRUActorCriticMixin(
            backbone="simple_cnn",
            sensors=self.SENSORS,
            auxiliary_uuids=[],
            add_prev_actions=True,
            multiple_beliefs=False,
            belief_fusion=None,
        )

    def training_pipeline(self, **kwargs) -> TrainingPipeline:
        return PointNavPPOMixin.training_pipeline(
            auxiliary_uuids=[],
            multiple_beliefs=False,
            normalize_advantage=True,
            advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
        )

    def create_model(self, **kwargs):
        return self.model_creation_handler.create_model(**kwargs)

    def tag(self):
        return "PointNav-RoboTHOR-RGBD-SimpleConv-DDPPO"
allenact-main
projects/pointnav_baselines/experiments/robothor/pointnav_robothor_rgbd_simpleconvgru_ddppo.py
from allenact.utils.experiment_utils import TrainingPipeline
from allenact_plugins.robothor_plugin.robothor_sensors import (
    DepthSensorThor,
    GPSCompassSensorRoboThor,
)
from projects.pointnav_baselines.experiments.robothor.pointnav_robothor_base import (
    PointNavRoboThorBaseConfig,
)
from projects.pointnav_baselines.mixins import (
    PointNavPPOMixin,
    PointNavUnfrozenResNetWithGRUActorCriticMixin,
)


class PointNavRoboThorDepthPPOExperimentConfig(PointNavRoboThorBaseConfig):
    """A Point Navigation experiment configuration in RoboTHOR with Depth
    input."""

    SENSORS = [
        DepthSensorThor(
            height=PointNavRoboThorBaseConfig.SCREEN_SIZE,
            width=PointNavRoboThorBaseConfig.SCREEN_SIZE,
            use_normalization=True,
            uuid="depth_lowres",
        ),
        GPSCompassSensorRoboThor(),
    ]

    def __init__(self):
        super().__init__()

        self.model_creation_handler = PointNavUnfrozenResNetWithGRUActorCriticMixin(
            backbone="simple_cnn",
            sensors=self.SENSORS,
            auxiliary_uuids=[],
            add_prev_actions=True,
            multiple_beliefs=False,
            belief_fusion=None,
        )

    def training_pipeline(self, **kwargs) -> TrainingPipeline:
        return PointNavPPOMixin.training_pipeline(
            auxiliary_uuids=[],
            multiple_beliefs=False,
            normalize_advantage=True,
            advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
        )

    def create_model(self, **kwargs):
        return self.model_creation_handler.create_model(**kwargs)

    def tag(self):
        return "PointNav-RoboTHOR-Depth-SimpleConv-DDPPO"
allenact-main
projects/pointnav_baselines/experiments/robothor/pointnav_robothor_depth_simpleconvgru_ddppo.py
allenact-main
projects/pointnav_baselines/experiments/robothor/__init__.py
from allenact.utils.experiment_utils import TrainingPipeline
from allenact_plugins.ithor_plugin.ithor_sensors import RGBSensorThor
from allenact_plugins.robothor_plugin.robothor_sensors import GPSCompassSensorRoboThor
from projects.pointnav_baselines.experiments.robothor.pointnav_robothor_base import (
    PointNavRoboThorBaseConfig,
)
from projects.pointnav_baselines.mixins import (
    PointNavPPOMixin,
    PointNavUnfrozenResNetWithGRUActorCriticMixin,
)


class PointNavRoboThorRGBPPOExperimentConfig(PointNavRoboThorBaseConfig):
    """A Point Navigation experiment configuration in RoboTHOR with RGB
    input."""

    SENSORS = [
        RGBSensorThor(
            height=PointNavRoboThorBaseConfig.SCREEN_SIZE,
            width=PointNavRoboThorBaseConfig.SCREEN_SIZE,
            use_resnet_normalization=True,
            uuid="rgb_lowres",
        ),
        GPSCompassSensorRoboThor(),
    ]

    def __init__(self):
        super().__init__()

        self.model_creation_handler = PointNavUnfrozenResNetWithGRUActorCriticMixin(
            backbone="simple_cnn",
            sensors=self.SENSORS,
            auxiliary_uuids=[],
            add_prev_actions=True,
            multiple_beliefs=False,
            belief_fusion=None,
        )

    def training_pipeline(self, **kwargs) -> TrainingPipeline:
        return PointNavPPOMixin.training_pipeline(
            auxiliary_uuids=[],
            multiple_beliefs=False,
            normalize_advantage=True,
            advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
        )

    def create_model(self, **kwargs):
        return self.model_creation_handler.create_model(**kwargs)

    def tag(self):
        return "PointNav-RoboTHOR-RGB-SimpleConv-DDPPO"
allenact-main
projects/pointnav_baselines/experiments/robothor/pointnav_robothor_rgb_simpleconvgru_ddppo.py
import os
from abc import ABC

from projects.pointnav_baselines.experiments.pointnav_thor_base import (
    PointNavThorBaseConfig,
)


class PointNavRoboThorBaseConfig(PointNavThorBaseConfig, ABC):
    """The base config for all RoboTHOR PointNav experiments."""

    NUM_PROCESSES = 60

    TRAIN_DATASET_DIR = os.path.join(os.getcwd(), "datasets/robothor-pointnav/train")
    VAL_DATASET_DIR = os.path.join(os.getcwd(), "datasets/robothor-pointnav/val")
allenact-main
projects/pointnav_baselines/experiments/robothor/pointnav_robothor_base.py
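The dataset directories above are resolved relative to the working directory, so pointing a config at a custom split is just a subclass override. A minimal sketch; the `datasets/robothor-pointnav/debug` path and class name are hypothetical:

```python
import os

from projects.pointnav_baselines.experiments.robothor.pointnav_robothor_base import (
    PointNavRoboThorBaseConfig,
)


class PointNavRoboThorDebugBaseConfig(PointNavRoboThorBaseConfig):
    """Hypothetical variant pointing at a small debug split."""

    NUM_PROCESSES = 4  # fewer samplers for quick local runs

    TRAIN_DATASET_DIR = os.path.join(os.getcwd(), "datasets/robothor-pointnav/debug")
    VAL_DATASET_DIR = os.path.join(os.getcwd(), "datasets/robothor-pointnav/debug")
```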
from allenact.utils.experiment_utils import TrainingPipeline
from allenact_plugins.habitat_plugin.habitat_sensors import RGBSensorHabitat
from allenact_plugins.habitat_plugin.habitat_sensors import (
    TargetCoordinatesSensorHabitat,
)
from projects.pointnav_baselines.experiments.habitat.pointnav_habitat_base import (
    PointNavHabitatBaseConfig,
)
from projects.pointnav_baselines.mixins import (
    PointNavPPOMixin,
    PointNavUnfrozenResNetWithGRUActorCriticMixin,
)


class PointNavHabitatRGBSimpleConvGRUDDPPOExperimentConfig(PointNavHabitatBaseConfig):
    """A Point Navigation experiment configuration in Habitat with RGB
    input."""

    SENSORS = [
        RGBSensorHabitat(
            height=PointNavHabitatBaseConfig.SCREEN_SIZE,
            width=PointNavHabitatBaseConfig.SCREEN_SIZE,
            use_resnet_normalization=True,
        ),
        TargetCoordinatesSensorHabitat(coordinate_dims=2),
    ]

    def __init__(self):
        super().__init__()

        self.model_creation_handler = PointNavUnfrozenResNetWithGRUActorCriticMixin(
            backbone="simple_cnn",
            sensors=self.SENSORS,
            auxiliary_uuids=[],
            add_prev_actions=True,
            multiple_beliefs=False,
            belief_fusion=None,
        )

    def training_pipeline(self, **kwargs) -> TrainingPipeline:
        return PointNavPPOMixin.training_pipeline(
            auxiliary_uuids=[],
            multiple_beliefs=False,
            normalize_advantage=True,
            advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
        )

    def create_model(self, **kwargs):
        return self.model_creation_handler.create_model(**kwargs)

    @classmethod
    def tag(cls):
        return "PointNav-Habitat-RGB-SimpleConv-DDPPO"
allenact-main
projects/pointnav_baselines/experiments/habitat/pointnav_habitat_rgb_simpleconvgru_ddppo.py
from allenact.utils.experiment_utils import TrainingPipeline
from allenact_plugins.habitat_plugin.habitat_sensors import DepthSensorHabitat
from allenact_plugins.habitat_plugin.habitat_sensors import RGBSensorHabitat
from allenact_plugins.habitat_plugin.habitat_sensors import (
    TargetCoordinatesSensorHabitat,
)
from projects.pointnav_baselines.experiments.habitat.pointnav_habitat_base import (
    PointNavHabitatBaseConfig,
)
from projects.pointnav_baselines.mixins import (
    PointNavPPOMixin,
    PointNavUnfrozenResNetWithGRUActorCriticMixin,
)


class PointNavHabitatRGBDSimpleConvGRUDDPPOExperimentConfig(PointNavHabitatBaseConfig):
    """A Point Navigation experiment configuration in Habitat with RGBD
    input."""

    SENSORS = [
        RGBSensorHabitat(
            height=PointNavHabitatBaseConfig.SCREEN_SIZE,
            width=PointNavHabitatBaseConfig.SCREEN_SIZE,
            use_resnet_normalization=True,
        ),
        DepthSensorHabitat(
            height=PointNavHabitatBaseConfig.SCREEN_SIZE,
            width=PointNavHabitatBaseConfig.SCREEN_SIZE,
            use_normalization=True,
        ),
        TargetCoordinatesSensorHabitat(coordinate_dims=2),
    ]

    def __init__(self):
        super().__init__()

        self.model_creation_handler = PointNavUnfrozenResNetWithGRUActorCriticMixin(
            backbone="simple_cnn",
            sensors=self.SENSORS,
            auxiliary_uuids=[],
            add_prev_actions=True,
            multiple_beliefs=False,
            belief_fusion=None,
        )

    def training_pipeline(self, **kwargs) -> TrainingPipeline:
        return PointNavPPOMixin.training_pipeline(
            auxiliary_uuids=[],
            multiple_beliefs=False,
            normalize_advantage=True,
            advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
        )

    def create_model(self, **kwargs):
        return self.model_creation_handler.create_model(**kwargs)

    def tag(self):
        return "PointNav-Habitat-RGBD-SimpleConv-DDPPO"
allenact-main
projects/pointnav_baselines/experiments/habitat/pointnav_habitat_rgbd_simpleconvgru_ddppo.py
allenact-main
projects/pointnav_baselines/experiments/habitat/__init__.py
import os
from abc import ABC
from typing import Dict, Any, List, Optional, Sequence, Union

import gym
import torch

# noinspection PyUnresolvedReferences
import habitat
from allenact.base_abstractions.experiment_config import MachineParams
from allenact.base_abstractions.preprocessor import (
    SensorPreprocessorGraph,
    Preprocessor,
)
from allenact.base_abstractions.sensor import SensorSuite
from allenact.base_abstractions.task import TaskSampler
from allenact.embodiedai.sensors.vision_sensors import RGBSensor, DepthSensor
from allenact.utils.experiment_utils import evenly_distribute_count_into_bins, Builder
from allenact.utils.system import get_logger
from allenact_plugins.habitat_plugin.habitat_constants import (
    HABITAT_DATASETS_DIR,
    HABITAT_CONFIGS_DIR,
    HABITAT_SCENE_DATASETS_DIR,
)
from allenact_plugins.habitat_plugin.habitat_task_samplers import PointNavTaskSampler
from allenact_plugins.habitat_plugin.habitat_tasks import PointNavTask
from allenact_plugins.habitat_plugin.habitat_utils import (
    get_habitat_config,
    construct_env_configs,
)
from projects.pointnav_baselines.experiments.pointnav_base import PointNavBaseConfig


def create_pointnav_config(
    config_yaml_path: str,
    mode: str,
    scenes_path: str,
    simulator_gpu_ids: Sequence[int],
    distance_to_goal: float,
    rotation_degrees: float,
    step_size: float,
    max_steps: int,
    num_processes: int,
    camera_width: int,
    camera_height: int,
    using_rgb: bool,
    using_depth: bool,
    training: bool,
    num_episode_sample: int,
) -> habitat.Config:
    config = get_habitat_config(config_yaml_path)

    config.defrost()
    config.NUM_PROCESSES = num_processes
    config.SIMULATOR_GPU_IDS = simulator_gpu_ids
    config.DATASET.SCENES_DIR = HABITAT_SCENE_DATASETS_DIR

    config.DATASET.DATA_PATH = scenes_path

    config.SIMULATOR.AGENT_0.SENSORS = []
    if using_rgb:
        config.SIMULATOR.AGENT_0.SENSORS.append("RGB_SENSOR")
    if using_depth:
        config.SIMULATOR.AGENT_0.SENSORS.append("DEPTH_SENSOR")

    config.SIMULATOR.RGB_SENSOR.WIDTH = camera_width
    config.SIMULATOR.RGB_SENSOR.HEIGHT = camera_height
    config.SIMULATOR.DEPTH_SENSOR.WIDTH = camera_width
    config.SIMULATOR.DEPTH_SENSOR.HEIGHT = camera_height
    config.SIMULATOR.TURN_ANGLE = rotation_degrees
    config.SIMULATOR.FORWARD_STEP_SIZE = step_size
    config.ENVIRONMENT.MAX_EPISODE_STEPS = max_steps

    config.TASK.TYPE = "Nav-v0"
    config.TASK.SUCCESS_DISTANCE = distance_to_goal
    config.TASK.SENSORS = ["POINTGOAL_WITH_GPS_COMPASS_SENSOR"]
    config.TASK.POINTGOAL_WITH_GPS_COMPASS_SENSOR.GOAL_FORMAT = "POLAR"
    config.TASK.POINTGOAL_WITH_GPS_COMPASS_SENSOR.DIMENSIONALITY = 2
    config.TASK.GOAL_SENSOR_UUID = "pointgoal_with_gps_compass"
    config.TASK.MEASUREMENTS = ["DISTANCE_TO_GOAL", "SUCCESS", "SPL"]
    config.TASK.SPL.TYPE = "SPL"
    config.TASK.SPL.SUCCESS_DISTANCE = distance_to_goal
    config.TASK.SUCCESS.SUCCESS_DISTANCE = distance_to_goal

    if not training:
        config.SEED = 0
        config.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False

    if num_episode_sample > 0:
        config.ENVIRONMENT.ITERATOR_OPTIONS.NUM_EPISODE_SAMPLE = num_episode_sample

    config.MODE = mode

    config.freeze()

    return config


class PointNavHabitatBaseConfig(PointNavBaseConfig, ABC):
    """The base config for all Habitat PointNav experiments."""

    # Selected auxiliary uuids; if all keys are commented out then this is vanilla DD-PPO.
    AUXILIARY_UUIDS = [
        # InverseDynamicsLoss.UUID,
        # TemporalDistanceLoss.UUID,
        # CPCA1Loss.UUID,
        # CPCA4Loss.UUID,
        # CPCA8Loss.UUID,
        # CPCA16Loss.UUID,
    ]
    ADD_PREV_ACTIONS = False
    MULTIPLE_BELIEFS = False
    BELIEF_FUSION = (  # choose one
        None
        # AttentiveFusion
        # AverageFusion
        # SoftmaxFusion
    )

    FAILED_END_REWARD = -1.0

    TASK_DATA_DIR_TEMPLATE = os.path.join(
        HABITAT_DATASETS_DIR, "pointnav/gibson/v1/{}/{}.json.gz"
    )
    BASE_CONFIG_YAML_PATH = os.path.join(
        HABITAT_CONFIGS_DIR, "tasks/pointnav_gibson.yaml"
    )

    ACTION_SPACE = gym.spaces.Discrete(len(PointNavTask.class_action_names()))

    DEFAULT_NUM_TRAIN_PROCESSES = (
        5 * torch.cuda.device_count() if torch.cuda.is_available() else 1
    )
    DEFAULT_NUM_TEST_PROCESSES = 10

    DEFAULT_TRAIN_GPU_IDS = tuple(range(torch.cuda.device_count()))
    DEFAULT_VALID_GPU_IDS = [torch.cuda.device_count() - 1]
    DEFAULT_TEST_GPU_IDS = [torch.cuda.device_count() - 1]

    def __init__(
        self,
        debug: bool = False,
        num_train_processes: Optional[int] = None,
        num_test_processes: Optional[int] = None,
        test_on_validation: bool = False,
        run_valid: bool = True,
        train_gpu_ids: Optional[Sequence[int]] = None,
        val_gpu_ids: Optional[Sequence[int]] = None,
        test_gpu_ids: Optional[Sequence[int]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        def v_or_default(v, default):
            return v if v is not None else default

        self.num_train_processes = v_or_default(
            num_train_processes, self.DEFAULT_NUM_TRAIN_PROCESSES
        )
        self.num_test_processes = v_or_default(
            num_test_processes, (10 if torch.cuda.is_available() else 1)
        )
        self.test_on_validation = test_on_validation
        self.run_valid = run_valid
        self.train_gpu_ids = v_or_default(train_gpu_ids, self.DEFAULT_TRAIN_GPU_IDS)
        self.val_gpu_ids = v_or_default(
            val_gpu_ids, self.DEFAULT_VALID_GPU_IDS if run_valid else []
        )
        self.test_gpu_ids = v_or_default(test_gpu_ids, self.DEFAULT_TEST_GPU_IDS)

        def create_config(
            mode: str,
            scenes_path: str,
            num_processes: int,
            simulator_gpu_ids: Sequence[int],
            training: bool = True,
            num_episode_sample: int = -1,
        ):
            return create_pointnav_config(
                config_yaml_path=self.BASE_CONFIG_YAML_PATH,
                mode=mode,
                scenes_path=scenes_path,
                simulator_gpu_ids=simulator_gpu_ids,
                distance_to_goal=self.DISTANCE_TO_GOAL,
                rotation_degrees=self.ROTATION_DEGREES,
                step_size=self.STEP_SIZE,
                max_steps=self.MAX_STEPS,
                num_processes=num_processes,
                camera_width=self.CAMERA_WIDTH,
                camera_height=self.CAMERA_HEIGHT,
                using_rgb=any(isinstance(s, RGBSensor) for s in self.SENSORS),
                using_depth=any(isinstance(s, DepthSensor) for s in self.SENSORS),
                training=training,
                num_episode_sample=num_episode_sample,
            )

        self.TRAIN_CONFIG = create_config(
            mode="train",
            scenes_path=self.train_scenes_path(),
            num_processes=self.num_train_processes,
            simulator_gpu_ids=self.train_gpu_ids,
            training=True,
        )
        self.VALID_CONFIG = create_config(
            mode="validate",
            scenes_path=self.valid_scenes_path(),
            num_processes=1,
            simulator_gpu_ids=self.val_gpu_ids,
            training=False,
            num_episode_sample=200,
        )
        self.TEST_CONFIG = create_config(
            mode="validate",
            scenes_path=self.test_scenes_path(),
            num_processes=self.num_test_processes,
            simulator_gpu_ids=self.test_gpu_ids,
            training=False,
        )

        self.TRAIN_CONFIGS_PER_PROCESS = construct_env_configs(
            self.TRAIN_CONFIG, allow_scene_repeat=True
        )

        if debug:
            get_logger().warning("IN DEBUG MODE, WILL ONLY USE `Adrian` SCENE!!!")
            for config in self.TRAIN_CONFIGS_PER_PROCESS:
                config.defrost()
                config.DATASET.CONTENT_SCENES = ["Adrian"]
                config.freeze()

        self.TEST_CONFIG_PER_PROCESS = construct_env_configs(
            self.TEST_CONFIG, allow_scene_repeat=False
        )

    def train_scenes_path(self):
        return self.TASK_DATA_DIR_TEMPLATE.format(*(["train"] * 2))

    def valid_scenes_path(self):
        return self.TASK_DATA_DIR_TEMPLATE.format(*(["val"] * 2))

    def test_scenes_path(self):
        get_logger().warning("Running tests on the validation set!")
        return self.TASK_DATA_DIR_TEMPLATE.format(*(["val"] * 2))
        # return self.TASK_DATA_DIR_TEMPLATE.format(*(["test"] * 2))

    @classmethod
    def tag(cls):
        return "PointNav"

    def preprocessors(self) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
        return tuple()

    def machine_params(self, mode="train", **kwargs):
        has_gpus = torch.cuda.is_available()
        if not has_gpus:
            gpu_ids = []
            nprocesses = 1
        elif mode == "train":
            gpu_ids = self.train_gpu_ids
            nprocesses = self.num_train_processes
        elif mode == "valid":
            gpu_ids = self.val_gpu_ids
            nprocesses = 1 if self.run_valid else 0
        elif mode == "test":
            gpu_ids = self.test_gpu_ids
            nprocesses = self.num_test_processes
        else:
            raise NotImplementedError("mode must be 'train', 'valid', or 'test'.")

        if has_gpus:
            nprocesses = evenly_distribute_count_into_bins(nprocesses, len(gpu_ids))

        sensor_preprocessor_graph = (
            SensorPreprocessorGraph(
                source_observation_spaces=SensorSuite(self.SENSORS).observation_spaces,
                preprocessors=self.preprocessors(),
            )
            if mode == "train"
            or (
                (isinstance(nprocesses, int) and nprocesses > 0)
                or (isinstance(nprocesses, Sequence) and sum(nprocesses) > 0)
            )
            else None
        )

        return MachineParams(
            nprocesses=nprocesses,
            devices=gpu_ids,
            sensor_preprocessor_graph=sensor_preprocessor_graph,
        )

    @classmethod
    def make_sampler_fn(cls, **kwargs) -> TaskSampler:
        return PointNavTaskSampler(
            **{"failed_end_reward": cls.FAILED_END_REWARD, **kwargs}  # type: ignore
        )

    def train_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        config = self.TRAIN_CONFIGS_PER_PROCESS[process_ind]
        return {
            "env_config": config,
            "max_steps": self.MAX_STEPS,
            "sensors": self.SENSORS,
            "action_space": self.ACTION_SPACE,
            "distance_to_goal": self.DISTANCE_TO_GOAL,
        }

    def valid_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        if total_processes != 1:
            raise NotImplementedError(
                "In validation, `total_processes` must equal 1 for habitat tasks"
            )
        return {
            "env_config": self.VALID_CONFIG,
            "max_steps": self.MAX_STEPS,
            "sensors": self.SENSORS,
            "action_space": gym.spaces.Discrete(len(PointNavTask.class_action_names())),
            "distance_to_goal": self.DISTANCE_TO_GOAL,
        }

    def test_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        config = self.TEST_CONFIG_PER_PROCESS[process_ind]
        return {
            "env_config": config,
            "max_steps": self.MAX_STEPS,
            "sensors": self.SENSORS,
            "action_space": gym.spaces.Discrete(len(PointNavTask.class_action_names())),
            "distance_to_goal": self.DISTANCE_TO_GOAL,
        }
allenact-main
projects/pointnav_baselines/experiments/habitat/pointnav_habitat_base.py
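`machine_params` above relies on `evenly_distribute_count_into_bins` to spread worker processes over GPUs. A small sketch of the expected behavior, assuming it returns a per-bin list of counts that sums to the input (which is how it is consumed here):

```python
from allenact.utils.experiment_utils import evenly_distribute_count_into_bins

# e.g. 10 training processes over 4 GPUs: each GPU gets 2 or 3 workers.
per_gpu = evenly_distribute_count_into_bins(10, 4)
print(per_gpu)  # e.g. [3, 3, 2, 2] (the exact ordering is an assumption)
assert sum(per_gpu) == 10
```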
from allenact.utils.experiment_utils import TrainingPipeline
from allenact_plugins.habitat_plugin.habitat_sensors import (
    DepthSensorHabitat,
    TargetCoordinatesSensorHabitat,
)
from projects.pointnav_baselines.experiments.habitat.pointnav_habitat_base import (
    PointNavHabitatBaseConfig,
)
from projects.pointnav_baselines.mixins import (
    PointNavPPOMixin,
    PointNavUnfrozenResNetWithGRUActorCriticMixin,
)


class PointNavHabitatDepthSimpleConvGRUDDPPOExperimentConfig(PointNavHabitatBaseConfig):
    """A Point Navigation experiment configuration in Habitat with Depth
    input."""

    SENSORS = [
        DepthSensorHabitat(
            height=PointNavHabitatBaseConfig.SCREEN_SIZE,
            width=PointNavHabitatBaseConfig.SCREEN_SIZE,
            use_normalization=True,
        ),
        TargetCoordinatesSensorHabitat(coordinate_dims=2),
    ]

    def __init__(self):
        super().__init__()

        self.model_creation_handler = PointNavUnfrozenResNetWithGRUActorCriticMixin(
            backbone="simple_cnn",
            sensors=self.SENSORS,
            auxiliary_uuids=[],
            add_prev_actions=True,
            multiple_beliefs=False,
            belief_fusion=None,
        )

    def training_pipeline(self, **kwargs) -> TrainingPipeline:
        return PointNavPPOMixin.training_pipeline(
            auxiliary_uuids=[],
            multiple_beliefs=False,
            normalize_advantage=True,
            advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
        )

    def create_model(self, **kwargs):
        return self.model_creation_handler.create_model(**kwargs)

    def tag(self):
        return "PointNav-Habitat-Depth-SimpleConv-DDPPO"
allenact-main
projects/pointnav_baselines/experiments/habitat/pointnav_habitat_depth_simpleconvgru_ddppo.py
allenact-main
projects/pointnav_baselines/experiments/habitat/clip/__init__.py
from typing import Sequence, Union

import torch.nn as nn

from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.utils.experiment_utils import Builder, TrainingPipeline
from allenact_plugins.clip_plugin.clip_preprocessors import ClipResNetPreprocessor
from allenact_plugins.habitat_plugin.habitat_sensors import (
    RGBSensorHabitat,
    TargetCoordinatesSensorHabitat,
)
from projects.objectnav_baselines.experiments.clip.mixins import (
    ClipResNetPreprocessGRUActorCriticMixin,
)
from projects.pointnav_baselines.experiments.habitat.pointnav_habitat_base import (
    PointNavHabitatBaseConfig,
)
from projects.pointnav_baselines.mixins import PointNavPPOMixin


class PointNavHabitatRGBClipResNet50GRUDDPPOExperimentConfig(PointNavHabitatBaseConfig):
    """A Point Navigation experiment configuration in Habitat with RGB
    input."""

    CLIP_MODEL_TYPE = "RN50"

    SENSORS = [
        RGBSensorHabitat(
            height=PointNavHabitatBaseConfig.SCREEN_SIZE,
            width=PointNavHabitatBaseConfig.SCREEN_SIZE,
            use_resnet_normalization=True,
            mean=ClipResNetPreprocessor.CLIP_RGB_MEANS,
            stdev=ClipResNetPreprocessor.CLIP_RGB_STDS,
        ),
        TargetCoordinatesSensorHabitat(coordinate_dims=2),
    ]

    def __init__(self, add_prev_actions: bool = False, **kwargs):
        super().__init__(**kwargs)

        self.preprocessing_and_model = ClipResNetPreprocessGRUActorCriticMixin(
            sensors=self.SENSORS,
            clip_model_type=self.CLIP_MODEL_TYPE,
            screen_size=self.SCREEN_SIZE,
            goal_sensor_type=TargetCoordinatesSensorHabitat,
        )
        self.add_prev_actions = add_prev_actions

    def training_pipeline(self, **kwargs) -> TrainingPipeline:
        return PointNavPPOMixin.training_pipeline(
            auxiliary_uuids=[],
            multiple_beliefs=False,
            normalize_advantage=False,
            advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
        )

    def preprocessors(self) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
        return self.preprocessing_and_model.preprocessors()

    def create_model(self, **kwargs) -> nn.Module:
        return self.preprocessing_and_model.create_model(
            num_actions=self.ACTION_SPACE.n,
            add_prev_actions=self.add_prev_actions,
            **kwargs,
        )

    @classmethod
    def tag(cls):
        return "PointNav-Habitat-RGB-ClipResNet50GRU-DDPPO"
allenact-main
projects/pointnav_baselines/experiments/habitat/clip/pointnav_habitat_rgb_clipresnet50gru_ddppo.py
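The sensor above reuses CLIP's own normalization statistics rather than the ImageNet defaults. A sketch of what that normalization amounts to, using the same constants (the tensor shapes are assumptions for illustration):

```python
import torch

from allenact_plugins.clip_plugin.clip_preprocessors import ClipResNetPreprocessor

# CLIP's RGB statistics, as passed to the RGBSensorHabitat above.
mean = torch.tensor(ClipResNetPreprocessor.CLIP_RGB_MEANS).view(1, 3, 1, 1)
std = torch.tensor(ClipResNetPreprocessor.CLIP_RGB_STDS).view(1, 3, 1, 1)

img = torch.rand(1, 3, 224, 224)  # a batch of RGB frames in [0, 1]
normalized = (img - mean) / std
```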
from allenact.utils.experiment_utils import TrainingPipeline
from allenact_plugins.ithor_plugin.ithor_sensors import RGBSensorThor
from allenact_plugins.robothor_plugin.robothor_sensors import DepthSensorThor
from allenact_plugins.robothor_plugin.robothor_sensors import GPSCompassSensorRoboThor
from projects.pointnav_baselines.experiments.ithor.pointnav_ithor_base import (
    PointNaviThorBaseConfig,
)
from projects.pointnav_baselines.mixins import (
    PointNavPPOMixin,
    PointNavUnfrozenResNetWithGRUActorCriticMixin,
)


class PointNaviThorRGBDPPOExperimentConfig(PointNaviThorBaseConfig):
    """A Point Navigation experiment configuration in iTHOR with RGBD
    input."""

    SENSORS = [
        RGBSensorThor(
            height=PointNaviThorBaseConfig.SCREEN_SIZE,
            width=PointNaviThorBaseConfig.SCREEN_SIZE,
            use_resnet_normalization=True,
            uuid="rgb_lowres",
        ),
        DepthSensorThor(
            height=PointNaviThorBaseConfig.SCREEN_SIZE,
            width=PointNaviThorBaseConfig.SCREEN_SIZE,
            use_normalization=True,
            uuid="depth_lowres",
        ),
        GPSCompassSensorRoboThor(),
    ]

    def __init__(self):
        super().__init__()

        self.model_creation_handler = PointNavUnfrozenResNetWithGRUActorCriticMixin(
            backbone="simple_cnn",
            sensors=self.SENSORS,
            auxiliary_uuids=[],
            add_prev_actions=True,
            multiple_beliefs=False,
            belief_fusion=None,
        )

    def training_pipeline(self, **kwargs) -> TrainingPipeline:
        return PointNavPPOMixin.training_pipeline(
            auxiliary_uuids=[],
            multiple_beliefs=False,
            normalize_advantage=True,
            advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
        )

    def create_model(self, **kwargs):
        return self.model_creation_handler.create_model(**kwargs)

    def tag(self):
        return "PointNav-iTHOR-RGBD-SimpleConv-DDPPO"
allenact-main
projects/pointnav_baselines/experiments/ithor/pointnav_ithor_rgbd_simpleconvgru_ddppo.py
allenact-main
projects/pointnav_baselines/experiments/ithor/__init__.py
from allenact.utils.experiment_utils import TrainingPipeline
from allenact_plugins.ithor_plugin.ithor_sensors import RGBSensorThor
from allenact_plugins.robothor_plugin.robothor_sensors import GPSCompassSensorRoboThor
from projects.pointnav_baselines.experiments.ithor.pointnav_ithor_base import (
    PointNaviThorBaseConfig,
)
from projects.pointnav_baselines.mixins import (
    PointNavPPOMixin,
    PointNavUnfrozenResNetWithGRUActorCriticMixin,
)


class PointNaviThorRGBPPOExperimentConfig(PointNaviThorBaseConfig):
    """A Point Navigation experiment configuration in iTHOR with RGB
    input."""

    SENSORS = [
        RGBSensorThor(
            height=PointNaviThorBaseConfig.SCREEN_SIZE,
            width=PointNaviThorBaseConfig.SCREEN_SIZE,
            use_resnet_normalization=True,
            uuid="rgb_lowres",
        ),
        GPSCompassSensorRoboThor(),
    ]

    def __init__(self):
        super().__init__()

        self.model_creation_handler = PointNavUnfrozenResNetWithGRUActorCriticMixin(
            backbone="simple_cnn",
            sensors=self.SENSORS,
            auxiliary_uuids=[],
            add_prev_actions=True,
            multiple_beliefs=False,
            belief_fusion=None,
        )

    def training_pipeline(self, **kwargs) -> TrainingPipeline:
        return PointNavPPOMixin.training_pipeline(
            auxiliary_uuids=[],
            multiple_beliefs=False,
            normalize_advantage=True,
            advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
        )

    def create_model(self, **kwargs):
        return self.model_creation_handler.create_model(**kwargs)

    def tag(self):
        return "PointNav-iTHOR-RGB-SimpleConv-DDPPO"
allenact-main
projects/pointnav_baselines/experiments/ithor/pointnav_ithor_rgb_simpleconvgru_ddppo.py
import os
from abc import ABC

from projects.pointnav_baselines.experiments.pointnav_thor_base import (
    PointNavThorBaseConfig,
)


class PointNaviThorBaseConfig(PointNavThorBaseConfig, ABC):
    """The base config for all iTHOR PointNav experiments."""

    NUM_PROCESSES = 40

    TRAIN_DATASET_DIR = os.path.join(os.getcwd(), "datasets/ithor-pointnav/train")
    VAL_DATASET_DIR = os.path.join(os.getcwd(), "datasets/ithor-pointnav/val")
allenact-main
projects/pointnav_baselines/experiments/ithor/pointnav_ithor_base.py
from allenact.utils.experiment_utils import TrainingPipeline
from allenact_plugins.robothor_plugin.robothor_sensors import (
    DepthSensorThor,
    GPSCompassSensorRoboThor,
)
from projects.pointnav_baselines.experiments.ithor.pointnav_ithor_base import (
    PointNaviThorBaseConfig,
)
from projects.pointnav_baselines.mixins import (
    PointNavPPOMixin,
    PointNavUnfrozenResNetWithGRUActorCriticMixin,
)


class PointNaviThorDepthPPOExperimentConfig(PointNaviThorBaseConfig):
    """A Point Navigation experiment configuration in iTHOR with Depth
    input."""

    SENSORS = [
        DepthSensorThor(
            height=PointNaviThorBaseConfig.SCREEN_SIZE,
            width=PointNaviThorBaseConfig.SCREEN_SIZE,
            use_normalization=True,
            uuid="depth_lowres",
        ),
        GPSCompassSensorRoboThor(),
    ]

    def __init__(self):
        super().__init__()

        self.model_creation_handler = PointNavUnfrozenResNetWithGRUActorCriticMixin(
            backbone="simple_cnn",
            sensors=self.SENSORS,
            auxiliary_uuids=[],
            add_prev_actions=True,
            multiple_beliefs=False,
            belief_fusion=None,
        )

    def training_pipeline(self, **kwargs) -> TrainingPipeline:
        return PointNavPPOMixin.training_pipeline(
            auxiliary_uuids=[],
            multiple_beliefs=False,
            normalize_advantage=True,
            advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
        )

    def create_model(self, **kwargs):
        return self.model_creation_handler.create_model(**kwargs)

    def tag(self):
        return "PointNav-iTHOR-Depth-SimpleConv-DDPPO"
allenact-main
projects/pointnav_baselines/experiments/ithor/pointnav_ithor_depth_simpleconvgru_ddppo.py
import os
from typing import Dict, Any, List, Optional, Sequence

import gym
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from torchvision import models

from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.base_abstractions.experiment_config import ExperimentConfig, MachineParams
from allenact.base_abstractions.preprocessor import SensorPreprocessorGraph
from allenact.base_abstractions.sensor import SensorSuite
from allenact.base_abstractions.task import TaskSampler
from allenact.embodiedai.preprocessors.resnet import ResNetPreprocessor
from allenact.utils.experiment_utils import (
    Builder,
    PipelineStage,
    TrainingPipeline,
    LinearDecay,
    evenly_distribute_count_into_bins,
)
from allenact_plugins.habitat_plugin.habitat_constants import (
    HABITAT_DATASETS_DIR,
    HABITAT_CONFIGS_DIR,
)
from allenact_plugins.habitat_plugin.habitat_sensors import (
    RGBSensorHabitat,
    TargetCoordinatesSensorHabitat,
)
from allenact_plugins.habitat_plugin.habitat_task_samplers import PointNavTaskSampler
from allenact_plugins.habitat_plugin.habitat_utils import (
    construct_env_configs,
    get_habitat_config,
)
from allenact_plugins.navigation_plugin.objectnav.models import (
    ResnetTensorNavActorCritic,
)
from allenact_plugins.robothor_plugin.robothor_tasks import PointNavTask


class PointNavHabitatRGBPPOTutorialExperimentConfig(ExperimentConfig):
    """A Point Navigation experiment configuration in Habitat."""

    # Task Parameters
    MAX_STEPS = 500
    REWARD_CONFIG = {
        "step_penalty": -0.01,
        "goal_success_reward": 10.0,
        "failed_stop_reward": 0.0,
        "shaping_weight": 1.0,
    }
    DISTANCE_TO_GOAL = 0.2

    # Simulator Parameters
    CAMERA_WIDTH = 640
    CAMERA_HEIGHT = 480
    SCREEN_SIZE = 224

    # Training Engine Parameters
    ADVANCE_SCENE_ROLLOUT_PERIOD: Optional[int] = None
    NUM_PROCESSES = max(5 * torch.cuda.device_count() - 1, 4)
    TRAINING_GPUS = list(range(torch.cuda.device_count()))
    VALIDATION_GPUS = [torch.cuda.device_count() - 1]
    TESTING_GPUS = [torch.cuda.device_count() - 1]

    task_data_dir_template = os.path.join(
        HABITAT_DATASETS_DIR, "pointnav/gibson/v1/{}/{}.json.gz"
    )
    TRAIN_SCENES = task_data_dir_template.format(*(["train"] * 2))
    VALID_SCENES = task_data_dir_template.format(*(["val"] * 2))
    TEST_SCENES = task_data_dir_template.format(*(["test"] * 2))

    CONFIG = get_habitat_config(
        os.path.join(HABITAT_CONFIGS_DIR, "tasks/pointnav_gibson.yaml")
    )
    CONFIG.defrost()
    CONFIG.NUM_PROCESSES = NUM_PROCESSES
    CONFIG.SIMULATOR_GPU_IDS = TRAINING_GPUS
    CONFIG.DATASET.SCENES_DIR = "habitat/habitat-api/data/scene_datasets/"
    CONFIG.DATASET.POINTNAVV1.CONTENT_SCENES = ["*"]
    CONFIG.DATASET.DATA_PATH = TRAIN_SCENES
    CONFIG.SIMULATOR.AGENT_0.SENSORS = ["RGB_SENSOR"]
    CONFIG.SIMULATOR.RGB_SENSOR.WIDTH = CAMERA_WIDTH
    CONFIG.SIMULATOR.RGB_SENSOR.HEIGHT = CAMERA_HEIGHT
    CONFIG.SIMULATOR.TURN_ANGLE = 30
    CONFIG.SIMULATOR.FORWARD_STEP_SIZE = 0.25
    CONFIG.ENVIRONMENT.MAX_EPISODE_STEPS = MAX_STEPS

    CONFIG.TASK.TYPE = "Nav-v0"
    CONFIG.TASK.SUCCESS_DISTANCE = DISTANCE_TO_GOAL
    CONFIG.TASK.SENSORS = ["POINTGOAL_WITH_GPS_COMPASS_SENSOR"]
    CONFIG.TASK.POINTGOAL_WITH_GPS_COMPASS_SENSOR.GOAL_FORMAT = "POLAR"
    CONFIG.TASK.POINTGOAL_WITH_GPS_COMPASS_SENSOR.DIMENSIONALITY = 2
    CONFIG.TASK.GOAL_SENSOR_UUID = "pointgoal_with_gps_compass"
    CONFIG.TASK.MEASUREMENTS = ["DISTANCE_TO_GOAL", "SUCCESS", "SPL"]
    CONFIG.TASK.SPL.TYPE = "SPL"
    CONFIG.TASK.SPL.SUCCESS_DISTANCE = DISTANCE_TO_GOAL
    CONFIG.TASK.SUCCESS.SUCCESS_DISTANCE = DISTANCE_TO_GOAL

    CONFIG.MODE = "train"

    SENSORS = [
        RGBSensorHabitat(
            height=SCREEN_SIZE, width=SCREEN_SIZE, use_resnet_normalization=True,
        ),
        TargetCoordinatesSensorHabitat(coordinate_dims=2),
    ]

    PREPROCESSORS = [
        Builder(
            ResNetPreprocessor,
            {
                "input_height": SCREEN_SIZE,
                "input_width": SCREEN_SIZE,
                "output_width": 7,
                "output_height": 7,
                "output_dims": 512,
                "pool": False,
                "torchvision_resnet_model": models.resnet18,
                "input_uuids": ["rgb_lowres"],
                "output_uuid": "rgb_resnet",
            },
        ),
    ]

    OBSERVATIONS = [
        "rgb_resnet",
        "target_coordinates_ind",
    ]

    TRAIN_CONFIGS = construct_env_configs(CONFIG)

    @classmethod
    def tag(cls):
        return "PointNavHabitatRGBPPO"

    @classmethod
    def training_pipeline(cls, **kwargs):
        ppo_steps = int(250000000)
        lr = 3e-4
        num_mini_batch = 1
        update_repeats = 3
        num_steps = 30
        save_interval = 5000000
        log_interval = 10000
        gamma = 0.99
        use_gae = True
        gae_lambda = 0.95
        max_grad_norm = 0.5
        return TrainingPipeline(
            save_interval=save_interval,
            metric_accumulate_interval=log_interval,
            optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
            num_mini_batch=num_mini_batch,
            update_repeats=update_repeats,
            max_grad_norm=max_grad_norm,
            num_steps=num_steps,
            named_losses={"ppo_loss": PPO(**PPOConfig)},
            gamma=gamma,
            use_gae=use_gae,
            gae_lambda=gae_lambda,
            advance_scene_rollout_period=cls.ADVANCE_SCENE_ROLLOUT_PERIOD,
            pipeline_stages=[
                PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps)
            ],
            lr_scheduler_builder=Builder(
                LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
            ),
        )

    def machine_params(self, mode="train", **kwargs):
        if mode == "train":
            workers_per_device = 1
            gpu_ids = (
                []
                if not torch.cuda.is_available()
                else self.TRAINING_GPUS * workers_per_device
            )
            nprocesses = (
                1
                if not torch.cuda.is_available()
                else evenly_distribute_count_into_bins(self.NUM_PROCESSES, len(gpu_ids))
            )
        elif mode == "valid":
            nprocesses = 1
            gpu_ids = [] if not torch.cuda.is_available() else self.VALIDATION_GPUS
        elif mode == "test":
            nprocesses = 1
            gpu_ids = [] if not torch.cuda.is_available() else self.TESTING_GPUS
        else:
            raise NotImplementedError("mode must be 'train', 'valid', or 'test'.")

        sensor_preprocessor_graph = (
            SensorPreprocessorGraph(
                source_observation_spaces=SensorSuite(self.SENSORS).observation_spaces,
                preprocessors=self.PREPROCESSORS,
            )
            if mode == "train"
            or (
                (isinstance(nprocesses, int) and nprocesses > 0)
                or (isinstance(nprocesses, Sequence) and sum(nprocesses) > 0)
            )
            else None
        )

        return MachineParams(
            nprocesses=nprocesses,
            devices=gpu_ids,
            sensor_preprocessor_graph=sensor_preprocessor_graph,
        )

    # Define Model
    @classmethod
    def create_model(cls, **kwargs) -> nn.Module:
        return ResnetTensorNavActorCritic(
            action_space=gym.spaces.Discrete(len(PointNavTask.class_action_names())),
            observation_space=kwargs["sensor_preprocessor_graph"].observation_spaces,
            goal_sensor_uuid="target_coordinates_ind",
            rgb_resnet_preprocessor_uuid="rgb_resnet",
            hidden_size=512,
            goal_dims=32,
        )

    # Define Task Sampler
    @classmethod
    def make_sampler_fn(cls, **kwargs) -> TaskSampler:
        return PointNavTaskSampler(**kwargs)

    def train_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        config = self.TRAIN_CONFIGS[process_ind]
        return {
            "env_config": config,
            "max_steps": self.MAX_STEPS,
            "sensors": self.SENSORS,
            "action_space": gym.spaces.Discrete(len(PointNavTask.class_action_names())),
            "distance_to_goal": self.DISTANCE_TO_GOAL,  # type:ignore
        }

    def valid_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        config = self.CONFIG.clone()
        config.defrost()
        config.DATASET.DATA_PATH = self.VALID_SCENES
        config.MODE = "validate"
        config.freeze()
        return {
            "env_config": config,
            "max_steps": self.MAX_STEPS,
            "sensors": self.SENSORS,
            "action_space": gym.spaces.Discrete(len(PointNavTask.class_action_names())),
            "distance_to_goal": self.DISTANCE_TO_GOAL,  # type:ignore
        }

    def test_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        raise NotImplementedError("Testing not implemented for this tutorial.")
allenact-main
projects/tutorials/pointnav_habitat_rgb_ddppo.py
# literate: tutorials/running-inference-on-a-pretrained-model.md
# %%
"""# Tutorial: Inference with a pre-trained model."""

# %%
"""
In this tutorial we will run inference on a pre-trained model for the PointNav task in the RoboTHOR
environment. In this task the agent is tasked with going to a specific location within a realistic
3D environment.

For information on how to train a PointNav model see [this tutorial](training-a-pointnav-model.md)

We will need to [install the full AllenAct library](../installation/installation-allenact.md#full-library),
the `robothor_plugin` requirements via

```bash
pip install -r allenact_plugins/robothor_plugin/extra_requirements.txt
```

and [download the RoboTHOR PointNav dataset](../installation/download-datasets.md) before we get started.

For this tutorial we will download the weights of a model trained on the debug dataset. This can be
done with a handy script in the `pretrained_model_ckpts` directory:

```bash
bash pretrained_model_ckpts/download_navigation_model_ckpts.sh robothor-pointnav-rgb-resnet
```

This will download the weights for an RGB model that has been trained on the PointNav task in RoboTHOR
to `pretrained_model_ckpts/robothor-pointnav-rgb-resnet`

Next we need to run the inference, using the PointNav experiment config from the
[tutorial on making a PointNav experiment](training-a-pointnav-model.md).
We can do this with the following command:

```bash
PYTHONPATH=. python allenact/main.py -o <PATH_TO_OUTPUT> -b <BASE_DIRECTORY_OF_YOUR_EXPERIMENT> -c <PATH_TO_CHECKPOINT> --eval
```

Where `<PATH_TO_OUTPUT>` is the location where the results of the test will be dumped, `<PATH_TO_CHECKPOINT>`
is the location of the downloaded model weights, and `<BASE_DIRECTORY_OF_YOUR_EXPERIMENT>` is a path to the
directory where our experiment definition is stored.

For our current setup the following command would work:

```bash
PYTHONPATH=. python allenact/main.py \
    training_a_pointnav_model \
    -o pretrained_model_ckpts/robothor-pointnav-rgb-resnet/ \
    -b projects/tutorials \
    -c pretrained_model_ckpts/robothor-pointnav-rgb-resnet/checkpoints/PointNavRobothorRGBPPO/2020-08-31_12-13-30/exp_PointNavRobothorRGBPPO__stage_00__steps_000039031200.pt \
    --eval
```

For testing on all saved checkpoints we pass a directory to `--checkpoint` rather than just a single file:

```bash
PYTHONPATH=. python allenact/main.py \
    training_a_pointnav_model \
    -o pretrained_model_ckpts/robothor-pointnav-rgb-resnet/ \
    -b projects/tutorials \
    -c pretrained_model_ckpts/robothor-pointnav-rgb-resnet/checkpoints/PointNavRobothorRGBPPO/2020-08-31_12-13-30 \
    --eval
```

## Visualization

We also show examples of visualizations that can be extracted from the `"valid"` and `"test"` modes.
Currently, visualization is still undergoing design changes and does not support multi-agent tasks, but
the available functionality is sufficient for PointNav in RoboTHOR.

Following up on the example above, we can make a specialized pointnav `ExperimentConfig` where we
instantiate the base visualization class, `VizSuite`, defined in
[`allenact.utils.viz_utils`](https://github.com/allenai/allenact/tree/master/allenact/utils/viz_utils.py),
when in `test` mode.

Each visualization type can be thought of as a plugin to the base `VizSuite`. For example, all
`episode_ids` passed to `VizSuite` will be processed with each of the instantiated visualization types
(possibly with the exception of the `AgentViewViz`). In the example below we show how to instantiate
different visualization types from 4 different data sources.

The data sources available to `VizSuite` are:

* Task output (e.g. 2D trajectories)
* Vector task (e.g. egocentric views)
* Rollout storage (e.g. recurrent memory, taken action logprobs...)
* `ActorCriticOutput` (e.g. action probabilities)

The visualization types included below are:

* `TrajectoryViz`: Generic 2D trajectory view.
* `AgentViewViz`: RGB egocentric view.
* `ActorViz`: Action probabilities from `ActorCriticOutput[CategoricalDistr]`.
* `TensorViz1D`: Evolution of a point from RolloutStorage over time.
* `TensorViz2D`: Evolution of a vector from RolloutStorage over time.
* `ThorViz`: Specialized 2D trajectory view
  [for RoboTHOR](https://github.com/allenai/allenact/tree/master/allenact_plugins/robothor_plugin/robothor_viz.py).

Note that we need to explicitly set the `episode_ids` that we wish to visualize. For `AgentViewViz` we
have the option of using a different (typically shorter) list of episodes or enforcing the ones used for
the rest of the visualizations.
"""

# %% hide
from typing import Optional

from allenact.utils.viz_utils import (
    VizSuite,
    TrajectoryViz,
    ActorViz,
    AgentViewViz,
    TensorViz1D,
    TensorViz2D,
)
from allenact_plugins.robothor_plugin.robothor_viz import ThorViz
from projects.tutorials.training_a_pointnav_model import (
    PointNavRoboThorRGBPPOExperimentConfig,
)


# %%
class PointNavRoboThorRGBPPOVizExperimentConfig(PointNavRoboThorRGBPPOExperimentConfig):
    """ExperimentConfig used to demonstrate how to set up visualization code.

    # Attributes

    viz_ep_ids : Scene names that will be visualized.
    viz_video_ids : Scene names that will have video visualizations associated with them.
    """

    viz_ep_ids = [
        "FloorPlan_Train1_1_3",
        "FloorPlan_Train1_1_4",
        "FloorPlan_Train1_1_5",
        "FloorPlan_Train1_1_6",
    ]
    viz_video_ids = [["FloorPlan_Train1_1_3"], ["FloorPlan_Train1_1_4"]]

    viz: Optional[VizSuite] = None

    def get_viz(self, mode):
        if self.viz is not None:
            return self.viz

        self.viz = VizSuite(
            episode_ids=self.viz_ep_ids,
            mode=mode,
            # Basic 2D trajectory visualizer (task output source):
            base_trajectory=TrajectoryViz(
                path_to_target_location=("task_info", "target",),
            ),
            # Egocentric view visualizer (vector task source):
            egocentric=AgentViewViz(
                max_video_length=100, episode_ids=self.viz_video_ids
            ),
            # Default action probability visualizer (actor critic output source):
            action_probs=ActorViz(figsize=(3.25, 10), fontsize=18),
            # Default taken action logprob visualizer (rollout storage source):
            taken_action_logprobs=TensorViz1D(),
            # Same episode mask visualizer (rollout storage source):
            episode_mask=TensorViz1D(rollout_source=("masks",)),
            # Default recurrent memory visualizer (rollout storage source):
            rnn_memory=TensorViz2D(
                rollout_source=("memory_first_last", "single_belief")
            ),
            # Specialized 2D trajectory visualizer (task output source):
            thor_trajectory=ThorViz(
                figsize=(16, 8),
                viz_rows_cols=(448, 448),
                scenes=("FloorPlan_Train{}_{}", 1, 1, 1, 1),
            ),
        )

        return self.viz

    def machine_params(self, mode="train", **kwargs):
        res = super().machine_params(mode, **kwargs)
        if mode == "test":
            res.set_visualizer(self.get_viz(mode))

        return res


# %%
"""
Running test on the same downloaded models, but using the visualization-enabled `ExperimentConfig`, with

```bash
PYTHONPATH=. python allenact/main.py \
    running_inference_tutorial \
    -o pretrained_model_ckpts/robothor-pointnav-rgb-resnet/ \
    -b projects/tutorials \
    -c pretrained_model_ckpts/robothor-pointnav-rgb-resnet/checkpoints/PointNavRobothorRGBPPO/2020-08-31_12-13-30/exp_PointNavRobothorRGBPPO__stage_00__steps_000039031200.pt \
    --eval
```

generates different types of visualization and logs them in tensorboard. If everything is properly set up
and tensorboard includes the `robothor-pointnav-rgb-resnet` folder, under the `IMAGES` tab we should see
something similar to

![Visualization example](../img/viz_pretrained_2videos.jpg)
"""
allenact-main
projects/tutorials/running_inference_tutorial.py
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR

from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.imitation import Imitation
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.utils.experiment_utils import (
    Builder,
    PipelineStage,
    TrainingPipeline,
    LinearDecay,
)
from allenact.base_abstractions.sensor import ExpertActionSensor
from projects.tutorials.object_nav_ithor_ppo_one_object import (
    ObjectNavThorPPOExperimentConfig,
    ObjectNaviThorGridTask,
)


class ObjectNavThorDaggerThenPPOExperimentConfig(ObjectNavThorPPOExperimentConfig):
    """A simple object navigation experiment in THOR.

    Training with DAgger and then PPO.
    """

    SENSORS = ObjectNavThorPPOExperimentConfig.SENSORS + [
        ExpertActionSensor(
            action_space=len(ObjectNaviThorGridTask.class_action_names()),
        ),
    ]

    @classmethod
    def tag(cls):
        return "ObjectNavThorDaggerThenPPO"

    @classmethod
    def training_pipeline(cls, **kwargs):
        dagger_steps = int(1e4)
        ppo_steps = int(1e6)
        lr = 2.5e-4
        num_mini_batch = 2 if not torch.cuda.is_available() else 6
        update_repeats = 4
        num_steps = 128
        metric_accumulate_interval = cls.MAX_STEPS * 10  # Log every 10 max length tasks
        save_interval = 10000
        gamma = 0.99
        use_gae = True
        gae_lambda = 1.0
        max_grad_norm = 0.5

        return TrainingPipeline(
            save_interval=save_interval,
            metric_accumulate_interval=metric_accumulate_interval,
            optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
            num_mini_batch=num_mini_batch,
            update_repeats=update_repeats,
            max_grad_norm=max_grad_norm,
            num_steps=num_steps,
            named_losses={
                "ppo_loss": PPO(clip_decay=LinearDecay(ppo_steps), **PPOConfig),
                "imitation_loss": Imitation(),  # We add an imitation loss.
            },
            gamma=gamma,
            use_gae=use_gae,
            gae_lambda=gae_lambda,
            advance_scene_rollout_period=cls.ADVANCE_SCENE_ROLLOUT_PERIOD,
            pipeline_stages=[
                # DAgger stage: imitation loss only, with teacher forcing
                # annealed linearly from full supervision (1.0) to none (0.0).
                PipelineStage(
                    loss_names=["imitation_loss"],
                    teacher_forcing=LinearDecay(
                        startp=1.0, endp=0.0, steps=dagger_steps,
                    ),
                    max_stage_steps=dagger_steps,
                ),
                PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps,),
            ],
            lr_scheduler_builder=Builder(
                LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
            ),
        )
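# Hypothetical usage sketch (the runner pattern mirrors the other tutorial configs
# in this directory; the exact flags below are assumptions based on them):
#
#   PYTHONPATH=. python allenact/main.py object_nav_ithor_dagger_then_ppo_one_object \
#       -b projects/tutorials -o <PATH_TO_OUTPUT>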
allenact-main
projects/tutorials/object_nav_ithor_dagger_then_ppo_one_object.py
# literate: tutorials/offpolicy-tutorial.md
# %%
"""# Tutorial: Off-policy training."""

# %%
"""
**Note** The provided commands to execute in this tutorial assume you have
[installed the full library](../installation/installation-allenact.md#full-library) and the
`extra_requirements` for the `babyai_plugin` and `minigrid_plugin`. The latter can be installed with:

```bash
pip install -r allenact_plugins/babyai_plugin/extra_requirements.txt; pip install -r allenact_plugins/minigrid_plugin/extra_requirements.txt
```

In this tutorial we'll learn how to train an agent from an external dataset by imitating expert actions via
behavior cloning. We'll use a
[BabyAI agent](/api/allenact_plugins/babyai_plugin/babyai_models#BabyAIRecurrentACModel) to solve `GoToLocal`
tasks on [MiniGrid](https://github.com/maximecb/gym-minigrid); see the
`projects/babyai_baselines/experiments/go_to_local` directory for more details.

This tutorial assumes `AllenAct`'s [abstractions](../getting_started/abstractions.md) are known.

## The task

In a `GoToLocal` task, the agent, immersed in a grid world, has to navigate to a specific object in the
presence of multiple distractors, requiring the agent to understand `go to` instructions like "go to the red
ball". For further details, please consult the [original paper](https://arxiv.org/abs/1810.08272).

## Getting the dataset

We will use a large dataset (**more than 4 GB**) including expert demonstrations for `GoToLocal` tasks. To
download the data we'll run

```bash
PYTHONPATH=. python allenact_plugins/babyai_plugin/scripts/download_babyai_expert_demos.py GoToLocal
```

from the project's root directory, which will download `BabyAI-GoToLocal-v0.pkl` and
`BabyAI-GoToLocal-v0_valid.pkl` to the `allenact_plugins/babyai_plugin/data/demos` directory.

We will also generate small versions of the datasets, which will be useful if running on CPU, by calling

```bash
PYTHONPATH=. python allenact_plugins/babyai_plugin/scripts/truncate_expert_demos.py
```

from the project's root directory, which will generate `BabyAI-GoToLocal-v0-small.pkl` under the same
`allenact_plugins/babyai_plugin/data/demos` directory.

## Data storage

In order to train with an off-policy dataset, we need to define an `ExperienceStorage`. In AllenAct, an
`ExperienceStorage` object has two primary functions:

1. It stores/manages relevant data (e.g. similarly to the `Dataset` class in PyTorch).
2. It loads stored data into batches that will be used for loss computation (e.g. similarly to the
   `DataLoader` class in PyTorch).

Unlike a PyTorch `Dataset`, however, an `ExperienceStorage` object can build its dataset **at runtime** by
processing rollouts from the agent. This flexibility allows us to, for example, implement the experience
replay data structure used in deep Q-learning. For this tutorial we won't need this additional functionality
as our off-policy dataset is a fixed collection of expert trajectories.

An example of an `ExperienceStorage` for BabyAI expert demos might look as follows:
"""

# %% import_summary allenact_plugins.minigrid_plugin.minigrid_offpolicy.MiniGridExpertTrajectoryStorage

# %%
"""
A complete example can be found in
[MiniGridExpertTrajectoryStorage](/api/allenact_plugins/minigrid_plugin/minigrid_offpolicy#MiniGridExpertTrajectoryStorage).

## Loss function

Off-policy losses must implement the
[`GenericAbstractLoss`](/api/allenact/base_abstractions/misc/#genericabstractloss) interface.
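Before looking at the concrete loss used here, it may help to see, as a standalone hedged sketch (all names
below are illustrative, not AllenAct API), the quantity a behavior-cloning loss ultimately computes:

```python
# Sketch: behavior cloning reduces to a cross-entropy between the policy's
# action logits and the expert's chosen actions.
import torch
import torch.nn.functional as F


def bc_loss(action_logits: torch.Tensor, expert_actions: torch.Tensor) -> torch.Tensor:
    # action_logits: [num_steps, num_actions]; expert_actions: [num_steps], int64
    return F.cross_entropy(action_logits, expert_actions)


loss = bc_loss(torch.randn(5, 7), torch.randint(0, 7, (5,)))
```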
In this case, we minimize the cross-entropy between the actor's policy and the expert action:
"""

# %% import_summary allenact_plugins.minigrid_plugin.minigrid_offpolicy.MiniGridOffPolicyExpertCELoss

# %%
"""
A complete example can be found in
[MiniGridOffPolicyExpertCELoss](/api/allenact_plugins/minigrid_plugin/minigrid_offpolicy#MiniGridOffPolicyExpertCELoss).
Note that in this case we train the entire actor, but it would also be possible to forward data through a
different subgraph of the `ActorCriticModel`.

## Experiment configuration

For the experiment configuration, we'll build on top of an existing
[base BabyAI GoToLocal Experiment Config](/api/projects/babyai_baselines/experiments/go_to_local/base/#basebabyaigotolocalexperimentconfig).
The complete `ExperimentConfig` file for off-policy training is
[here](/api/projects/tutorials/minigrid_offpolicy_tutorial/#bcoffpolicybabyaigotolocalexperimentconfig), but
let's focus on the most relevant aspect for enabling this type of training: providing a
[StageComponent](/api/allenact/utils/experiment_utils/#stagecomponent) that points at an off-policy
`ExperienceStorage` to a `PipelineStage` when instantiating the `TrainingPipeline` in the `training_pipeline`
method.
"""

# %% hide
import os
from typing import Optional, List, Tuple

import torch
from gym_minigrid.minigrid import MiniGridEnv

from allenact.algorithms.onpolicy_sync.storage import RolloutBlockStorage
from allenact.utils.experiment_utils import (
    PipelineStage,
    StageComponent,
    TrainingSettings,
)
from allenact_plugins.babyai_plugin.babyai_constants import (
    BABYAI_EXPERT_TRAJECTORIES_DIR,
)
from allenact_plugins.minigrid_plugin.minigrid_offpolicy import (
    MiniGridOffPolicyExpertCELoss,
    MiniGridExpertTrajectoryStorage,
)
from projects.babyai_baselines.experiments.go_to_local.base import (
    BaseBabyAIGoToLocalExperimentConfig,
)

# %%
class BCOffPolicyBabyAIGoToLocalExperimentConfig(BaseBabyAIGoToLocalExperimentConfig):
    """BC Off-policy imitation."""

    DATASET: Optional[List[Tuple[str, bytes, List[int], MiniGridEnv.Actions]]] = None
    GPU_ID = 0 if torch.cuda.is_available() else None

    @classmethod
    def tag(cls):
        return "BabyAIGoToLocalBCOffPolicy"

    @classmethod
    def METRIC_ACCUMULATE_INTERVAL(cls):
        # See BaseBabyAIGoToLocalExperimentConfig for how this is used.
        return 1

    @classmethod
    def training_pipeline(cls, **kwargs):
        total_train_steps = cls.TOTAL_IL_TRAIN_STEPS
        ppo_info = cls.rl_loss_default("ppo", steps=-1)

        num_mini_batch = ppo_info["num_mini_batch"]
        update_repeats = ppo_info["update_repeats"]

        # fmt: off
        return cls._training_pipeline(
            named_losses={
                "offpolicy_expert_ce_loss": MiniGridOffPolicyExpertCELoss(
                    total_episodes_in_epoch=int(1e6)
                ),
            },
            named_storages={
                "onpolicy": RolloutBlockStorage(),
                "minigrid_offpolicy_expert": MiniGridExpertTrajectoryStorage(
                    data_path=os.path.join(
                        BABYAI_EXPERT_TRAJECTORIES_DIR,
                        "BabyAI-GoToLocal-v0{}.pkl".format(
                            "" if torch.cuda.is_available() else "-small"
                        ),
                    ),
                    num_samplers=cls.NUM_TRAIN_SAMPLERS,
                    rollout_len=cls.ROLLOUT_STEPS,
                    instr_len=cls.INSTR_LEN,
                ),
            },
            pipeline_stages=[
                # Single stage, only with off-policy training
                PipelineStage(
                    loss_names=["offpolicy_expert_ce_loss"],  # no on-policy losses
                    max_stage_steps=total_train_steps,  # keep sampling episodes in the stage
                    stage_components=[
                        StageComponent(
                            uuid="offpolicy",
                            storage_uuid="minigrid_offpolicy_expert",
                            loss_names=["offpolicy_expert_ce_loss"],
                            training_settings=TrainingSettings(
                                update_repeats=num_mini_batch * update_repeats,
                                num_mini_batch=1,
                            )
                        )
                    ],
                ),
            ],
            # As we don't have any on-policy losses, we set the next
            # two values to zero to ensure we don't attempt to
            # compute gradients for on-policy rollouts:
            num_mini_batch=0,
            update_repeats=0,
            total_train_steps=total_train_steps,
        )
        # fmt: on

# %%
"""
You'll have noted that it is possible to combine on-policy and off-policy training in the same stage, even
though here we apply pure off-policy training.

## Training

We recommend using a machine with a CUDA-capable GPU for this experiment. In order to start training, we just
need to invoke

```bash
PYTHONPATH=. python allenact/main.py -b projects/tutorials minigrid_offpolicy_tutorial -m 8 -o <OUTPUT_PATH>
```

Note that with the `-m 8` option we limit to 8 the number of on-policy task sampling processes used between
off-policy updates.

If everything goes well, the training success should quickly reach values around 0.7-0.8 on GPU and converge
to values close to 1 if given sufficient time to train.

If running tensorboard, you'll notice a separate group of scalars named `train-offpolicy-losses` and
`train-offpolicy-misc` with losses, approximate "experiences per second" (i.e. the number of off-policy
experiences/steps being used to update the model per second), and other tracked values in addition to the
standard `train-onpolicy-*` used for on-policy training.

In the `train-metrics` and `train-misc` sections you'll find the metrics quantifying the performance of the
agent throughout training and some other plots showing training details. *Note that the x-axis for these
plots is different from that of the `train-offpolicy-*` sections*. This is because these plots use the number
of rollout steps as the x-axis (i.e. steps that the trained agent takes interactively) while the
`train-offpolicy-*` plots use the number of off-policy "experiences" that have been shown to the agent.

A view of the training progress about 5 hours after starting on a CUDA-capable GPU should look similar to the
below (note that training reached >99% success after about 50 minutes).

![off-policy progress](https://ai2-prior-allenact-public-assets.s3.us-west-2.amazonaws.com/tutorials/minigrid-offpolicy/minigrid-offpolicy-tutorial-tb.png)
"""
allenact-main
projects/tutorials/minigrid_offpolicy_tutorial.py
from math import ceil
from typing import Dict, Any, List, Optional

import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR

from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.base_abstractions.experiment_config import ExperimentConfig
from allenact.base_abstractions.sensor import SensorSuite
from allenact.base_abstractions.task import TaskSampler
from allenact.utils.experiment_utils import (
    Builder,
    PipelineStage,
    TrainingPipeline,
    LinearDecay,
)
from allenact.utils.multi_agent_viz_utils import MultiTrajectoryViz
from allenact.utils.viz_utils import VizSuite, AgentViewViz
from allenact_plugins.robothor_plugin.robothor_models import (
    NavToPartnerActorCriticSimpleConvRNN,
)
from allenact_plugins.robothor_plugin.robothor_sensors import RGBSensorMultiRoboThor
from allenact_plugins.robothor_plugin.robothor_task_samplers import (
    NavToPartnerTaskSampler,
)
from allenact_plugins.robothor_plugin.robothor_tasks import NavToPartnerTask
from allenact_plugins.robothor_plugin.robothor_viz import ThorMultiViz


class NavToPartnerRoboThorRGBPPOExperimentConfig(ExperimentConfig):
    """A Multi-Agent Navigation experiment configuration in RoboThor."""

    # Task Parameters
    MAX_STEPS = 500
    REWARD_CONFIG = {
        "step_penalty": -0.01,
        "max_success_distance": 0.75,
        "success_reward": 5.0,
    }

    # Simulator Parameters
    CAMERA_WIDTH = 300
    CAMERA_HEIGHT = 300
    SCREEN_SIZE = 224

    # Training Engine Parameters
    ADVANCE_SCENE_ROLLOUT_PERIOD: Optional[int] = None
    NUM_PROCESSES = 20
    TRAINING_GPUS: List[int] = [0]
    VALIDATION_GPUS: List[int] = [0]
    TESTING_GPUS: List[int] = [0]

    SENSORS = [
        RGBSensorMultiRoboThor(
            agent_count=2,
            height=SCREEN_SIZE,
            width=SCREEN_SIZE,
            use_resnet_normalization=True,
            uuid="rgb",
        ),
    ]

    OBSERVATIONS = [
        "rgb",
    ]

    ENV_ARGS = dict(
        width=CAMERA_WIDTH,
        height=CAMERA_HEIGHT,
        rotateStepDegrees=30.0,
        visibilityDistance=1.0,
        gridSize=0.25,
        agentCount=2,
    )

    @classmethod
    def tag(cls):
        return "NavToPartnerRobothorRGBPPO"

    @classmethod
    def training_pipeline(cls, **kwargs):
        ppo_steps = int(1000000)
        lr = 3e-4
        num_mini_batch = 1
        update_repeats = 3
        num_steps = 30
        save_interval = 200000
        log_interval = 1
        gamma = 0.99
        use_gae = True
        gae_lambda = 0.95
        max_grad_norm = 0.5

        return TrainingPipeline(
            save_interval=save_interval,
            metric_accumulate_interval=log_interval,
            optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
            num_mini_batch=num_mini_batch,
            update_repeats=update_repeats,
            max_grad_norm=max_grad_norm,
            num_steps=num_steps,
            named_losses={"ppo_loss": PPO(**PPOConfig)},
            gamma=gamma,
            use_gae=use_gae,
            gae_lambda=gae_lambda,
            advance_scene_rollout_period=cls.ADVANCE_SCENE_ROLLOUT_PERIOD,
            pipeline_stages=[
                PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps)
            ],
            lr_scheduler_builder=Builder(
                LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
            ),
        )

    def split_num_processes(self, ndevices):
        assert self.NUM_PROCESSES >= ndevices, "NUM_PROCESSES {} < ndevices {}".format(
            self.NUM_PROCESSES, ndevices
        )
        res = [0] * ndevices
        for it in range(self.NUM_PROCESSES):
            res[it % ndevices] += 1
        return res

    viz: Optional[VizSuite] = None

    def get_viz(self, mode):
        if self.viz is not None:
            return self.viz

        self.viz = VizSuite(
            mode=mode,
            # Basic 2D trajectory visualizer (task output source):
            base_trajectory=MultiTrajectoryViz(),  # plt_colormaps=["cool", "cool"]),
            # Egocentric view visualizer (vector task source):
            egocentric=AgentViewViz(max_video_length=100, max_episodes_in_group=1),
# Specialized 2D trajectory visualizer (task output source): thor_trajectory=ThorMultiViz( figsize=(16, 8), viz_rows_cols=(448, 448), scenes=("FloorPlan_Train{}_{}", 1, 1, 1, 1), ), ) return self.viz def machine_params(self, mode="train", **kwargs): visualizer = None if mode == "train": devices = ( ["cpu"] if not torch.cuda.is_available() else list(self.TRAINING_GPUS) ) nprocesses = ( 4 if not torch.cuda.is_available() else self.split_num_processes(len(devices)) ) elif mode == "valid": nprocesses = 0 devices = ["cpu"] if not torch.cuda.is_available() else self.VALIDATION_GPUS elif mode == "test": nprocesses = 1 devices = ["cpu"] if not torch.cuda.is_available() else self.TESTING_GPUS visualizer = self.get_viz(mode=mode) else: raise NotImplementedError("mode must be 'train', 'valid', or 'test'.") return { "nprocesses": nprocesses, "devices": devices, "visualizer": visualizer, } # TODO Define Model @classmethod def create_model(cls, **kwargs) -> nn.Module: return NavToPartnerActorCriticSimpleConvRNN( action_space=gym.spaces.Tuple( [ gym.spaces.Discrete(len(NavToPartnerTask.class_action_names())), gym.spaces.Discrete(len(NavToPartnerTask.class_action_names())), ] ), observation_space=SensorSuite(cls.SENSORS).observation_spaces, hidden_size=512, ) # Define Task Sampler @classmethod def make_sampler_fn(cls, **kwargs) -> TaskSampler: return NavToPartnerTaskSampler(**kwargs) # Utility Functions for distributing scenes between GPUs @staticmethod def _partition_inds(n: int, num_parts: int): return np.round(np.linspace(0, n, num_parts + 1, endpoint=True)).astype( np.int32 ) def _get_sampler_args_for_scene_split( self, scenes: List[str], process_ind: int, total_processes: int, seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: if total_processes > len(scenes): # oversample some scenes -> bias if total_processes % len(scenes) != 0: print( "Warning: oversampling some of the scenes to feed all processes." " You can avoid this by setting a number of workers divisible by the number of scenes" ) scenes = scenes * int(ceil(total_processes / len(scenes))) scenes = scenes[: total_processes * (len(scenes) // total_processes)] else: if len(scenes) % total_processes != 0: print( "Warning: oversampling some of the scenes to feed all processes." 
" You can avoid this by setting a number of workers divisor of the number of scenes" ) inds = self._partition_inds(len(scenes), total_processes) return { "scenes": scenes[inds[process_ind] : inds[process_ind + 1]], "max_steps": self.MAX_STEPS, "sensors": self.SENSORS, "action_space": gym.spaces.Tuple( [ gym.spaces.Discrete(len(NavToPartnerTask.class_action_names())), gym.spaces.Discrete(len(NavToPartnerTask.class_action_names())), ] ), "seed": seeds[process_ind] if seeds is not None else None, "deterministic_cudnn": deterministic_cudnn, "rewards_config": self.REWARD_CONFIG, } def train_task_sampler_args( self, process_ind: int, total_processes: int, devices: Optional[List[int]] = None, seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: scenes = ["FloorPlan_Train1_1"] res = self._get_sampler_args_for_scene_split( scenes, process_ind, total_processes, seeds=seeds, deterministic_cudnn=deterministic_cudnn, ) res["env_args"] = { **self.ENV_ARGS, "x_display": ("0.%d" % devices[process_ind % len(devices)]) if devices is not None and len(devices) > 0 else None, } return res def valid_task_sampler_args( self, process_ind: int, total_processes: int, devices: Optional[List[int]] = None, seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: scenes = ["FloorPlan_Train1_1"] res = self._get_sampler_args_for_scene_split( scenes, process_ind, total_processes, seeds=seeds, deterministic_cudnn=deterministic_cudnn, ) res["env_args"] = { **self.ENV_ARGS, "x_display": ("0.%d" % devices[process_ind % len(devices)]) if devices is not None and len(devices) > 0 else None, } res["max_tasks"] = 20 return res def test_task_sampler_args( self, process_ind: int, total_processes: int, devices: Optional[List[int]] = None, seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: scenes = ["FloorPlan_Train1_1"] res = self._get_sampler_args_for_scene_split( scenes, process_ind, total_processes, seeds=seeds, deterministic_cudnn=deterministic_cudnn, ) res["env_args"] = { **self.ENV_ARGS, "x_display": ("0.%d" % devices[process_ind % len(devices)]) if devices is not None and len(devices) > 0 else None, } res["max_tasks"] = 4 return res
allenact-main
projects/tutorials/navtopartner_robothor_rgb_ppo.py
# literate: tutorials/gym-tutorial.md
# %%
"""# Tutorial: OpenAI gym for continuous control."""

# %%
"""
**Note** The provided commands to execute in this tutorial assume you have
[installed the full library](../installation/installation-allenact.md#full-library) and the requirements for
the `gym_plugin`. The latter can be installed by

```bash
pip install -r allenact_plugins/gym_plugin/extra_requirements.txt
```

In this tutorial, we:

1. Introduce the `gym_plugin`, which enables some of the tasks in [OpenAI's gym](https://gym.openai.com/) for
training and inference within AllenAct.
1. Show an example of continuous control with an arbitrary action space covering 2 policies for one of the
`gym` tasks.

## The task

For this tutorial, we'll focus on one of the continuous-control environments under the `Box2D` group of `gym`
environments: [LunarLanderContinuous-v2](https://gym.openai.com/envs/LunarLanderContinuous-v2/). In this task,
the goal is to smoothly land a lunar module in a landing pad, as shown below.

![The LunarLanderContinuous-v2 task](../img/lunar_lander_continuous_demo.png).

To achieve this goal, we need to provide continuous control for a main engine and a directional one (2 real
values). To solve the task, the agent needs to obtain an expected reward of at least 200 points. The controls
for the main and directional engines are both in the range [-1.0, 1.0] and the observation space is composed
of 8 scalars indicating `x` and `y` positions, `x` and `y` velocities, lander angle and angular velocity, and
left and right ground contact. Note that these 8 scalars provide a full observation of the state.

## Implementation

For this tutorial, we'll use the readily available `gym_plugin`, which includes a
[wrapper for `gym` environments](../api/allenact_plugins/gym_plugin/gym_environment.md#gymenvironment), a
[task sampler](../api/allenact_plugins/gym_plugin/gym_tasks.md#gymtasksampler) and
[task definition](../api/allenact_plugins/gym_plugin/gym_tasks.md#gymcontinuousbox2dtask), a
[sensor](../api/allenact_plugins/gym_plugin/gym_sensors.md#gymbox2dsensor) to wrap the observations provided
by the `gym` environment, and a simple
[model](../api/allenact_plugins/gym_plugin/gym_models.md#memorylessactorcritic).

The experiment config, similar to the one used for the
[Navigation in MiniGrid tutorial](../tutorials/minigrid-tutorial.md), is defined as follows:
"""

# %%
from typing import Dict, Optional, List, Any, cast

import gym
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR

from allenact.algorithms.onpolicy_sync.losses.ppo import PPO
from allenact.base_abstractions.experiment_config import ExperimentConfig, TaskSampler
from allenact.base_abstractions.sensor import SensorSuite
from allenact_plugins.gym_plugin.gym_models import MemorylessActorCritic
from allenact_plugins.gym_plugin.gym_sensors import GymBox2DSensor
from allenact_plugins.gym_plugin.gym_tasks import GymTaskSampler
from allenact.utils.experiment_utils import (
    TrainingPipeline,
    Builder,
    PipelineStage,
    LinearDecay,
)
from allenact.utils.viz_utils import VizSuite, AgentViewViz


class GymTutorialExperimentConfig(ExperimentConfig):
    @classmethod
    def tag(cls) -> str:
        return "GymTutorial"

# %%
"""
### Sensors and Model

As mentioned above, we'll use a
[GymBox2DSensor](../api/allenact_plugins/gym_plugin/gym_sensors.md#gymbox2dsensor) to provide full
observations from the state of the `gym` environment to our model.
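If you'd like to double-check the spaces described above, a quick (hedged) sanity check against the raw `gym`
environment looks like the following (this assumes the `gym_plugin` extra requirements, which include Box2D,
are installed):

```python
import gym

env = gym.make("LunarLanderContinuous-v2")
print(env.observation_space)  # an 8-dimensional Box, as described above
print(env.action_space)       # a 2-dimensional Box in [-1.0, 1.0]
env.close()
```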
""" # %% SENSORS = [ GymBox2DSensor("LunarLanderContinuous-v2", uuid="gym_box_data"), ] # %% """ We define our `ActorCriticModel` agent using a lightweight implementation with separate MLPs for actors and critic, [MemorylessActorCritic](../api/allenact_plugins/gym_plugin/gym_models.md#memorylessactorcritic). Since this is a model for continuous control, note that the superclass of our model is `ActorCriticModel[GaussianDistr]` instead of `ActorCriticModel[CategoricalDistr]`, since we'll use a [Gaussian distribution](../api/allenact_plugins/gym_plugin/gym_distributions.md#gaussiandistr) to sample actions. """ # %% @classmethod def create_model(cls, **kwargs) -> nn.Module: return MemorylessActorCritic( input_uuid="gym_box_data", action_space=gym.spaces.Box( -1.0, 1.0, (2,) ), # 2 actors, each in the range [-1.0, 1.0] observation_space=SensorSuite(cls.SENSORS).observation_spaces, action_std=0.5, ) # %% """ ### Task samplers We use an available `TaskSampler` implementation for `gym` environments that allows to sample [GymTasks](../api/allenact_plugins/gym_plugin/gym_tasks.md#gymtask): [GymTaskSampler](../api/allenact_plugins/gym_plugin/gym_tasks.md#gymtasksampler). Even though it is possible to let the task sampler instantiate the proper sensor for the chosen task name (by passing `None`), we use the sensors we created above, which contain a custom identifier for the actual observation space (`gym_box_data`) also used by the model. """ # %% @classmethod def make_sampler_fn(cls, **kwargs) -> TaskSampler: return GymTaskSampler(**kwargs) # %% """ For convenience, we will use a `_get_sampler_args` method to generate the task sampler arguments for all three modes, `train, valid, test`: """ # %% def train_task_sampler_args( self, process_ind: int, total_processes: int, devices: Optional[List[int]] = None, seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: return self._get_sampler_args( process_ind=process_ind, mode="train", seeds=seeds ) def valid_task_sampler_args( self, process_ind: int, total_processes: int, devices: Optional[List[int]] = None, seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: return self._get_sampler_args( process_ind=process_ind, mode="valid", seeds=seeds ) def test_task_sampler_args( self, process_ind: int, total_processes: int, devices: Optional[List[int]] = None, seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: return self._get_sampler_args(process_ind=process_ind, mode="test", seeds=seeds) # %% """ Similarly to what we do in the Minigrid navigation tutorial, the task sampler samples random tasks for ever, while, during testing (or validation), we sample a fixed number of tasks. """ # %% def _get_sampler_args( self, process_ind: int, mode: str, seeds: List[int] ) -> Dict[str, Any]: """Generate initialization arguments for train, valid, and test TaskSamplers. # Parameters process_ind : index of the current task sampler mode: one of `train`, `valid`, or `test` """ if mode == "train": max_tasks = None # infinite training tasks task_seeds_list = None # no predefined random seeds for training deterministic_sampling = False # randomly sample tasks in training else: max_tasks = 3 # one seed for each task to sample: # - ensures different seeds for each sampler, and # - ensures a deterministic set of sampled tasks. 
            task_seeds_list = list(
                range(process_ind * max_tasks, (process_ind + 1) * max_tasks)
            )

            deterministic_sampling = (
                True  # deterministically sample tasks in validation/testing
            )

        return dict(
            gym_env_types=["LunarLanderContinuous-v2"],
            sensors=self.SENSORS,  # sensors used to return observations to the agent
            max_tasks=max_tasks,  # see above
            task_seeds_list=task_seeds_list,  # see above
            deterministic_sampling=deterministic_sampling,  # see above
            seed=seeds[process_ind],
        )

# %%
"""
Note that we just sample 3 tasks for validation and testing in this case, which suffice to illustrate the
model's success.

### Machine parameters

Given the simplicity of the task and model, we can just train the model on the CPU. During training, success
should reach 100% in less than 10 minutes, whereas solving the task (evaluation reward > 200) might take about
20 minutes (on a laptop CPU).

We allocate a larger number of samplers for training (8) than for validation or testing (just 1), and we
default to CPU usage by returning an empty list of `devices`. We also include a video visualizer
(`AgentViewViz`) in test mode.
"""

# %%
    @classmethod
    def machine_params(cls, mode="train", **kwargs) -> Dict[str, Any]:
        visualizer = None
        if mode == "test":
            visualizer = VizSuite(
                mode=mode,
                video_viz=AgentViewViz(
                    label="episode_vid",
                    max_clip_length=400,
                    vector_task_source=("render", {"mode": "rgb_array"}),
                    fps=30,
                ),
            )

        return {
            "nprocesses": 8 if mode == "train" else 1,
            "devices": [],
            "visualizer": visualizer,
        }

# %%
"""
### Training pipeline

The last definition is the training pipeline. In this case, we use a PPO stage with a linearly decaying
learning rate and 80 single-batch update repeats per rollout:
"""

# %%
    @classmethod
    def training_pipeline(cls, **kwargs) -> TrainingPipeline:
        ppo_steps = int(1.2e6)
        return TrainingPipeline(
            named_losses=dict(
                ppo_loss=PPO(clip_param=0.2, value_loss_coef=0.5, entropy_coef=0.0,),
            ),  # type:ignore
            pipeline_stages=[
                PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps),
            ],
            optimizer_builder=Builder(cast(optim.Optimizer, optim.Adam), dict(lr=1e-3)),
            num_mini_batch=1,
            update_repeats=80,
            max_grad_norm=100,
            num_steps=2000,
            gamma=0.99,
            use_gae=False,
            gae_lambda=0.95,
            advance_scene_rollout_period=None,
            save_interval=200000,
            metric_accumulate_interval=50000,
            lr_scheduler_builder=Builder(
                LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)},  # type:ignore
            ),
        )

# %%
"""
## Training and validation

We have a complete implementation of this experiment's configuration class in
`projects/tutorials/gym_tutorial.py`. To start training from scratch, we just need to invoke

```bash
PYTHONPATH=. python allenact/main.py gym_tutorial -b projects/tutorials -m 8 -o /PATH/TO/gym_output -s 54321 -e
```

from the `allenact` root directory. Note that we include `-e` to enforce deterministic evaluation. Please
refer to the [Navigation in MiniGrid tutorial](../tutorials/minigrid-tutorial.md) if in doubt about the
meaning of the remaining parameters.

If we have Tensorboard installed, we can track progress with

```bash
tensorboard --logdir /PATH/TO/gym_output
```

which will default to the URL [http://localhost:6006/](http://localhost:6006/).

After 1,200,000 steps, the script will terminate. If everything went well, the `valid` success rate should
quickly converge to 1 and the mean reward to above 250, while the average episode length should stay below or
near 300.
## Testing The training start date for the experiment, in `YYYY-MM-DD_HH-MM-SS` format, is used as the name of one of the subfolders in the path to the checkpoints, saved under the output folder. In order to evaluate (i.e. test) a collection of checkpoints, we need to pass the `--eval` flag and specify the directory containing the checkpoints with the `--checkpoint CHECKPOINT_DIR` option: ```bash PYTHONPATH=. python allenact/main.py gym_tutorial \ -b projects/tutorials \ -m 1 \ -o /PATH/TO/gym_output \ -s 54321 \ -e \ --eval \ --checkpoint /PATH/TO/gym_output/checkpoints/GymTutorial/YOUR_START_DATE \ --approx_ckpt_step_interval 800000 # Skip some checkpoints ``` The option `--approx_ckpt_step_interval 800000` tells AllenAct that we only want to evaluate checkpoints which were saved every ~800000 steps, this lets us avoid evaluating every saved checkpoint. If everything went well, the `test` success rate should converge to 1, the episode length below or near 300 steps, and the mean reward to above 250. The images tab in tensorboard will contain videos for the sampled test episodes. ![video_results](../img/lunar_lander_continuous_test.png). If the test command fails with `pyglet.canvas.xlib.NoSuchDisplayException: Cannot connect to "None"`, e.g. when running remotely, try prepending `DISPLAY=:0.0` to the command above, assuming you have an xserver running with such display available: ```bash DISPLAY=:0.0 PYTHONPATH=. python allenact/main.py gym_tutorial \ -b projects/tutorials \ -m 1 \ -o /PATH/TO/gym_output \ -s 54321 \ -e \ --eval \ --checkpoint /PATH/TO/gym_output/checkpoints/GymTutorial/YOUR_START_DATE \ --approx_ckpt_step_interval 800000 ``` """
allenact-main
projects/tutorials/gym_tutorial.py
# literate: tutorials/gym-mujoco-tutorial.md
# %%
"""# Tutorial: OpenAI gym MuJoCo environment."""

# %%
"""
**Note** The provided commands to execute in this tutorial assume you have
[installed the full library](../installation/installation-allenact.md#full-library) and the requirements for
the `gym_plugin`. The latter can be installed by

```bash
pip install -r allenact_plugins/gym_plugin/extra_requirements.txt
```

The environments for this tutorial use the [MuJoCo](http://www.mujoco.org/) (**Mu**lti-**Jo**int dynamics in
**Co**ntact) physics simulator, which must also be installed properly, following the instructions
[here](https://github.com/openai/mujoco-py).

## The task

For this tutorial, we'll focus on one of the continuous-control environments under the `mujoco` group of `gym`
environments: [Ant-v2](https://gym.openai.com/envs/Ant-v2/). In this task, the goal is to make a four-legged
creature, "ant", walk forward as fast as possible. A random agent in "Ant-v2" is shown below.

![The Ant-v2 task](https://ai2-prior-allenact-public-assets.s3.us-west-2.amazonaws.com/tutorials/gym-mujoco/ant_random.gif).

To achieve this goal, we need to provide continuous control for the agent so that it moves forward with as
high an `x` velocity as possible for at most 1000 episode steps. The episode ends in failure (the agent is
"done") if the `z` position leaves the range [0.2, 1.0]. The action space has 8 dimensions, and the
observation space has 111 dimensions that map to different body parts: the 3D position `(x,y,z)` and
orientation (quaternion `x`,`y`,`z`,`w`) of the torso, the joint angles, the 3D velocity `(x,y,z)`, the 3D
angular velocity `(x,y,z)`, and the joint velocities. The reward for the "ant" agent is composed of a forward
reward, a healthy reward, a control cost, and a contact cost.

## Implementation

For this tutorial, we'll use the readily available `gym_plugin`, which includes a
[wrapper for `gym` environments](../api/allenact_plugins/gym_plugin/gym_environment.md#gymenvironment), a
[task sampler](../api/allenact_plugins/gym_plugin/gym_tasks.md#gymtasksampler) and
[task definition](../api/allenact_plugins/gym_plugin/gym_tasks.md#gymcontinuousbox2dtask), a
[sensor](../api/allenact_plugins/gym_plugin/gym_sensors.md#gymbox2dsensor) to wrap the observations provided
by the `gym` environment, and a simple
[model](../api/allenact_plugins/gym_plugin/gym_models.md#memorylessactorcritic).
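Before diving in, a quick (hedged) sanity check of the "Ant-v2" spaces described above can be run directly
against `gym` (this assumes a working MuJoCo installation):

```python
import gym

env = gym.make("Ant-v2")
print(env.observation_space)  # a 111-dimensional Box, as described above
print(env.action_space)       # an 8-dimensional Box
env.close()
```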
The experiment config, similar to the one used for the
[Navigation in MiniGrid tutorial](../tutorials/minigrid-tutorial.md), is defined as follows:
"""

# %%
from typing import Dict, Optional, List, Any, cast

import gym
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR

from allenact.algorithms.onpolicy_sync.losses.ppo import PPO
from allenact.base_abstractions.experiment_config import ExperimentConfig, TaskSampler
from allenact.base_abstractions.sensor import SensorSuite
from allenact_plugins.gym_plugin.gym_models import MemorylessActorCritic
from allenact_plugins.gym_plugin.gym_sensors import GymMuJoCoSensor
from allenact_plugins.gym_plugin.gym_tasks import GymTaskSampler
from allenact.utils.experiment_utils import (
    TrainingPipeline,
    Builder,
    PipelineStage,
    LinearDecay,
)
from allenact.utils.viz_utils import VizSuite, AgentViewViz


class GymMuJoCoTutorialExperimentConfig(ExperimentConfig):
    @classmethod
    def tag(cls) -> str:
        return "GymMuJoCoTutorial"

# %%
"""
### Sensors and Model

As mentioned above, we'll use a
[GymMuJoCoSensor](../api/allenact_plugins/gym_plugin/gym_sensors.md#gymmujocosensor) to provide full
observations from the state of the `gym` environment to our model.
"""

# %%
    SENSORS = [
        GymMuJoCoSensor("Ant-v2", uuid="gym_mujoco_data"),
    ]

# %%
"""
We define our `ActorCriticModel` agent using a lightweight implementation with separate MLPs for actors and
critic, [MemorylessActorCritic](../api/allenact_plugins/gym_plugin/gym_models.md#memorylessactorcritic). Since
this is a model for continuous control, note that the superclass of our model is
`ActorCriticModel[GaussianDistr]` instead of `ActorCriticModel[CategoricalDistr]`, since we'll use a
[Gaussian distribution](../api/allenact_plugins/gym_plugin/gym_distributions.md#gaussiandistr) to sample
actions.
"""

# %%
    @classmethod
    def create_model(cls, **kwargs) -> nn.Module:
        """We define our `ActorCriticModel` agent using a lightweight
        implementation with separate MLPs for actors and critic,
        MemorylessActorCritic.

        Since this is a model for continuous control, note that the
        superclass of our model is `ActorCriticModel[GaussianDistr]`
        instead of `ActorCriticModel[CategoricalDistr]`, since we'll use
        a Gaussian distribution to sample actions.
        """
        return MemorylessActorCritic(
            input_uuid="gym_mujoco_data",
            action_space=gym.spaces.Box(
                -3.0, 3.0, (8,), "float32"
            ),  # 8 actors, each in the range [-3.0, 3.0]
            observation_space=SensorSuite(cls.SENSORS).observation_spaces,
            action_std=0.5,
        )

# %%
"""
### Task samplers

We use an available `TaskSampler` implementation for `gym` environments that allows us to sample
[GymTasks](../api/allenact_plugins/gym_plugin/gym_tasks.md#gymtask):
[GymTaskSampler](../api/allenact_plugins/gym_plugin/gym_tasks.md#gymtasksampler). Even though it is possible
to let the task sampler instantiate the proper sensor for the chosen task name (by passing `None`), we use the
sensors we created above, which contain a custom identifier for the actual observation space
(`gym_mujoco_data`) also used by the model.
""" # %% @classmethod def make_sampler_fn(cls, **kwargs) -> TaskSampler: return GymTaskSampler(gym_env_type="Ant-v2", **kwargs) # %% """ For convenience, we will use a `_get_sampler_args` method to generate the task sampler arguments for all three modes, `train, valid, test`: """ # %% def train_task_sampler_args( self, process_ind: int, total_processes: int, devices: Optional[List[int]] = None, seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: return self._get_sampler_args( process_ind=process_ind, mode="train", seeds=seeds ) def valid_task_sampler_args( self, process_ind: int, total_processes: int, devices: Optional[List[int]] = None, seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: return self._get_sampler_args( process_ind=process_ind, mode="valid", seeds=seeds ) def test_task_sampler_args( self, process_ind: int, total_processes: int, devices: Optional[List[int]] = None, seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: return self._get_sampler_args(process_ind=process_ind, mode="test", seeds=seeds) # %% """ Similarly to what we do in the Minigrid navigation tutorial, the task sampler samples random tasks for ever, while, during testing (or validation), we sample a fixed number of tasks. """ # %% def _get_sampler_args( self, process_ind: int, mode: str, seeds: List[int] ) -> Dict[str, Any]: """Generate initialization arguments for train, valid, and test TaskSamplers. # Parameters process_ind : index of the current task sampler mode: one of `train`, `valid`, or `test` """ if mode == "train": max_tasks = None # infinite training tasks task_seeds_list = None # no predefined random seeds for training deterministic_sampling = False # randomly sample tasks in training else: max_tasks = 4 # one seed for each task to sample: # - ensures different seeds for each sampler, and # - ensures a deterministic set of sampled tasks. task_seeds_list = list( range(process_ind * max_tasks, (process_ind + 1) * max_tasks) ) deterministic_sampling = ( True # deterministically sample task in validation/testing ) return dict( gym_env_types=["Ant-v2"], sensors=self.SENSORS, # sensors used to return observations to the agent max_tasks=max_tasks, # see above task_seeds_list=task_seeds_list, # see above deterministic_sampling=deterministic_sampling, # see above seed=seeds[process_ind], ) # %% """ Note that we just sample 4 tasks for validation and testing in this case, which suffice to illustrate the model's success. ### Machine parameters In this tutorial, we just train the model on the CPU. We allocate a larger number of samplers for training (8) than for validation or testing (just 1), and we default to CPU usage by returning an empty list of `devices`. We also include a video visualizer (`AgentViewViz`) in test mode. """ # %% @classmethod def machine_params(cls, mode="train", **kwargs) -> Dict[str, Any]: visualizer = None if mode == "test": visualizer = VizSuite( mode=mode, video_viz=AgentViewViz( label="episode_vid", max_clip_length=400, vector_task_source=("render", {"mode": "rgb_array"}), fps=30, ), ) return { "nprocesses": 8 if mode == "train" else 1, # rollout "devices": [], "visualizer": visualizer, } # %% """ ### Training pipeline The last definition is the training pipeline. In this case, we use a PPO stage with linearly decaying learning rate and 10 single-batch update repeats per rollout. The reward should exceed 4,000 in 20M steps in the test. 
To make the "ant" run at an obviously fast speed, we train the agent using PPO for 3e7 steps.
"""

# %%
    @classmethod
    def training_pipeline(cls, **kwargs) -> TrainingPipeline:
        lr = 3e-4
        ppo_steps = int(3e7)
        clip_param = 0.2
        value_loss_coef = 0.5
        entropy_coef = 0.0
        num_mini_batch = 4  # optimal 64
        update_repeats = 10
        max_grad_norm = 0.5
        num_steps = 2048
        gamma = 0.99
        use_gae = True
        gae_lambda = 0.95
        advance_scene_rollout_period = None
        save_interval = 200000
        metric_accumulate_interval = 50000
        return TrainingPipeline(
            named_losses=dict(
                ppo_loss=PPO(
                    clip_param=clip_param,
                    value_loss_coef=value_loss_coef,
                    entropy_coef=entropy_coef,
                ),
            ),  # type:ignore
            pipeline_stages=[
                PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps),
            ],
            optimizer_builder=Builder(cast(optim.Optimizer, optim.Adam), dict(lr=lr)),
            num_mini_batch=num_mini_batch,
            update_repeats=update_repeats,
            max_grad_norm=max_grad_norm,
            num_steps=num_steps,
            gamma=gamma,
            use_gae=use_gae,
            gae_lambda=gae_lambda,
            advance_scene_rollout_period=advance_scene_rollout_period,
            save_interval=save_interval,
            metric_accumulate_interval=metric_accumulate_interval,
            lr_scheduler_builder=Builder(
                LambdaLR,
                {"lr_lambda": LinearDecay(steps=ppo_steps, startp=1, endp=0)},
            ),
        )

# %%
"""
## Training and validation

We have a complete implementation of this experiment's configuration class in
`projects/tutorials/gym_mujoco_tutorial.py`. To start training from scratch, we just need to invoke

```bash
PYTHONPATH=. python allenact/main.py gym_mujoco_tutorial -b projects/tutorials -m 8 -o /PATH/TO/gym_mujoco_output -s 0 -e
```

from the `allenact` root directory. Note that we include `-e` to enforce deterministic evaluation. Please
refer to the [Navigation in MiniGrid tutorial](../tutorials/minigrid-tutorial.md) if in doubt about the
meaning of the remaining parameters.

If we have Tensorboard installed, we can track progress with

```bash
tensorboard --logdir /PATH/TO/gym_mujoco_output
```

which will default to the URL [http://localhost:6006/](http://localhost:6006/).

After 30,000,000 steps, the script will terminate. If everything went well, the `valid` success rate should
reach 1 and the mean reward should climb above 4,000 within 20,000,000 steps, while the average episode length
should stay at or a little below 1,000.

## Testing

The training start date for the experiment, in `YYYY-MM-DD_HH-MM-SS` format, is used as the name of one of the
subfolders in the path to the checkpoints, saved under the output folder. In order to evaluate (i.e. test) a
collection of checkpoints, we need to pass the `--eval` flag and specify the directory containing the
checkpoints with the `--checkpoint CHECKPOINT_DIR` option:

```bash
PYTHONPATH=. python allenact/main.py gym_mujoco_tutorial \
    -b projects/tutorials \
    -m 1 \
    -o /PATH/TO/gym_mujoco_output \
    -s 0 \
    -e \
    --eval \
    --checkpoint /PATH/TO/gym_mujoco_output/checkpoints/GymMuJoCoTutorial/YOUR_START_DATE
```

If everything went well, the `test` success rate should converge to 1, the mean reward should be above 4,000,
and the average episode length should stay at or a little below 1,000. The output should be something like
this:

![results](https://ai2-prior-allenact-public-assets.s3.us-west-2.amazonaws.com/tutorials/gym-mujoco/ant_test.png).

And the `gif` results can be seen in the images tab of Tensorboard while testing.
![mp4 demo](https://ai2-prior-allenact-public-assets.s3.us-west-2.amazonaws.com/tutorials/gym-mujoco/ant_test.gif) If the test command fails with `pyglet.canvas.xlib.NoSuchDisplayException: Cannot connect to "None"`, e.g. when running remotely, try prepending `DISPLAY=:0.0` to the command above, assuming you have an xserver running with such display available: ```bash DISPLAY=:0.0 PYTHONPATH=. python allenact/main.py gym_mujoco_tutorial \ -b projects/tutorials \ -m 1 \ -o /PATH/TO/gym_mujoco_output \ -s 0 \ -e \ --eval \ --checkpoint /PATH/TO/gym_mujoco_output/checkpoints/GymMuJoCoTutorial/YOUR_START_DATE ``` """
allenact-main
projects/tutorials/gym_mujoco_tutorial.py
allenact-main
projects/tutorials/__init__.py
import glob import os from math import ceil from typing import Dict, Any, List, Optional, Sequence import gym import numpy as np import torch import torch.nn as nn import torch.optim as optim from torch.optim.lr_scheduler import LambdaLR from torchvision import models from allenact.algorithms.onpolicy_sync.losses import PPO from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig from allenact.base_abstractions.experiment_config import ExperimentConfig, MachineParams from allenact.base_abstractions.preprocessor import SensorPreprocessorGraph from allenact.base_abstractions.sensor import SensorSuite from allenact.base_abstractions.task import TaskSampler from allenact.embodiedai.preprocessors.resnet import ResNetPreprocessor from allenact.utils.experiment_utils import ( Builder, PipelineStage, TrainingPipeline, LinearDecay, evenly_distribute_count_into_bins, ) from allenact_plugins.ithor_plugin.ithor_sensors import RGBSensorThor from allenact_plugins.navigation_plugin.objectnav.models import ( ResnetTensorNavActorCritic, ) from allenact_plugins.robothor_plugin.robothor_sensors import GPSCompassSensorRoboThor from allenact_plugins.robothor_plugin.robothor_task_samplers import ( PointNavDatasetTaskSampler, ) from allenact_plugins.robothor_plugin.robothor_tasks import PointNavTask class PointNaviThorRGBPPOExperimentConfig(ExperimentConfig): """A Point Navigation experiment configuration in iTHOR.""" # Task Parameters MAX_STEPS = 500 REWARD_CONFIG = { "step_penalty": -0.01, "goal_success_reward": 10.0, "failed_stop_reward": 0.0, "shaping_weight": 1.0, } # Simulator Parameters CAMERA_WIDTH = 640 CAMERA_HEIGHT = 480 SCREEN_SIZE = 224 # Training Engine Parameters ADVANCE_SCENE_ROLLOUT_PERIOD: Optional[int] = None NUM_PROCESSES = 60 TRAINING_GPUS = list(range(torch.cuda.device_count())) VALIDATION_GPUS = [torch.cuda.device_count() - 1] TESTING_GPUS = [torch.cuda.device_count() - 1] # Dataset Parameters TRAIN_DATASET_DIR = os.path.join(os.getcwd(), "datasets/ithor-objectnav/train") VAL_DATASET_DIR = os.path.join(os.getcwd(), "datasets/ithor-objectnav/val") SENSORS = [ RGBSensorThor( height=SCREEN_SIZE, width=SCREEN_SIZE, use_resnet_normalization=True, uuid="rgb_lowres", ), GPSCompassSensorRoboThor(), ] PREPROCESSORS = [ Builder( ResNetPreprocessor, { "input_height": SCREEN_SIZE, "input_width": SCREEN_SIZE, "output_width": 7, "output_height": 7, "output_dims": 512, "pool": False, "torchvision_resnet_model": models.resnet18, "input_uuids": ["rgb_lowres"], "output_uuid": "rgb_resnet", }, ), ] OBSERVATIONS = [ "rgb_resnet", "target_coordinates_ind", ] ENV_ARGS = dict( width=CAMERA_WIDTH, height=CAMERA_HEIGHT, rotateStepDegrees=30.0, visibilityDistance=1.0, gridSize=0.25, ) @classmethod def tag(cls): return "PointNavithorRGBPPO" @classmethod def training_pipeline(cls, **kwargs): ppo_steps = int(250000000) lr = 3e-4 num_mini_batch = 1 update_repeats = 3 num_steps = 30 save_interval = 5000000 log_interval = 10000 gamma = 0.99 use_gae = True gae_lambda = 0.95 max_grad_norm = 0.5 return TrainingPipeline( save_interval=save_interval, metric_accumulate_interval=log_interval, optimizer_builder=Builder(optim.Adam, dict(lr=lr)), num_mini_batch=num_mini_batch, update_repeats=update_repeats, max_grad_norm=max_grad_norm, num_steps=num_steps, named_losses={"ppo_loss": PPO(**PPOConfig)}, gamma=gamma, use_gae=use_gae, gae_lambda=gae_lambda, advance_scene_rollout_period=cls.ADVANCE_SCENE_ROLLOUT_PERIOD, pipeline_stages=[ PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps) ], 
            lr_scheduler_builder=Builder(
                LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
            ),
        )

    def machine_params(self, mode="train", **kwargs):
        sampler_devices: Sequence[int] = []
        if mode == "train":
            workers_per_device = 1
            gpu_ids = (
                []
                if not torch.cuda.is_available()
                else self.TRAINING_GPUS * workers_per_device
            )
            nprocesses = (
                1
                if not torch.cuda.is_available()
                else evenly_distribute_count_into_bins(self.NUM_PROCESSES, len(gpu_ids))
            )
            sampler_devices = self.TRAINING_GPUS
        elif mode == "valid":
            nprocesses = 1
            gpu_ids = [] if not torch.cuda.is_available() else self.VALIDATION_GPUS
        elif mode == "test":
            nprocesses = 1
            gpu_ids = [] if not torch.cuda.is_available() else self.TESTING_GPUS
        else:
            raise NotImplementedError("mode must be 'train', 'valid', or 'test'.")

        sensor_preprocessor_graph = (
            SensorPreprocessorGraph(
                source_observation_spaces=SensorSuite(self.SENSORS).observation_spaces,
                preprocessors=self.PREPROCESSORS,
            )
            if mode == "train"
            or (
                (isinstance(nprocesses, int) and nprocesses > 0)
                or (isinstance(nprocesses, Sequence) and sum(nprocesses) > 0)
            )
            else None
        )

        return MachineParams(
            nprocesses=nprocesses,
            devices=gpu_ids,
            sampler_devices=sampler_devices
            if mode == "train"
            else gpu_ids,  # ignored with > 1 gpu_ids
            sensor_preprocessor_graph=sensor_preprocessor_graph,
        )

    # Define Model
    @classmethod
    def create_model(cls, **kwargs) -> nn.Module:
        return ResnetTensorNavActorCritic(
            action_space=gym.spaces.Discrete(len(PointNavTask.class_action_names())),
            observation_space=kwargs["sensor_preprocessor_graph"].observation_spaces,
            goal_sensor_uuid="target_coordinates_ind",
            rgb_resnet_preprocessor_uuid="rgb_resnet",
            hidden_size=512,
            goal_dims=32,
        )

    # Define Task Sampler
    @classmethod
    def make_sampler_fn(cls, **kwargs) -> TaskSampler:
        return PointNavDatasetTaskSampler(**kwargs)

    # Utility Functions for distributing scenes between GPUs
    @staticmethod
    def _partition_inds(n: int, num_parts: int):
        return np.round(np.linspace(0, n, num_parts + 1, endpoint=True)).astype(
            np.int32
        )

    def _get_sampler_args_for_scene_split(
        self,
        scenes_dir: str,
        process_ind: int,
        total_processes: int,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        path = os.path.join(scenes_dir, "*.json.gz")
        scenes = [scene.split("/")[-1].split(".")[0] for scene in glob.glob(path)]
        if len(scenes) == 0:
            raise RuntimeError(
                (
                    "Could find no scene dataset information in directory {}."
                    " Are you sure you've downloaded them? "
                    " If not, see https://allenact.org/installation/download-datasets/ for information"
                    " on how this can be done."
                ).format(scenes_dir)
            )

        if total_processes > len(scenes):  # oversample some scenes -> bias
            if total_processes % len(scenes) != 0:
                print(
                    "Warning: oversampling some of the scenes to feed all processes."
                    " You can avoid this by setting a number of workers divisible by the number of scenes"
                )
            scenes = scenes * int(ceil(total_processes / len(scenes)))
            scenes = scenes[: total_processes * (len(scenes) // total_processes)]
        else:
            if len(scenes) % total_processes != 0:
                print(
                    "Warning: oversampling some of the scenes to feed all processes."
" You can avoid this by setting a number of workers divisor of the number of scenes" ) inds = self._partition_inds(len(scenes), total_processes) return { "scenes": scenes[inds[process_ind] : inds[process_ind + 1]], "max_steps": self.MAX_STEPS, "sensors": self.SENSORS, "action_space": gym.spaces.Discrete(len(PointNavTask.class_action_names())), "seed": seeds[process_ind] if seeds is not None else None, "deterministic_cudnn": deterministic_cudnn, "rewards_config": self.REWARD_CONFIG, } def train_task_sampler_args( self, process_ind: int, total_processes: int, devices: Optional[List[int]] = None, seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: res = self._get_sampler_args_for_scene_split( os.path.join(self.TRAIN_DATASET_DIR, "episodes"), process_ind, total_processes, seeds=seeds, deterministic_cudnn=deterministic_cudnn, ) res["scene_directory"] = self.TRAIN_DATASET_DIR res["loop_dataset"] = True res["env_args"] = {} res["env_args"].update(self.ENV_ARGS) res["env_args"]["x_display"] = ( ("0.%d" % devices[process_ind % len(devices)]) if devices is not None and len(devices) > 0 else None ) res["allow_flipping"] = True return res def valid_task_sampler_args( self, process_ind: int, total_processes: int, devices: Optional[List[int]] = None, seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: res = self._get_sampler_args_for_scene_split( os.path.join(self.VAL_DATASET_DIR, "episodes"), process_ind, total_processes, seeds=seeds, deterministic_cudnn=deterministic_cudnn, ) res["scene_directory"] = self.VAL_DATASET_DIR res["loop_dataset"] = False res["env_args"] = {} res["env_args"].update(self.ENV_ARGS) res["env_args"]["x_display"] = ( ("0.%d" % devices[process_ind % len(devices)]) if devices is not None and len(devices) > 0 else None ) return res def test_task_sampler_args( self, process_ind: int, total_processes: int, devices: Optional[List[int]] = None, seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: res = self._get_sampler_args_for_scene_split( os.path.join(self.VAL_DATASET_DIR, "episodes"), process_ind, total_processes, seeds=seeds, deterministic_cudnn=deterministic_cudnn, ) res["scene_directory"] = self.VAL_DATASET_DIR res["loop_dataset"] = False res["env_args"] = {} res["env_args"].update(self.ENV_ARGS) return res
allenact-main
projects/tutorials/pointnav_ithor_rgb_ddppo.py
from projects.tutorials.object_nav_ithor_dagger_then_ppo_one_object import (
    ObjectNavThorDaggerThenPPOExperimentConfig,
)
from allenact.utils.viz_utils import (
    VizSuite,
    TrajectoryViz,
    AgentViewViz,
    ActorViz,
    TensorViz1D,
)
from allenact_plugins.ithor_plugin.ithor_viz import ThorViz


class ObjectNavThorDaggerThenPPOVizExperimentConfig(
    ObjectNavThorDaggerThenPPOExperimentConfig
):
    """A simple object navigation experiment in THOR.

    Training with DAgger and then PPO, with visualization enabled for
    test.
    """

    TEST_SAMPLES_IN_SCENE = 4

    @classmethod
    def tag(cls):
        return "ObjectNavThorDaggerThenPPOViz"

    viz = None

    def get_viz(self, mode):
        if self.viz is not None:
            return self.viz

        self.viz = VizSuite(
            mode=mode,
            base_trajectory=TrajectoryViz(
                path_to_target_location=None, path_to_rot_degrees=("rotation",),
            ),
            egocentric=AgentViewViz(max_video_length=100),
            action_probs=ActorViz(figsize=(3.25, 10), fontsize=18),
            taken_action_logprobs=TensorViz1D(),
            episode_mask=TensorViz1D(rollout_source=("masks",)),
            thor_trajectory=ThorViz(
                path_to_target_location=None, figsize=(8, 8), viz_rows_cols=(448, 448),
            ),
        )

        return self.viz

    def machine_params(self, mode="train", **kwargs):
        params = super().machine_params(mode, **kwargs)
        if mode == "test":
            params.set_visualizer(self.get_viz(mode))

        return params
allenact-main
projects/tutorials/object_nav_ithor_dagger_then_ppo_one_object_viz.py
# literate: tutorials/training-a-pointnav-model.md
# %%
"""# Tutorial: PointNav in RoboTHOR."""

# %%
"""
![RoboTHOR Robot](../img/RoboTHOR_robot.jpg)

## Introduction

One of the most obvious tasks that an embodied agent should master is navigating the world it inhabits. Before
we can teach a robot to cook or clean, it first needs to be able to move around. The simplest way to formulate
"moving around" into a task is by making your agent find a beacon somewhere in the environment. This beacon
transmits its location, such that at any time the agent can get the direction and Euclidean distance to the
beacon. This particular task is often called Point Navigation, or **PointNav** for short.

#### PointNav

At first glance, this task seems trivial. If the agent is given the direction and distance of the target at
all times, can it not simply follow this signal directly? The answer is no, because agents are often trained
on this task in environments that emulate real-world buildings, which are not wide-open spaces but rather
contain many smaller rooms. Because of this, the agent has to learn to navigate human spaces and use doors and
hallways to efficiently navigate from one side of the building to the other. This task becomes particularly
difficult when the agent is tested in an environment that it is not trained in. If the agent does not know
what the floor plan of an environment looks like, it has to learn to predict the design of man-made structures
in order to navigate across them efficiently, much like people instinctively know how to move around a
building they have never seen before based on their experience navigating similar buildings.

#### What is an environment anyways?

Environments are worlds in which embodied agents exist. If our embodied agent is simply a neural network that
is being trained in a simulator, then that simulator is its environment. Similarly, if our agent is a physical
robot, then its environment is the real world. The agent interacts with the environment by taking one of
several available actions (such as "move forward" or "turn left"). After each action, the environment produces
a new frame that the agent can analyze to determine its next step. For many tasks, including PointNav, the
agent also has a special "stop" action which indicates that the agent thinks it has reached the target. After
this action is called, the agent will be reset to a new location, regardless of whether it reached the target.
The hope is that after enough training the agent will learn to correctly assess that it has successfully
navigated to the target.

![RoboTHOR Sim vs. Real](../img/RoboTHOR_sim_real.jpg)

There are many simulators designed for the training of embodied agents. In this tutorial, we will be using a
simulator called [RoboTHOR](https://ai2thor.allenai.org/robothor/), which is designed specifically to train
models that can easily be transferred to a real robot by providing a photo-realistic virtual environment and a
real-world replica of the environment that researchers can have access to. RoboTHOR contains 60 different
virtual scenes with different floor plans and furniture, as well as 15 validation scenes.

It is also important to mention that **AllenAct** has a class abstraction called Environment. This is not the
actual simulator game engine or robotics controller, but rather a shallow wrapper that provides a uniform
interface to the actual environment.

#### Learning algorithm

Finally, let us briefly touch on the algorithm that we will use to train our embodied agent to navigate.
While *AllenAct* offers us great flexibility to train models using complex pipelines, we will be using a simple pure reinforcement learning approach for this tutorial. More specifically, we will be using DD-PPO, a decentralized and distributed variant of the ubiquitous PPO algorithm. For those unfamiliar with Reinforcement Learning we highly recommend [this tutorial](http://karpathy.github.io/2016/05/31/rl/) by Andrej Karpathy, and [this book](http://www.incompleteideas.net/book/the-book-2nd.html) by Sutton and Barto. Essentially what we are doing is letting our agent explore the environment on its own, rewarding it for taking actions that bring it closer to its goal and penalizing it for actions that take it away from its goal. We then optimize the agent's model to maximize this reward. ## Requirements To train the model on the PointNav task, we need to [install the RoboTHOR environment](../installation/installation-framework.md) and [download the RoboTHOR PointNav dataset](../installation/download-datasets.md) The dataset contains a list of episodes with thousands of randomly generated starting positions and target locations for each of the scenes as well as a precomputed cache of distances, containing the shortest path from each point in a scene, to every other point in that scene. This is used to reward the agent for moving closer to the target in terms of geodesic distance - the actual path distance (as opposed to a straight line distance). ## Config File Setup Now comes the most important part of the tutorial, we are going to write an experiment config file. If this is your first experience with experiment config files in AllenAct, we suggest that you first see our how-to on [defining an experiment](../howtos/defining-an-experiment.md) which will walk you through creating a simplified experiment config file. Unlike a library that can be imported into python, **AllenAct** is structured as a framework with a runner script called `main.py` which will run the experiment specified in a config file. This design forces us to keep meticulous records of exactly which settings were used to produce a particular result, which can be very useful given how expensive RL models are to train. The `projects/` directory is home to different projects using `AllenAct`. Currently it is populated with baselines of popular tasks and tutorials. We already have all the code for this tutorial stored in `projects/tutorials/training_a_pointnav_model.py`. We will be using this file to run our experiments, but you can create a new directory in `projects/` and start writing your experiment there. 
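As a brief aside before we dive into the config: to make the geodesic-versus-straight-line distinction from the Requirements section concrete, below is a toy sketch (not part of the tutorial code; the grid and coordinates are made up for illustration) that computes both distances on a small occupancy grid.

```python
from collections import deque
import math

# Toy 5x5 occupancy grid: 0 = free, 1 = wall. Walls force a detour, so the
# geodesic (shortest-path) distance can exceed the straight-line distance.
GRID = [
    [0, 0, 0, 0, 0],
    [0, 1, 1, 1, 0],
    [0, 0, 0, 1, 0],
    [1, 1, 0, 1, 0],
    [0, 0, 0, 0, 0],
]

def geodesic(start, goal):
    # Breadth-first search over 4-connected free cells.
    frontier, seen = deque([(start, 0)]), {start}
    while frontier:
        (r, c), d = frontier.popleft()
        if (r, c) == goal:
            return d
        for nr, nc in ((r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)):
            if 0 <= nr < 5 and 0 <= nc < 5 and GRID[nr][nc] == 0 and (nr, nc) not in seen:
                seen.add((nr, nc))
                frontier.append(((nr, nc), d + 1))
    return math.inf

start, goal = (0, 0), (2, 2)
print("straight-line:", math.hypot(goal[0] - start[0], goal[1] - start[1]))  # ~2.83
print("geodesic:     ", geodesic(start, goal))                               # 4 (detour around walls)
```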
We start off by importing everything we will need:
"""

# %%
import glob
import os
from math import ceil
from typing import Dict, Any, List, Optional, Sequence

import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from torchvision import models

from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.base_abstractions.experiment_config import ExperimentConfig, MachineParams
from allenact.base_abstractions.preprocessor import SensorPreprocessorGraph
from allenact.base_abstractions.sensor import SensorSuite
from allenact.base_abstractions.task import TaskSampler
from allenact.embodiedai.preprocessors.resnet import ResNetPreprocessor
from allenact.utils.experiment_utils import (
    Builder,
    PipelineStage,
    TrainingPipeline,
    LinearDecay,
    evenly_distribute_count_into_bins,
)
from allenact_plugins.ithor_plugin.ithor_sensors import RGBSensorThor
from allenact_plugins.navigation_plugin.objectnav.models import (
    ResnetTensorNavActorCritic,
)
from allenact_plugins.robothor_plugin.robothor_sensors import GPSCompassSensorRoboThor
from allenact_plugins.robothor_plugin.robothor_task_samplers import (
    PointNavDatasetTaskSampler,
)
from allenact_plugins.robothor_plugin.robothor_tasks import PointNavTask

# %%
"""Next, we define a new experiment config class:"""

# %%
class PointNavRoboThorRGBPPOExperimentConfig(ExperimentConfig):
    """A Point Navigation experiment configuration in RoboThor."""

    # %%
    """
    We then define the task parameters. For PointNav, these include the maximum number of steps our agent can take before being reset (this prevents the agent from wandering on forever), and a configuration for the reward function that we will be using.
    """

    # %%
    # Task Parameters
    MAX_STEPS = 500
    REWARD_CONFIG = {
        "step_penalty": -0.01,
        "goal_success_reward": 10.0,
        "failed_stop_reward": 0.0,
        "shaping_weight": 1.0,
    }

    # %%
    """
    In this case, we set the maximum number of steps to 500. We give the agent a reward of -0.01 for each action that it takes (this is to encourage it to reach the goal in as few actions as possible), and a reward of 10.0 if the agent manages to successfully reach its destination. If the agent selects the `stop` action without reaching the target, we do not punish it (although this is sometimes useful for preventing the agent from stopping prematurely). Finally, our agent gets rewarded if it moves closer to the target and gets punished if it moves further away. `shaping_weight` controls how strong this signal should be and is here set to 1.0. These parameters work well for training an agent on PointNav, but feel free to play around with them.

    Next, we set the parameters of the simulator itself. Here we select a resolution at which the engine will render every frame (640 by 480) and a resolution at which the image will be fed into the neural network (here it is set to a 224 by 224 box).
    """

    # %%
    # Simulator Parameters
    CAMERA_WIDTH = 640
    CAMERA_HEIGHT = 480
    SCREEN_SIZE = 224

    # %%
    """
    Next, we set the hardware parameters for the training engine. `NUM_PROCESSES` sets the total number of parallel processes that will be used to train the model. In general, more processes result in faster training, but since each process is a unique instance of the environment in which we are training they can take up a lot of memory. Depending on the size of the model, the environment, and the hardware we are using, we may need to adjust this number, but for a setup with 8 GTX Titans, 60 processes work fine. 60 also happens to be the number of training scenes in RoboTHOR, which allows each process to load only a single scene into memory, saving time and space.

    `TRAINING_GPUS` takes the ids of the GPUS on which the model should be trained. Similarly `VALIDATION_GPUS` and `TESTING_GPUS` hold the ids of the GPUS on which the validation and testing will occur. During training, a validation process is constantly running and evaluating the current model, to show the progress on the validation set, so reserving a GPU for validation can be a good idea. If our hardware setup does not include a GPU, these fields can be set to empty lists, as the codebase will default to running everything on the CPU with only 1 process.
    """

    # %%
    ADVANCE_SCENE_ROLLOUT_PERIOD: Optional[int] = None
    NUM_PROCESSES = 20
    TRAINING_GPUS: Sequence[int] = [0]
    VALIDATION_GPUS: Sequence[int] = [0]
    TESTING_GPUS: Sequence[int] = [0]

    # %%
    """
    Since we are using a dataset to train our model, we need to define the path to where we have stored it. If we downloaded the dataset as instructed above, we can define the path as follows:
    """

    # %%
    TRAIN_DATASET_DIR = os.path.join(os.getcwd(), "datasets/robothor-pointnav/debug")
    VAL_DATASET_DIR = os.path.join(os.getcwd(), "datasets/robothor-pointnav/debug")

    # %%
    """
    Next, we define the sensors. `RGBSensorThor` is the environment's implementation of an RGB sensor. It takes the raw image output by the simulator and resizes it to the input dimensions for our neural network that we specified above. It also performs normalization if we want. `GPSCompassSensorRoboThor` is a sensor that tracks the point our agent needs to move to. It tells us the direction and distance to our goal at every time step.
    """

    # %%
    SENSORS = [
        RGBSensorThor(
            height=SCREEN_SIZE,
            width=SCREEN_SIZE,
            use_resnet_normalization=True,
            uuid="rgb_lowres",
        ),
        GPSCompassSensorRoboThor(),
    ]

    # %%
    """
    For the sake of this example, we are also going to be using a preprocessor with our model. In *AllenAct* the preprocessor abstraction is designed with large models with frozen weights in mind. These models often hail from the ResNet family and transform the raw pixels that our agent observes in the environment into a complex embedding, which then gets stored and used as input to our trainable model instead of the original image. Most other preprocessing work is done in the sensor classes (as we just saw with the RGB sensor scaling and normalizing our input), but for the sake of efficiency, all neural network preprocessing should use this abstraction.
    """

    # %%
    PREPROCESSORS = [
        Builder(
            ResNetPreprocessor,
            {
                "input_height": SCREEN_SIZE,
                "input_width": SCREEN_SIZE,
                "output_width": 7,
                "output_height": 7,
                "output_dims": 512,
                "pool": False,
                "torchvision_resnet_model": models.resnet18,
                "input_uuids": ["rgb_lowres"],
                "output_uuid": "rgb_resnet",
            },
        ),
    ]

    # %%
    """
    Next, we must define all of the observation inputs that our model will use. These are just the hardcoded ids of the sensors we are using in the experiment.
    """

    # %%
    OBSERVATIONS = [
        "rgb_resnet",
        "target_coordinates_ind",
    ]

    # %%
    """
    Finally, we must define the settings of our simulator. We set the camera dimensions to the values we defined earlier. We set rotateStepDegrees to 30 degrees, which means that every time the agent takes a turn action, it will rotate by 30 degrees. We set grid size to 0.25, which means that every time the agent moves forward, it will do so by 0.25 meters.
    """

    # %%
    ENV_ARGS = dict(
        width=CAMERA_WIDTH,
        height=CAMERA_HEIGHT,
        rotateStepDegrees=30.0,
        visibilityDistance=1.0,
        gridSize=0.25,
        agentMode="bot",
    )

    # %%
    """
    Now we move on to the methods that we must define to finish implementing an experiment config. First, we have a simple method that just returns the name of the experiment.
    """

    # %%
    @classmethod
    def tag(cls):
        return "PointNavRobothorRGBPPO"

    # %%
    """
    Next, we define the training pipeline. In this function, we specify exactly which algorithm or algorithms we will use to train our model. In this simple example, we are using the PPO loss with a learning rate of 3e-4. We specify 250 million steps of training and a rollout length of 30 with the `ppo_steps` and `num_steps` parameters respectively. All the other standard PPO parameters are also present in this function. `metric_accumulate_interval` sets the frequency at which data is accumulated from all the processes and logged while `save_interval` sets how often we save the model weights and run validation on them.
    """

    # %%
    @classmethod
    def training_pipeline(cls, **kwargs):
        ppo_steps = int(250000000)
        lr = 3e-4
        num_mini_batch = 1
        update_repeats = 3
        num_steps = 30
        save_interval = 5000000
        log_interval = 1000
        gamma = 0.99
        use_gae = True
        gae_lambda = 0.95
        max_grad_norm = 0.5
        return TrainingPipeline(
            save_interval=save_interval,
            metric_accumulate_interval=log_interval,
            optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
            num_mini_batch=num_mini_batch,
            update_repeats=update_repeats,
            max_grad_norm=max_grad_norm,
            num_steps=num_steps,
            named_losses={"ppo_loss": PPO(**PPOConfig)},
            gamma=gamma,
            use_gae=use_gae,
            gae_lambda=gae_lambda,
            advance_scene_rollout_period=cls.ADVANCE_SCENE_ROLLOUT_PERIOD,
            pipeline_stages=[
                PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps)
            ],
            lr_scheduler_builder=Builder(
                LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
            ),
        )

    # %%
    """
    The `machine_params` method returns the hardware parameters of each process, based on the list of devices we defined above.
    """

    # %%
    def machine_params(self, mode="train", **kwargs):
        sampler_devices: List[int] = []
        if mode == "train":
            workers_per_device = 1
            gpu_ids = (
                []
                if not torch.cuda.is_available()
                else list(self.TRAINING_GPUS) * workers_per_device
            )
            nprocesses = (
                8
                if not torch.cuda.is_available()
                else evenly_distribute_count_into_bins(self.NUM_PROCESSES, len(gpu_ids))
            )
            sampler_devices = list(self.TRAINING_GPUS)
        elif mode == "valid":
            nprocesses = 1
            gpu_ids = [] if not torch.cuda.is_available() else self.VALIDATION_GPUS
        elif mode == "test":
            nprocesses = 1
            gpu_ids = [] if not torch.cuda.is_available() else self.TESTING_GPUS
        else:
            raise NotImplementedError("mode must be 'train', 'valid', or 'test'.")

        sensor_preprocessor_graph = (
            SensorPreprocessorGraph(
                source_observation_spaces=SensorSuite(self.SENSORS).observation_spaces,
                preprocessors=self.PREPROCESSORS,
            )
            if mode == "train"
            or (
                (isinstance(nprocesses, int) and nprocesses > 0)
                or (isinstance(nprocesses, Sequence) and sum(nprocesses) > 0)
            )
            else None
        )

        return MachineParams(
            nprocesses=nprocesses,
            devices=gpu_ids,
            sampler_devices=sampler_devices
            if mode == "train"
            else gpu_ids,  # ignored with > 1 gpu_ids
            sensor_preprocessor_graph=sensor_preprocessor_graph,
        )

    # %%
    """
    Now we define the actual model that we will be using. **AllenAct** offers first-class support for PyTorch, so any PyTorch model that implements the provided `ActorCriticModel` class will work here.
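    To build intuition for what an actor-critic model must produce, here is a minimal, framework-independent sketch (plain PyTorch; the class name and sizes are made up, and it ignores AllenAct-specific details such as recurrent memory and the exact `ActorCriticModel` interface): a shared trunk feeding a policy head (a distribution over actions) and a value head (a scalar state-value estimate).

    ```python
    import torch
    import torch.nn as nn
    from torch.distributions import Categorical

    class TinyActorCritic(nn.Module):
        # Shared trunk + two heads: policy logits (actor) and state value (critic).
        def __init__(self, obs_dim: int, num_actions: int, hidden: int = 128):
            super().__init__()
            self.trunk = nn.Sequential(nn.Linear(obs_dim, hidden), nn.ReLU())
            self.policy = nn.Linear(hidden, num_actions)
            self.value = nn.Linear(hidden, 1)

        def forward(self, obs: torch.Tensor):
            h = self.trunk(obs)
            return Categorical(logits=self.policy(h)), self.value(h)

    # Sample an action and get the value estimate for a fake observation:
    model = TinyActorCritic(obs_dim=16, num_actions=4)
    dist, value = model(torch.randn(1, 16))
    action = dist.sample()
    ```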
    Here we borrow a model from the `pointnav_baselines` project (which unsurprisingly contains several PointNav baselines). It is a small convolutional network that expects the output of a ResNet as its RGB input, followed by a single-layer GRU. The model accepts as input the number of different actions our agent can perform in the environment through the `action_space` parameter, which we get from the task definition. We also define the shape of the inputs we are going to be passing to the model with `observation_space`. We specify the names of our sensors with `goal_sensor_uuid` and `rgb_resnet_preprocessor_uuid`. Finally, we define the size of our RNN with `hidden_size` and the size of the embedding of our goal sensor data (the direction and distance to the target) with `goal_dims`.
    """

    # %%
    @classmethod
    def create_model(cls, **kwargs) -> nn.Module:
        return ResnetTensorNavActorCritic(
            action_space=gym.spaces.Discrete(len(PointNavTask.class_action_names())),
            observation_space=kwargs["sensor_preprocessor_graph"].observation_spaces,
            goal_sensor_uuid="target_coordinates_ind",
            rgb_resnet_preprocessor_uuid="rgb_resnet",
            hidden_size=512,
            goal_dims=32,
        )

    # %%
    """
    We also need to define the task sampler that we will be using. This is a piece of code that generates instances of tasks for our agent to perform (essentially starting locations and targets for PointNav). Since we are getting our tasks from a dataset, the task sampler is a very simple piece of code that just reads the specified file and sets the agent to the next starting location whenever the agent exceeds the maximum number of steps or selects the `stop` action.
    """

    # %%
    @classmethod
    def make_sampler_fn(cls, **kwargs) -> TaskSampler:
        return PointNavDatasetTaskSampler(**kwargs)

    # %%
    """
    You might notice that we did not specify the task sampler's arguments, but are rather passing them in. The reason for this is that each process will have its own task sampler, and we need to specify exactly which scenes each process should work with. If we have several GPUs and many scenes, this process of distributing the work can be rather complicated, so we define a few helper functions to do just this.
    """

    # %%
    @staticmethod
    def _partition_inds(n: int, num_parts: int):
        return np.round(np.linspace(0, n, num_parts + 1, endpoint=True)).astype(
            np.int32
        )

    def _get_sampler_args_for_scene_split(
        self,
        scenes_dir: str,
        process_ind: int,
        total_processes: int,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        path = os.path.join(scenes_dir, "*.json.gz")
        scenes = [scene.split("/")[-1].split(".")[0] for scene in glob.glob(path)]
        if len(scenes) == 0:
            raise RuntimeError(
                (
                    "Could not find any scene dataset information in directory {}."
                    " Are you sure you've downloaded them? "
                    " If not, see https://allenact.org/installation/download-datasets/ for information"
                    " on how this can be done."
                ).format(scenes_dir)
            )
        if total_processes > len(scenes):  # oversample some scenes -> bias
            if total_processes % len(scenes) != 0:
                print(
                    "Warning: oversampling some of the scenes to feed all processes."
                    " You can avoid this by setting a number of workers divisible by the number of scenes"
                )
            scenes = scenes * int(ceil(total_processes / len(scenes)))
            scenes = scenes[: total_processes * (len(scenes) // total_processes)]
        else:
            if len(scenes) % total_processes != 0:
                print(
                    "Warning: oversampling some of the scenes to feed all processes."
                    " You can avoid this by setting a number of workers that divides the number of scenes"
                )
        inds = self._partition_inds(len(scenes), total_processes)

        return {
            "scenes": scenes[inds[process_ind] : inds[process_ind + 1]],
            "max_steps": self.MAX_STEPS,
            "sensors": self.SENSORS,
            "action_space": gym.spaces.Discrete(len(PointNavTask.class_action_names())),
            "seed": seeds[process_ind] if seeds is not None else None,
            "deterministic_cudnn": deterministic_cudnn,
            "rewards_config": self.REWARD_CONFIG,
        }

    # %%
    """
    The very last things we need to define are the sampler arguments themselves. We define them separately for a train, validation, and test sampler, but in this case, they are almost the same. The arguments need to include the location of the dataset and distance cache as well as the environment arguments for our simulator, both of which we defined above and are just referencing here. The only consequential differences between these task samplers are the path to the dataset we are using (train or validation) and whether we want to loop over the dataset or not (we want this for training since we want to train for several epochs, but we do not need this for validation and testing). Since the test scenes of RoboTHOR are private, we are also testing on our validation set.
    """

    # %%
    def train_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        res = self._get_sampler_args_for_scene_split(
            os.path.join(self.TRAIN_DATASET_DIR, "episodes"),
            process_ind,
            total_processes,
            seeds=seeds,
            deterministic_cudnn=deterministic_cudnn,
        )
        res["scene_directory"] = self.TRAIN_DATASET_DIR
        res["loop_dataset"] = True
        res["env_args"] = {}
        res["env_args"].update(self.ENV_ARGS)
        res["env_args"]["x_display"] = (
            ("0.%d" % devices[process_ind % len(devices)])
            if devices is not None and len(devices) > 0
            else None
        )
        res["allow_flipping"] = True
        return res

    def valid_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        res = self._get_sampler_args_for_scene_split(
            os.path.join(self.VAL_DATASET_DIR, "episodes"),
            process_ind,
            total_processes,
            seeds=seeds,
            deterministic_cudnn=deterministic_cudnn,
        )
        res["scene_directory"] = self.VAL_DATASET_DIR
        res["loop_dataset"] = False
        res["env_args"] = {}
        res["env_args"].update(self.ENV_ARGS)
        res["env_args"]["x_display"] = (
            ("0.%d" % devices[process_ind % len(devices)])
            if devices is not None and len(devices) > 0
            else None
        )
        return res

    def test_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        res = self._get_sampler_args_for_scene_split(
            os.path.join(self.VAL_DATASET_DIR, "episodes"),
            process_ind,
            total_processes,
            seeds=seeds,
            deterministic_cudnn=deterministic_cudnn,
        )
        res["scene_directory"] = self.VAL_DATASET_DIR
        res["loop_dataset"] = False
        res["env_args"] = {}
        res["env_args"].update(self.ENV_ARGS)
        return res

# %%
"""
This is it! If we copy all of the code into a file, we should be able to run our experiment!

## Training Model On Debug Dataset

We can test if our installation worked properly by training our model on a small dataset of 4 episodes. This should take about 20 minutes on a computer with an NVIDIA GPU.

We can now train a model by running:

```bash
PYTHONPATH=. python allenact/main.py -o <PATH_TO_OUTPUT> -c -b <BASE_DIRECTORY_OF_YOUR_EXPERIMENT> <EXPERIMENT_NAME>
```

If using the same configuration as we have set up, the following command should work:

```bash
PYTHONPATH=. python allenact/main.py training_a_pointnav_model -o storage/robothor-pointnav-rgb-resnet-resnet -b projects/tutorials
```

If we start up a tensorboard server during training and specify that `output_dir=storage` the output should look something like this:

![tensorboard output](../img/point-nav-baseline-tb.png)

## Training Model On Full Dataset

We can also train the model on the full dataset by changing back our dataset path and running the same command as above. But be aware, training this takes nearly 2 days on a machine with 8 GPUs.

## Testing Model

To test the performance of a model please refer to [this tutorial](running-inference-on-a-pretrained-model.md).

## Conclusion

In this tutorial, we learned how to create a new PointNav experiment using **AllenAct**. There are many simple and obvious ways to modify the experiment from here: changing the model, the learning algorithm and the environment each requires very few lines of code changed in the above file, allowing us to explore our embodied AI research ideas across different frameworks with ease.
"""
allenact-main
projects/tutorials/training_a_pointnav_model.py
from math import ceil from typing import Dict, Any, List, Optional import gym import numpy as np import torch import torch.nn as nn import torch.optim as optim from torch.optim.lr_scheduler import LambdaLR from allenact.algorithms.onpolicy_sync.losses import PPO from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig from allenact.base_abstractions.experiment_config import ExperimentConfig, MachineParams from allenact.base_abstractions.sensor import SensorSuite from allenact.base_abstractions.task import TaskSampler from allenact.utils.experiment_utils import ( Builder, PipelineStage, TrainingPipeline, LinearDecay, ) from allenact_plugins.ithor_plugin.ithor_sensors import ( RGBSensorThor, GoalObjectTypeThorSensor, ) from allenact_plugins.ithor_plugin.ithor_task_samplers import ObjectNavTaskSampler from allenact_plugins.ithor_plugin.ithor_tasks import ObjectNaviThorGridTask from allenact_plugins.navigation_plugin.objectnav.models import ObjectNavActorCritic class ObjectNavThorPPOExperimentConfig(ExperimentConfig): """A simple object navigation experiment in THOR. Training with PPO. """ # A simple setting, train/valid/test are all the same single scene # and we're looking for a single object OBJECT_TYPES = ["Tomato"] TRAIN_SCENES = ["FloorPlan1_physics"] VALID_SCENES = ["FloorPlan1_physics"] TEST_SCENES = ["FloorPlan1_physics"] # Setting up sensors and basic environment details SCREEN_SIZE = 224 SENSORS = [ RGBSensorThor( height=SCREEN_SIZE, width=SCREEN_SIZE, use_resnet_normalization=True, ), GoalObjectTypeThorSensor(object_types=OBJECT_TYPES), ] ENV_ARGS = { "player_screen_height": SCREEN_SIZE, "player_screen_width": SCREEN_SIZE, "quality": "Very Low", } MAX_STEPS = 128 ADVANCE_SCENE_ROLLOUT_PERIOD: Optional[int] = None VALID_SAMPLES_IN_SCENE = 10 TEST_SAMPLES_IN_SCENE = 100 @classmethod def tag(cls): return "ObjectNavThorPPO" @classmethod def training_pipeline(cls, **kwargs): ppo_steps = int(1e6) lr = 2.5e-4 num_mini_batch = 2 if not torch.cuda.is_available() else 6 update_repeats = 4 num_steps = 128 metric_accumulate_interval = cls.MAX_STEPS * 10 # Log every 10 max length tasks save_interval = 10000 gamma = 0.99 use_gae = True gae_lambda = 1.0 max_grad_norm = 0.5 return TrainingPipeline( save_interval=save_interval, metric_accumulate_interval=metric_accumulate_interval, optimizer_builder=Builder(optim.Adam, dict(lr=lr)), num_mini_batch=num_mini_batch, update_repeats=update_repeats, max_grad_norm=max_grad_norm, num_steps=num_steps, named_losses={ "ppo_loss": PPO(clip_decay=LinearDecay(ppo_steps), **PPOConfig), }, gamma=gamma, use_gae=use_gae, gae_lambda=gae_lambda, advance_scene_rollout_period=cls.ADVANCE_SCENE_ROLLOUT_PERIOD, pipeline_stages=[ PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps,), ], lr_scheduler_builder=Builder( LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)} ), ) @classmethod def machine_params(cls, mode="train", **kwargs): num_gpus = torch.cuda.device_count() has_gpu = num_gpus != 0 if mode == "train": nprocesses = 20 if has_gpu else 4 gpu_ids = [0] if has_gpu else [] elif mode == "valid": nprocesses = 1 gpu_ids = [1 % num_gpus] if has_gpu else [] elif mode == "test": nprocesses = 1 gpu_ids = [0] if has_gpu else [] else: raise NotImplementedError("mode must be 'train', 'valid', or 'test'.") return MachineParams(nprocesses=nprocesses, devices=gpu_ids,) @classmethod def create_model(cls, **kwargs) -> nn.Module: return ObjectNavActorCritic( action_space=gym.spaces.Discrete( len(ObjectNaviThorGridTask.class_action_names()) ), 
observation_space=SensorSuite(cls.SENSORS).observation_spaces, rgb_uuid=cls.SENSORS[0].uuid, depth_uuid=None, goal_sensor_uuid="goal_object_type_ind", hidden_size=512, object_type_embedding_dim=8, ) @classmethod def make_sampler_fn(cls, **kwargs) -> TaskSampler: return ObjectNavTaskSampler(**kwargs) @staticmethod def _partition_inds(n: int, num_parts: int): return np.round(np.linspace(0, n, num_parts + 1, endpoint=True)).astype( np.int32 ) def _get_sampler_args_for_scene_split( self, scenes: List[str], process_ind: int, total_processes: int, seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: if total_processes > len(scenes): # oversample some scenes -> bias if total_processes % len(scenes) != 0: print( "Warning: oversampling some of the scenes to feed all processes." " You can avoid this by setting a number of workers divisible by the number of scenes" ) scenes = scenes * int(ceil(total_processes / len(scenes))) scenes = scenes[: total_processes * (len(scenes) // total_processes)] else: if len(scenes) % total_processes != 0: print( "Warning: oversampling some of the scenes to feed all processes." " You can avoid this by setting a number of workers divisor of the number of scenes" ) inds = self._partition_inds(len(scenes), total_processes) return { "scenes": scenes[inds[process_ind] : inds[process_ind + 1]], "object_types": self.OBJECT_TYPES, "env_args": self.ENV_ARGS, "max_steps": self.MAX_STEPS, "sensors": self.SENSORS, "action_space": gym.spaces.Discrete( len(ObjectNaviThorGridTask.class_action_names()) ), "seed": seeds[process_ind] if seeds is not None else None, "deterministic_cudnn": deterministic_cudnn, } def train_task_sampler_args( self, process_ind: int, total_processes: int, devices: Optional[List[int]] = None, seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: res = self._get_sampler_args_for_scene_split( self.TRAIN_SCENES, process_ind, total_processes, seeds=seeds, deterministic_cudnn=deterministic_cudnn, ) res["scene_period"] = "manual" res["env_args"] = {} res["env_args"].update(self.ENV_ARGS) res["env_args"]["x_display"] = ( ("0.%d" % devices[process_ind % len(devices)]) if devices is not None and len(devices) > 0 else None ) return res def valid_task_sampler_args( self, process_ind: int, total_processes: int, devices: Optional[List[int]] = None, seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: res = self._get_sampler_args_for_scene_split( self.VALID_SCENES, process_ind, total_processes, seeds=seeds, deterministic_cudnn=deterministic_cudnn, ) res["scene_period"] = self.VALID_SAMPLES_IN_SCENE res["max_tasks"] = self.VALID_SAMPLES_IN_SCENE * len(res["scenes"]) res["env_args"] = {} res["env_args"].update(self.ENV_ARGS) res["env_args"]["x_display"] = ( ("0.%d" % devices[process_ind % len(devices)]) if devices is not None and len(devices) > 0 else None ) return res def test_task_sampler_args( self, process_ind: int, total_processes: int, devices: Optional[List[int]] = None, seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: res = self._get_sampler_args_for_scene_split( self.TEST_SCENES, process_ind, total_processes, seeds=seeds, deterministic_cudnn=deterministic_cudnn, ) res["scene_period"] = self.TEST_SAMPLES_IN_SCENE res["max_tasks"] = self.TEST_SAMPLES_IN_SCENE * len(res["scenes"]) res["env_args"] = {} res["env_args"].update(self.ENV_ARGS) res["env_args"]["x_display"] = ( ("0.%d" % devices[process_ind % len(devices)]) 
if devices is not None and len(devices) > 0 else None ) return res
allenact-main
projects/tutorials/object_nav_ithor_ppo_one_object.py
# literate: tutorials/distributed-objectnav-tutorial.md
# %%
"""# Tutorial: Distributed training across multiple nodes."""

# %%
"""
**Note** The commands provided in this tutorial assume you use a configuration script (introduced below) to [clone the full library](../installation/installation-allenact.md#full-library). Setting up headless THOR might require superuser privileges. We also assume [NCCL](https://developer.nvidia.com/nccl) is available for communication across computation nodes and that all nodes have a running `ssh` server.

The experimental tools and commands for distributed training introduced below assume a Linux OS (tested on Ubuntu 18.04).

In this tutorial, we:

1. Introduce the available API for training across multiple nodes, as well as experimental scripts for distributed configuration, training start and termination, and remote command execution.
1. Introduce the headless mode for [AI2-THOR](https://ai2thor.allenai.org/) in `AllenAct`. Note that, in contrast with previous tutorials using AI2-THOR, this time we don't require an xserver (in Linux) to be active.
1. Show a training example for RoboTHOR ObjectNav on a cluster, with each node having sufficient GPUs and GPU memory to host 60 experience samplers collecting rollout data.

Thanks to the massive parallelization of experience collection and model training enabled by [DD-PPO](https://arxiv.org/abs/1911.00357), we can greatly speed up training by scaling across multiple nodes:

![training speedup](../img/multinode_training.jpg)

## The task: ObjectNav

In ObjectNav, the goal for the agent is to navigate to an object (possibly unseen during training) of a known given class and signal task completion when it determines it has reached the goal.

## Implementation

For this tutorial, we'll use the readily available `objectnav_baselines` project, which includes configurations for a wide variety of object navigation experiments for both iTHOR and RoboTHOR. Since those configuration files are defined for a single-node setup, we will mainly focus on the changes required in the `machine_params` and `training_pipeline` methods.

Note that, in order to use the headless version of AI2-THOR, we currently need to install a specific THOR commit, different from the default one in `robothor_plugin`.
Note that this command is included in the configuration script below, so **we don't need to run this**: ```bash pip install --extra-index-url https://ai2thor-pypi.allenai.org ai2thor==0+91139c909576f3bf95a187c5b02c6fd455d06b48 ``` The experiment config starts as follows: """ # %% import math from typing import Optional, Sequence import torch import torch.optim as optim from torch.optim.lr_scheduler import LambdaLR from allenact.algorithms.onpolicy_sync.losses import PPO from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig from allenact.utils.experiment_utils import ( Builder, LinearDecay, MultiLinearDecay, TrainingPipeline, PipelineStage, ) from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_rgb_resnet18gru_ddppo import ( ObjectNavRoboThorRGBPPOExperimentConfig as BaseConfig, ) class DistributedObjectNavRoboThorRGBPPOExperimentConfig(BaseConfig): def tag(self) -> str: return "DistributedObjectNavRoboThorRGBPPO" # %% """We override ObjectNavRoboThorBaseConfig's THOR_COMMIT_ID to match the installed headless one:""" # %% THOR_COMMIT_ID = "91139c909576f3bf95a187c5b02c6fd455d06b48" # %% """Also indicate that we're using headless THOR (for `task_sampler_args` methods):""" # %% THOR_IS_HEADLESS = True # %% """**Temporary hack** Disable the `commit_id` argument passed to the THOR `Controller`'s `init` method:""" # %% def env_args(self): res = super().env_args() res.pop("commit_id", None) return res # %% """ And, of course, define the number of nodes. This will be used by `machine_params` and `training_pipeline` below. We override the existing `ExperimentConfig`'s `init` method to include control on the number of nodes: """ # %% def __init__( self, distributed_nodes: int = 1, num_train_processes: Optional[int] = None, train_gpu_ids: Optional[Sequence[int]] = None, val_gpu_ids: Optional[Sequence[int]] = None, test_gpu_ids: Optional[Sequence[int]] = None, ): super().__init__( num_train_processes=num_train_processes, train_gpu_ids=train_gpu_ids, val_gpu_ids=val_gpu_ids, test_gpu_ids=test_gpu_ids, ) self.distributed_nodes = distributed_nodes # %% """ ### Machine parameters **Note:** We assume that all nodes are identical (same number and model of GPUs and drivers). The `machine_params` method will be invoked by `runner.py` with different arguments, e.g. to determine the configuration for validation or training. When working in distributed settings, `AllenAct` needs to know the total number of trainers across all nodes as well as the local number of trainers. 
This is accomplished through the introduction of a `machine_id` keyword argument, which will be used to define the training parameters as follows: """ # %% def machine_params(self, mode="train", **kwargs): params = super().machine_params(mode, **kwargs) if mode == "train": params.devices = params.devices * self.distributed_nodes params.nprocesses = params.nprocesses * self.distributed_nodes params.sampler_devices = params.sampler_devices * self.distributed_nodes if "machine_id" in kwargs: machine_id = kwargs["machine_id"] assert ( 0 <= machine_id < self.distributed_nodes ), f"machine_id {machine_id} out of range [0, {self.distributed_nodes - 1}]" local_worker_ids = list( range( len(self.train_gpu_ids) * machine_id, len(self.train_gpu_ids) * (machine_id + 1), ) ) params.set_local_worker_ids(local_worker_ids) # Confirm we're setting up train params nicely: print( f"devices {params.devices}" f"\nnprocesses {params.nprocesses}" f"\nsampler_devices {params.sampler_devices}" f"\nlocal_worker_ids {params.local_worker_ids}" ) elif mode == "valid": # Use all GPUs at their maximum capacity for training # (you may run validation in a separate machine) params.nprocesses = (0,) return params # %% """ In summary, we need to specify which indices in `devices`, `nprocesses` and `sampler_devices` correspond to the local `machine_id` node (whenever a `machine_id` is given as a keyword argument), otherwise we specify the global configuration. ### Training pipeline In preliminary ObjectNav experiments, we observe that small batches are useful during the initial training steps in terms of sample efficiency, whereas large batches are preferred during the rest of training. In order to scale to the larger amount of collected data in multi-node settings, we will proceed with a two-stage pipeline: 1. In the first stage, we'll enforce a number of updates per amount of collected data similar to the configuration with a single node by enforcing more batches per rollout (for about 30 million steps). 1. In the second stage we'll switch to a configuration with larger learning rate and batch size to be used up to the grand total of 300 million experience steps. 
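    To make the batch-size reasoning above concrete, here is a small back-of-the-envelope sketch (not part of the tutorial code; the numbers are assumed from the 60-samplers-per-node, `num_steps=128`, `num_mini_batch=1`, `update_repeats=4` baseline described in this tutorial):

    ```python
    # Assumed baseline values from the tutorial's single-node configuration.
    num_steps = 128           # rollout length per sampler
    samplers_per_node = 60
    nodes = 3

    frames_per_rollout_1_node = num_steps * samplers_per_node       # 7680
    frames_per_rollout_n_nodes = frames_per_rollout_1_node * nodes  # 23040

    # With num_mini_batch=1 and update_repeats=4, one node performs 4 updates per
    # 7680 collected frames. Stage 1 scales the number of batches per rollout by
    # ~`nodes` so that updates-per-frame stays close to the single-node setting:
    updates_per_rollout_stage1 = 1 * 4 * nodes  # num_mini_batch * update_repeats * scaling
    print(frames_per_rollout_n_nodes, updates_per_rollout_stage1)
    ```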
    We first define a helper method to generate a learning rate curve with decay for each stage:
    """

    # %%
    @staticmethod
    def lr_scheduler(small_batch_steps, transition_steps, ppo_steps, lr_scaling):
        safe_small_batch_steps = int(small_batch_steps * 1.02)
        large_batch_and_lr_steps = ppo_steps - safe_small_batch_steps - transition_steps

        # Learning rate after small batch steps (assuming decay to 0)
        break1 = 1.0 - safe_small_batch_steps / ppo_steps

        # Initial learning rate for large batch (after transition from initial to large learning rate)
        break2 = lr_scaling * (
            1.0 - (safe_small_batch_steps + transition_steps) / ppo_steps
        )
        return MultiLinearDecay(
            [
                # Base learning rate phase for small batch (with linear decay towards 0)
                LinearDecay(steps=safe_small_batch_steps, startp=1.0, endp=break1,),
                # Allow the optimizer to adapt its statistics to the changes with a larger learning rate
                LinearDecay(steps=transition_steps, startp=break1, endp=break2,),
                # Scaled learning rate phase for large batch (with linear decay towards 0)
                LinearDecay(steps=large_batch_and_lr_steps, startp=break2, endp=0,),
            ]
        )

    # %%
    """
    The training pipeline looks like:
    """

    # %%
    def training_pipeline(self, **kwargs):
        # These params are identical to the baseline configuration for 60 samplers (1 machine)
        ppo_steps = int(300e6)
        lr = 3e-4
        num_mini_batch = 1
        update_repeats = 4
        num_steps = 128
        save_interval = 5000000
        log_interval = 10000 if torch.cuda.is_available() else 1
        gamma = 0.99
        use_gae = True
        gae_lambda = 0.95
        max_grad_norm = 0.5

        # We add 30 million steps for small batch learning
        small_batch_steps = int(30e6)
        # And a short transition phase towards large learning rate
        # (see comment in the `lr_scheduler` helper method)
        transition_steps = int(2 / 3 * self.distributed_nodes * 1e6)

        # Find exact number of samplers per GPU
        assert (
            self.num_train_processes % len(self.train_gpu_ids) == 0
        ), "Expected uniform number of samplers per GPU"
        samplers_per_gpu = self.num_train_processes // len(self.train_gpu_ids)

        # Multiply num_mini_batch by the largest divisor of
        # samplers_per_gpu to keep all batches of same size:
        num_mini_batch_multiplier = [
            i
            for i in reversed(
                range(1, min(samplers_per_gpu // 2, self.distributed_nodes) + 1)
            )
            if samplers_per_gpu % i == 0
        ][0]

        # Multiply update_repeats so that the product of this factor and
        # num_mini_batch_multiplier is >= self.distributed_nodes:
        update_repeats_multiplier = int(
            math.ceil(self.distributed_nodes / num_mini_batch_multiplier)
        )

        return TrainingPipeline(
            save_interval=save_interval,
            metric_accumulate_interval=log_interval,
            optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
            num_mini_batch=num_mini_batch,
            update_repeats=update_repeats,
            max_grad_norm=max_grad_norm,
            num_steps=num_steps,
            named_losses={"ppo_loss": PPO(**PPOConfig, show_ratios=False)},
            gamma=gamma,
            use_gae=use_gae,
            gae_lambda=gae_lambda,
            advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
            pipeline_stages=[
                # We increase the number of batches for the first stage to reach an
                # equivalent number of updates per collected rollout data as in the
                # 1 node/60 samplers setting
                PipelineStage(
                    loss_names=["ppo_loss"],
                    max_stage_steps=small_batch_steps,
                    num_mini_batch=num_mini_batch * num_mini_batch_multiplier,
                    update_repeats=update_repeats * update_repeats_multiplier,
                ),
                # Then we proceed with the base configuration (leading to larger
                # batches due to the increased number of samplers)
                PipelineStage(
                    loss_names=["ppo_loss"],
                    max_stage_steps=ppo_steps - small_batch_steps,
                ),
            ],
            # We use the MultiLinearDecay curve defined by the helper function,
            # setting the learning rate scaling as the square root of the number
            # of nodes. Linear scaling might also work, but we leave that check
            # to the reader.
            lr_scheduler_builder=Builder(
                LambdaLR,
                {
                    "lr_lambda": self.lr_scheduler(
                        small_batch_steps=small_batch_steps,
                        transition_steps=transition_steps,
                        ppo_steps=ppo_steps,
                        lr_scaling=math.sqrt(self.distributed_nodes),
                    )
                },
            ),
        )

# %%
"""
## Multi-node configuration

**Note:** In the following, we'll assume you don't have an available setup for distributed execution, such as [slurm](https://slurm.schedmd.com/documentation.html). If you do have access to a better alternative to set up and run distributed processes, we encourage you to use that. The experimental distributed tools included here are intended for a rather basic usage pattern that might not suit your needs.

If we haven't set up AllenAct with the headless version of AI2-THOR in our nodes, we can define a configuration script similar to:

```bash
#!/bin/bash

# Prepare a virtualenv for allenact
sudo apt-get install -y python3-venv
python3 -mvenv ~/allenact_venv
source ~/allenact_venv/bin/activate
pip install -U pip wheel

# Install AllenAct
cd ~
git clone https://github.com/allenai/allenact.git
cd allenact

# Install AllenAct + RoboTHOR plugin dependencies
pip install -r requirements.txt
pip install -r allenact_plugins/robothor_plugin/extra_requirements.txt

# Download + setup datasets
bash datasets/download_navigation_datasets.sh robothor-objectnav

# Install headless AI2-THOR and required libvulkan1
sudo apt-get install -y libvulkan1
pip install --extra-index-url https://ai2thor-pypi.allenai.org ai2thor==0+91139c909576f3bf95a187c5b02c6fd455d06b48

# Download AI2-THOR binaries
python -c "from ai2thor.controller import Controller; c=Controller(); c.stop()"

echo DONE
```

and save it as `headless_robothor_config.sh`. Note that some of the configuration steps in the script assume you have superuser privileges.

Then, we can just copy this file to the first node in our cluster and run it with:

```bash
source <PATH/TO/headless_robothor_config.sh>
```

If everything went well, we should be able to

```bash
cd ~/allenact && source ~/allenact_venv/bin/activate
```

Note that we might need to install `libvulkan1` on each node (even if the AllenAct setup is shared across nodes) if it is not already available.

### Local filesystems

If our cluster does not use a shared filesystem, we'll need to propagate the setup to the rest of the nodes. Assuming we can just `ssh` with the current user to all nodes, we can propagate our config with:

```bash
scripts/dconfig.py --runs_on <COMMA_SEPARATED_LIST_OF_IP_ADDRESSES> \
    --config_script <PATH/TO/headless_robothor_config.sh>
```

and we can check the state of the installation with the `scripts/dcommand.py` tool:

```bash
scripts/dcommand.py --runs_on <COMMA_SEPARATED_LIST_OF_IP_ADDRESSES> \
    --command 'tail -n 5 ~/log_allenact_distributed_config'
```

If everything went fine, all requirements are ready to start running our experiment.

## Run your experiment

**Note:** In this section, we again assume you don't have an available setup for distributed execution, such as [slurm](https://slurm.schedmd.com/documentation.html). If you do have access to a better alternative to setup/run distributed processes, we encourage you to use that. The experimental distributed tools included here are intended for a rather basic usage pattern that might not suit your needs.
Our experimental extension to AllenAct's `main.py` script allows using practically identical commands to the ones used in a single-node setup to start our experiments. From the root `allenact` directory, we can simply invoke:

```bash
scripts/dmain.py projects/tutorials/distributed_objectnav_tutorial.py \
    --config_kwargs '{"distributed_nodes":3}' \
    --runs_on <COMMA_SEPARATED_LIST_OF_IP_ADDRESSES> \
    --env_activate_path ~/allenact_venv/bin/activate \
    --allenact_path ~/allenact \
    --distributed_ip_and_port <FIRST_IP_ADDRESS_IN_RUNS_ON_LIST>:<FREE_PORT_NUMBER_FOR_THIS_IP_ADDRESS>
```

This script will do several things for you, including synchronization of the changes in the `allenact` directory to all machines, enabling virtual environments in each node, sharing the same random seed for all `main.py` instances, assigning `--machine_id` parameters required for multi-node training, and redirecting the process output to a log file under the output results folder.

Note that by changing the value associated with the `distributed_nodes` key in the `config_kwargs` map and the `runs_on` list of IPs, we can easily scale our training to e.g. 1, 3, or 8 nodes as shown in the chart above. Note that for this call to work unmodified, you should have sufficient GPUs/GPU memory to host 60 samplers per node.

## Track and stop your experiment

You might have noticed that, when your experiment started with the above command, a file was created under `~/.allenact`. This file includes IP addresses and screen session IDs for all nodes. It can be used by the already introduced `scripts/dcommand.py` script, if we omit the `--runs_on` argument, to call a command on each node via ssh; but most importantly it is used by the `scripts/dkill.py` script to terminate all screen sessions hosting our training processes.

### Experiment tracking

A simple way to check that all machines are training, assuming you have `nvidia-smi` installed in all nodes, is to just call:

```bash
scripts/dcommand.py
```

from the root `allenact` directory. If everything is working well, the GPU usage stats from `nvidia-smi` should reflect ongoing activity. You can also add different commands to be executed by each node. It is of course also possible to run tensorboard on any of the nodes, if that's your preference.

### Experiment termination

Just call:

```bash
scripts/dkill.py
```

After killing all involved screen sessions, you will be asked whether you also want to delete the "killfile" stored under the `~/.allenact` directory (which might be your preferred option once all processes are terminated).

We hope this tutorial will help you quickly start testing new ideas! Even though we've only explored moderate settings of up to 480 experience samplers, you might want to consider some additional changes (like the [choice for the optimizer](https://arxiv.org/abs/2103.07013)) if you plan to run at larger scale.
"""
allenact-main
projects/tutorials/distributed_objectnav_tutorial.py
# literate: tutorials/minigrid-tutorial.md
# %%
"""# Tutorial: Navigation in MiniGrid."""

# %%
"""
In this tutorial, we will train an agent to complete the `MiniGrid-Empty-Random-5x5-v0` task within the [MiniGrid](https://github.com/maximecb/gym-minigrid) environment.

We will demonstrate how to:

* Write an experiment configuration file with a simple training pipeline from scratch.
* Use one of the supported environments with minimal user effort.
* Train, validate and test your experiment from the command line.

This tutorial assumes the [installation instructions](../installation/installation-allenact.md) have already been followed and that, to some extent, this framework's [abstractions](../getting_started/abstractions.md) are known. The `extra_requirements` for `minigrid_plugin` and `babyai_plugin` can be installed with:

```bash
pip install -r allenact_plugins/minigrid_plugin/extra_requirements.txt; pip install -r allenact_plugins/babyai_plugin/extra_requirements.txt
```

## The task

A `MiniGrid-Empty-Random-5x5-v0` task consists of a grid of dimensions 5x5 where an agent spawned at a random location and orientation has to navigate to the visitable bottom right corner cell of the grid by sequences of three possible actions (rotate left/right and move forward). A visualization of the environment with expert steps in a random `MiniGrid-Empty-Random-5x5-v0` task looks as follows:

![MiniGridEmptyRandom5x5 task example](../img/minigrid_environment.png)

The observation for the agent is a subset of the entire grid, simulating a simplified limited field of view, as depicted by the highlighted rectangle (observed subset of the grid) around the agent (red arrow). Gray cells correspond to walls.

## Experiment configuration file

Our complete experiment consists of:

* Training a basic actor-critic agent with memory to solve randomly sampled navigation tasks.
* Validation on a fixed set of tasks (running in parallel with training).
* A second stage where we test saved checkpoints with a larger fixed set of tasks.

The entire configuration for the experiment, including training, validation, and testing, is encapsulated in a single class implementing the `ExperimentConfig` abstraction. For this tutorial, we will follow the config under `projects/tutorials/minigrid_tutorial.py`.

The `ExperimentConfig` abstraction is used by the [OnPolicyTrainer](../api/allenact/algorithms/onpolicy_sync/engine.md#onpolicytrainer) class (for training) and the [OnPolicyInference](../api/allenact/algorithms/onpolicy_sync/engine.md#onpolicyinference) class (for validation and testing) invoked through the entry script `main.py` that calls an orchestrating [OnPolicyRunner](../api/allenact/algorithms/onpolicy_sync/runner.md#onpolicyrunner) class. It includes:

* A `tag` method to identify the experiment.
* A `create_model` method to instantiate actor-critic models.
* A `make_sampler_fn` method to instantiate task samplers.
* Three `{train,valid,test}_task_sampler_args` methods describing initialization parameters for task samplers used in training, validation, and testing; including assignment of workers to devices for simulation.
* A `machine_params` method with configuration parameters that will be used for training, validation, and testing.
* A `training_pipeline` method describing a possibly multi-staged training pipeline with different types of losses, an optimizer, and other parameters like learning rates, batch sizes, etc.

### Preliminaries

We first import everything we'll need to define our experiment.
""" # %% from typing import Dict, Optional, List, Any, cast import gym from gym_minigrid.envs import EmptyRandomEnv5x5 import torch.nn as nn import torch.optim as optim from torch.optim.lr_scheduler import LambdaLR from allenact.algorithms.onpolicy_sync.losses.ppo import PPO, PPOConfig from allenact.base_abstractions.experiment_config import ExperimentConfig, TaskSampler from allenact.base_abstractions.sensor import SensorSuite from allenact.utils.experiment_utils import ( TrainingPipeline, Builder, PipelineStage, LinearDecay, ) from allenact_plugins.minigrid_plugin.minigrid_models import MiniGridSimpleConvRNN from allenact_plugins.minigrid_plugin.minigrid_sensors import EgocentricMiniGridSensor from allenact_plugins.minigrid_plugin.minigrid_tasks import ( MiniGridTaskSampler, MiniGridTask, ) # %% """ We now create the `MiniGridTutorialExperimentConfig` class which we will use to define our experiment. For pedagogical reasons, we will add methods to this class one at a time below with a description of what these classes do. """ # %% class MiniGridTutorialExperimentConfig(ExperimentConfig): # %% """An experiment is identified by a `tag`.""" # %% @classmethod def tag(cls) -> str: return "MiniGridTutorial" # %% """ ### Sensors and Model A readily available Sensor type for MiniGrid, [EgocentricMiniGridSensor](../api/allenact_plugins/minigrid_plugin/minigrid_sensors.md#egocentricminigridsensor), allows us to extract observations in a format consumable by an `ActorCriticModel` agent: """ # %% SENSORS = [ EgocentricMiniGridSensor(agent_view_size=5, view_channels=3), ] # %% """ The three `view_channels` include objects, colors and states corresponding to a partial observation of the environment as an image tensor, equivalent to that from `ImgObsWrapper` in [MiniGrid](https://github.com/maximecb/gym-minigrid#wrappers). The relatively large `agent_view_size` means the view will only be clipped by the environment walls in the forward and lateral directions with respect to the agent's orientation. We define our `ActorCriticModel` agent using a lightweight implementation with recurrent memory for MiniGrid environments, [MiniGridSimpleConvRNN](../api/allenact_plugins/minigrid_plugin/minigrid_models.md#minigridsimpleconvrnn): """ # %% @classmethod def create_model(cls, **kwargs) -> nn.Module: return MiniGridSimpleConvRNN( action_space=gym.spaces.Discrete(len(MiniGridTask.class_action_names())), observation_space=SensorSuite(cls.SENSORS).observation_spaces, num_objects=cls.SENSORS[0].num_objects, num_colors=cls.SENSORS[0].num_colors, num_states=cls.SENSORS[0].num_states, ) # %% """ ### Task samplers We use an available TaskSampler implementation for MiniGrid environments that allows to sample both random and deterministic `MiniGridTasks`, [MiniGridTaskSampler](../api/allenact_plugins/minigrid_plugin/minigrid_tasks.md#minigridtasksampler): """ # %% @classmethod def make_sampler_fn(cls, **kwargs) -> TaskSampler: return MiniGridTaskSampler(**kwargs) # %% """ This task sampler will during training (or validation/testing), randomly initialize new tasks for the agent to complete. While it is not quite as important for this task type (as we test our agent in the same setting it is trained on) there are a lot of good reasons we would like to sample tasks differently during training than during validation or testing. 
One good reason, that is applicable in this tutorial, is that, during training, we would like to be able to sample tasks forever while, during testing, we would like to sample a fixed number of tasks (as otherwise we would never finish testing!). In `allenact` this is made possible by defining different arguments for the task sampler: """ # %% def train_task_sampler_args( self, process_ind: int, total_processes: int, devices: Optional[List[int]] = None, seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: return self._get_sampler_args(process_ind=process_ind, mode="train") def valid_task_sampler_args( self, process_ind: int, total_processes: int, devices: Optional[List[int]] = None, seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: return self._get_sampler_args(process_ind=process_ind, mode="valid") def test_task_sampler_args( self, process_ind: int, total_processes: int, devices: Optional[List[int]] = None, seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: return self._get_sampler_args(process_ind=process_ind, mode="test") # %% """ where, for convenience, we have defined a `_get_sampler_args` method: """ # %% def _get_sampler_args(self, process_ind: int, mode: str) -> Dict[str, Any]: """Generate initialization arguments for train, valid, and test TaskSamplers. # Parameters process_ind : index of the current task sampler mode: one of `train`, `valid`, or `test` """ if mode == "train": max_tasks = None # infinite training tasks task_seeds_list = None # no predefined random seeds for training deterministic_sampling = False # randomly sample tasks in training else: max_tasks = 20 + 20 * (mode == "test") # 20 tasks for valid, 40 for test # one seed for each task to sample: # - ensures different seeds for each sampler, and # - ensures a deterministic set of sampled tasks. task_seeds_list = list( range(process_ind * max_tasks, (process_ind + 1) * max_tasks) ) deterministic_sampling = ( True # deterministically sample task in validation/testing ) return dict( max_tasks=max_tasks, # see above env_class=self.make_env, # builder for third-party environment (defined below) sensors=self.SENSORS, # sensors used to return observations to the agent env_info=dict(), # parameters for environment builder (none for now) task_seeds_list=task_seeds_list, # see above deterministic_sampling=deterministic_sampling, # see above ) @staticmethod def make_env(*args, **kwargs): return EmptyRandomEnv5x5() # %% """ Note that the `env_class` argument to the Task Sampler is the one determining which task type we are going to train the model for (in this case, `MiniGrid-Empty-Random-5x5-v0` from [gym-minigrid](https://github.com/maximecb/gym-minigrid#empty-environment)) . The sparse reward is [given by the environment](https://github.com/maximecb/gym-minigrid/blob/6e22a44dc67414b647063692258a4f95ce789161/gym_minigrid/minigrid.py#L819) , and the maximum task length is 100. For training, we opt for a default random sampling, whereas for validation and test we define fixed sets of randomly sampled tasks without needing to explicitly define a dataset. In this toy example, the maximum number of different tasks is 32. For validation we sample 320 tasks using 16 samplers, or 640 for testing, so we can be fairly sure that all possible tasks are visited at least once during evaluation. 
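    Before moving on, it may help to see why the seed ranges above give a deterministic, non-overlapping evaluation set. A tiny standalone sketch (mirroring the arithmetic in `_get_sampler_args`; the three-sampler loop is just for illustration):

    ```python
    # Each sampler gets a disjoint, contiguous block of task seeds, so the union
    # over samplers is a fixed, reproducible set of evaluation tasks.
    max_tasks = 20  # per sampler in "valid" mode (40 in "test" mode)
    for process_ind in range(3):  # 16 samplers in the real configuration
        seeds = list(range(process_ind * max_tasks, (process_ind + 1) * max_tasks))
        print(process_ind, seeds[0], "...", seeds[-1])
    # 0 0 ... 19
    # 1 20 ... 39
    # 2 40 ... 59
    ```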
### Machine parameters Given the simplicity of the task and model, we can quickly train the model on the CPU: """ # %% @classmethod def machine_params(cls, mode="train", **kwargs) -> Dict[str, Any]: return { "nprocesses": 128 if mode == "train" else 16, "devices": [], } # %% """ We allocate a larger number of samplers for training (128) than for validation or testing (16), and we default to CPU usage by returning an empty list of `devices`. ### Training pipeline The last definition required before starting to train is a training pipeline. In this case, we just use a single PPO stage with linearly decaying learning rate: """ # %% @classmethod def training_pipeline(cls, **kwargs) -> TrainingPipeline: ppo_steps = int(150000) return TrainingPipeline( named_losses=dict(ppo_loss=PPO(**PPOConfig)), # type:ignore pipeline_stages=[ PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps) ], optimizer_builder=Builder(cast(optim.Optimizer, optim.Adam), dict(lr=1e-4)), num_mini_batch=4, update_repeats=3, max_grad_norm=0.5, num_steps=16, gamma=0.99, use_gae=True, gae_lambda=0.95, advance_scene_rollout_period=None, save_interval=10000, metric_accumulate_interval=1, lr_scheduler_builder=Builder( LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)} # type:ignore ), ) # %% """ You can see that we use a `Builder` class to postpone the construction of some of the elements, like the optimizer, for which the model weights need to be known. ## Training and validation We have a complete implementation of this experiment's configuration class in `projects/tutorials/minigrid_tutorial.py`. To start training from scratch, we just need to invoke ```bash PYTHONPATH=. python allenact/main.py minigrid_tutorial -b projects/tutorials -m 8 -o /PATH/TO/minigrid_output -s 12345 ``` from the `allenact` root directory. * With `-b projects/tutorials` we tell `allenact` that `minigrid_tutorial` experiment config file will be found in the `projects/tutorials` directory. * With `-m 8` we limit the number of subprocesses to 8 (each subprocess will run 16 of the 128 training task samplers). * With `-o minigrid_output` we set the output folder into which results and logs will be saved. * With `-s 12345` we set the random seed. If we have Tensorboard installed, we can track progress with ```bash tensorboard --logdir /PATH/TO/minigrid_output ``` which will default to the URL [http://localhost:6006/](http://localhost:6006/). After 150,000 steps, the script will terminate and several checkpoints will be saved in the output folder. The training curves should look similar to: ![training curves](../img/minigrid_train.png) If everything went well, the `valid` success rate should converge to 1 and the mean episode length to a value below 4. (For perfectly uniform sampling and complete observation, the expectation for the optimal policy is 3.75 steps.) In the not-so-unlikely event of the run failing to converge to a near-optimal policy, we can just try to re-run (for example with a different random seed). The validation curves should look similar to: ![validation curves](../img/minigrid_valid.png) ## Testing The training start date for the experiment, in `YYYY-MM-DD_HH-MM-SS` format, is used as the name of one of the subfolders in the path to the checkpoints, saved under the output folder. In order to evaluate (i.e. test) a particular checkpoint, we need to pass the `--eval` flag and specify the checkpoint with the `--checkpoint CHECKPOINT_PATH` option: ```bash PYTHONPATH=. 
PYTHONPATH=. python allenact/main.py minigrid_tutorial \
    -b projects/tutorials \
    -m 1 \
    -o /PATH/TO/minigrid_output \
    -s 12345 \
    --eval \
    --checkpoint /PATH/TO/minigrid_output/checkpoints/MiniGridTutorial/YOUR_START_DATE/exp_MiniGridTutorial__stage_00__steps_000000151552.pt
```

Again, if everything went well, the `test` success rate should converge to 1 and the mean episode length to a value below 4. Detailed results are saved under a `metrics` subfolder in the output folder. The test curves should look similar to:

![test curves](../img/minigrid_test.png)
"""
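# --- Illustrative aside (not part of the original tutorial) -----------------
# The `Builder` objects used in `training_pipeline` above simply defer
# construction until all constructor arguments are known (e.g., the optimizer
# needs the model's parameters, which do not exist when the pipeline is
# defined). A minimal sketch, assuming `Builder`'s call-to-construct
# semantics:
#
#   import torch.optim as optim
#   from allenact.utils.experiment_utils import Builder
#
#   opt_builder = Builder(optim.Adam, dict(lr=1e-4))
#   # ... later, once the model has been created:
#   optimizer = opt_builder(params=model.parameters())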
allenact-main
projects/tutorials/minigrid_tutorial.py
from typing import Dict, Optional, List, Any, cast, Callable, Union, Tuple import gym import numpy as np import torch import torch.nn as nn import torch.optim as optim from gym_minigrid.envs import EmptyRandomEnv5x5 from gym_minigrid.minigrid import MiniGridEnv from torch.optim.lr_scheduler import LambdaLR from allenact.algorithms.onpolicy_sync.losses.imitation import Imitation from allenact.algorithms.onpolicy_sync.losses.ppo import PPO, PPOConfig from allenact.algorithms.onpolicy_sync.policy import ActorCriticModel, DistributionType from allenact.base_abstractions.distributions import ( CategoricalDistr, ConditionalDistr, SequentialDistr, ) from allenact.base_abstractions.experiment_config import ExperimentConfig, TaskSampler from allenact.base_abstractions.misc import ActorCriticOutput, Memory, RLStepResult from allenact.base_abstractions.sensor import SensorSuite, ExpertActionSensor from allenact.embodiedai.models.basic_models import RNNStateEncoder from allenact.utils.experiment_utils import ( TrainingPipeline, Builder, PipelineStage, LinearDecay, ) from allenact.utils.misc_utils import prepare_locals_for_super from allenact_plugins.minigrid_plugin.minigrid_models import MiniGridSimpleConvBase from allenact_plugins.minigrid_plugin.minigrid_sensors import EgocentricMiniGridSensor from allenact_plugins.minigrid_plugin.minigrid_tasks import ( MiniGridTaskSampler, MiniGridTask, ) class ConditionedLinearActorCriticHead(nn.Module): def __init__( self, input_size: int, master_actions: int = 2, subpolicy_actions: int = 2 ): super().__init__() self.input_size = input_size self.master_and_critic = nn.Linear(input_size, master_actions + 1) self.embed_higher = nn.Embedding(num_embeddings=2, embedding_dim=input_size) self.actor = nn.Linear(2 * input_size, subpolicy_actions) nn.init.orthogonal_(self.master_and_critic.weight) nn.init.constant_(self.master_and_critic.bias, 0) nn.init.orthogonal_(self.actor.weight) nn.init.constant_(self.actor.bias, 0) def lower_policy(self, *args, **kwargs): assert "higher" in kwargs assert "state_embedding" in kwargs emb = self.embed_higher(kwargs["higher"]) logits = self.actor(torch.cat([emb, kwargs["state_embedding"]], dim=-1)) return CategoricalDistr(logits=logits) def forward(self, x): out = self.master_and_critic(x) master_logits = out[..., :-1] values = out[..., -1:] # noinspection PyArgumentList cond1 = ConditionalDistr( distr_conditioned_on_input_fn_or_instance=CategoricalDistr( logits=master_logits ), action_group_name="higher", ) cond2 = ConditionalDistr( distr_conditioned_on_input_fn_or_instance=lambda *args, **kwargs: ConditionedLinearActorCriticHead.lower_policy( self, *args, **kwargs ), action_group_name="lower", state_embedding=x, ) return ( SequentialDistr(cond1, cond2), values.view(*values.shape[:2], -1), # [steps, samplers, flattened] ) class ConditionedLinearActorCritic(ActorCriticModel[SequentialDistr]): def __init__( self, input_uuid: str, action_space: gym.spaces.Dict, observation_space: gym.spaces.Dict, ): super().__init__(action_space=action_space, observation_space=observation_space) assert ( input_uuid in observation_space.spaces ), "ConditionedLinearActorCritic expects only a single observational input." self.input_uuid = input_uuid box_space: gym.spaces.Box = observation_space[self.input_uuid] assert isinstance(box_space, gym.spaces.Box), ( "ConditionedLinearActorCritic requires that" "observation space corresponding to the input uuid is a Box space." 
) assert len(box_space.shape) == 1 self.in_dim = box_space.shape[0] self.head = ConditionedLinearActorCriticHead( input_size=self.in_dim, master_actions=action_space["higher"].n, subpolicy_actions=action_space["lower"].n, ) # noinspection PyMethodMayBeStatic def _recurrent_memory_specification(self): return None def forward(self, observations, memory, prev_actions, masks): dists, values = self.head(observations[self.input_uuid]) # noinspection PyArgumentList return ( ActorCriticOutput(distributions=dists, values=values, extras={},), None, ) class ConditionedRNNActorCritic(ActorCriticModel[SequentialDistr]): def __init__( self, input_uuid: str, action_space: gym.spaces.Dict, observation_space: gym.spaces.Dict, hidden_size: int = 128, num_layers: int = 1, rnn_type: str = "GRU", head_type: Callable[ ..., ActorCriticModel[SequentialDistr] ] = ConditionedLinearActorCritic, ): super().__init__(action_space=action_space, observation_space=observation_space) self.hidden_size = hidden_size self.rnn_type = rnn_type assert ( input_uuid in observation_space.spaces ), "LinearActorCritic expects only a single observational input." self.input_uuid = input_uuid box_space: gym.spaces.Box = observation_space[self.input_uuid] assert isinstance(box_space, gym.spaces.Box), ( "RNNActorCritic requires that" "observation space corresponding to the input uuid is a Box space." ) assert len(box_space.shape) == 1 self.in_dim = box_space.shape[0] self.state_encoder = RNNStateEncoder( input_size=self.in_dim, hidden_size=hidden_size, num_layers=num_layers, rnn_type=rnn_type, trainable_masked_hidden_state=True, ) self.head_uuid = "{}_{}".format("rnn", input_uuid) self.ac_nonrecurrent_head: ActorCriticModel[SequentialDistr] = head_type( input_uuid=self.head_uuid, action_space=action_space, observation_space=gym.spaces.Dict( { self.head_uuid: gym.spaces.Box( low=np.float32(0.0), high=np.float32(1.0), shape=(hidden_size,) ) } ), ) self.memory_key = "rnn" @property def recurrent_hidden_state_size(self) -> int: return self.hidden_size @property def num_recurrent_layers(self) -> int: return self.state_encoder.num_recurrent_layers def _recurrent_memory_specification(self): return { self.memory_key: ( ( ("layer", self.num_recurrent_layers), ("sampler", None), ("hidden", self.recurrent_hidden_state_size), ), torch.float32, ) } def forward( # type:ignore self, observations: Dict[str, Union[torch.FloatTensor, Dict[str, Any]]], memory: Memory, prev_actions: torch.Tensor, masks: torch.FloatTensor, ) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]: rnn_out, mem_return = self.state_encoder( x=observations[self.input_uuid], hidden_states=memory.tensor(self.memory_key), masks=masks, ) # noinspection PyCallingNonCallable out, _ = self.ac_nonrecurrent_head( observations={self.head_uuid: rnn_out}, memory=None, prev_actions=prev_actions, masks=masks, ) # noinspection PyArgumentList return ( out, memory.set_tensor(self.memory_key, mem_return), ) class ConditionedMiniGridSimpleConvRNN(MiniGridSimpleConvBase): def __init__( self, action_space: gym.spaces.Dict, observation_space: gym.spaces.Dict, num_objects: int, num_colors: int, num_states: int, object_embedding_dim: int = 8, hidden_size=512, num_layers=1, rnn_type="GRU", head_type: Callable[ ..., ActorCriticModel[SequentialDistr] ] = ConditionedLinearActorCritic, **kwargs, ): super().__init__(**prepare_locals_for_super(locals())) self._hidden_size = hidden_size agent_view_x, agent_view_y, view_channels = observation_space[ "minigrid_ego_image" ].shape self.actor_critic = 
ConditionedRNNActorCritic( input_uuid=self.ac_key, action_space=action_space, observation_space=gym.spaces.Dict( { self.ac_key: gym.spaces.Box( low=np.float32(-1.0), high=np.float32(1.0), shape=( self.object_embedding_dim * agent_view_x * agent_view_y * view_channels, ), ) } ), hidden_size=hidden_size, num_layers=num_layers, rnn_type=rnn_type, head_type=head_type, ) self.memory_key = "rnn" self.train() @property def num_recurrent_layers(self): return self.actor_critic.num_recurrent_layers @property def recurrent_hidden_state_size(self): return self._hidden_size def _recurrent_memory_specification(self): return { self.memory_key: ( ( ("layer", self.num_recurrent_layers), ("sampler", None), ("hidden", self.recurrent_hidden_state_size), ), torch.float32, ) } class ConditionedMiniGridTask(MiniGridTask): _ACTION_NAMES = ("left", "right", "forward", "pickup") _ACTION_IND_TO_MINIGRID_IND = tuple( MiniGridEnv.Actions.__members__[name].value for name in _ACTION_NAMES ) @property def action_space(self) -> gym.spaces.Dict: return gym.spaces.Dict( higher=gym.spaces.Discrete(2), lower=gym.spaces.Discrete(2) ) def _step(self, action: Dict[str, int]) -> RLStepResult: assert len(action) == 2, "got action={}".format(action) minigrid_obs, reward, self._minigrid_done, info = self.env.step( action=( self._ACTION_IND_TO_MINIGRID_IND[action["lower"] + 2 * action["higher"]] ) ) # self.env.render() return RLStepResult( observation=self.get_observations(minigrid_output_obs=minigrid_obs), reward=reward, done=self.is_done(), info=info, ) def query_expert(self, **kwargs) -> Tuple[int, bool]: if kwargs["expert_sensor_group_name"] == "higher": if self._minigrid_done: raise ValueError("Episode is completed, but expert is still queried.") # return 0, False self.cached_expert = super().query_expert(**kwargs) if self.cached_expert[1]: return self.cached_expert[0] // 2, True else: return 0, False else: assert hasattr(self, "cached_expert") if self.cached_expert[1]: res = (self.cached_expert[0] % 2, True) else: res = (0, False) del self.cached_expert return res class MiniGridTutorialExperimentConfig(ExperimentConfig): @classmethod def tag(cls) -> str: return "MiniGridTutorial" SENSORS = [ EgocentricMiniGridSensor(agent_view_size=5, view_channels=3), ExpertActionSensor( action_space=gym.spaces.Dict( higher=gym.spaces.Discrete(2), lower=gym.spaces.Discrete(2) ) ), ] @classmethod def create_model(cls, **kwargs) -> nn.Module: return ConditionedMiniGridSimpleConvRNN( action_space=gym.spaces.Dict( higher=gym.spaces.Discrete(2), lower=gym.spaces.Discrete(2) ), observation_space=SensorSuite(cls.SENSORS).observation_spaces, num_objects=cls.SENSORS[0].num_objects, num_colors=cls.SENSORS[0].num_colors, num_states=cls.SENSORS[0].num_states, ) @classmethod def make_sampler_fn(cls, **kwargs) -> TaskSampler: return MiniGridTaskSampler(**kwargs) def train_task_sampler_args( self, process_ind: int, total_processes: int, devices: Optional[List[int]] = None, seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: return self._get_sampler_args(process_ind=process_ind, mode="train") def valid_task_sampler_args( self, process_ind: int, total_processes: int, devices: Optional[List[int]] = None, seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: return self._get_sampler_args(process_ind=process_ind, mode="valid") def test_task_sampler_args( self, process_ind: int, total_processes: int, devices: Optional[List[int]] = None, seeds: Optional[List[int]] = None, 
deterministic_cudnn: bool = False, ) -> Dict[str, Any]: return self._get_sampler_args(process_ind=process_ind, mode="test") def _get_sampler_args(self, process_ind: int, mode: str) -> Dict[str, Any]: """Generate initialization arguments for train, valid, and test TaskSamplers. # Parameters process_ind : index of the current task sampler mode: one of `train`, `valid`, or `test` """ if mode == "train": max_tasks = None # infinite training tasks task_seeds_list = None # no predefined random seeds for training deterministic_sampling = False # randomly sample tasks in training else: max_tasks = 20 + 20 * ( mode == "test" ) # 20 tasks for valid, 40 for test (per sampler) # one seed for each task to sample: # - ensures different seeds for each sampler, and # - ensures a deterministic set of sampled tasks. task_seeds_list = list( range(process_ind * max_tasks, (process_ind + 1) * max_tasks) ) deterministic_sampling = ( True # deterministically sample task in validation/testing ) return dict( max_tasks=max_tasks, # see above env_class=self.make_env, # builder for third-party environment (defined below) sensors=self.SENSORS, # sensors used to return observations to the agent env_info=dict(), # parameters for environment builder (none for now) task_seeds_list=task_seeds_list, # see above deterministic_sampling=deterministic_sampling, # see above task_class=ConditionedMiniGridTask, ) @staticmethod def make_env(*args, **kwargs): return EmptyRandomEnv5x5() @classmethod def machine_params(cls, mode="train", **kwargs) -> Dict[str, Any]: return { "nprocesses": 128 if mode == "train" else 16, "devices": [], } @classmethod def training_pipeline(cls, **kwargs) -> TrainingPipeline: ppo_steps = int(150000) return TrainingPipeline( named_losses=dict( imitation_loss=Imitation( cls.SENSORS[1] ), # 0 is Minigrid, 1 is ExpertActionSensor ppo_loss=PPO(**PPOConfig, entropy_method_name="conditional_entropy"), ), # type:ignore pipeline_stages=[ PipelineStage( teacher_forcing=LinearDecay( startp=1.0, endp=0.0, steps=ppo_steps // 2, ), loss_names=["imitation_loss", "ppo_loss"], max_stage_steps=ppo_steps, ) ], optimizer_builder=Builder(cast(optim.Optimizer, optim.Adam), dict(lr=1e-4)), num_mini_batch=4, update_repeats=3, max_grad_norm=0.5, num_steps=16, gamma=0.99, use_gae=True, gae_lambda=0.95, advance_scene_rollout_period=None, save_interval=10000, metric_accumulate_interval=1, lr_scheduler_builder=Builder( LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)} # type:ignore ), )
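# --- Illustrative aside (not part of the original file) ---------------------
# The Dict action space above factorizes the four MiniGrid actions into a
# "higher" (master) choice and a "lower" (sub-policy) choice. In
# `ConditionedMiniGridTask._step` the pair is flattened back into an index
# over `_ACTION_NAMES` via `lower + 2 * higher`:
#
#   higher  lower  index  action
#     0       0      0    left
#     0       1      1    right
#     1       0      2    forward
#     1       1      3    pickup
#
# `SequentialDistr(cond1, cond2)` samples `higher` first and feeds it, through
# `embed_higher`, into the lower policy, so the lower-action logits are
# conditioned on the sampled master action.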
allenact-main
projects/tutorials/minigrid_tutorial_conds.py
allenact-main
projects/manipulathor_baselines/__init__.py
allenact-main
projects/manipulathor_baselines/armpointnav_baselines/__init__.py
import platform from abc import ABC from math import ceil from typing import Dict, Any, List, Optional, Sequence import gym import numpy as np import torch from allenact.base_abstractions.experiment_config import MachineParams from allenact.base_abstractions.preprocessor import SensorPreprocessorGraph from allenact.base_abstractions.sensor import SensorSuite, ExpertActionSensor from allenact.base_abstractions.task import TaskSampler from allenact.utils.experiment_utils import evenly_distribute_count_into_bins from allenact_plugins.manipulathor_plugin.manipulathor_constants import ENV_ARGS from allenact_plugins.manipulathor_plugin.manipulathor_task_samplers import ( SimpleArmPointNavGeneralSampler, ) from allenact_plugins.manipulathor_plugin.manipulathor_viz import ( ImageVisualizer, TestMetricLogger, ) from projects.manipulathor_baselines.armpointnav_baselines.experiments.armpointnav_base import ( ArmPointNavBaseConfig, ) class ArmPointNavThorBaseConfig(ArmPointNavBaseConfig, ABC): """The base config for all iTHOR PointNav experiments.""" TASK_SAMPLER = SimpleArmPointNavGeneralSampler VISUALIZE = False if platform.system() == "Darwin": VISUALIZE = True NUM_PROCESSES: Optional[int] = None TRAIN_GPU_IDS = list(range(torch.cuda.device_count())) SAMPLER_GPU_IDS = TRAIN_GPU_IDS VALID_GPU_IDS = [torch.cuda.device_count() - 1] TEST_GPU_IDS = [torch.cuda.device_count() - 1] TRAIN_DATASET_DIR: Optional[str] = None VAL_DATASET_DIR: Optional[str] = None CAP_TRAINING = None TRAIN_SCENES: Optional[List[str]] = None VAL_SCENES: Optional[List[str]] = None TEST_SCENES: Optional[List[str]] = None OBJECT_TYPES: Optional[Sequence[str]] = None VALID_SAMPLES_IN_SCENE = 1 TEST_SAMPLES_IN_SCENE = 1 NUMBER_OF_TEST_PROCESS = 10 def __init__(self): super().__init__() assert ( self.CAMERA_WIDTH == 224 and self.CAMERA_HEIGHT == 224 and self.VISIBILITY_DISTANCE == 1 and self.STEP_SIZE == 0.25 ) self.ENV_ARGS = ENV_ARGS def machine_params(self, mode="train", **kwargs): sampler_devices: Sequence[int] = [] if mode == "train": workers_per_device = 1 gpu_ids = ( [] if not torch.cuda.is_available() else self.TRAIN_GPU_IDS * workers_per_device ) nprocesses = ( 1 if not torch.cuda.is_available() else evenly_distribute_count_into_bins(self.NUM_PROCESSES, len(gpu_ids)) ) sampler_devices = self.SAMPLER_GPU_IDS elif mode == "valid": nprocesses = 1 gpu_ids = [] if not torch.cuda.is_available() else self.VALID_GPU_IDS elif mode == "test": nprocesses = self.NUMBER_OF_TEST_PROCESS if torch.cuda.is_available() else 1 gpu_ids = [] if not torch.cuda.is_available() else self.TEST_GPU_IDS else: raise NotImplementedError("mode must be 'train', 'valid', or 'test'.") sensors = [*self.SENSORS] if mode != "train": sensors = [s for s in sensors if not isinstance(s, ExpertActionSensor)] sensor_preprocessor_graph = ( SensorPreprocessorGraph( source_observation_spaces=SensorSuite(sensors).observation_spaces, preprocessors=self.preprocessors(), ) if mode == "train" or ( (isinstance(nprocesses, int) and nprocesses > 0) or (isinstance(nprocesses, Sequence) and sum(nprocesses) > 0) ) else None ) return MachineParams( nprocesses=nprocesses, devices=gpu_ids, sampler_devices=sampler_devices if mode == "train" else gpu_ids, # ignored with > 1 gpu_ids sensor_preprocessor_graph=sensor_preprocessor_graph, ) @classmethod def make_sampler_fn(cls, **kwargs) -> TaskSampler: from datetime import datetime now = datetime.now() exp_name_w_time = cls.__name__ + "_" + now.strftime("%m_%d_%Y_%H_%M_%S_%f") if cls.VISUALIZE: visualizers = [ 
ImageVisualizer(exp_name=exp_name_w_time), TestMetricLogger(exp_name=exp_name_w_time), ] kwargs["visualizers"] = visualizers kwargs["objects"] = cls.OBJECT_TYPES kwargs["exp_name"] = exp_name_w_time return cls.TASK_SAMPLER(**kwargs) @staticmethod def _partition_inds(n: int, num_parts: int): return np.round(np.linspace(0, n, num_parts + 1, endpoint=True)).astype( np.int32 ) def _get_sampler_args_for_scene_split( self, scenes: List[str], process_ind: int, total_processes: int, seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: if total_processes > len(scenes): # oversample some scenes -> bias if total_processes % len(scenes) != 0: print( "Warning: oversampling some of the scenes to feed all processes." " You can avoid this by setting a number of workers divisible by the number of scenes" ) scenes = scenes * int(ceil(total_processes / len(scenes))) scenes = scenes[: total_processes * (len(scenes) // total_processes)] else: if len(scenes) % total_processes != 0: print( "Warning: oversampling some of the scenes to feed all processes." " You can avoid this by setting a number of workers divisor of the number of scenes" ) inds = self._partition_inds(len(scenes), total_processes) return { "scenes": scenes[inds[process_ind] : inds[process_ind + 1]], "env_args": self.ENV_ARGS, "max_steps": self.MAX_STEPS, "sensors": self.SENSORS, "action_space": gym.spaces.Discrete( len(self.TASK_SAMPLER._TASK_TYPE.class_action_names()) ), "seed": seeds[process_ind] if seeds is not None else None, "deterministic_cudnn": deterministic_cudnn, "rewards_config": self.REWARD_CONFIG, } def train_task_sampler_args( self, process_ind: int, total_processes: int, devices: Optional[List[int]] = None, seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: res = self._get_sampler_args_for_scene_split( self.TRAIN_SCENES, process_ind, total_processes, seeds=seeds, deterministic_cudnn=deterministic_cudnn, ) res["scene_period"] = "manual" res["sampler_mode"] = "train" res["cap_training"] = self.CAP_TRAINING res["env_args"] = {} res["env_args"].update(self.ENV_ARGS) res["env_args"]["x_display"] = ( ("0.%d" % devices[process_ind % len(devices)]) if len(devices) > 0 else None ) return res def valid_task_sampler_args( self, process_ind: int, total_processes: int, devices: Optional[List[int]], seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: res = self._get_sampler_args_for_scene_split( self.VALID_SCENES, process_ind, total_processes, seeds=seeds, deterministic_cudnn=deterministic_cudnn, ) res["scene_period"] = self.VALID_SAMPLES_IN_SCENE res["sampler_mode"] = "val" res["cap_training"] = self.CAP_TRAINING res["max_tasks"] = self.VALID_SAMPLES_IN_SCENE * len(res["scenes"]) res["env_args"] = {} res["env_args"].update(self.ENV_ARGS) res["env_args"]["x_display"] = ( ("0.%d" % devices[process_ind % len(devices)]) if len(devices) > 0 else None ) return res def test_task_sampler_args( self, process_ind: int, total_processes: int, devices: Optional[List[int]], seeds: Optional[List[int]] = None, deterministic_cudnn: bool = False, ) -> Dict[str, Any]: res = self._get_sampler_args_for_scene_split( self.TEST_SCENES, process_ind, total_processes, seeds=seeds, deterministic_cudnn=deterministic_cudnn, ) res["scene_period"] = self.TEST_SAMPLES_IN_SCENE res["sampler_mode"] = "test" res["env_args"] = {} res["cap_training"] = self.CAP_TRAINING res["env_args"].update(self.ENV_ARGS) res["env_args"]["x_display"] = ( ("0.%d" % 
devices[process_ind % len(devices)]) if len(devices) > 0 else None ) return res
allenact-main
projects/manipulathor_baselines/armpointnav_baselines/experiments/armpointnav_thor_base.py
from abc import ABC
from typing import Optional, Sequence, Union

from allenact.base_abstractions.experiment_config import ExperimentConfig
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.base_abstractions.sensor import Sensor
from allenact.utils.experiment_utils import Builder


class ArmPointNavBaseConfig(ExperimentConfig, ABC):
    """The base ArmPointNav configuration file."""

    ADVANCE_SCENE_ROLLOUT_PERIOD: Optional[int] = None
    SENSORS: Optional[Sequence[Sensor]] = None

    STEP_SIZE = 0.25
    ROTATION_DEGREES = 45.0
    VISIBILITY_DISTANCE = 1.0
    STOCHASTIC = False

    CAMERA_WIDTH = 224
    CAMERA_HEIGHT = 224
    SCREEN_SIZE = 224
    MAX_STEPS = 200

    def __init__(self):
        self.REWARD_CONFIG = {
            "step_penalty": -0.01,
            "goal_success_reward": 10.0,
            "pickup_success_reward": 5.0,
            "failed_stop_reward": 0.0,
            "shaping_weight": 1.0,  # we are not using this
            "failed_action_penalty": -0.03,
        }

    @classmethod
    def preprocessors(cls) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
        return tuple()
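# --- Illustrative aside (not part of the original file) ---------------------
# A minimal sketch of how a reward config like REWARD_CONFIG above might be
# combined into a per-step scalar reward. The exact combination is an
# assumption made for illustration; the authoritative logic lives in the
# ManipulaTHOR plugin's task definitions.
def _sketch_step_reward(
    cfg: dict, action_success: bool, picked_up_now: bool, reached_goal_now: bool
) -> float:
    reward = cfg["step_penalty"]  # small negative reward on every step
    if not action_success:
        reward += cfg["failed_action_penalty"]  # extra penalty for failed actions
    if picked_up_now:
        reward += cfg["pickup_success_reward"]  # one-time bonus for a successful pickup
    if reached_goal_now:
        reward += cfg["goal_success_reward"]  # bonus for bringing the object to the goal
    return reward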
allenact-main
projects/manipulathor_baselines/armpointnav_baselines/experiments/armpointnav_base.py
import torch.optim as optim from allenact.algorithms.onpolicy_sync.losses import PPO from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig from allenact.utils.experiment_utils import ( Builder, PipelineStage, TrainingPipeline, LinearDecay, ) from torch.optim.lr_scheduler import LambdaLR from projects.manipulathor_baselines.armpointnav_baselines.experiments.armpointnav_base import ( ArmPointNavBaseConfig, ) class ArmPointNavMixInPPOConfig(ArmPointNavBaseConfig): def training_pipeline(self, **kwargs): ppo_steps = int(300000000) lr = 3e-4 num_mini_batch = 1 update_repeats = 4 num_steps = self.MAX_STEPS save_interval = 500000 # from 50k log_interval = 1000 gamma = 0.99 use_gae = True gae_lambda = 0.95 max_grad_norm = 0.5 return TrainingPipeline( save_interval=save_interval, metric_accumulate_interval=log_interval, optimizer_builder=Builder(optim.Adam, dict(lr=lr)), num_mini_batch=num_mini_batch, update_repeats=update_repeats, max_grad_norm=max_grad_norm, num_steps=num_steps, named_losses={"ppo_loss": PPO(**PPOConfig)}, gamma=gamma, use_gae=use_gae, gae_lambda=gae_lambda, advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD, pipeline_stages=[ PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps) ], lr_scheduler_builder=Builder( LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)} ), )
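# --- Illustrative aside (not part of the original file) ---------------------
# `LinearDecay(steps=ppo_steps)` combined with `LambdaLR` scales the base
# learning rate by a factor decaying linearly from 1 to 0 over training
# (assuming LinearDecay's default startp=1.0, endp=0.0). A minimal sketch of
# the resulting schedule:
def _sketch_lr(step: int, base_lr: float = 3e-4, total_steps: int = 300000000) -> float:
    return base_lr * max(0.0, 1.0 - step / total_steps)

# e.g. _sketch_lr(0) == 3e-4, _sketch_lr(150_000_000) == 1.5e-4,
# and _sketch_lr(300_000_000) == 0.0.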
allenact-main
projects/manipulathor_baselines/armpointnav_baselines/experiments/armpointnav_mixin_ddppo.py
allenact-main
projects/manipulathor_baselines/armpointnav_baselines/experiments/__init__.py
from typing import Sequence, Union import gym import torch.nn as nn from allenact.base_abstractions.preprocessor import Preprocessor from allenact.base_abstractions.task import TaskSampler from allenact.utils.experiment_utils import Builder from projects.manipulathor_baselines.armpointnav_baselines.experiments.armpointnav_base import ( ArmPointNavBaseConfig, ) from projects.manipulathor_baselines.armpointnav_baselines.models.arm_pointnav_models import ( ArmPointNavBaselineActorCritic, ) class ArmPointNavMixInSimpleGRUConfig(ArmPointNavBaseConfig): TASK_SAMPLER: TaskSampler @classmethod def preprocessors(cls) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]: preprocessors = [] return preprocessors @classmethod def create_model(cls, **kwargs) -> nn.Module: return ArmPointNavBaselineActorCritic( action_space=gym.spaces.Discrete( len(cls.TASK_SAMPLER._TASK_TYPE.class_action_names()) ), observation_space=kwargs["sensor_preprocessor_graph"].observation_spaces, hidden_size=512, )
allenact-main
projects/manipulathor_baselines/armpointnav_baselines/experiments/armpointnav_mixin_simplegru.py
from allenact_plugins.manipulathor_plugin.manipulathor_constants import ENV_ARGS
from allenact_plugins.manipulathor_plugin.manipulathor_sensors import (
    NoVisionSensorThor,
    RelativeAgentArmToObjectSensor,
    RelativeObjectToGoalSensor,
    PickedUpObjSensor,
)
from allenact_plugins.manipulathor_plugin.manipulathor_task_samplers import (
    ArmPointNavTaskSampler,
)
from projects.manipulathor_baselines.armpointnav_baselines.experiments.armpointnav_mixin_ddppo import (
    ArmPointNavMixInPPOConfig,
)
from projects.manipulathor_baselines.armpointnav_baselines.experiments.armpointnav_mixin_simplegru import (
    ArmPointNavMixInSimpleGRUConfig,
)
from projects.manipulathor_baselines.armpointnav_baselines.experiments.ithor.armpointnav_ithor_base import (
    ArmPointNaviThorBaseConfig,
)


class ArmPointNavNoVision(
    ArmPointNaviThorBaseConfig,
    ArmPointNavMixInPPOConfig,
    ArmPointNavMixInSimpleGRUConfig,
):
    """An ArmPointNav experiment configuration in iTHOR with no visual
    input."""

    SENSORS = [
        NoVisionSensorThor(
            height=ArmPointNaviThorBaseConfig.SCREEN_SIZE,
            width=ArmPointNaviThorBaseConfig.SCREEN_SIZE,
            use_resnet_normalization=False,
            uuid="rgb_lowres",
        ),
        RelativeAgentArmToObjectSensor(),
        RelativeObjectToGoalSensor(),
        PickedUpObjSensor(),
    ]

    MAX_STEPS = 200

    TASK_SAMPLER = ArmPointNavTaskSampler

    def __init__(self):
        super().__init__()

        assert (
            self.CAMERA_WIDTH == 224
            and self.CAMERA_HEIGHT == 224
            and self.VISIBILITY_DISTANCE == 1
            and self.STEP_SIZE == 0.25
        )
        self.ENV_ARGS = {**ENV_ARGS, "renderDepthImage": False}

    @classmethod
    def tag(cls):
        return cls.__name__
allenact-main
projects/manipulathor_baselines/armpointnav_baselines/experiments/ithor/armpointnav_no_vision.py
from abc import ABC

from allenact_plugins.manipulathor_plugin.armpointnav_constants import (
    TRAIN_OBJECTS,
    TEST_OBJECTS,
)
from projects.manipulathor_baselines.armpointnav_baselines.experiments.armpointnav_thor_base import (
    ArmPointNavThorBaseConfig,
)


class ArmPointNaviThorBaseConfig(ArmPointNavThorBaseConfig, ABC):
    """The base config for all iTHOR ArmPointNav experiments."""

    NUM_PROCESSES = 40

    TOTAL_NUMBER_SCENES = 30

    TRAIN_SCENES = [
        "FloorPlan{}_physics".format(str(i))
        for i in range(1, TOTAL_NUMBER_SCENES + 1)
        if (i % 3 == 1 or i % 3 == 0) and i != 28
    ]  # scene 28 is excluded as it is a particularly bad scene

    TEST_SCENES = [
        "FloorPlan{}_physics".format(str(i))
        for i in range(1, TOTAL_NUMBER_SCENES + 1)
        if i % 3 == 2 and i % 6 == 2
    ]

    VALID_SCENES = [
        "FloorPlan{}_physics".format(str(i))
        for i in range(1, TOTAL_NUMBER_SCENES + 1)
        if i % 3 == 2 and i % 6 == 5
    ]

    ALL_SCENES = TRAIN_SCENES + TEST_SCENES + VALID_SCENES

    assert (
        len(ALL_SCENES) == TOTAL_NUMBER_SCENES - 1
        and len(set(ALL_SCENES)) == TOTAL_NUMBER_SCENES - 1
    )
    OBJECT_TYPES = tuple(sorted(TRAIN_OBJECTS))

    UNSEEN_OBJECT_TYPES = tuple(sorted(TEST_OBJECTS))
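# --- Illustrative aside (not part of the original file) ---------------------
# For TOTAL_NUMBER_SCENES = 30, the comprehensions above produce disjoint
# splits:
#   TRAIN_SCENES: i with i % 3 in {0, 1}, excluding i == 28  -> 19 scenes
#   TEST_SCENES:  i with i % 6 == 2  -> 5 scenes (2, 8, 14, 20, 26)
#   VALID_SCENES: i with i % 6 == 5  -> 5 scenes (5, 11, 17, 23, 29)
# hence the assert that exactly TOTAL_NUMBER_SCENES - 1 == 29 distinct scenes
# are covered.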
allenact-main
projects/manipulathor_baselines/armpointnav_baselines/experiments/ithor/armpointnav_ithor_base.py
from allenact_plugins.manipulathor_plugin.manipulathor_constants import ENV_ARGS
from allenact_plugins.manipulathor_plugin.manipulathor_sensors import (
    DepthSensorThor,
    RelativeAgentArmToObjectSensor,
    RelativeObjectToGoalSensor,
    PickedUpObjSensor,
)
from allenact_plugins.manipulathor_plugin.manipulathor_task_samplers import (
    ArmPointNavTaskSampler,
)
from projects.manipulathor_baselines.armpointnav_baselines.experiments.armpointnav_mixin_ddppo import (
    ArmPointNavMixInPPOConfig,
)
from projects.manipulathor_baselines.armpointnav_baselines.experiments.armpointnav_mixin_simplegru import (
    ArmPointNavMixInSimpleGRUConfig,
)
from projects.manipulathor_baselines.armpointnav_baselines.experiments.ithor.armpointnav_ithor_base import (
    ArmPointNaviThorBaseConfig,
)


class ArmPointNavDepth(
    ArmPointNaviThorBaseConfig,
    ArmPointNavMixInPPOConfig,
    ArmPointNavMixInSimpleGRUConfig,
):
    """An ArmPointNav experiment configuration in iTHOR with Depth input."""

    SENSORS = [
        DepthSensorThor(
            height=ArmPointNaviThorBaseConfig.SCREEN_SIZE,
            width=ArmPointNaviThorBaseConfig.SCREEN_SIZE,
            use_normalization=True,
            uuid="depth_lowres",
        ),
        RelativeAgentArmToObjectSensor(),
        RelativeObjectToGoalSensor(),
        PickedUpObjSensor(),
    ]

    MAX_STEPS = 200

    TASK_SAMPLER = ArmPointNavTaskSampler

    def __init__(self):
        super().__init__()

        assert (
            self.CAMERA_WIDTH == 224
            and self.CAMERA_HEIGHT == 224
            and self.VISIBILITY_DISTANCE == 1
            and self.STEP_SIZE == 0.25
        )
        self.ENV_ARGS = {**ENV_ARGS, "renderDepthImage": True}

    @classmethod
    def tag(cls):
        return cls.__name__
allenact-main
projects/manipulathor_baselines/armpointnav_baselines/experiments/ithor/armpointnav_depth.py
allenact-main
projects/manipulathor_baselines/armpointnav_baselines/experiments/ithor/__init__.py
from allenact_plugins.ithor_plugin.ithor_sensors import RGBSensorThor
from allenact_plugins.manipulathor_plugin.manipulathor_constants import ENV_ARGS
from allenact_plugins.manipulathor_plugin.manipulathor_sensors import (
    RelativeAgentArmToObjectSensor,
    RelativeObjectToGoalSensor,
    PickedUpObjSensor,
)
from allenact_plugins.manipulathor_plugin.manipulathor_task_samplers import (
    ArmPointNavTaskSampler,
)
from projects.manipulathor_baselines.armpointnav_baselines.experiments.armpointnav_mixin_ddppo import (
    ArmPointNavMixInPPOConfig,
)
from projects.manipulathor_baselines.armpointnav_baselines.experiments.armpointnav_mixin_simplegru import (
    ArmPointNavMixInSimpleGRUConfig,
)
from projects.manipulathor_baselines.armpointnav_baselines.experiments.ithor.armpointnav_ithor_base import (
    ArmPointNaviThorBaseConfig,
)


class ArmPointNavRGB(
    ArmPointNaviThorBaseConfig,
    ArmPointNavMixInPPOConfig,
    ArmPointNavMixInSimpleGRUConfig,
):
    """An ArmPointNav experiment configuration in iTHOR with RGB input."""

    SENSORS = [
        RGBSensorThor(
            height=ArmPointNaviThorBaseConfig.SCREEN_SIZE,
            width=ArmPointNaviThorBaseConfig.SCREEN_SIZE,
            use_resnet_normalization=True,
            uuid="rgb_lowres",
        ),
        RelativeAgentArmToObjectSensor(),
        RelativeObjectToGoalSensor(),
        PickedUpObjSensor(),
    ]

    MAX_STEPS = 200

    TASK_SAMPLER = ArmPointNavTaskSampler

    def __init__(self):
        super().__init__()

        assert (
            self.CAMERA_WIDTH == 224
            and self.CAMERA_HEIGHT == 224
            and self.VISIBILITY_DISTANCE == 1
            and self.STEP_SIZE == 0.25
        )
        self.ENV_ARGS = {**ENV_ARGS}

    @classmethod
    def tag(cls):
        return cls.__name__
allenact-main
projects/manipulathor_baselines/armpointnav_baselines/experiments/ithor/armpointnav_rgb.py
from allenact_plugins.ithor_plugin.ithor_sensors import RGBSensorThor
from allenact_plugins.manipulathor_plugin.manipulathor_constants import ENV_ARGS
from allenact_plugins.manipulathor_plugin.manipulathor_sensors import (
    DepthSensorThor,
    RelativeAgentArmToObjectSensor,
    RelativeObjectToGoalSensor,
    PickedUpObjSensor,
)
from allenact_plugins.manipulathor_plugin.manipulathor_task_samplers import (
    ArmPointNavTaskSampler,
)
from projects.manipulathor_baselines.armpointnav_baselines.experiments.armpointnav_mixin_ddppo import (
    ArmPointNavMixInPPOConfig,
)
from projects.manipulathor_baselines.armpointnav_baselines.experiments.armpointnav_mixin_simplegru import (
    ArmPointNavMixInSimpleGRUConfig,
)
from projects.manipulathor_baselines.armpointnav_baselines.experiments.ithor.armpointnav_ithor_base import (
    ArmPointNaviThorBaseConfig,
)


class ArmPointNavRGBDepth(
    ArmPointNaviThorBaseConfig,
    ArmPointNavMixInPPOConfig,
    ArmPointNavMixInSimpleGRUConfig,
):
    """An ArmPointNav experiment configuration in iTHOR with RGB and Depth
    input."""

    SENSORS = [
        DepthSensorThor(
            height=ArmPointNaviThorBaseConfig.SCREEN_SIZE,
            width=ArmPointNaviThorBaseConfig.SCREEN_SIZE,
            use_normalization=True,
            uuid="depth_lowres",
        ),
        RGBSensorThor(
            height=ArmPointNaviThorBaseConfig.SCREEN_SIZE,
            width=ArmPointNaviThorBaseConfig.SCREEN_SIZE,
            use_resnet_normalization=True,
            uuid="rgb_lowres",
        ),
        RelativeAgentArmToObjectSensor(),
        RelativeObjectToGoalSensor(),
        PickedUpObjSensor(),
    ]

    MAX_STEPS = 200

    TASK_SAMPLER = ArmPointNavTaskSampler

    def __init__(self):
        super().__init__()

        assert (
            self.CAMERA_WIDTH == 224
            and self.CAMERA_HEIGHT == 224
            and self.VISIBILITY_DISTANCE == 1
            and self.STEP_SIZE == 0.25
        )
        self.ENV_ARGS = {**ENV_ARGS, "renderDepthImage": True}

    @classmethod
    def tag(cls):
        return cls.__name__
allenact-main
projects/manipulathor_baselines/armpointnav_baselines/experiments/ithor/armpointnav_rgbdepth.py
import gym
import torch.nn as nn

from allenact_plugins.manipulathor_plugin.manipulathor_constants import ENV_ARGS
from allenact_plugins.manipulathor_plugin.manipulathor_task_samplers import (
    ArmPointNavTaskSampler,
)
from projects.manipulathor_baselines.armpointnav_baselines.experiments.ithor.armpointnav_depth import (
    ArmPointNavDepth,
)
from projects.manipulathor_baselines.armpointnav_baselines.models.disjoint_arm_pointnav_models import (
    DisjointArmPointNavBaselineActorCritic,
)


class ArmPointNavDisjointDepth(ArmPointNavDepth):
    """An ArmPointNav experiment configuration in iTHOR with Depth input and a
    disjoint (pick vs. drop) actor-critic model."""

    TASK_SAMPLER = ArmPointNavTaskSampler

    def __init__(self):
        super().__init__()

        assert (
            self.CAMERA_WIDTH == 224
            and self.CAMERA_HEIGHT == 224
            and self.VISIBILITY_DISTANCE == 1
            and self.STEP_SIZE == 0.25
        )
        self.ENV_ARGS = {**ENV_ARGS, "renderDepthImage": True}

    @classmethod
    def create_model(cls, **kwargs) -> nn.Module:
        return DisjointArmPointNavBaselineActorCritic(
            action_space=gym.spaces.Discrete(
                len(cls.TASK_SAMPLER._TASK_TYPE.class_action_names())
            ),
            observation_space=kwargs["sensor_preprocessor_graph"].observation_spaces,
            hidden_size=512,
        )

    @classmethod
    def tag(cls):
        return cls.__name__
allenact-main
projects/manipulathor_baselines/armpointnav_baselines/experiments/ithor/armpointnav_disjoint_depth.py
"""Baseline models for use in the Arm Point Navigation task. Arm Point Navigation is currently available as a Task in ManipulaTHOR. """ from typing import Tuple, Optional import gym import torch from gym.spaces.dict import Dict as SpaceDict from allenact.algorithms.onpolicy_sync.policy import ( ActorCriticModel, LinearCriticHead, LinearActorHead, DistributionType, Memory, ObservationType, ) from allenact.base_abstractions.distributions import CategoricalDistr from allenact.base_abstractions.misc import ActorCriticOutput from allenact.embodiedai.models.basic_models import SimpleCNN, RNNStateEncoder from projects.manipulathor_baselines.armpointnav_baselines.models.manipulathor_net_utils import ( input_embedding_net, ) class ArmPointNavBaselineActorCritic(ActorCriticModel[CategoricalDistr]): """Baseline recurrent actor critic model for armpointnav task. # Attributes action_space : The space of actions available to the agent. Currently only discrete actions are allowed (so this space will always be of type `gym.spaces.Discrete`). observation_space : The observation space expected by the agent. This observation space should include (optionally) 'rgb' images and 'depth' images. hidden_size : The hidden size of the GRU RNN. object_type_embedding_dim: The dimensionality of the embedding corresponding to the goal object type. """ def __init__( self, action_space: gym.spaces.Discrete, observation_space: SpaceDict, hidden_size=512, obj_state_embedding_size=512, trainable_masked_hidden_state: bool = False, num_rnn_layers=1, rnn_type="GRU", ): """Initializer. See class documentation for parameter definitions. """ super().__init__(action_space=action_space, observation_space=observation_space) self._hidden_size = hidden_size self.object_type_embedding_size = obj_state_embedding_size sensor_names = self.observation_space.spaces.keys() self.visual_encoder = SimpleCNN( self.observation_space, self._hidden_size, rgb_uuid="rgb_lowres" if "rgb_lowres" in sensor_names else None, depth_uuid="depth_lowres" if "depth_lowres" in sensor_names else None, ) if "rgb_lowres" in sensor_names and "depth_lowres" in sensor_names: input_visual_feature_num = 2 elif "rgb_lowres" in sensor_names: input_visual_feature_num = 1 elif "depth_lowres" in sensor_names: input_visual_feature_num = 1 else: raise NotImplementedError self.state_encoder = RNNStateEncoder( self._hidden_size * input_visual_feature_num + obj_state_embedding_size, self._hidden_size, trainable_masked_hidden_state=trainable_masked_hidden_state, num_layers=num_rnn_layers, rnn_type=rnn_type, ) self.actor = LinearActorHead(self._hidden_size, action_space.n) self.critic = LinearCriticHead(self._hidden_size) relative_dist_embedding_size = torch.Tensor([3, 100, obj_state_embedding_size]) self.relative_dist_embedding = input_embedding_net( relative_dist_embedding_size.long().tolist(), dropout=0 ) self.train() @property def recurrent_hidden_state_size(self) -> int: """The recurrent hidden state size of the model.""" return self._hidden_size @property def num_recurrent_layers(self) -> int: """Number of recurrent hidden layers.""" return self.state_encoder.num_recurrent_layers def _recurrent_memory_specification(self): return dict( rnn=( ( ("layer", self.num_recurrent_layers), ("sampler", None), ("hidden", self.recurrent_hidden_state_size), ), torch.float32, ) ) def get_relative_distance_embedding( self, state_tensor: torch.Tensor ) -> torch.FloatTensor: return self.relative_dist_embedding(state_tensor) def forward( # type:ignore self, observations: ObservationType, 
memory: Memory, prev_actions: torch.Tensor, masks: torch.FloatTensor, ) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]: """Processes input batched observations to produce new actor and critic values. Processes input batched observations (along with prior hidden states, previous actions, and masks denoting which recurrent hidden states should be masked) and returns an `ActorCriticOutput` object containing the model's policy (distribution over actions) and evaluation of the current state (value). # Parameters observations : Batched input observations. memory : `Memory` containing the hidden states from initial timepoints. prev_actions : Tensor of previous actions taken. masks : Masks applied to hidden states. See `RNNStateEncoder`. # Returns Tuple of the `ActorCriticOutput` and recurrent hidden state. """ arm2obj_dist = self.get_relative_distance_embedding( observations["relative_agent_arm_to_obj"] ) obj2goal_dist = self.get_relative_distance_embedding( observations["relative_obj_to_goal"] ) perception_embed = self.visual_encoder(observations) pickup_bool = observations["pickedup_object"] after_pickup = pickup_bool == 1 distances = arm2obj_dist distances[after_pickup] = obj2goal_dist[after_pickup] x = [distances, perception_embed] x_cat = torch.cat(x, dim=-1) x_out, rnn_hidden_states = self.state_encoder( x_cat, memory.tensor("rnn"), masks ) actor_out = self.actor(x_out) critic_out = self.critic(x_out) actor_critic_output = ActorCriticOutput( distributions=actor_out, values=critic_out, extras={} ) updated_memory = memory.set_tensor("rnn", rnn_hidden_states) return ( actor_critic_output, updated_memory, )
allenact-main
projects/manipulathor_baselines/armpointnav_baselines/models/arm_pointnav_models.py
import torch import torch.nn as nn class LinearActorHeadNoCategory(nn.Module): def __init__(self, num_inputs: int, num_outputs: int): super().__init__() self.linear = nn.Linear(num_inputs, num_outputs) nn.init.orthogonal_(self.linear.weight, gain=0.01) nn.init.constant_(self.linear.bias, 0) def forward(self, x: torch.FloatTensor): # type: ignore x = self.linear(x) # type:ignore assert len(x.shape) == 3 return x
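# --- Illustrative aside (not part of the original file) ---------------------
# Unlike allenact's `LinearActorHead`, this head returns raw logits of shape
# [steps, samplers, num_actions] rather than a `CategoricalDistr`. Returning
# raw logits lets a caller gate or mix the outputs of several heads before
# building a single distribution, as `DisjointArmPointNavBaselineActorCritic`
# (below) does:
#
#   actor_out = actor_out_pick
#   actor_out[after_pickup] = actor_out_drop[after_pickup]
#   actor_out = CategoricalDistr(logits=actor_out)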
allenact-main
projects/manipulathor_baselines/armpointnav_baselines/models/base_models.py
allenact-main
projects/manipulathor_baselines/armpointnav_baselines/models/__init__.py
"""Baseline models for use in the Arm Point Navigation task. Arm Point Navigation is currently available as a Task in ManipulaTHOR. """ from typing import Tuple, Optional import gym import torch from gym.spaces.dict import Dict as SpaceDict from allenact.algorithms.onpolicy_sync.policy import ( ActorCriticModel, LinearCriticHead, DistributionType, Memory, ObservationType, ) from allenact.base_abstractions.distributions import CategoricalDistr from allenact.base_abstractions.misc import ActorCriticOutput from allenact.embodiedai.models.basic_models import SimpleCNN, RNNStateEncoder from projects.manipulathor_baselines.armpointnav_baselines.models.base_models import ( LinearActorHeadNoCategory, ) from projects.manipulathor_baselines.armpointnav_baselines.models.manipulathor_net_utils import ( input_embedding_net, ) class DisjointArmPointNavBaselineActorCritic(ActorCriticModel[CategoricalDistr]): """Disjoint Baseline recurrent actor critic model for armpointnav. # Attributes action_space : The space of actions available to the agent. Currently only discrete actions are allowed (so this space will always be of type `gym.spaces.Discrete`). observation_space : The observation space expected by the agent. This observation space should include (optionally) 'rgb' images and 'depth' images and is required to have a component corresponding to the goal `goal_sensor_uuid`. goal_sensor_uuid : The uuid of the sensor of the goal object. See `GoalObjectTypeThorSensor` as an example of such a sensor. hidden_size : The hidden size of the GRU RNN. object_type_embedding_dim: The dimensionality of the embedding corresponding to the goal object type. """ def __init__( self, action_space: gym.spaces.Discrete, observation_space: SpaceDict, hidden_size=512, obj_state_embedding_size=512, trainable_masked_hidden_state: bool = False, num_rnn_layers=1, rnn_type="GRU", ): """Initializer. See class documentation for parameter definitions. 
""" super().__init__(action_space=action_space, observation_space=observation_space) self._hidden_size = hidden_size self.object_type_embedding_size = obj_state_embedding_size self.visual_encoder_pick = SimpleCNN( self.observation_space, self._hidden_size, rgb_uuid=None, depth_uuid="depth_lowres", ) self.visual_encoder_drop = SimpleCNN( self.observation_space, self._hidden_size, rgb_uuid=None, depth_uuid="depth_lowres", ) self.state_encoder = RNNStateEncoder( self._hidden_size + obj_state_embedding_size, self._hidden_size, trainable_masked_hidden_state=trainable_masked_hidden_state, num_layers=num_rnn_layers, rnn_type=rnn_type, ) self.actor_pick = LinearActorHeadNoCategory(self._hidden_size, action_space.n) self.critic_pick = LinearCriticHead(self._hidden_size) self.actor_drop = LinearActorHeadNoCategory(self._hidden_size, action_space.n) self.critic_drop = LinearCriticHead(self._hidden_size) # self.object_state_embedding = nn.Embedding(num_embeddings=6, embedding_dim=obj_state_embedding_size) relative_dist_embedding_size = torch.Tensor([3, 100, obj_state_embedding_size]) self.relative_dist_embedding_pick = input_embedding_net( relative_dist_embedding_size.long().tolist(), dropout=0 ) self.relative_dist_embedding_drop = input_embedding_net( relative_dist_embedding_size.long().tolist(), dropout=0 ) self.train() @property def recurrent_hidden_state_size(self) -> int: """The recurrent hidden state size of the model.""" return self._hidden_size @property def num_recurrent_layers(self) -> int: """Number of recurrent hidden layers.""" return self.state_encoder.num_recurrent_layers def _recurrent_memory_specification(self): return dict( rnn=( ( ("layer", self.num_recurrent_layers), ("sampler", None), ("hidden", self.recurrent_hidden_state_size), ), torch.float32, ) ) def forward( # type:ignore self, observations: ObservationType, memory: Memory, prev_actions: torch.Tensor, masks: torch.FloatTensor, ) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]: """Processes input batched observations to produce new actor and critic values. Processes input batched observations (along with prior hidden states, previous actions, and masks denoting which recurrent hidden states should be masked) and returns an `ActorCriticOutput` object containing the model's policy (distribution over actions) and evaluation of the current state (value). # Parameters observations : Batched input observations. memory : `Memory` containing the hidden states from initial timepoints. prev_actions : Tensor of previous actions taken. masks : Masks applied to hidden states. See `RNNStateEncoder`. # Returns Tuple of the `ActorCriticOutput` and recurrent hidden state. 
""" arm2obj_dist = self.relative_dist_embedding_pick( observations["relative_agent_arm_to_obj"] ) obj2goal_dist = self.relative_dist_embedding_drop( observations["relative_obj_to_goal"] ) perception_embed_pick = self.visual_encoder_pick(observations) perception_embed_drop = self.visual_encoder_drop(observations) pickup_bool = observations["pickedup_object"] after_pickup = pickup_bool == 1 distances = arm2obj_dist distances[after_pickup] = obj2goal_dist[after_pickup] perception_embed = perception_embed_pick perception_embed[after_pickup] = perception_embed_drop[after_pickup] x = [distances, perception_embed] x_cat = torch.cat(x, dim=-1) # type: ignore x_out, rnn_hidden_states = self.state_encoder( x_cat, memory.tensor("rnn"), masks ) actor_out_pick = self.actor_pick(x_out) critic_out_pick = self.critic_pick(x_out) actor_out_drop = self.actor_drop(x_out) critic_out_drop = self.critic_drop(x_out) actor_out = actor_out_pick actor_out[after_pickup] = actor_out_drop[after_pickup] critic_out = critic_out_pick critic_out[after_pickup] = critic_out_drop[after_pickup] actor_out = CategoricalDistr(logits=actor_out) actor_critic_output = ActorCriticOutput( distributions=actor_out, values=critic_out, extras={} ) updated_memory = memory.set_tensor("rnn", rnn_hidden_states) return ( actor_critic_output, updated_memory, )
allenact-main
projects/manipulathor_baselines/armpointnav_baselines/models/disjoint_arm_pointnav_models.py
import pdb import torch.nn as nn import torch.nn.functional as F def upshuffle( in_planes, out_planes, upscale_factor, kernel_size=3, stride=1, padding=1 ): return nn.Sequential( nn.Conv2d( in_planes, out_planes * upscale_factor ** 2, kernel_size=kernel_size, stride=stride, padding=padding, ), nn.PixelShuffle(upscale_factor), nn.LeakyReLU(), ) def upshufflenorelu( in_planes, out_planes, upscale_factor, kernel_size=3, stride=1, padding=1 ): return nn.Sequential( nn.Conv2d( in_planes, out_planes * upscale_factor ** 2, kernel_size=kernel_size, stride=stride, padding=padding, ), nn.PixelShuffle(upscale_factor), ) def combine_block_w_bn(in_planes, out_planes): return nn.Sequential( nn.Conv2d(in_planes, out_planes, 1, 1), nn.BatchNorm2d(out_planes), nn.LeakyReLU(), ) def conv2d_block(in_planes, out_planes, kernel_size, stride=1, padding=1): return nn.Sequential( nn.Conv2d(in_planes, out_planes, kernel_size, stride=stride, padding=padding), nn.BatchNorm2d(out_planes), nn.LeakyReLU(), nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(out_planes), ) def combine_block_w_do(in_planes, out_planes, dropout=0.0): return nn.Sequential( nn.Conv2d(in_planes, out_planes, 1, 1), nn.LeakyReLU(), nn.Dropout(dropout), ) def combine_block_no_do(in_planes, out_planes): return nn.Sequential(nn.Conv2d(in_planes, out_planes, 1, 1), nn.LeakyReLU(),) def linear_block(in_features, out_features, dropout=0.0): return nn.Sequential( nn.Linear(in_features, out_features), nn.LeakyReLU(), nn.Dropout(dropout), ) def linear_block_norelu(in_features, out_features): return nn.Sequential(nn.Linear(in_features, out_features),) def input_embedding_net(list_of_feature_sizes, dropout=0.0): modules = [] for i in range(len(list_of_feature_sizes) - 1): input_size, output_size = list_of_feature_sizes[i : i + 2] if i + 2 == len(list_of_feature_sizes): modules.append(linear_block_norelu(input_size, output_size)) else: modules.append(linear_block(input_size, output_size, dropout=dropout)) return nn.Sequential(*modules) def _upsample_add(x, y): _, _, H, W = y.size() return F.upsample(x, size=(H, W), mode="bilinear") + y def replace_all_relu_w_leakyrelu(model): pdb.set_trace() print("Not sure if using this is a good idea") modules = model._modules for m in modules.keys(): module = modules[m] if isinstance(module, nn.ReLU): model._modules[m] = nn.LeakyReLU() elif isinstance(module, nn.Module): model._modules[m] = replace_all_relu_w_leakyrelu(module) return model def replace_all_leakyrelu_w_relu(model): modules = model._modules for m in modules.keys(): module = modules[m] if isinstance(module, nn.LeakyReLU): model._modules[m] = nn.ReLU() elif isinstance(module, nn.Module): model._modules[m] = replace_all_leakyrelu_w_relu(module) return model def replace_all_bn_w_groupnorm(model): pdb.set_trace() print("Not sure if using this is a good idea") modules = model._modules for m in modules.keys(): module = modules[m] if isinstance(module, nn.BatchNorm2d) or isinstance(module, nn.BatchNorm1d): feature_number = module.num_features model._modules[m] = nn.GroupNorm(32, feature_number) elif isinstance(module, nn.BatchNorm3d): raise Exception("Not implemented") elif isinstance(module, nn.Module): model._modules[m] = replace_all_bn_w_groupnorm(module) return model def flat_temporal(tensor, batch_size, sequence_length): tensor_shape = [s for s in tensor.shape] assert tensor_shape[0] == batch_size and tensor_shape[1] == sequence_length result_shape = [batch_size * sequence_length] + tensor_shape[2:] return 
tensor.contiguous().view(result_shape) def unflat_temporal(tensor, batch_size, sequence_length): tensor_shape = [s for s in tensor.shape] assert tensor_shape[0] == batch_size * sequence_length result_shape = [batch_size, sequence_length] + tensor_shape[1:] return tensor.contiguous().view(result_shape)
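# --- Illustrative aside (not part of the original file) ---------------------
# `input_embedding_net` builds an MLP from a list of layer sizes, inserting
# LeakyReLU (+ dropout) between layers and applying no activation after the
# last one. For example, the call used by the ArmPointNav models,
#
#   input_embedding_net([3, 100, 512], dropout=0)
#
# is equivalent to:
#
#   nn.Sequential(
#       nn.Sequential(nn.Linear(3, 100), nn.LeakyReLU(), nn.Dropout(0.0)),
#       nn.Sequential(nn.Linear(100, 512)),
#   )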
allenact-main
projects/manipulathor_baselines/armpointnav_baselines/models/manipulathor_net_utils.py
import json import os import re import shutil import sys from pathlib import Path from urllib.request import urlopen from allenact.utils.misc_utils import all_equal DATASET_DIR = os.path.abspath(os.path.dirname(Path(__file__))) def get_habitat_download_info(allow_create: bool = False): """Get a dictionary giving a specification of where habitat data lives online. # Parameters allow_create: Whether or not we should try to regenerate the json file that represents the above dictionary. This is potentially unsafe so please only set this to `True` if you're sure it will download what you want. """ json_save_path = os.path.join(DATASET_DIR, ".habitat_datasets_download_info.json") if allow_create and not os.path.exists(json_save_path): url = "https://raw.githubusercontent.com/facebookresearch/habitat-lab/master/README.md" output = urlopen(url).read().decode("utf-8") lines = [l.strip() for l in output.split("\n")] task_table_started = False table_lines = [] for l in lines: if l.count("|") > 3 and l[0] == l[-1] == "|": if task_table_started: table_lines.append(l) elif "Task" in l and "Link" in l: task_table_started = True table_lines.append(l) elif task_table_started: break url_pat = re.compile("\[.*\]\((.*)\)") def get_url(in_str: str): match = re.match(pattern=url_pat, string=in_str) if match: return match.group(1) else: return in_str header = None rows = [] for i, l in enumerate(table_lines): l = l.strip("|") entries = [get_url(e.strip().replace("`", "")) for e in l.split("|")] if i == 0: header = [e.lower().replace(" ", "_") for e in entries] elif not all_equal(entries): rows.append(entries) link_ind = header.index("link") extract_ind = header.index("extract_path") config_ind = header.index("config_to_use") assert link_ind >= 0 data_info = {} for row in rows: id = row[link_ind].split("/")[-1].replace(".zip", "").replace("_", "-") data_info[id] = { "link": row[link_ind], "rel_path": row[extract_ind], "config_url": row[config_ind], } with open(json_save_path, "w") as f: json.dump(data_info, f) with open(json_save_path, "r") as f: return json.load(f) if __name__ == "__main__": habitat_dir = os.path.join(DATASET_DIR, "habitat") os.makedirs(habitat_dir, exist_ok=True) os.chdir(habitat_dir) download_info = get_habitat_download_info(allow_create=False) if len(sys.argv) != 2 or sys.argv[1] not in download_info: print( "Incorrect input, expects a single input where this input is one of " f" {['test-scenes', *sorted(download_info.keys())]}." ) quit(1) task_key = sys.argv[1] task_dl_info = download_info[task_key] output_archive_name = "__TO_OVERWRITE__.zip" deletable_dir_name = "__TO_DELETE__" cmd = f"wget {task_dl_info['link']} -O {output_archive_name}" if os.system(cmd): print(f"ERROR: `{cmd}` failed.") quit(1) cmd = f"unzip {output_archive_name} -d {deletable_dir_name}" if os.system(cmd): print(f"ERROR: `{cmd}` failed.") quit(1) download_to_path = task_dl_info["rel_path"].replace("data/", "") if download_to_path[-1] == "/": download_to_path = download_to_path[:-1] os.makedirs(download_to_path, exist_ok=True) cmd = f"rsync -avz {deletable_dir_name}/ {download_to_path}/" if os.system(cmd): print(f"ERROR: `{cmd}` failed.") quit(1) os.remove(output_archive_name) shutil.rmtree(deletable_dir_name)
allenact-main
datasets/.habitat_downloader_helper.py
allenact-main
tests/__init__.py
allenact-main
tests/mapping/__init__.py
import os import platform import random import sys import urllib import urllib.request import warnings from collections import defaultdict # noinspection PyUnresolvedReferences from tempfile import mkdtemp from typing import Dict, List, Tuple, cast # noinspection PyUnresolvedReferences import ai2thor # noinspection PyUnresolvedReferences import ai2thor.wsgi_server import compress_pickle import numpy as np import torch from allenact.algorithms.onpolicy_sync.storage import RolloutBlockStorage from allenact.base_abstractions.misc import Memory, ActorCriticOutput from allenact.embodiedai.mapping.mapping_utils.map_builders import SemanticMapBuilder from allenact.utils.experiment_utils import set_seed from allenact.utils.system import get_logger from allenact.utils.tensor_utils import batch_observations from allenact_plugins.ithor_plugin.ithor_sensors import ( RelativePositionChangeTHORSensor, ReachableBoundsTHORSensor, BinnedPointCloudMapTHORSensor, SemanticMapTHORSensor, ) from allenact_plugins.ithor_plugin.ithor_util import get_open_x_displays from allenact_plugins.robothor_plugin.robothor_sensors import DepthSensorThor from constants import ABS_PATH_OF_TOP_LEVEL_DIR class TestAI2THORMapSensors(object): def setup_path_for_use_with_rearrangement_project(self) -> bool: if platform.system() != "Darwin" and len(get_open_x_displays()) == 0: wrn_msg = "Cannot run tests as there seem to be no open displays!" warnings.warn(wrn_msg) get_logger().warning(wrn_msg) return False os.chdir(ABS_PATH_OF_TOP_LEVEL_DIR) sys.path.append( os.path.join(ABS_PATH_OF_TOP_LEVEL_DIR, "projects/ithor_rearrangement") ) try: import rearrange except ImportError: wrn_msg = ( "Could not import `rearrange`. Is it possible you have" " not initialized the submodules (i.e. by running" " `git submodule init; git submodule update;`)?" 
) warnings.warn(wrn_msg) get_logger().warning(wrn_msg) return False return True def test_binned_and_semantic_mapping(self, tmpdir): try: if not self.setup_path_for_use_with_rearrangement_project(): return from baseline_configs.rearrange_base import RearrangeBaseExperimentConfig from baseline_configs.walkthrough.walkthrough_rgb_base import ( WalkthroughBaseExperimentConfig, ) from rearrange.constants import ( FOV, PICKUPABLE_OBJECTS, OPENABLE_OBJECTS, ) from datagen.datagen_utils import get_scenes ORDERED_OBJECT_TYPES = list(sorted(PICKUPABLE_OBJECTS + OPENABLE_OBJECTS)) map_range_sensor = ReachableBoundsTHORSensor(margin=1.0) map_info = dict( map_range_sensor=map_range_sensor, vision_range_in_cm=40 * 5, map_size_in_cm=1050, resolution_in_cm=5, ) map_sensors = [ RelativePositionChangeTHORSensor(), map_range_sensor, DepthSensorThor( height=224, width=224, use_normalization=False, uuid="depth", ), BinnedPointCloudMapTHORSensor(fov=FOV, ego_only=False, **map_info,), SemanticMapTHORSensor( fov=FOV, ego_only=False, ordered_object_types=ORDERED_OBJECT_TYPES, **map_info, ), ] all_sensors = [*WalkthroughBaseExperimentConfig.SENSORS, *map_sensors] open_x_displays = [] try: open_x_displays = get_open_x_displays() except (AssertionError, IOError): pass walkthrough_task_sampler = WalkthroughBaseExperimentConfig.make_sampler_fn( stage="train", sensors=all_sensors, scene_to_allowed_rearrange_inds={s: [0] for s in get_scenes("train")}, force_cache_reset=True, allowed_scenes=None, seed=1, x_display=open_x_displays[0] if len(open_x_displays) != 0 else None, thor_controller_kwargs={ **RearrangeBaseExperimentConfig.THOR_CONTROLLER_KWARGS, # "server_class": ai2thor.wsgi_server.WsgiServer, # Only for debugging }, ) targets_path = os.path.join(tmpdir, "rearrange_mapping_examples.pkl.gz") urllib.request.urlretrieve( "https://ai2-prior-allenact-public-test.s3-us-west-2.amazonaws.com/ai2thor_mapping/rearrange_mapping_examples.pkl.gz", targets_path, ) goal_obs_dict = compress_pickle.load(targets_path) def compare_recursive(obs, goal_obs, key_list: List): if isinstance(obs, Dict): for k in goal_obs: compare_recursive( obs=obs[k], goal_obs=goal_obs[k], key_list=key_list + [k] ) elif isinstance(obs, (List, Tuple)): for i in range(len(goal_obs)): compare_recursive( obs=obs[i], goal_obs=goal_obs[i], key_list=key_list + [i] ) else: # Should be a numpy array at this point assert isinstance(obs, np.ndarray) and isinstance( goal_obs, np.ndarray ), f"After {key_list}, not numpy arrays, obs={obs}, goal_obs={goal_obs}" obs = 1.0 * obs goal_obs = 1.0 * goal_obs goal_where_nan = np.isnan(goal_obs) obs_where_nan = np.isnan(obs) where_nan_not_equal = (goal_where_nan != obs_where_nan).sum() assert ( where_nan_not_equal.sum() <= 1 and where_nan_not_equal.mean() < 1e3 ) where_nan = np.logical_or(goal_where_nan, obs_where_nan) obs[where_nan] = 0.0 goal_obs[where_nan] = 0.0 def special_mean(v): while len(v.shape) > 2: v = v.sum(-1) return v.mean() numer = np.abs(obs - goal_obs) denom = np.abs( np.stack((obs, goal_obs, np.ones_like(obs)), axis=0) ).max(0) difference = special_mean(numer / denom) assert ( difference < 1.2e-3 ), f"Difference of {np.abs(obs - goal_obs).mean()} at {key_list}." 
                    if (
                        len(obs.shape) >= 2
                        and obs.shape[0] == obs.shape[1]
                        and obs.shape[0] > 1
                    ):
                        # Sanity check that rotating the observations makes them not-equal
                        rot_obs = np.rot90(obs)
                        numer = np.abs(rot_obs - goal_obs)
                        denom = np.abs(
                            np.stack((rot_obs, goal_obs, np.ones_like(obs)), axis=0)
                        ).max(0)
                        rot_difference = special_mean(numer / denom)
                        assert (
                            difference < rot_difference or (obs == rot_obs).all()
                        ), f"Too small a difference ({(numer / denom).mean()})."

            observations_dict = defaultdict(lambda: [])
            for i in range(5):  # Why 5, why not 5?
                set_seed(i)
                task = walkthrough_task_sampler.next_task()
                obs_list = observations_dict[i]
                obs_list.append(task.get_observations())
                k = 0

                compare_recursive(
                    obs=obs_list[0], goal_obs=goal_obs_dict[i][0], key_list=[i, k]
                )
                while not task.is_done():
                    obs = task.step(
                        action=task.action_names().index(
                            random.choice(
                                3
                                * [
                                    "move_ahead",
                                    "rotate_right",
                                    "rotate_left",
                                    "look_up",
                                    "look_down",
                                ]
                                + ["done"]
                            )
                        )
                    ).observation
                    k += 1
                    obs_list.append(obs)
                    compare_recursive(
                        obs=obs,
                        goal_obs=goal_obs_dict[i][task.num_steps_taken()],
                        key_list=[i, k],
                    )

                    # Free space metric map in RGB using pointclouds coming from depth images. This
                    # is built iteratively after every step.
                    # R - is used to encode points at a height < 0.02m (i.e. the floor)
                    # G - is used to encode points at a height between 0.02m and 2m, i.e. objects the agent would run into
                    # B - is used to encode points higher than 2m, i.e. ceiling

                    # Uncomment if you wish to visualize the observations:
                    # import matplotlib.pyplot as plt
                    # plt.imshow(
                    #     np.flip(255 * (obs["binned_pc_map"]["map"] > 0), 0)
                    # )  # np.flip because we expect "up" to be -row
                    # plt.title("Free space map")
                    # plt.show()
                    # plt.close()

                    # See also `obs["binned_pc_map"]["egocentric_update"]` to see
                    # the metric map from the point of view of the agent before it is
                    # rotated into the world-space coordinates and merged with past observations.

                    # Semantic map in RGB which is iteratively revealed using depth maps to figure out what
                    # parts of the scene the agent has seen so far.
                    # This map has shape 210x210x72 with the 72 channels corresponding to the 72
                    # object types in `ORDERED_OBJECT_TYPES`
                    semantic_map = obs["semantic_map"]["map"]

                    # We can't display all 72 channels in an RGB image so instead we randomly assign
                    # each object a color and then just allow them to overlap each other
                    colored_semantic_map = SemanticMapBuilder.randomly_color_semantic_map(
                        semantic_map
                    )

                    # Here's the full semantic map with nothing masked out because the agent
                    # hasn't seen it yet
                    colored_semantic_map_no_fog = SemanticMapBuilder.randomly_color_semantic_map(
                        map_sensors[-1].semantic_map_builder.ground_truth_semantic_map
                    )

                    # Uncomment if you wish to visualize the observations:
                    # import matplotlib.pyplot as plt
                    # plt.imshow(
                    #     np.flip(  # np.flip because we expect "up" to be -row
                    #         np.concatenate(
                    #             (
                    #                 colored_semantic_map,
                    #                 255 + 0 * colored_semantic_map[:, :10, :],
                    #                 colored_semantic_map_no_fog,
                    #             ),
                    #             axis=1,
                    #         ),
                    #         0,
                    #     )
                    # )
                    # plt.title("Semantic map with and without exploration fog")
                    # plt.show()
                    # plt.close()

                    # See also
                    # * `obs["semantic_map"]["egocentric_update"]`
                    # * `obs["semantic_map"]["explored_mask"]`
                    # * `obs["semantic_map"]["egocentric_mask"]`

            # To save observations for comparison against future runs, uncomment the below.
# os.makedirs("tmp_out", exist_ok=True) # compress_pickle.dump( # {**observations_dict}, "tmp_out/rearrange_mapping_examples.pkl.gz" # ) finally: try: walkthrough_task_sampler.close() except NameError: pass def test_pretrained_rearrange_walkthrough_mapping_agent(self, tmpdir): try: if not self.setup_path_for_use_with_rearrangement_project(): return from baseline_configs.rearrange_base import RearrangeBaseExperimentConfig from baseline_configs.walkthrough.walkthrough_rgb_mapping_ppo import ( WalkthroughRGBMappingPPOExperimentConfig, ) from rearrange.constants import ( FOV, PICKUPABLE_OBJECTS, OPENABLE_OBJECTS, ) from datagen.datagen_utils import get_scenes open_x_displays = [] try: open_x_displays = get_open_x_displays() except (AssertionError, IOError): pass walkthrough_task_sampler = WalkthroughRGBMappingPPOExperimentConfig.make_sampler_fn( stage="train", scene_to_allowed_rearrange_inds={s: [0] for s in get_scenes("train")}, force_cache_reset=True, allowed_scenes=None, seed=2, x_display=open_x_displays[0] if len(open_x_displays) != 0 else None, ) named_losses = ( WalkthroughRGBMappingPPOExperimentConfig.training_pipeline()._named_losses ) ckpt_path = os.path.join( tmpdir, "pretrained_walkthrough_mapping_agent_75mil.pt" ) if not os.path.exists(ckpt_path): urllib.request.urlretrieve( "https://prior-model-weights.s3.us-east-2.amazonaws.com/embodied-ai/rearrangement/walkthrough/pretrained_walkthrough_mapping_agent_75mil.pt", ckpt_path, ) state_dict = torch.load(ckpt_path, map_location="cpu",) walkthrough_model = WalkthroughRGBMappingPPOExperimentConfig.create_model() walkthrough_model.load_state_dict(state_dict["model_state_dict"]) memory = RolloutBlockStorage.create_memory( spec=walkthrough_model.recurrent_memory_specification, num_samplers=1 ).step_squeeze(0) masks = torch.FloatTensor([0]).view(1, 1, 1) binned_map_losses = [] semantic_map_losses = [] for i in range(5): masks = 0 * masks set_seed(i + 1) task = walkthrough_task_sampler.next_task() def add_step_dim(input): if isinstance(input, torch.Tensor): return input.unsqueeze(0) elif isinstance(input, Dict): return {k: add_step_dim(v) for k, v in input.items()} else: raise NotImplementedError batch = add_step_dim(batch_observations([task.get_observations()])) while not task.is_done(): # noinspection PyTypeChecker ac_out, memory = cast( Tuple[ActorCriticOutput, Memory], walkthrough_model.forward( observations=batch, memory=memory, prev_actions=None, masks=masks, ), ) binned_map_losses.append( named_losses["binned_map_loss"] .loss( step_count=0, # Not used in this loss batch={"observations": batch}, actor_critic_output=ac_out, )[0] .item() ) assert ( binned_map_losses[-1] < 0.16 ), f"Binned map loss to large at ({i}, {task.num_steps_taken()})" semantic_map_losses.append( named_losses["semantic_map_loss"] .loss( step_count=0, # Not used in this loss batch={"observations": batch}, actor_critic_output=ac_out, )[0] .item() ) assert ( semantic_map_losses[-1] < 0.004 ), f"Semantic map loss to large at ({i}, {task.num_steps_taken()})" masks = masks.fill_(1.0) obs = task.step( action=ac_out.distributions.sample().item() ).observation batch = add_step_dim(batch_observations([obs])) if task.num_steps_taken() >= 10: break # To save observations for comparison against future runs, uncomment the below. 
# os.makedirs("tmp_out", exist_ok=True) # compress_pickle.dump( # {**observations_dict}, "tmp_out/rearrange_mapping_examples.pkl.gz" # ) finally: try: walkthrough_task_sampler.close() except NameError: pass if __name__ == "__main__": TestAI2THORMapSensors().test_binned_and_semantic_mapping(mkdtemp()) # type:ignore # TestAI2THORMapSensors().test_binned_and_semantic_mapping("tmp_out") # Used for local debugging # TestAI2THORMapSensors().test_pretrained_rearrange_walkthrough_mapping_agent( # mkdtemp() # "tmp_out" # ) # Used for local debugging
allenact-main
tests/mapping/test_ai2thor_mapping.py
from typing import Dict, Any

import torch.multiprocessing as mp
import torch.nn as nn

from allenact.base_abstractions.experiment_config import ExperimentConfig
from allenact.base_abstractions.task import TaskSampler
from allenact.utils.experiment_utils import TrainingPipeline


# noinspection PyAbstractClass,PyTypeChecker
class MyConfig(ExperimentConfig):
    MY_VAR: int = 3

    @classmethod
    def tag(cls) -> str:
        return ""

    @classmethod
    def training_pipeline(cls, **kwargs) -> TrainingPipeline:
        return None

    @classmethod
    def create_model(cls, **kwargs) -> nn.Module:
        return None

    @classmethod
    def make_sampler_fn(cls, **kwargs) -> TaskSampler:
        return None

    def my_var_is(self, val):
        assert self.MY_VAR == val


# noinspection PyAbstractClass
class MySpecConfig(MyConfig):
    MY_VAR = 6

    @classmethod
    def machine_params(cls, mode="train", **kwargs) -> Dict[str, Any]:
        return {}

    @classmethod
    def tag(cls) -> str:
        return "SpecTag"


scfg = MySpecConfig()


class TestFrozenAttribs(object):
    def test_frozen_inheritance(self):
        from abc import abstractmethod
        from allenact.base_abstractions.experiment_config import FrozenClassVariables

        class SomeBase(metaclass=FrozenClassVariables):
            yar = 3

            @abstractmethod
            def use(self):
                raise NotImplementedError()

        class SomeDerived(SomeBase):
            yar = 33

            def use(self):
                return self.yar

        failed = False
        try:
            SomeDerived.yar = 6  # Error
        except Exception as _:
            failed = True
        assert failed

        inst = SomeDerived()
        inst2 = SomeDerived()
        inst.yar = 12  # No error
        assert inst.use() == 12
        assert inst2.use() == 33

    @staticmethod
    def my_func(config, val):
        config.my_var_is(val)

    def test_frozen_experiment_config(self):
        val = 5

        failed = False
        try:
            MyConfig()
        except (RuntimeError, TypeError):
            failed = True
        assert failed

        scfg.MY_VAR = val
        scfg.my_var_is(val)

        failed = False
        try:
            MyConfig.MY_VAR = val
        except RuntimeError:
            failed = True
        assert failed

        failed = False
        try:
            MySpecConfig.MY_VAR = val
        except RuntimeError:
            failed = True
        assert failed

        for fork_method in ["forkserver", "fork"]:
            ctxt = mp.get_context(fork_method)
            p = ctxt.Process(target=self.my_func, kwargs=dict(config=scfg, val=val))
            p.start()
            p.join()


if __name__ == "__main__":
    TestFrozenAttribs().test_frozen_inheritance()  # type:ignore
    TestFrozenAttribs().test_frozen_experiment_config()  # type:ignore
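
# For illustration only: a minimal sketch of how a metaclass *could* freeze
# class variables in the way the tests above exercise. This is an assumption
# for exposition, not allenact's actual `FrozenClassVariables` implementation.
class _FrozenClassVariablesSketch(type):
    def __setattr__(cls, attr, value):
        # Attributes created in the class body bypass this hook (they are set
        # via the namespace passed to `type.__new__`), so only *reassignment*
        # of an existing class variable is rejected here.
        if attr in cls.__dict__:
            raise RuntimeError(
                f"Cannot reassign class variable {attr!r} of frozen class {cls.__name__}."
            )
        super().__setattr__(attr, value)


# Usage (mirrors `test_frozen_inheritance`):
#   class _Demo(metaclass=_FrozenClassVariablesSketch):
#       yar = 3
#   _Demo.yar = 6    # raises RuntimeError
#   _Demo().yar = 6  # fine: instance attributes shadow the class variable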
allenact-main
tests/multiprocessing/test_frozen_attribs.py
allenact-main
tests/multiprocessing/__init__.py
allenact-main
tests/utils/__init__.py
import torch

from allenact.utils.experiment_utils import set_seed
from allenact.utils.inference import InferenceAgent
from projects.babyai_baselines.experiments.go_to_obj.ppo import (
    PPOBabyAIGoToObjExperimentConfig,
)

expected_results = [
    {"ep_length": 64, "reward": 0.0, "success": 0.0},
    {"ep_length": 64, "reward": 0.0, "success": 0.0},
    {"ep_length": 64, "reward": 0.0, "success": 0.0},
    {"ep_length": 17, "reward": 0.7646153846153846, "success": 1.0},
    {"ep_length": 22, "reward": 0.6953846153846154, "success": 1.0},
    {"ep_length": 64, "reward": 0.0, "success": 0.0},
    {"ep_length": 64, "reward": 0.0, "success": 0.0},
    {"ep_length": 64, "reward": 0.0, "success": 0.0},
    {"ep_length": 64, "reward": 0.0, "success": 0.0},
    {"ep_length": 64, "reward": 0.0, "success": 0.0},
]


class TestInferenceAgent(object):
    def test_inference_agent_from_minigrid_config(self):
        set_seed(1)

        exp_config = PPOBabyAIGoToObjExperimentConfig()
        agent = InferenceAgent.from_experiment_config(
            exp_config=exp_config, device=torch.device("cpu"),
        )

        task_sampler = exp_config.make_sampler_fn(
            **exp_config.test_task_sampler_args(process_ind=0, total_processes=1)
        )

        for ind, expected_result in zip(range(10), expected_results):
            agent.reset()

            task = task_sampler.next_task()
            observations = task.get_observations()

            while not task.is_done():
                action = agent.act(observations=observations)
                observations = task.step(action).observation

            assert all(
                abs(v - expected_result[k]) < 1e-4
                for k, v in task.metrics().items()
                if k != "task_info"
            )


if __name__ == "__main__":
    TestInferenceAgent().test_inference_agent_from_minigrid_config()
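
# The pattern exercised above, distilled (a sketch; `exp_config` stands in for
# any ExperimentConfig subclass whose tasks you want to run inference on):
#
#   agent = InferenceAgent.from_experiment_config(
#       exp_config=exp_config, device=torch.device("cpu")
#   )
#   agent.reset()  # once per episode
#   while not task.is_done():
#       action = agent.act(observations=observations)
#       observations = task.step(action).observation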
allenact-main
tests/utils/test_inference_agent.py
import warnings
from collections import OrderedDict
from typing import Tuple

import numpy as np
import torch
from gym import spaces as gyms

from allenact.utils import spaces_utils as su


class TestSpaces(object):
    space = gyms.Dict(
        {
            "first": gyms.Tuple(
                [
                    gyms.Box(-10, 10, (3, 4)),
                    gyms.MultiDiscrete([2, 3, 4]),
                    gyms.Box(-1, 1, ()),
                ]
            ),
            "second": gyms.Tuple(
                [gyms.Dict({"third": gyms.Discrete(11)}), gyms.MultiBinary(8),]
            ),
        }
    )

    @staticmethod
    def same(a, b, bidx=None):
        if isinstance(a, OrderedDict):
            for key in a:
                if not TestSpaces.same(a[key], b[key], bidx):
                    return False
            return True
        elif isinstance(a, Tuple):
            for it in range(len(a)):
                if not TestSpaces.same(a[it], b[it], bidx):
                    return False
            return True
        else:
            # np.array_equal also works for torch tensors and scalars
            if bidx is None:
                return np.array_equal(a, b)
            else:
                return np.array_equal(a, b[bidx])

    def test_conversion(self):
        gsample = self.space.sample()

        asample = su.torch_point(self.space, gsample)

        back = su.numpy_point(self.space, asample)

        assert self.same(back, gsample)

    def test_flatten(self):
        # We flatten Discrete to 1 value
        assert su.flatdim(self.space) == 25
        # gym flattens Discrete to one-hot
        assert gyms.flatdim(self.space) == 35

        asample = su.torch_point(self.space, self.space.sample())
        flattened = su.flatten(self.space, asample)
        unflattened = su.unflatten(self.space, flattened)
        assert self.same(asample, unflattened)

        # suppress `UserWarning: WARN: Box bound precision lowered by casting to float32`
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")

            flattened_space = su.flatten_space(self.space)
            assert flattened_space.shape == (25,)
            # The maximum comes from Discrete(11)
            assert flattened_space.high.max() == 11.0
            assert flattened_space.low.min() == -10.0

            gym_flattened_space = gyms.flatten_space(self.space)
            assert gym_flattened_space.shape == (35,)
            # The maximum comes from Box(-10, 10, (3, 4))
            assert gym_flattened_space.high.max() == 10.0
            assert gym_flattened_space.low.min() == -10.0

    def test_batched(self):
        samples = [self.space.sample() for _ in range(10)]
        flattened = [
            su.flatten(self.space, su.torch_point(self.space, sample))
            for sample in samples
        ]
        stacked = torch.stack(flattened, dim=0)
        unflattened = su.unflatten(self.space, stacked)
        for bidx, refsample in enumerate(samples):
            # Compare each torch-ified sample to the corresponding unflattened from the stack
            assert self.same(su.torch_point(self.space, refsample), unflattened, bidx)

        assert self.same(su.flatten(self.space, unflattened), stacked)

    def test_tolist(self):
        space = gyms.MultiDiscrete([3, 3])
        actions = su.torch_point(space, space.sample())  # single sampler
        actions = actions.unsqueeze(0).unsqueeze(0)  # add [step, sampler]
        flat_actions = su.flatten(space, actions)
        al = su.action_list(space, flat_actions)
        assert len(al) == 1
        assert len(al[0]) == 2

        space = gyms.Tuple([gyms.MultiDiscrete([3, 3]), gyms.Discrete(2)])
        actions = su.torch_point(space, space.sample())  # single sampler
        actions = (
            actions[0].unsqueeze(0).unsqueeze(0),
            torch.tensor(actions[1]).unsqueeze(0).unsqueeze(0),
        )  # add [step, sampler]
        flat_actions = su.flatten(space, actions)
        al = su.action_list(space, flat_actions)
        assert len(al) == 1
        assert len(al[0][0]) == 2
        assert isinstance(al[0][1], int)

        space = gyms.Dict(
            {"tuple": gyms.MultiDiscrete([3, 3]), "scalar": gyms.Discrete(2)}
        )
        actions = su.torch_point(space, space.sample())  # single sampler
        actions = OrderedDict(
            [
                ("tuple", actions["tuple"].unsqueeze(0).unsqueeze(0)),
                ("scalar", torch.tensor(actions["scalar"]).unsqueeze(0).unsqueeze(0)),
            ]
        )
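        # Shapes after adding the leading [step, sampler] dims above: the
        # MultiDiscrete([3, 3]) component is a (1, 1, 2) tensor, while the
        # Discrete(2) scalar becomes a (1, 1) tensor.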
        flat_actions = su.flatten(space, actions)
        al = su.action_list(space, flat_actions)
        assert len(al) == 1
        assert len(al[0]["tuple"]) == 2
        assert isinstance(al[0]["scalar"], int)


if __name__ == "__main__":
    TestSpaces().test_conversion()  # type:ignore
    TestSpaces().test_flatten()  # type:ignore
    TestSpaces().test_batched()  # type:ignore
    TestSpaces().test_tolist()  # type:ignore
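
# A compact round-trip sketch of the utilities exercised above (exposition
# only; this particular space is hypothetical):
def _roundtrip_sketch():
    space = gyms.Dict({"pos": gyms.Box(-1, 1, (2,)), "act": gyms.Discrete(4)})
    assert su.flatdim(space) == 3  # the Box contributes 2 values, the Discrete just 1
    sample = su.torch_point(space, space.sample())  # numpy sample -> torch tensors
    flat = su.flatten(space, sample)  # a single 1-D tensor of length 3
    restored = su.unflatten(space, flat)  # back to the nested structure
    return restored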
allenact-main
tests/utils/test_spaces.py
import hashlib
import os

import imageio
import numpy as np
from torchvision.transforms import transforms

from allenact.utils.tensor_utils import ScaleBothSides
from constants import ABS_PATH_OF_TOP_LEVEL_DIR

to_pil = transforms.ToPILImage()  # Same as used by the vision sensors


class TestPillowRescaling(object):
    def _load_thor_img(self) -> np.ndarray:
        img_path = os.path.join(
            ABS_PATH_OF_TOP_LEVEL_DIR, "docs/img/iTHOR_framework.jpg"
        )
        img = imageio.imread(img_path)
        return img

    def _get_img_hash(self, img: np.ndarray) -> str:
        img_hash = hashlib.sha1(np.ascontiguousarray(img))
        return img_hash.hexdigest()

    def _random_rgb_image(self, width: int, height: int, seed: int) -> np.ndarray:
        s = np.random.get_state()
        np.random.seed(seed)
        img = np.random.randint(
            low=0, high=256, size=(width, height, 3), dtype=np.uint8
        )
        np.random.set_state(s)
        return img

    def _random_depthmap(
        self, width: int, height: int, max_depth: float, seed: int
    ) -> np.ndarray:
        s = np.random.get_state()
        np.random.seed(seed)
        img = max_depth * np.random.rand(width, height, 1)
        np.random.set_state(s)
        return np.float32(img)

    def test_scaler_rgb_thor(self):
        thor_img_arr = np.uint8(self._load_thor_img())

        assert (
            self._get_img_hash(thor_img_arr)
            == "80ff8a342b4f74966796eee91babde31409d0457"
        )

        img = to_pil(thor_img_arr)

        scaler = ScaleBothSides(width=75, height=75)
        scaled_img = np.array(scaler(img))
        assert (
            self._get_img_hash(scaled_img) == "2c47057aa188240cb21b2edc39e0f269c1085bac"
        )

        scaler = ScaleBothSides(width=500, height=600)
        scaled_img = np.array(scaler(img))
        assert (
            self._get_img_hash(scaled_img) == "faf0be2b9ec9bfd23a1b7b465c86ad961d03c259"
        )

    def test_scaler_rgb_random(self):
        arr = self._random_rgb_image(width=100, height=100, seed=1)

        assert self._get_img_hash(arr) == "d01bd8ba151ab790fde9a8cc29aa8a3c63147334"

        img = to_pil(arr)

        scaler = ScaleBothSides(width=60, height=60)
        scaled_img = np.array(scaler(img))
        assert (
            self._get_img_hash(scaled_img) == "22473537e50d5e39abeeec4f92dbfde51c754010"
        )

        scaler = ScaleBothSides(width=1000, height=800)
        scaled_img = np.array(scaler(img))
        assert (
            self._get_img_hash(scaled_img) == "5e5b955981e4ee3b5e22287536040d001a31fbd3"
        )

    def test_scaler_depth_thor(self):
        thor_depth_arr = 5 * np.float32(self._load_thor_img()).sum(-1)
        thor_depth_arr /= thor_depth_arr.max()

        assert (
            self._get_img_hash(thor_depth_arr)
            == "d3c1474400ba57ed78f52cf4ba6a4c2a1d90516c"
        )

        img = to_pil(thor_depth_arr)

        scaler = ScaleBothSides(width=75, height=75)
        scaled_img = np.array(scaler(img))
        assert (
            self._get_img_hash(scaled_img) == "6a879beb6bed49021e438c1e3af7a62c428a44d8"
        )

        scaler = ScaleBothSides(width=500, height=600)
        scaled_img = np.array(scaler(img))
        assert (
            self._get_img_hash(scaled_img) == "79f11fb741ae638afca40125e4c501f54b22cc01"
        )

    def test_scaler_depth_random(self):
        depth_arr = self._random_depthmap(width=96, height=103, max_depth=5.0, seed=1)

        assert (
            self._get_img_hash(depth_arr) == "cbd8ca127951ffafb6848536d9d731970a5397e9"
        )

        img = to_pil(depth_arr)

        scaler = ScaleBothSides(width=60, height=60)
        scaled_img = np.array(scaler(img))
        assert (
            self._get_img_hash(scaled_img) == "5bed173f2d783fb2badcde9b43904ef85a1a5820"
        )

        scaler = ScaleBothSides(width=1000, height=800)
        scaled_img = np.array(scaler(img))
        assert (
            self._get_img_hash(scaled_img) == "9dceb7f77d767888f24a84c00913c0cf4ccd9d49"
        )


if __name__ == "__main__":
    TestPillowRescaling().test_scaler_rgb_thor()
    TestPillowRescaling().test_scaler_rgb_random()
    TestPillowRescaling().test_scaler_depth_thor()
    TestPillowRescaling().test_scaler_depth_random()
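
# For intuition, `ScaleBothSides` behaves roughly like the sketch below (an
# assumption for exposition, not necessarily the exact allenact
# implementation): it resizes to the requested (width, height), ignoring the
# aspect ratio.
#
#   from PIL import Image
#
#   class _ScaleBothSidesSketch:
#       def __init__(self, width: int, height: int, interpolation=Image.BILINEAR):
#           self.width, self.height, self.interpolation = width, height, interpolation
#
#       def __call__(self, img: Image.Image) -> Image.Image:
#           return img.resize((self.width, self.height), self.interpolation)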
allenact-main
tests/vision/test_pillow_rescaling.py