python_code | repo_name | file_path
---|---|---
| allenact-main | allenact_plugins/lighthouse_plugin/data/__init__.py |
import os
from pathlib import Path
BABYAI_EXPERT_TRAJECTORIES_DIR = os.path.abspath(
os.path.join(os.path.dirname(Path(__file__)), "data", "demos")
)
| allenact-main | allenact_plugins/babyai_plugin/babyai_constants.py |
from allenact.utils.system import ImportChecker
with ImportChecker(
"\n\nPlease install babyai with:\n\n"
"pip install -e git+https://github.com/Lucaweihs/babyai.git@0b450eeb3a2dc7116c67900d51391986bdbb84cd#egg=babyai\n",
):
# noinspection PyUnresolvedReferences
import babyai
| allenact-main | allenact_plugins/babyai_plugin/__init__.py |
from typing import Dict, Optional, List, cast, Tuple, Any
import babyai.model
import babyai.rl
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from gym.spaces.dict import Dict as SpaceDict
from allenact.algorithms.onpolicy_sync.policy import (
ActorCriticModel,
ObservationType,
Memory,
DistributionType,
)
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import ActorCriticOutput
class BabyAIACModelWrapped(babyai.model.ACModel):
def __init__(
self,
obs_space: Dict[str, int],
action_space: gym.spaces.Discrete,
image_dim=128,
memory_dim=128,
instr_dim=128,
use_instr=False,
lang_model="gru",
use_memory=False,
arch="cnn1",
aux_info=None,
include_auxiliary_head: bool = False,
):
self.use_cnn2 = arch == "cnn2"
super().__init__(
obs_space=obs_space,
action_space=action_space,
image_dim=image_dim,
memory_dim=memory_dim,
instr_dim=instr_dim,
use_instr=use_instr,
lang_model=lang_model,
use_memory=use_memory,
arch="cnn1" if self.use_cnn2 else arch,
aux_info=aux_info,
)
self.semantic_embedding = None
if self.use_cnn2:
self.semantic_embedding = nn.Embedding(33, embedding_dim=8)
self.image_conv = nn.Sequential(
nn.Conv2d(in_channels=24, out_channels=16, kernel_size=(2, 2)),
*self.image_conv[1:] # type:ignore
)
self.image_conv[0].apply(babyai.model.initialize_parameters)
self.include_auxiliary_head = include_auxiliary_head
if self.use_memory and self.lang_model == "gru":
self.memory_rnn = nn.LSTM(self.image_dim, self.memory_dim)
if self.include_auxiliary_head:
self.aux = nn.Sequential(
nn.Linear(self.memory_dim, 64),
nn.Tanh(),
nn.Linear(64, action_space.n),
)
self.aux.apply(babyai.model.initialize_parameters)
self.train()
def forward_once(self, obs, memory, instr_embedding=None):
"""Copied (with minor modifications) from
`babyai.model.ACModel.forward(...)`."""
if self.use_instr and instr_embedding is None:
instr_embedding = self._get_instr_embedding(obs.instr)
if self.use_instr and self.lang_model == "attgru":
# outputs: B x L x D
# memory: B x M
mask = (obs.instr != 0).float()
# The mask tensor has the same length as obs.instr, and
# thus can be both shorter and longer than instr_embedding.
# It can be longer if instr_embedding is computed
# for a subbatch of obs.instr.
# It can be shorter if obs.instr is a subbatch of
# the batch that instr_embeddings was computed for.
# Here, we make sure that mask and instr_embeddings
# have equal length along dimension 1.
mask = mask[:, : instr_embedding.shape[1]]
instr_embedding = instr_embedding[:, : mask.shape[1]]
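            # Illustrative shapes (comment added): if mask is B x 12 while
            # instr_embedding is B x 10 x D, both are truncated above to length
            # 10 along dimension 1 before the attention below is computed.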
keys = self.memory2key(memory)
pre_softmax = (keys[:, None, :] * instr_embedding).sum(2) + 1000 * mask
attention = F.softmax(pre_softmax, dim=1)
instr_embedding = (instr_embedding * attention[:, :, None]).sum(1)
x = torch.transpose(torch.transpose(obs.image, 1, 3), 2, 3)
if self.arch.startswith("expert_filmcnn"):
x = self.image_conv(x)
            for controller in self.controllers:
                x = controller(x, instr_embedding)
x = F.relu(self.film_pool(x))
else:
x = self.image_conv(x.contiguous())
x = x.reshape(x.shape[0], -1)
if self.use_memory:
hidden = (
memory[:, : self.semi_memory_size],
memory[:, self.semi_memory_size :],
)
hidden = self.memory_rnn(x, hidden)
embedding = hidden[0]
memory = torch.cat(hidden, dim=1) # type: ignore
else:
embedding = x
        if self.use_instr and "filmcnn" not in self.arch:
embedding = torch.cat((embedding, instr_embedding), dim=1)
if hasattr(self, "aux_info") and self.aux_info:
extra_predictions = {
info: self.extra_heads[info](embedding) for info in self.extra_heads
}
else:
extra_predictions = dict()
return {
"embedding": embedding,
"memory": memory,
"extra_predictions": extra_predictions,
}
def forward_loop(
self,
observations: ObservationType,
recurrent_hidden_states: torch.FloatTensor,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
):
results = []
images = cast(torch.FloatTensor, observations["minigrid_ego_image"]).float()
instrs: Optional[torch.Tensor] = None
if "minigrid_mission" in observations:
instrs = cast(torch.Tensor, observations["minigrid_mission"])
_, nsamplers, _ = recurrent_hidden_states.shape
rollouts_len = images.shape[0] // nsamplers
obs = babyai.rl.DictList()
images = images.view(rollouts_len, nsamplers, *images.shape[1:])
masks = masks.view(rollouts_len, nsamplers, *masks.shape[1:]) # type:ignore
# needs_reset = (masks != 1.0).view(nrollouts, -1).any(-1)
if instrs is not None:
instrs = instrs.view(rollouts_len, nsamplers, instrs.shape[-1])
needs_instr_reset_mask = masks != 1.0
needs_instr_reset_mask[0] = 1
needs_instr_reset_mask = needs_instr_reset_mask.squeeze(-1)
instr_embeddings: Optional[torch.Tensor] = None
if self.use_instr:
instr_reset_multi_inds = list(
(int(a), int(b))
for a, b in zip(*np.where(needs_instr_reset_mask.cpu().numpy()))
)
time_ind_to_which_need_instr_reset: List[List] = [
[] for _ in range(rollouts_len)
]
reset_multi_ind_to_index = {
mi: i for i, mi in enumerate(instr_reset_multi_inds)
}
for a, b in instr_reset_multi_inds:
time_ind_to_which_need_instr_reset[a].append(b)
unique_instr_embeddings = self._get_instr_embedding(
instrs[needs_instr_reset_mask]
)
instr_embeddings_list = [unique_instr_embeddings[:nsamplers]]
current_instr_embeddings_list = list(instr_embeddings_list[-1])
for time_ind in range(1, rollouts_len):
if len(time_ind_to_which_need_instr_reset[time_ind]) == 0:
instr_embeddings_list.append(instr_embeddings_list[-1])
else:
for sampler_needing_reset_ind in time_ind_to_which_need_instr_reset[
time_ind
]:
current_instr_embeddings_list[
sampler_needing_reset_ind
] = unique_instr_embeddings[
reset_multi_ind_to_index[
(time_ind, sampler_needing_reset_ind)
]
]
instr_embeddings_list.append(
torch.stack(current_instr_embeddings_list, dim=0)
)
instr_embeddings = torch.stack(instr_embeddings_list, dim=0)
assert recurrent_hidden_states.shape[0] == 1
memory = recurrent_hidden_states[0]
# instr_embedding: Optional[torch.Tensor] = None
for i in range(rollouts_len):
obs.image = images[i]
if "minigrid_mission" in observations:
obs.instr = instrs[i]
# reset = needs_reset[i].item()
# if self.baby_ai_model.use_instr and (reset or i == 0):
# instr_embedding = self.baby_ai_model._get_instr_embedding(obs.instr)
results.append(
self.forward_once(
obs, memory=memory * masks[i], instr_embedding=instr_embeddings[i]
)
)
memory = results[-1]["memory"]
embedding = torch.cat([r["embedding"] for r in results], dim=0)
extra_predictions_list = [r["extra_predictions"] for r in results]
extra_predictions = {
key: torch.cat([ep[key] for ep in extra_predictions_list], dim=0)
for key in extra_predictions_list[0]
}
return (
ActorCriticOutput(
distributions=CategoricalDistr(logits=self.actor(embedding),),
values=self.critic(embedding),
extras=extra_predictions
if not self.include_auxiliary_head
else {
**extra_predictions,
"auxiliary_distributions": cast(
Any, CategoricalDistr(logits=self.aux(embedding))
),
},
),
torch.stack([r["memory"] for r in results], dim=0),
)
# noinspection PyMethodOverriding
def forward(
self,
observations: ObservationType,
recurrent_hidden_states: torch.FloatTensor,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
):
(
observations,
recurrent_hidden_states,
prev_actions,
masks,
num_steps,
num_samplers,
num_agents,
num_layers,
) = self.adapt_inputs(
observations, recurrent_hidden_states, prev_actions, masks
)
if self.lang_model != "gru":
ac_output, hidden_states = self.forward_loop(
observations=observations,
recurrent_hidden_states=recurrent_hidden_states,
prev_actions=prev_actions,
masks=masks, # type: ignore
)
return self.adapt_result(
ac_output,
hidden_states[-1:],
num_steps,
num_samplers,
num_agents,
num_layers,
observations,
)
assert recurrent_hidden_states.shape[0] == 1
images = cast(torch.FloatTensor, observations["minigrid_ego_image"])
if self.use_cnn2:
images_shape = images.shape
# noinspection PyArgumentList
images = images + torch.LongTensor([0, 11, 22]).view( # type:ignore
1, 1, 1, 3
).to(images.device)
images = self.semantic_embedding(images).view( # type:ignore
*images_shape[:3], 24
)
images = images.permute(0, 3, 1, 2).float() # type:ignore
_, nsamplers, _ = recurrent_hidden_states.shape
rollouts_len = images.shape[0] // nsamplers
masks = cast(
torch.FloatTensor, masks.view(rollouts_len, nsamplers, *masks.shape[1:])
)
instrs: Optional[torch.Tensor] = None
if "minigrid_mission" in observations and self.use_instr:
instrs = cast(torch.FloatTensor, observations["minigrid_mission"])
instrs = instrs.view(rollouts_len, nsamplers, instrs.shape[-1])
needs_instr_reset_mask = masks != 1.0
needs_instr_reset_mask[0] = 1
needs_instr_reset_mask = needs_instr_reset_mask.squeeze(-1)
blocking_inds: List[int] = np.where(
needs_instr_reset_mask.view(rollouts_len, -1).any(-1).cpu().numpy()
)[0].tolist()
blocking_inds.append(rollouts_len)
instr_embeddings: Optional[torch.Tensor] = None
if self.use_instr:
instr_reset_multi_inds = list(
(int(a), int(b))
for a, b in zip(*np.where(needs_instr_reset_mask.cpu().numpy()))
)
time_ind_to_which_need_instr_reset: List[List] = [
[] for _ in range(rollouts_len)
]
reset_multi_ind_to_index = {
mi: i for i, mi in enumerate(instr_reset_multi_inds)
}
for a, b in instr_reset_multi_inds:
time_ind_to_which_need_instr_reset[a].append(b)
unique_instr_embeddings = self._get_instr_embedding(
instrs[needs_instr_reset_mask]
)
instr_embeddings_list = [unique_instr_embeddings[:nsamplers]]
current_instr_embeddings_list = list(instr_embeddings_list[-1])
for time_ind in range(1, rollouts_len):
if len(time_ind_to_which_need_instr_reset[time_ind]) == 0:
instr_embeddings_list.append(instr_embeddings_list[-1])
else:
for sampler_needing_reset_ind in time_ind_to_which_need_instr_reset[
time_ind
]:
current_instr_embeddings_list[
sampler_needing_reset_ind
] = unique_instr_embeddings[
reset_multi_ind_to_index[
(time_ind, sampler_needing_reset_ind)
]
]
instr_embeddings_list.append(
torch.stack(current_instr_embeddings_list, dim=0)
)
instr_embeddings = torch.stack(instr_embeddings_list, dim=0)
# The following code can be used to compute the instr_embeddings in another way
# and thus verify that the above logic is (more likely to be) correct
# needs_instr_reset_mask = (masks != 1.0)
# needs_instr_reset_mask[0] *= 0
# needs_instr_reset_inds = needs_instr_reset_mask.view(nrollouts, -1).any(-1).cpu().numpy()
#
# # Get inds where a new task has started
# blocking_inds: List[int] = np.where(needs_instr_reset_inds)[0].tolist()
# blocking_inds.append(needs_instr_reset_inds.shape[0])
# if nrollouts != 1:
# pdb.set_trace()
# if blocking_inds[0] != 0:
# blocking_inds.insert(0, 0)
# if self.use_instr:
# instr_embeddings_list = []
# for ind0, ind1 in zip(blocking_inds[:-1], blocking_inds[1:]):
# instr_embeddings_list.append(
# self._get_instr_embedding(instrs[ind0])
# .unsqueeze(0)
# .repeat(ind1 - ind0, 1, 1)
# )
# tmp_instr_embeddings = torch.cat(instr_embeddings_list, dim=0)
# assert (instr_embeddings - tmp_instr_embeddings).abs().max().item() < 1e-6
# Embed images
# images = images.view(nrollouts, nsamplers, *images.shape[1:])
image_embeddings = self.image_conv(images)
if self.arch.startswith("expert_filmcnn"):
instr_embeddings_flatter = instr_embeddings.view(
-1, *instr_embeddings.shape[2:]
)
for controller in self.controllers:
image_embeddings = controller(
image_embeddings, instr_embeddings_flatter
)
image_embeddings = F.relu(self.film_pool(image_embeddings))
image_embeddings = image_embeddings.view(rollouts_len, nsamplers, -1)
if self.use_instr and self.lang_model == "attgru":
raise NotImplementedError("Currently attgru is not implemented.")
memory = None
if self.use_memory:
assert recurrent_hidden_states.shape[0] == 1
hidden = (
recurrent_hidden_states[:, :, : self.semi_memory_size],
recurrent_hidden_states[:, :, self.semi_memory_size :],
)
embeddings_list = []
for ind0, ind1 in zip(blocking_inds[:-1], blocking_inds[1:]):
hidden = (hidden[0] * masks[ind0], hidden[1] * masks[ind0])
rnn_out, hidden = self.memory_rnn(image_embeddings[ind0:ind1], hidden)
embeddings_list.append(rnn_out)
# embedding = hidden[0]
embedding = torch.cat(embeddings_list, dim=0)
memory = torch.cat(hidden, dim=-1)
else:
embedding = image_embeddings
        if self.use_instr and "filmcnn" not in self.arch:
embedding = torch.cat((embedding, instr_embeddings), dim=-1)
if hasattr(self, "aux_info") and self.aux_info:
extra_predictions = {
info: self.extra_heads[info](embedding) for info in self.extra_heads
}
else:
extra_predictions = dict()
embedding = embedding.view(rollouts_len * nsamplers, -1)
ac_output = ActorCriticOutput(
distributions=CategoricalDistr(logits=self.actor(embedding),),
values=self.critic(embedding),
extras=extra_predictions
if not self.include_auxiliary_head
else {
**extra_predictions,
"auxiliary_distributions": CategoricalDistr(logits=self.aux(embedding)),
},
)
hidden_states = memory
return self.adapt_result(
ac_output,
hidden_states,
num_steps,
num_samplers,
num_agents,
num_layers,
observations,
)
@staticmethod
def adapt_inputs( # type: ignore
observations: ObservationType,
recurrent_hidden_states: torch.FloatTensor,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
):
# INPUTS
# observations are of shape [num_steps, num_samplers, ...]
# recurrent_hidden_states are of shape [num_layers, num_samplers, (num_agents,) num_dims]
# prev_actions are of shape [num_steps, num_samplers, ...]
# masks are of shape [num_steps, num_samplers, 1]
# num_agents is assumed to be 1
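        # Illustrative example (comment added): with num_steps=4, num_samplers=3,
        # masks arrive with shape [4, 3, 1] and are flattened below to [12, 1];
        # the listed observation entries are flattened analogously.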
num_steps, num_samplers = masks.shape[:2]
num_layers = recurrent_hidden_states.shape[0]
num_agents = 1
# Flatten all observation batch dims
def recursively_adapt_observations(obs):
for entry in obs:
if isinstance(obs[entry], Dict):
recursively_adapt_observations(obs[entry])
else:
assert isinstance(obs[entry], torch.Tensor)
if entry in ["minigrid_ego_image", "minigrid_mission"]:
final_dims = obs[entry].shape[2:]
obs[entry] = obs[entry].view(
num_steps * num_samplers, *final_dims
)
# Old-style inputs need to be
# observations [num_steps * num_samplers, ...]
# recurrent_hidden_states [num_layers, num_samplers (* num_agents), num_dims]
# prev_actions [num_steps * num_samplers, -1]
# masks [num_steps * num_samplers, 1]
recursively_adapt_observations(observations)
recurrent_hidden_states = cast(
torch.FloatTensor,
recurrent_hidden_states.view(num_layers, num_samplers * num_agents, -1),
)
if prev_actions is not None:
prev_actions = prev_actions.view( # type:ignore
num_steps * num_samplers, -1
)
masks = masks.view(num_steps * num_samplers, 1) # type:ignore
return (
observations,
recurrent_hidden_states,
prev_actions,
masks,
num_steps,
num_samplers,
num_agents,
num_layers,
)
@staticmethod
def adapt_result(ac_output, hidden_states, num_steps, num_samplers, num_agents, num_layers, observations): # type: ignore
distributions = CategoricalDistr(
logits=ac_output.distributions.logits.view(num_steps, num_samplers, -1),
)
values = ac_output.values.view(num_steps, num_samplers, num_agents)
extras = ac_output.extras # ignore shape
# TODO confirm the shape of the auxiliary distribution is the same as the actor's
if "auxiliary_distributions" in extras:
extras["auxiliary_distributions"] = CategoricalDistr(
logits=extras["auxiliary_distributions"].logits.view(
num_steps, num_samplers, -1 # assume single-agent
),
)
hidden_states = hidden_states.view(num_layers, num_samplers * num_agents, -1)
# Unflatten all observation batch dims
def recursively_adapt_observations(obs):
for entry in obs:
if isinstance(obs[entry], Dict):
recursively_adapt_observations(obs[entry])
else:
assert isinstance(obs[entry], torch.Tensor)
if entry in ["minigrid_ego_image", "minigrid_mission"]:
final_dims = obs[entry].shape[
1:
] # assumes no agents dim in observations!
obs[entry] = obs[entry].view(
num_steps, num_samplers * num_agents, *final_dims
)
recursively_adapt_observations(observations)
return (
ActorCriticOutput(
distributions=distributions, values=values, extras=extras
),
hidden_states,
)
class BabyAIRecurrentACModel(ActorCriticModel[CategoricalDistr]):
def __init__(
self,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
image_dim=128,
memory_dim=128,
instr_dim=128,
use_instr=False,
lang_model="gru",
use_memory=False,
arch="cnn1",
aux_info=None,
include_auxiliary_head: bool = False,
):
super().__init__(action_space=action_space, observation_space=observation_space)
assert "minigrid_ego_image" in observation_space.spaces
assert not use_instr or "minigrid_mission" in observation_space.spaces
self.memory_dim = memory_dim
self.include_auxiliary_head = include_auxiliary_head
self.baby_ai_model = BabyAIACModelWrapped(
obs_space={"image": 7 * 7 * 3, "instr": 100,},
action_space=action_space,
image_dim=image_dim,
memory_dim=memory_dim,
instr_dim=instr_dim,
use_instr=use_instr,
lang_model=lang_model,
use_memory=use_memory,
arch=arch,
aux_info=aux_info,
include_auxiliary_head=self.include_auxiliary_head,
)
self.memory_key = "rnn"
@property
def recurrent_hidden_state_size(self) -> int:
return 2 * self.memory_dim
@property
def num_recurrent_layers(self):
return 1
def _recurrent_memory_specification(self):
return {
self.memory_key: (
(
("layer", self.num_recurrent_layers),
("sampler", None),
("hidden", self.recurrent_hidden_state_size),
),
torch.float32,
)
}
def forward( # type:ignore
self,
observations: ObservationType,
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
out, recurrent_hidden_states = self.baby_ai_model.forward(
observations=observations,
recurrent_hidden_states=cast(
torch.FloatTensor, memory.tensor(self.memory_key)
),
prev_actions=prev_actions,
masks=masks,
)
return out, memory.set_tensor(self.memory_key, recurrent_hidden_states)
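

if __name__ == "__main__":
    # Minimal smoke-test sketch (added for illustration; not part of the
    # upstream file). It assumes `babyai` is installed and simply builds the
    # AllenAct-facing model, then inspects its recurrent memory specification.
    model = BabyAIRecurrentACModel(
        action_space=gym.spaces.Discrete(7),
        observation_space=SpaceDict(
            {
                "minigrid_ego_image": gym.spaces.Box(
                    low=0, high=255, shape=(7, 7, 3), dtype=np.uint8
                ),
                "minigrid_mission": gym.spaces.Box(
                    low=0, high=99, shape=(10,), dtype=np.int64
                ),
            }
        ),
        use_instr=True,
        use_memory=True,
    )
    # With the default memory_dim=128 the printed spec should have hidden
    # dimension 2 * 128 = 256 (LSTM hidden and cell states, concatenated).
    print(model._recurrent_memory_specification())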
| allenact-main | allenact_plugins/babyai_plugin/babyai_models.py |
import random
import signal
from typing import Tuple, Any, List, Dict, Optional, Union, Callable
import babyai
import babyai.bot
import gym
import numpy as np
from gym.utils import seeding
from gym_minigrid.minigrid import MiniGridEnv
from allenact.base_abstractions.misc import RLStepResult
from allenact.base_abstractions.sensor import Sensor, SensorSuite
from allenact.base_abstractions.task import Task, TaskSampler
from allenact.utils.system import get_logger
class BabyAITask(Task[MiniGridEnv]):
def __init__(
self,
env: MiniGridEnv,
sensors: Union[SensorSuite, List[Sensor]],
task_info: Dict[str, Any],
expert_view_size: int = 7,
expert_can_see_through_walls: bool = False,
**kwargs,
):
super().__init__(
env=env,
sensors=sensors,
task_info=task_info,
max_steps=env.max_steps,
**kwargs,
)
self._was_successful: bool = False
self.bot: Optional[babyai.bot.Bot] = None
self._bot_died = False
self.expert_view_size = expert_view_size
self.expert_can_see_through_walls = expert_can_see_through_walls
self._last_action: Optional[int] = None
env.max_steps = env.max_steps + 1
@property
def action_space(self) -> gym.spaces.Discrete:
return self.env.action_space
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
return self.env.render(mode=mode)
def _step(self, action: int) -> RLStepResult:
assert isinstance(action, int)
minigrid_obs, reward, done, info = self.env.step(action=action)
self._last_action = action
self._was_successful = done and reward > 0
return RLStepResult(
observation=self.get_observations(minigrid_output_obs=minigrid_obs),
reward=reward,
done=self.is_done(),
info=info,
)
def get_observations(
self, *args, minigrid_output_obs: Optional[Dict[str, Any]] = None, **kwargs
) -> Any:
return self.sensor_suite.get_observations(
env=self.env, task=self, minigrid_output_obs=minigrid_output_obs
)
def reached_terminal_state(self) -> bool:
return self._was_successful
@classmethod
def class_action_names(cls, **kwargs) -> Tuple[str, ...]:
return tuple(
x
for x, _ in sorted(
[(str(a), a.value) for a in MiniGridEnv.Actions], key=lambda x: x[1]
)
)
def close(self) -> None:
pass
    def _expert_timeout_handler(self, signum, frame):
raise TimeoutError
def query_expert(self, **kwargs) -> Tuple[Any, bool]:
see_through_walls = self.env.see_through_walls
agent_view_size = self.env.agent_view_size
if self._bot_died:
return 0, False
try:
self.env.agent_view_size = self.expert_view_size
            self.env.see_through_walls = self.expert_can_see_through_walls
if self.bot is None:
self.bot = babyai.bot.Bot(self.env)
            signal.signal(signal.SIGALRM, self._expert_timeout_handler)
signal.alarm(kwargs.get("timeout", 4 if self.num_steps_taken() == 0 else 2))
return self.bot.replan(self._last_action), True
except TimeoutError as _:
self._bot_died = True
return 0, False
finally:
signal.alarm(0)
self.env.see_through_walls = see_through_walls
self.env.agent_view_size = agent_view_size
def metrics(self) -> Dict[str, Any]:
metrics = {
**super(BabyAITask, self).metrics(),
"success": 1.0 * (self.reached_terminal_state()),
}
return metrics
class BabyAITaskSampler(TaskSampler):
def __init__(
self,
env_builder: Union[str, Callable[..., MiniGridEnv]],
sensors: Union[SensorSuite, List[Sensor]],
max_tasks: Optional[int] = None,
num_unique_seeds: Optional[int] = None,
task_seeds_list: Optional[List[int]] = None,
deterministic_sampling: bool = False,
extra_task_kwargs: Optional[Dict] = None,
**kwargs,
):
super(BabyAITaskSampler, self).__init__()
self.sensors = (
SensorSuite(sensors) if not isinstance(sensors, SensorSuite) else sensors
)
self.max_tasks = max_tasks
self.num_unique_seeds = num_unique_seeds
self.deterministic_sampling = deterministic_sampling
self.extra_task_kwargs = (
extra_task_kwargs if extra_task_kwargs is not None else {}
)
self._last_env_seed: Optional[int] = None
self._last_task: Optional[BabyAITask] = None
assert (self.num_unique_seeds is None) or (
0 < self.num_unique_seeds
), "`num_unique_seeds` must be a positive integer."
self.num_unique_seeds = num_unique_seeds
self.task_seeds_list = task_seeds_list
if self.task_seeds_list is not None:
if self.num_unique_seeds is not None:
assert self.num_unique_seeds == len(
self.task_seeds_list
), "`num_unique_seeds` must equal the length of `task_seeds_list` if both specified."
self.num_unique_seeds = len(self.task_seeds_list)
elif self.num_unique_seeds is not None:
self.task_seeds_list = list(range(self.num_unique_seeds))
if (not deterministic_sampling) and self.max_tasks:
get_logger().warning(
"`deterministic_sampling` is `False` but you have specified `max_tasks < inf`,"
" this might be a mistake when running testing."
)
if isinstance(env_builder, str):
self.env = gym.make(env_builder)
else:
self.env = env_builder()
self.np_seeded_random_gen, _ = seeding.np_random(random.randint(0, 2 ** 31 - 1))
self.num_tasks_generated = 0
@property
def length(self) -> Union[int, float]:
return (
float("inf")
if self.max_tasks is None
else self.max_tasks - self.num_tasks_generated
)
@property
def total_unique(self) -> Optional[Union[int, float]]:
return None if self.num_unique_seeds is None else self.num_unique_seeds
@property
def last_sampled_task(self) -> Optional[Task]:
raise NotImplementedError
def next_task(self, force_advance_scene: bool = False) -> Optional[BabyAITask]:
if self.length <= 0:
return None
if self.num_unique_seeds is not None:
if self.deterministic_sampling:
self._last_env_seed = self.task_seeds_list[
self.num_tasks_generated % len(self.task_seeds_list)
]
else:
self._last_env_seed = self.np_seeded_random_gen.choice(
self.task_seeds_list
)
else:
self._last_env_seed = self.np_seeded_random_gen.randint(0, 2 ** 31 - 1)
self.env.seed(self._last_env_seed)
self.env.saved_seed = self._last_env_seed
self.env.reset()
self.num_tasks_generated += 1
self._last_task = BabyAITask(env=self.env, sensors=self.sensors, task_info={})
return self._last_task
def close(self) -> None:
self.env.close()
@property
def all_observation_spaces_equal(self) -> bool:
return True
def reset(self) -> None:
self.num_tasks_generated = 0
self.env.reset()
def set_seed(self, seed: int) -> None:
self.np_seeded_random_gen, _ = seeding.np_random(seed)
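

if __name__ == "__main__":
    # Hedged usage sketch (added for illustration; not part of the upstream
    # file). The "BabyAI-GoToLocal-v0" id is assumed to have been registered
    # with gym by the `babyai` package on import.
    sampler = BabyAITaskSampler(
        env_builder="BabyAI-GoToLocal-v0",
        sensors=[],
        max_tasks=1,
        num_unique_seeds=1,
        deterministic_sampling=True,
    )
    task = sampler.next_task()
    # A BabyAITask exposes the underlying MiniGrid action space and step budget.
    print(task.action_space, task.max_steps)
    sampler.close()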
| allenact-main | allenact_plugins/babyai_plugin/babyai_tasks.py |
| allenact-main | allenact_plugins/babyai_plugin/configs/__init__.py |
import glob
import os
import babyai
from allenact_plugins.babyai_plugin.babyai_constants import (
BABYAI_EXPERT_TRAJECTORIES_DIR,
)
def make_small_demos(demos_dir: str):
    for file_path in glob.glob(os.path.join(demos_dir, "*.pkl")):
if "valid" not in file_path and "small" not in file_path:
new_file_path = file_path.replace(".pkl", "-small.pkl")
if os.path.exists(new_file_path):
continue
print(
"Saving small version of {} to {}...".format(
os.path.basename(file_path), new_file_path
)
)
babyai.utils.save_demos(
babyai.utils.load_demos(file_path)[:1000], new_file_path
)
print("Done.")
if __name__ == "__main__":
make_small_demos(BABYAI_EXPERT_TRAJECTORIES_DIR)
| allenact-main | allenact_plugins/babyai_plugin/scripts/truncate_expert_demos.py |
| allenact-main | allenact_plugins/babyai_plugin/scripts/__init__.py |
import glob
import os
import babyai
import numpy as np
from allenact_plugins.babyai_plugin.babyai_constants import (
BABYAI_EXPERT_TRAJECTORIES_DIR,
)
# Boss level
# [(50, 11.0), (90, 22.0), (99, 32.0), (99.9, 38.0), (99.99, 43.0)]
if __name__ == "__main__":
# level = "BossLevel"
level = "GoToLocal"
files = glob.glob(
os.path.join(BABYAI_EXPERT_TRAJECTORIES_DIR, "*{}-v0.pkl".format(level))
)
assert len(files) == 1
demos = babyai.utils.load_demos(files[0])
percentiles = [50, 90, 99, 99.9, 99.99, 100]
print(
list(
zip(
percentiles,
np.percentile([len(d[0].split(" ")) for d in demos], percentiles),
)
)
)
| allenact-main | allenact_plugins/babyai_plugin/scripts/get_instr_length_percentiles.py |
import argparse
import os
import platform
from allenact_plugins.babyai_plugin.babyai_constants import (
BABYAI_EXPERT_TRAJECTORIES_DIR,
)
LEVEL_TO_TRAIN_VALID_IDS = {
"BossLevel": (
"1DkVVpIEVtpyo1LxOXQL_bVyjFCTO3cHD",
"1ccEFA_n5RT4SWD0Wa_qO65z2HACJBace",
),
"GoToObjMaze": (
"1P1CuMbGDJtZit1f-8hmd-HwweXZMj77T",
"1MVlVsIpJUZ0vjrYGXY6Ku4m4vBxtWjRZ",
),
"GoTo": ("1ABR1q-TClgjSlbhVdVJjzOBpTmTtlTN1", "13DlEx5woi31MIs_dzyLxfi7dPe1g59l2"),
"GoToLocal": (
"1U8YWdd3viN2lxOP5BByNUZRPVDKVvDAN",
"1Esy-J0t8eJUg6_RT8F4kkegHYDWwqmSl",
),
}
def get_args():
"""Creates the argument parser and parses input arguments."""
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
description="download_babyai_expert_demos",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"dataset",
nargs="?",
default="all",
help="dataset name (one of {}, or all)".format(
", ".join(LEVEL_TO_TRAIN_VALID_IDS.keys())
),
)
return parser.parse_args()
if __name__ == "__main__":
args = get_args()
if platform.system() == "Linux":
download_template = """wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id={}' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id={}" -O {}"""
elif platform.system() == "Darwin":
download_template = """wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id={}' -O- | gsed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id={}" -O {}"""
else:
raise NotImplementedError("{} is not supported".format(platform.system()))
try:
os.makedirs(BABYAI_EXPERT_TRAJECTORIES_DIR, exist_ok=True)
if args.dataset == "all":
id_items = LEVEL_TO_TRAIN_VALID_IDS
else:
assert (
args.dataset in LEVEL_TO_TRAIN_VALID_IDS
), "Only {} are valid datasets".format(
", ".join(LEVEL_TO_TRAIN_VALID_IDS.keys())
)
id_items = {args.dataset: LEVEL_TO_TRAIN_VALID_IDS[args.dataset]}
for level_name, (train_id, valid_id) in id_items.items():
train_path = os.path.join(
BABYAI_EXPERT_TRAJECTORIES_DIR, "BabyAI-{}-v0.pkl".format(level_name)
)
if os.path.exists(train_path):
print("{} already exists, skipping...".format(train_path))
else:
os.system(download_template.format(train_id, train_id, train_path))
print("Demos saved to {}.".format(train_path))
valid_path = os.path.join(
BABYAI_EXPERT_TRAJECTORIES_DIR,
"BabyAI-{}-v0_valid.pkl".format(level_name),
)
if os.path.exists(valid_path):
print("{} already exists, skipping...".format(valid_path))
else:
os.system(download_template.format(valid_id, valid_id, valid_path))
print("Demos saved to {}.".format(valid_path))
except Exception as _:
raise Exception(
"Failed to download babyai demos. Make sure you have the appropriate command line"
            " tools installed for your platform. For MacOS you'll need to install `wget` and `gsed` (the GNU version"
            " of sed) using homebrew or some other method."
)
| allenact-main | allenact_plugins/babyai_plugin/scripts/download_babyai_expert_demos.py |
| allenact-main | allenact_plugins/babyai_plugin/data/__init__.py |
import random
from typing import Dict, Tuple, List, Any, Optional, Union, Sequence, cast
import gym
import numpy as np
from allenact.base_abstractions.misc import RLStepResult
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import Task
from allenact.utils.system import get_logger
from allenact_plugins.ithor_plugin.ithor_constants import (
MOVE_AHEAD,
ROTATE_LEFT,
ROTATE_RIGHT,
LOOK_DOWN,
LOOK_UP,
END,
)
from allenact_plugins.ithor_plugin.ithor_environment import IThorEnvironment
from allenact_plugins.ithor_plugin.ithor_util import round_to_factor
class ObjectNaviThorGridTask(Task[IThorEnvironment]):
"""Defines the object navigation task in AI2-THOR.
In object navigation an agent is randomly initialized into an AI2-THOR scene and must
find an object of a given type (e.g. tomato, television, etc). An object is considered
found if the agent takes an `End` action and the object is visible to the agent (see
    [here](https://ai2thor.allenai.org/documentation/concepts) for a definition of visibility
in AI2-THOR).
The actions available to an agent in this task are:
1. Move ahead
* Moves agent ahead by 0.25 meters.
1. Rotate left / rotate right
* Rotates the agent by 90 degrees counter-clockwise / clockwise.
1. Look down / look up
* Changes agent view angle by 30 degrees up or down. An agent cannot look more than 30
    degrees above horizontal or more than 60 degrees below horizontal.
1. End
* Ends the task and the agent receives a positive reward if the object type is visible to the agent,
otherwise it receives a negative reward.
# Attributes
env : The ai2thor environment.
sensor_suite: Collection of sensors formed from the `sensors` argument in the initializer.
task_info : The task info. Must contain a field "object_type" that specifies, as a string,
the goal object type.
    max_steps : The maximum number of steps an agent can take in the task before it is considered failed.
observation_space: The observation space returned on each step from the sensors.
"""
_actions = (MOVE_AHEAD, ROTATE_LEFT, ROTATE_RIGHT, LOOK_DOWN, LOOK_UP, END)
_CACHED_LOCATIONS_FROM_WHICH_OBJECT_IS_VISIBLE: Dict[
Tuple[str, str], List[Tuple[float, float, int, int]]
] = {}
def __init__(
self,
env: IThorEnvironment,
sensors: List[Sensor],
task_info: Dict[str, Any],
max_steps: int,
**kwargs,
) -> None:
"""Initializer.
See class documentation for parameter definitions.
"""
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self._took_end_action: bool = False
self._success: Optional[bool] = False
self._subsampled_locations_from_which_obj_visible: Optional[
List[Tuple[float, float, int, int]]
] = None
self.task_info["followed_path"] = [self.env.get_agent_location()]
self.task_info["action_names"] = self.class_action_names()
@property
def action_space(self):
return gym.spaces.Discrete(len(self._actions))
def reached_terminal_state(self) -> bool:
return self._took_end_action
@classmethod
def class_action_names(cls, **kwargs) -> Tuple[str, ...]:
return cls._actions
def close(self) -> None:
self.env.stop()
def _step(self, action: Union[int, Sequence[int]]) -> RLStepResult:
assert isinstance(action, int)
action = cast(int, action)
action_str = self.class_action_names()[action]
if action_str == END:
self._took_end_action = True
self._success = self.is_goal_object_visible()
self.last_action_success = self._success
else:
self.env.step({"action": action_str})
self.last_action_success = self.env.last_action_success
if (
not self.last_action_success
) and self._CACHED_LOCATIONS_FROM_WHICH_OBJECT_IS_VISIBLE is not None:
self.env.update_graph_with_failed_action(failed_action=action_str)
self.task_info["followed_path"].append(self.env.get_agent_location())
step_result = RLStepResult(
observation=self.get_observations(),
reward=self.judge(),
done=self.is_done(),
info={"last_action_success": self.last_action_success},
)
return step_result
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
assert mode == "rgb", "only rgb rendering is implemented"
return self.env.current_frame
def is_goal_object_visible(self) -> bool:
"""Is the goal object currently visible?"""
return any(
o["objectType"] == self.task_info["object_type"]
for o in self.env.visible_objects()
)
def judge(self) -> float:
"""Compute the reward after having taken a step."""
reward = -0.01
if not self.last_action_success:
reward += -0.03
if self._took_end_action:
reward += 1.0 if self._success else -1.0
return float(reward)
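    # Worked example of the shaping in `judge` above (comment added): a failed
    # navigation step earns -0.01 - 0.03 = -0.04, a successful `End` earns
    # -0.01 + 1.0 = 0.99, and an unsuccessful `End` earns -0.01 - 0.03 - 1.0 = -1.04.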
def metrics(self) -> Dict[str, Any]:
if not self.is_done():
return {}
else:
return {
"success": self._success,
**super(ObjectNaviThorGridTask, self).metrics(),
}
def query_expert(self, **kwargs) -> Tuple[int, bool]:
target = self.task_info["object_type"]
if self.is_goal_object_visible():
return self.class_action_names().index(END), True
else:
key = (self.env.scene_name, target)
if self._subsampled_locations_from_which_obj_visible is None:
if key not in self._CACHED_LOCATIONS_FROM_WHICH_OBJECT_IS_VISIBLE:
obj_ids: List[str] = []
obj_ids.extend(
o["objectId"]
for o in self.env.last_event.metadata["objects"]
if o["objectType"] == target
)
assert len(obj_ids) != 0, "No objects to get an expert path to."
locations_from_which_object_is_visible: List[
Tuple[float, float, int, int]
] = []
y = self.env.last_event.metadata["agent"]["position"]["y"]
positions_to_check_interactionable_from = [
{"x": x, "y": y, "z": z}
for x, z in set((x, z) for x, z, _, _ in self.env.graph.nodes)
]
for obj_id in set(obj_ids):
self.env.controller.step(
{
"action": "PositionsFromWhichItemIsInteractable",
"objectId": obj_id,
"positions": positions_to_check_interactionable_from,
}
)
assert (
self.env.last_action_success
), "Could not get positions from which item was interactable."
returned = self.env.last_event.metadata["actionReturn"]
locations_from_which_object_is_visible.extend(
(
round(x, 2),
round(z, 2),
round_to_factor(rot, 90) % 360,
round_to_factor(hor, 30) % 360,
)
for x, z, rot, hor, standing in zip(
returned["x"],
returned["z"],
returned["rotation"],
returned["horizon"],
returned["standing"],
)
if standing == 1
)
self._CACHED_LOCATIONS_FROM_WHICH_OBJECT_IS_VISIBLE[
key
] = locations_from_which_object_is_visible
self._subsampled_locations_from_which_obj_visible = self._CACHED_LOCATIONS_FROM_WHICH_OBJECT_IS_VISIBLE[
key
]
if len(self._subsampled_locations_from_which_obj_visible) > 5:
self._subsampled_locations_from_which_obj_visible = random.sample(
self._CACHED_LOCATIONS_FROM_WHICH_OBJECT_IS_VISIBLE[key], 5
)
current_loc_key = self.env.get_key(self.env.last_event.metadata["agent"])
paths = []
for goal_key in self._subsampled_locations_from_which_obj_visible:
path = self.env.shortest_state_path(
source_state_key=current_loc_key, goal_state_key=goal_key
)
if path is not None:
paths.append(path)
if len(paths) == 0:
return 0, False
shortest_path_ind = int(np.argmin([len(p) for p in paths]))
if len(paths[shortest_path_ind]) == 1:
get_logger().warning(
"Shortest path computations suggest we are at the target but episode does not think so."
)
return 0, False
next_key_on_shortest_path = paths[shortest_path_ind][1]
return (
self.class_action_names().index(
self.env.action_transitioning_between_keys(
current_loc_key, next_key_on_shortest_path
)
),
True,
)
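

if __name__ == "__main__":
    # Hedged smoke-test sketch (added for illustration; not part of the
    # upstream file). Constructing IThorEnvironment launches a real AI2-THOR
    # process, so this is only meant as a manual sanity check.
    env = IThorEnvironment()
    task = ObjectNaviThorGridTask(
        env=env, sensors=[], task_info={"object_type": "Tomato"}, max_steps=100
    )
    print(task.class_action_names())  # the six navigation actions, ending with End
    print(task.is_goal_object_visible())
    env.stop()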
| allenact-main | allenact_plugins/ithor_plugin/ithor_tasks.py |
"""A wrapper for engaging with the THOR environment."""
import copy
import functools
import math
import random
from typing import Tuple, Dict, List, Set, Union, Any, Optional, Mapping, cast
import ai2thor.fifo_server
import ai2thor.server
import networkx as nx
import numpy as np
from ai2thor.controller import Controller
from scipy.spatial.transform import Rotation
from allenact.utils.system import get_logger
from allenact_plugins.ithor_plugin.ithor_constants import VISIBILITY_DISTANCE, FOV
from allenact_plugins.ithor_plugin.ithor_util import round_to_factor
class IThorEnvironment(object):
"""Wrapper for the ai2thor controller providing additional functionality
and bookkeeping.
See [here](https://ai2thor.allenai.org/documentation/installation) for comprehensive
documentation on AI2-THOR.
# Attributes
controller : The ai2thor controller.
"""
def __init__(
self,
x_display: Optional[str] = None,
docker_enabled: bool = False,
local_thor_build: Optional[str] = None,
visibility_distance: float = VISIBILITY_DISTANCE,
fov: float = FOV,
player_screen_width: int = 300,
player_screen_height: int = 300,
quality: str = "Very Low",
restrict_to_initially_reachable_points: bool = False,
make_agents_visible: bool = True,
object_open_speed: float = 1.0,
simplify_physics: bool = False,
) -> None:
"""Initializer.
# Parameters
        x_display : The x display into which to launch ai2thor (possibly necessary if you are running on a server
without an attached display).
docker_enabled : Whether or not to run thor in a docker container (useful on a server without an attached
display so that you don't have to start an x display).
local_thor_build : The path to a local build of ai2thor. This is probably not necessary for your use case
and can be safely ignored.
visibility_distance : The distance (in meters) at which objects, in the viewport of the agent,
are considered visible by ai2thor and will have their "visible" flag be set to `True` in the metadata.
fov : The agent's camera's field of view.
player_screen_width : The width resolution (in pixels) of the images returned by ai2thor.
player_screen_height : The height resolution (in pixels) of the images returned by ai2thor.
quality : The quality at which to render. Possible quality settings can be found in
`ai2thor._quality_settings.QUALITY_SETTINGS`.
restrict_to_initially_reachable_points : Whether or not to restrict the agent to locations in ai2thor
that were found to be (initially) reachable by the agent (i.e. reachable by the agent after resetting
the scene). This can be useful if you want to ensure there are only a fixed set of locations where the
agent can go.
        make_agents_visible : Whether or not the agent should be visible. Most noticeable when there are multiple agents
or when quality settings are high so that the agent casts a shadow.
        object_open_speed : How quickly objects should be opened. High speeds mean faster simulation but also mean
            that objects being opened have a lot of kinetic energy and can, possibly, knock other objects away.
        simplify_physics : Whether or not to simplify physics when applicable. Currently this only simplifies object
interactions when opening drawers (when simplified, objects within a drawer do not slide around on
their own when the drawer is opened or closed, instead they are effectively glued down).
"""
self._start_player_screen_width = player_screen_width
self._start_player_screen_height = player_screen_height
self._local_thor_build = local_thor_build
self.x_display = x_display
self.controller: Optional[Controller] = None
self._started = False
self._quality = quality
self._initially_reachable_points: Optional[List[Dict]] = None
self._initially_reachable_points_set: Optional[Set[Tuple[float, float]]] = None
self._move_mag: Optional[float] = None
self._grid_size: Optional[float] = None
self._visibility_distance = visibility_distance
self._fov = fov
self.restrict_to_initially_reachable_points = (
restrict_to_initially_reachable_points
)
self.make_agents_visible = make_agents_visible
self.object_open_speed = object_open_speed
self._always_return_visible_range = False
self.simplify_physics = simplify_physics
self.start(None)
# noinspection PyTypeHints
self.controller.docker_enabled = docker_enabled # type: ignore
@property
def scene_name(self) -> str:
"""Current ai2thor scene."""
return self.controller.last_event.metadata["sceneName"]
@property
def current_frame(self) -> np.ndarray:
"""Returns rgb image corresponding to the agent's egocentric view."""
return self.controller.last_event.frame
@property
def last_event(self) -> ai2thor.server.Event:
"""Last event returned by the controller."""
return self.controller.last_event
@property
def started(self) -> bool:
"""Has the ai2thor controller been started."""
return self._started
@property
def last_action(self) -> str:
"""Last action, as a string, taken by the agent."""
return self.controller.last_event.metadata["lastAction"]
@last_action.setter
def last_action(self, value: str) -> None:
"""Set the last action taken by the agent.
Doing this is rewriting history, be careful.
"""
self.controller.last_event.metadata["lastAction"] = value
@property
def last_action_success(self) -> bool:
"""Was the last action taken by the agent a success?"""
return self.controller.last_event.metadata["lastActionSuccess"]
@last_action_success.setter
def last_action_success(self, value: bool) -> None:
"""Set whether or not the last action taken by the agent was a success.
Doing this is rewriting history, be careful.
"""
self.controller.last_event.metadata["lastActionSuccess"] = value
@property
def last_action_return(self) -> Any:
"""Get the value returned by the last action (if applicable).
For an example of an action that returns a value, see
`"GetReachablePositions"`.
"""
return self.controller.last_event.metadata["actionReturn"]
@last_action_return.setter
def last_action_return(self, value: Any) -> None:
"""Set the value returned by the last action.
Doing this is rewriting history, be careful.
"""
self.controller.last_event.metadata["actionReturn"] = value
def start(
self, scene_name: Optional[str], move_mag: float = 0.25, **kwargs,
) -> None:
"""Starts the ai2thor controller if it was previously stopped.
After starting, `reset` will be called with the scene name and move magnitude.
# Parameters
scene_name : The scene to load.
move_mag : The amount of distance the agent moves in a single `MoveAhead` step.
kwargs : additional kwargs, passed to reset.
"""
if self._started:
raise RuntimeError(
"Trying to start the environment but it is already started."
)
# noinspection PyUnresolvedReferences
self.controller = Controller(
x_display=self.x_display,
width=self._start_player_screen_width,
height=self._start_player_screen_height,
local_executable_path=self._local_thor_build,
quality=self._quality,
server_class=ai2thor.fifo_server.FifoServer,
)
if (
self._start_player_screen_height,
self._start_player_screen_width,
) != self.current_frame.shape[:2]:
self.controller.step(
{
"action": "ChangeResolution",
"x": self._start_player_screen_width,
"y": self._start_player_screen_height,
}
)
self._started = True
self.reset(scene_name=scene_name, move_mag=move_mag, **kwargs)
def stop(self) -> None:
"""Stops the ai2thor controller."""
try:
self.controller.stop()
except Exception as e:
get_logger().warning(str(e))
finally:
self._started = False
def reset(
self, scene_name: Optional[str], move_mag: float = 0.25, **kwargs,
):
"""Resets the ai2thor in a new scene.
Resets ai2thor into a new scene and initializes the scene/agents with
prespecified settings (e.g. move magnitude).
# Parameters
scene_name : The scene to load.
move_mag : The amount of distance the agent moves in a single `MoveAhead` step.
kwargs : additional kwargs, passed to the controller "Initialize" action.
"""
self._move_mag = move_mag
self._grid_size = self._move_mag
if scene_name is None:
scene_name = self.controller.last_event.metadata["sceneName"]
self.controller.reset(scene_name)
self.controller.step(
{
"action": "Initialize",
"gridSize": self._grid_size,
"visibilityDistance": self._visibility_distance,
"fieldOfView": self._fov,
"makeAgentsVisible": self.make_agents_visible,
"alwaysReturnVisibleRange": self._always_return_visible_range,
**kwargs,
}
)
if self.object_open_speed != 1.0:
self.controller.step(
{"action": "ChangeOpenSpeed", "x": self.object_open_speed}
)
self._initially_reachable_points = None
self._initially_reachable_points_set = None
self.controller.step({"action": "GetReachablePositions"})
if not self.controller.last_event.metadata["lastActionSuccess"]:
get_logger().warning(
"Error when getting reachable points: {}".format(
self.controller.last_event.metadata["errorMessage"]
)
)
self._initially_reachable_points = self.last_action_return
def teleport_agent_to(
self,
x: float,
y: float,
z: float,
rotation: float,
horizon: float,
standing: Optional[bool] = None,
force_action: bool = False,
only_initially_reachable: Optional[bool] = None,
verbose=True,
ignore_y_diffs=False,
) -> None:
"""Helper function teleporting the agent to a given location."""
if standing is None:
standing = self.last_event.metadata.get(
"isStanding", self.last_event.metadata["agent"].get("isStanding")
)
original_location = self.get_agent_location()
target = {"x": x, "y": y, "z": z}
if only_initially_reachable is None:
only_initially_reachable = self.restrict_to_initially_reachable_points
if only_initially_reachable:
reachable_points = self.initially_reachable_points
reachable = False
for p in reachable_points:
if self.position_dist(target, p, ignore_y=ignore_y_diffs) < 0.01:
reachable = True
break
if not reachable:
self.last_action = "TeleportFull"
self.last_event.metadata[
"errorMessage"
] = "Target position was not initially reachable."
self.last_action_success = False
return
self.controller.step(
dict(
action="TeleportFull",
x=x,
y=y,
z=z,
rotation={"x": 0.0, "y": rotation, "z": 0.0},
horizon=horizon,
standing=standing,
forceAction=force_action,
)
)
if not self.last_action_success:
agent_location = self.get_agent_location()
rot_diff = (
agent_location["rotation"] - original_location["rotation"]
) % 360
new_old_dist = self.position_dist(
original_location, agent_location, ignore_y=ignore_y_diffs
)
if (
self.position_dist(
original_location, agent_location, ignore_y=ignore_y_diffs
)
> 1e-2
or min(rot_diff, 360 - rot_diff) > 1
):
get_logger().warning(
"Teleportation FAILED but agent still moved (position_dist {}, rot diff {})"
" (\nprevious location\n{}\ncurrent_location\n{}\n)".format(
new_old_dist, rot_diff, original_location, agent_location
)
)
return
if force_action:
assert self.last_action_success
return
agent_location = self.get_agent_location()
rot_diff = (agent_location["rotation"] - rotation) % 360
if (
self.position_dist(agent_location, target, ignore_y=ignore_y_diffs) > 1e-2
or min(rot_diff, 360 - rot_diff) > 1
):
if only_initially_reachable:
self._snap_agent_to_initially_reachable(verbose=False)
if verbose:
get_logger().warning(
"Teleportation did not place agent"
" precisely where desired in scene {}"
" (\ndesired\n{}\nactual\n{}\n)"
" perhaps due to grid snapping."
" Action is considered failed but agent may have moved.".format(
self.scene_name,
{
"x": x,
"y": y,
"z": z,
"rotation": rotation,
"standing": standing,
"horizon": horizon,
},
agent_location,
)
)
self.last_action_success = False
return
    def random_reachable_state(self, seed: Optional[int] = None) -> Dict:
"""Returns a random reachable location in the scene."""
if seed is not None:
random.seed(seed)
xyz = random.choice(self.currently_reachable_points)
rotation = random.choice([0, 90, 180, 270])
horizon = random.choice([0, 30, 60, 330])
state = copy.copy(xyz)
state["rotation"] = rotation
state["horizon"] = horizon
return state
def randomize_agent_location(
        self, seed: Optional[int] = None, partial_position: Optional[Dict[str, float]] = None
) -> Dict:
"""Teleports the agent to a random reachable location in the scene."""
if partial_position is None:
partial_position = {}
k = 0
state: Optional[Dict] = None
while k == 0 or (not self.last_action_success and k < 10):
state = self.random_reachable_state(seed=seed)
self.teleport_agent_to(**{**state, **partial_position})
k += 1
if not self.last_action_success:
get_logger().warning(
(
"Randomize agent location in scene {}"
" with seed {} and partial position {} failed in "
"10 attempts. Forcing the action."
).format(self.scene_name, seed, partial_position)
)
self.teleport_agent_to(**{**state, **partial_position}, force_action=True) # type: ignore
assert self.last_action_success
assert state is not None
return state
def object_pixels_in_frame(
self, object_id: str, hide_all: bool = True, hide_transparent: bool = False
) -> np.ndarray:
"""Return an mask for a given object in the agent's current view.
# Parameters
object_id : The id of the object.
hide_all : Whether or not to hide all other objects in the scene before getting the mask.
hide_transparent : Whether or not partially transparent objects are considered to occlude the object.
# Returns
A numpy array of the mask.
"""
# Emphasizing an object turns it magenta and hides all other objects
        # from view; we can find where a given object is on the screen by
# emphasizing it and then scanning across the image for the magenta pixels.
if hide_all:
self.step({"action": "EmphasizeObject", "objectId": object_id})
else:
self.step({"action": "MaskObject", "objectId": object_id})
if hide_transparent:
self.step({"action": "HideTranslucentObjects"})
# noinspection PyShadowingBuiltins
filter = np.array([[[255, 0, 255]]])
object_pixels = 1 * np.all(self.current_frame == filter, axis=2)
if hide_all:
self.step({"action": "UnemphasizeAll"})
else:
self.step({"action": "UnmaskObject", "objectId": object_id})
if hide_transparent:
self.step({"action": "UnhideAllObjects"})
return object_pixels
def object_pixels_on_grid(
self,
object_id: str,
grid_shape: Tuple[int, int],
hide_all: bool = True,
hide_transparent: bool = False,
) -> np.ndarray:
"""Like `object_pixels_in_frame` but counts object pixels in a
partitioning of the image."""
def partition(n, num_parts):
m = n // num_parts
parts = [m] * num_parts
num_extra = n % num_parts
for k in range(num_extra):
parts[k] += 1
return parts
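        # Illustrative example (comment added): partition(7, 3) == [3, 2, 2],
        # i.e. any leftover rows/columns are assigned to the earliest blocks.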
object_pixels = self.object_pixels_in_frame(
object_id=object_id, hide_all=hide_all, hide_transparent=hide_transparent
)
# Divide the current frame into a grid and count the number
# of hand object pixels in each of the grid squares
sums_in_blocks: List[List] = []
frame_shape = self.current_frame.shape[:2]
row_inds = np.cumsum([0] + partition(frame_shape[0], grid_shape[0]))
col_inds = np.cumsum([0] + partition(frame_shape[1], grid_shape[1]))
for i in range(len(row_inds) - 1):
sums_in_blocks.append([])
for j in range(len(col_inds) - 1):
sums_in_blocks[i].append(
np.sum(
object_pixels[
row_inds[i] : row_inds[i + 1], col_inds[j] : col_inds[j + 1]
]
)
)
return np.array(sums_in_blocks, dtype=np.float32)
def object_in_hand(self):
"""Object metadata for the object in the agent's hand."""
inv_objs = self.last_event.metadata["inventoryObjects"]
if len(inv_objs) == 0:
return None
elif len(inv_objs) == 1:
return self.get_object_by_id(
self.last_event.metadata["inventoryObjects"][0]["objectId"]
)
else:
            raise AttributeError("There must be at most one inventory object.")
@property
def initially_reachable_points(self) -> List[Dict[str, float]]:
"""List of {"x": x, "y": y, "z": z} locations in the scene that were
reachable after initially resetting."""
assert self._initially_reachable_points is not None
return copy.deepcopy(self._initially_reachable_points) # type:ignore
@property
def initially_reachable_points_set(self) -> Set[Tuple[float, float]]:
"""Set of (x,z) locations in the scene that were reachable after
initially resetting."""
if self._initially_reachable_points_set is None:
self._initially_reachable_points_set = set()
for p in self.initially_reachable_points:
self._initially_reachable_points_set.add(
self._agent_location_to_tuple(p)
)
return self._initially_reachable_points_set
@property
def currently_reachable_points(self) -> List[Dict[str, float]]:
"""List of {"x": x, "y": y, "z": z} locations in the scene that are
currently reachable."""
self.step({"action": "GetReachablePositions"})
return self.last_event.metadata["actionReturn"] # type:ignore
def get_agent_location(self) -> Dict[str, Union[float, bool]]:
"""Gets agent's location."""
metadata = self.controller.last_event.metadata
location = {
"x": metadata["agent"]["position"]["x"],
"y": metadata["agent"]["position"]["y"],
"z": metadata["agent"]["position"]["z"],
"rotation": metadata["agent"]["rotation"]["y"],
"horizon": metadata["agent"]["cameraHorizon"],
"standing": metadata.get("isStanding", metadata["agent"].get("isStanding")),
}
return location
@staticmethod
def _agent_location_to_tuple(p: Dict[str, float]) -> Tuple[float, float]:
return round(p["x"], 2), round(p["z"], 2)
def _snap_agent_to_initially_reachable(self, verbose=True):
agent_location = self.get_agent_location()
end_location_tuple = self._agent_location_to_tuple(agent_location)
if end_location_tuple in self.initially_reachable_points_set:
return
agent_x = agent_location["x"]
agent_z = agent_location["z"]
closest_reachable_points = list(self.initially_reachable_points_set)
closest_reachable_points = sorted(
closest_reachable_points,
key=lambda xz: abs(xz[0] - agent_x) + abs(xz[1] - agent_z),
)
        # In rare cases end_location_tuple might not be considered to be in
        # self.initially_reachable_points_set even when it effectively is; here we check for such cases.
if (
math.sqrt(
(
(
np.array(closest_reachable_points[0])
- np.array(end_location_tuple)
)
** 2
).sum()
)
< 1e-6
):
return
saved_last_action = self.last_action
saved_last_action_success = self.last_action_success
saved_last_action_return = self.last_action_return
saved_error_message = self.last_event.metadata["errorMessage"]
# Thor behaves weirdly when the agent gets off of the grid and you
# try to teleport the agent back to the closest grid location. To
# get around this we first teleport the agent to random location
# and then back to where it should be.
for point in self.initially_reachable_points:
if abs(agent_x - point["x"]) > 0.1 or abs(agent_z - point["z"]) > 0.1:
self.teleport_agent_to(
rotation=0,
horizon=30,
**point,
only_initially_reachable=False,
verbose=False,
)
if self.last_action_success:
break
for p in closest_reachable_points:
self.teleport_agent_to(
**{**agent_location, "x": p[0], "z": p[1]},
only_initially_reachable=False,
verbose=False,
)
if self.last_action_success:
break
teleport_forced = False
if not self.last_action_success:
self.teleport_agent_to(
**{
**agent_location,
"x": closest_reachable_points[0][0],
"z": closest_reachable_points[0][1],
},
force_action=True,
only_initially_reachable=False,
verbose=False,
)
teleport_forced = True
self.last_action = saved_last_action
self.last_action_success = saved_last_action_success
self.last_action_return = saved_last_action_return
self.last_event.metadata["errorMessage"] = saved_error_message
new_agent_location = self.get_agent_location()
if verbose:
get_logger().warning(
(
"In {}, at location (x,z)=({},{}) which is not in the set "
"of initially reachable points;"
" attempting to correct this: agent teleported to (x,z)=({},{}).\n"
"Teleportation {} forced."
).format(
self.scene_name,
agent_x,
agent_z,
new_agent_location["x"],
new_agent_location["z"],
"was" if teleport_forced else "wasn't",
)
)
def step(
self,
action_dict: Optional[Dict[str, Union[str, int, float, Dict]]] = None,
**kwargs: Union[str, int, float, Dict],
) -> ai2thor.server.Event:
"""Take a step in the ai2thor environment."""
if action_dict is None:
action_dict = dict()
action_dict.update(kwargs)
action = cast(str, action_dict["action"])
skip_render = "renderImage" in action_dict and not action_dict["renderImage"]
last_frame: Optional[np.ndarray] = None
if skip_render:
last_frame = self.current_frame
if self.simplify_physics:
action_dict["simplifyPhysics"] = True
if "Move" in action and "Hand" not in action: # type: ignore
action_dict = {
**action_dict,
"moveMagnitude": self._move_mag,
} # type: ignore
start_location = self.get_agent_location()
sr = self.controller.step(action_dict)
if self.restrict_to_initially_reachable_points:
end_location_tuple = self._agent_location_to_tuple(
self.get_agent_location()
)
if end_location_tuple not in self.initially_reachable_points_set:
self.teleport_agent_to(**start_location, force_action=True) # type: ignore
self.last_action = action
self.last_action_success = False
self.last_event.metadata[
"errorMessage"
] = "Moved to location outside of initially reachable points."
elif "RandomizeHideSeekObjects" in action:
last_position = self.get_agent_location()
self.controller.step(action_dict)
metadata = self.last_event.metadata
if self.position_dist(last_position, self.get_agent_location()) > 0.001:
self.teleport_agent_to(**last_position, force_action=True) # type: ignore
get_logger().warning(
"In scene {}, after randomization of hide and seek objects, agent moved.".format(
self.scene_name
)
)
sr = self.controller.step({"action": "GetReachablePositions"})
self._initially_reachable_points = self.controller.last_event.metadata[
"actionReturn"
]
self._initially_reachable_points_set = None
self.last_action = action
self.last_action_success = metadata["lastActionSuccess"]
self.controller.last_event.metadata["actionReturn"] = []
elif "RotateUniverse" in action:
sr = self.controller.step(action_dict)
metadata = self.last_event.metadata
if metadata["lastActionSuccess"]:
sr = self.controller.step({"action": "GetReachablePositions"})
self._initially_reachable_points = self.controller.last_event.metadata[
"actionReturn"
]
self._initially_reachable_points_set = None
self.last_action = action
self.last_action_success = metadata["lastActionSuccess"]
self.controller.last_event.metadata["actionReturn"] = []
else:
sr = self.controller.step(action_dict)
if self.restrict_to_initially_reachable_points:
self._snap_agent_to_initially_reachable()
if skip_render:
assert last_frame is not None
self.last_event.frame = last_frame
return sr
@staticmethod
def position_dist(
p0: Mapping[str, Any],
p1: Mapping[str, Any],
ignore_y: bool = False,
l1_dist: bool = False,
) -> float:
"""Distance between two points of the form {"x": x, "y":y, "z":z"}."""
if l1_dist:
return (
abs(p0["x"] - p1["x"])
+ (0 if ignore_y else abs(p0["y"] - p1["y"]))
+ abs(p0["z"] - p1["z"])
)
else:
return math.sqrt(
(p0["x"] - p1["x"]) ** 2
+ (0 if ignore_y else (p0["y"] - p1["y"]) ** 2)
+ (p0["z"] - p1["z"]) ** 2
)
@staticmethod
def rotation_dist(a: Dict[str, float], b: Dict[str, float]):
"""Distance between rotations."""
def deg_dist(d0: float, d1: float):
dist = (d0 - d1) % 360
return min(dist, 360 - dist)
return sum(deg_dist(a[k], b[k]) for k in ["x", "y", "z"])
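    # Example: rotations {"x": 0, "y": 350, "z": 0} and {"x": 0, "y": 10, "z": 0}
    # are 20 degrees apart: `deg_dist` wraps each per-axis difference around 360
    # and the per-axis distances are summed.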
@staticmethod
def angle_between_rotations(a: Dict[str, float], b: Dict[str, float]):
return np.abs(
(180 / (2 * math.pi))
* (
Rotation.from_euler("xyz", [a[k] for k in "xyz"], degrees=True)
* Rotation.from_euler("xyz", [b[k] for k in "xyz"], degrees=True).inv()
).as_rotvec()
).sum()
def closest_object_with_properties(
self, properties: Dict[str, Any]
) -> Optional[Dict[str, Any]]:
"""Find the object closest to the agent that has the given
properties."""
agent_pos = self.controller.last_event.metadata["agent"]["position"]
min_dist = float("inf")
closest = None
for o in self.all_objects():
satisfies_all = True
for k, v in properties.items():
if o[k] != v:
satisfies_all = False
break
if satisfies_all:
d = self.position_dist(agent_pos, o["position"])
if d < min_dist:
min_dist = d
closest = o
return closest
def closest_visible_object_of_type(
self, object_type: str
) -> Optional[Dict[str, Any]]:
"""Find the object closest to the agent that is visible and has the
given type."""
properties = {"visible": True, "objectType": object_type}
return self.closest_object_with_properties(properties)
def closest_object_of_type(self, object_type: str) -> Optional[Dict[str, Any]]:
"""Find the object closest to the agent that has the given type."""
properties = {"objectType": object_type}
return self.closest_object_with_properties(properties)
def closest_reachable_point_to_position(
self, position: Dict[str, float]
) -> Tuple[Dict[str, float], float]:
"""Of all reachable positions, find the one that is closest to the
given location."""
target = np.array([position["x"], position["z"]])
min_dist = float("inf")
closest_point = None
for pt in self.initially_reachable_points:
dist = np.linalg.norm(target - np.array([pt["x"], pt["z"]]))
if dist < min_dist:
closest_point = pt
min_dist = dist
if min_dist < 1e-3:
break
assert closest_point is not None
return closest_point, min_dist
@staticmethod
def _angle_from_to(a_from: float, a_to: float) -> float:
a_from = a_from % 360
a_to = a_to % 360
min_rot = min(a_from, a_to)
max_rot = max(a_from, a_to)
rot_across_0 = (360 - max_rot) + min_rot
rot_not_across_0 = max_rot - min_rot
rot_err = min(rot_across_0, rot_not_across_0)
if rot_across_0 == rot_err:
rot_err *= -1 if a_to > a_from else 1
else:
rot_err *= 1 if a_to > a_from else -1
return rot_err
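    # Worked example: _angle_from_to(350, 10) == 20 (rotating 20 degrees across 0),
    # while _angle_from_to(10, 350) == -20; the sign encodes the direction of the
    # shorter rotation from `a_from` to `a_to`.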
def agent_xz_to_scene_xz(self, agent_xz: Dict[str, float]) -> Dict[str, float]:
agent_pos = self.get_agent_location()
x_rel_agent = agent_xz["x"]
z_rel_agent = agent_xz["z"]
scene_x = agent_pos["x"]
scene_z = agent_pos["z"]
rotation = agent_pos["rotation"]
if abs(rotation) < 1e-5:
scene_x += x_rel_agent
scene_z += z_rel_agent
elif abs(rotation - 90) < 1e-5:
scene_x += z_rel_agent
scene_z += -x_rel_agent
elif abs(rotation - 180) < 1e-5:
scene_x += -x_rel_agent
scene_z += -z_rel_agent
elif abs(rotation - 270) < 1e-5:
scene_x += -z_rel_agent
scene_z += x_rel_agent
else:
raise Exception("Rotation must be one of 0, 90, 180, or 270.")
return {"x": scene_x, "z": scene_z}
def scene_xz_to_agent_xz(self, scene_xz: Dict[str, float]) -> Dict[str, float]:
agent_pos = self.get_agent_location()
x_err = scene_xz["x"] - agent_pos["x"]
z_err = scene_xz["z"] - agent_pos["z"]
rotation = agent_pos["rotation"]
if abs(rotation) < 1e-5:
agent_x = x_err
agent_z = z_err
elif abs(rotation - 90) < 1e-5:
agent_x = -z_err
agent_z = x_err
elif abs(rotation - 180) < 1e-5:
agent_x = -x_err
agent_z = -z_err
elif abs(rotation - 270) < 1e-5:
agent_x = z_err
agent_z = -x_err
else:
raise Exception("Rotation must be one of 0, 90, 180, or 270.")
return {"x": agent_x, "z": agent_z}
def all_objects(self) -> List[Dict[str, Any]]:
"""Return all object metadata."""
return self.controller.last_event.metadata["objects"]
def all_objects_with_properties(
self, properties: Dict[str, Any]
) -> List[Dict[str, Any]]:
"""Find all objects with the given properties."""
objects = []
for o in self.all_objects():
satisfies_all = True
for k, v in properties.items():
if o[k] != v:
satisfies_all = False
break
if satisfies_all:
objects.append(o)
return objects
def visible_objects(self) -> List[Dict[str, Any]]:
"""Return all visible objects."""
return self.all_objects_with_properties({"visible": True})
def get_object_by_id(self, object_id: str) -> Optional[Dict[str, Any]]:
for o in self.last_event.metadata["objects"]:
if o["objectId"] == object_id:
return o
return None
###
# Following is used for computing shortest paths between states
###
_CACHED_GRAPHS: Dict[str, nx.DiGraph] = {}
GRAPH_ACTIONS_SET = {"LookUp", "LookDown", "RotateLeft", "RotateRight", "MoveAhead"}
def reachable_points_with_rotations_and_horizons(self):
self.controller.step({"action": "GetReachablePositions"})
assert self.last_action_success
points_slim = self.last_event.metadata["actionReturn"]
points = []
for r in [0, 90, 180, 270]:
for horizon in [-30, 0, 30, 60]:
for p in points_slim:
p = copy.copy(p)
p["rotation"] = r
p["horizon"] = horizon
points.append(p)
return points
@staticmethod
def location_for_key(key, y_value=0.0):
x, z, rot, hor = key
loc = dict(x=x, y=y_value, z=z, rotation=rot, horizon=hor)
return loc
@staticmethod
def get_key(input_dict: Dict[str, Any]) -> Tuple[float, float, int, int]:
if "x" in input_dict:
x = input_dict["x"]
z = input_dict["z"]
rot = input_dict["rotation"]
hor = input_dict["horizon"]
else:
x = input_dict["position"]["x"]
z = input_dict["position"]["z"]
rot = input_dict["rotation"]["y"]
hor = input_dict["cameraHorizon"]
return (
round(x, 2),
round(z, 2),
round_to_factor(rot, 90) % 360,
round_to_factor(hor, 30) % 360,
)
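    # Example: an agent pose with x=1.001, z=0.499, rotation=89.7, and horizon=28.9
    # discretizes to the state key (1.0, 0.5, 90, 30).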
def update_graph_with_failed_action(self, failed_action: str):
if (
self.scene_name not in self._CACHED_GRAPHS
or failed_action not in self.GRAPH_ACTIONS_SET
):
return
source_key = self.get_key(self.last_event.metadata["agent"])
self._check_contains_key(source_key)
edge_dict = self.graph[source_key]
to_remove_key = None
for target_key in self.graph[source_key]:
if edge_dict[target_key]["action"] == failed_action:
to_remove_key = target_key
break
if to_remove_key is not None:
self.graph.remove_edge(source_key, to_remove_key)
def _add_from_to_edge(
self,
g: nx.DiGraph,
s: Tuple[float, float, int, int],
t: Tuple[float, float, int, int],
):
def ae(x, y):
return abs(x - y) < 0.001
s_x, s_z, s_rot, s_hor = s
t_x, t_z, t_rot, t_hor = t
dist = round(math.sqrt((s_x - t_x) ** 2 + (s_z - t_z) ** 2), 2)
angle_dist = (round_to_factor(t_rot - s_rot, 90) % 360) // 90
horz_dist = (round_to_factor(t_hor - s_hor, 30) % 360) // 30
        # If source and target differ by anything other than exactly one
        # primitive action's worth of change, do not add an edge.
if sum(x != 0 for x in [dist, angle_dist, horz_dist]) != 1:
return
grid_size = self._grid_size
action = None
        if angle_dist != 0:
            if angle_dist == 1:  # +90 degrees
                action = "RotateRight"
            elif angle_dist == 3:  # -90 degrees (270 mod 360, in units of 90)
                action = "RotateLeft"
        elif horz_dist != 0:
            if horz_dist == 11:  # -30 degrees (330 mod 360, in units of 30)
                action = "LookUp"
            elif horz_dist == 1:  # +30 degrees
                action = "LookDown"
elif ae(dist, grid_size):
if (
(s_rot == 0 and ae(t_z - s_z, grid_size))
or (s_rot == 90 and ae(t_x - s_x, grid_size))
or (s_rot == 180 and ae(t_z - s_z, -grid_size))
or (s_rot == 270 and ae(t_x - s_x, -grid_size))
):
g.add_edge(s, t, action="MoveAhead")
if action is not None:
g.add_edge(s, t, action=action)
@functools.lru_cache(1)
def possible_neighbor_offsets(self) -> Tuple[Tuple[float, float, int, int], ...]:
grid_size = round(self._grid_size, 2)
offsets = []
for rot_diff in [-90, 0, 90]:
for horz_diff in [-30, 0, 30, 60]:
for x_diff in [-grid_size, 0, grid_size]:
for z_diff in [-grid_size, 0, grid_size]:
if (rot_diff != 0) + (horz_diff != 0) + (x_diff != 0) + (
z_diff != 0
) == 1:
offsets.append((x_diff, z_diff, rot_diff, horz_diff))
return tuple(offsets)
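    # With exactly one coordinate allowed to change per offset, the loops above
    # yield 9 candidate neighbor offsets: 2 x-translations, 2 z-translations,
    # 2 rotations, and 3 horizon changes.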
def _add_node_to_graph(self, graph: nx.DiGraph, s: Tuple[float, float, int, int]):
if s in graph:
return
existing_nodes = set(graph.nodes())
graph.add_node(s)
for o in self.possible_neighbor_offsets():
t = (s[0] + o[0], s[1] + o[1], s[2] + o[2], s[3] + o[3])
if t in existing_nodes:
self._add_from_to_edge(graph, s, t)
self._add_from_to_edge(graph, t, s)
@property
def graph(self):
if self.scene_name not in self._CACHED_GRAPHS:
g = nx.DiGraph()
points = self.reachable_points_with_rotations_and_horizons()
for p in points:
self._add_node_to_graph(g, self.get_key(p))
self._CACHED_GRAPHS[self.scene_name] = g
return self._CACHED_GRAPHS[self.scene_name]
@graph.setter
def graph(self, g):
self._CACHED_GRAPHS[self.scene_name] = g
def _check_contains_key(self, key: Tuple[float, float, int, int], add_if_not=True):
if key not in self.graph:
get_logger().warning(
"{} was not in the graph for scene {}.".format(key, self.scene_name)
)
if add_if_not:
self._add_node_to_graph(self.graph, key)
def shortest_state_path(self, source_state_key, goal_state_key):
self._check_contains_key(source_state_key)
self._check_contains_key(goal_state_key)
# noinspection PyBroadException
try:
path = nx.shortest_path(self.graph, source_state_key, goal_state_key)
return path
except Exception as _:
return None
def action_transitioning_between_keys(self, s, t):
self._check_contains_key(s)
self._check_contains_key(t)
if self.graph.has_edge(s, t):
return self.graph.get_edge_data(s, t)["action"]
else:
return None
def shortest_path_next_state(self, source_state_key, goal_state_key):
self._check_contains_key(source_state_key)
self._check_contains_key(goal_state_key)
if source_state_key == goal_state_key:
raise RuntimeError("called next state on the same source and goal state")
state_path = self.shortest_state_path(source_state_key, goal_state_key)
return state_path[1]
def shortest_path_next_action(self, source_state_key, goal_state_key):
self._check_contains_key(source_state_key)
self._check_contains_key(goal_state_key)
next_state_key = self.shortest_path_next_state(source_state_key, goal_state_key)
return self.graph.get_edge_data(source_state_key, next_state_key)["action"]
def shortest_path_length(self, source_state_key, goal_state_key):
self._check_contains_key(source_state_key)
self._check_contains_key(goal_state_key)
try:
return nx.shortest_path_length(self.graph, source_state_key, goal_state_key)
except nx.NetworkXNoPath as _:
return float("inf")
| allenact-main | allenact_plugins/ithor_plugin/ithor_environment.py |
from allenact.utils.system import ImportChecker
with ImportChecker(
"Cannot `import ai2thor`, please install `ai2thor` (`pip install ai2thor`)."
):
# noinspection PyUnresolvedReferences
import ai2thor
| allenact-main | allenact_plugins/ithor_plugin/__init__.py |
"""Common constants used when training agents to complete tasks in iTHOR, the
interactive version of AI2-THOR."""
from collections import OrderedDict
from typing import Set, Dict
MOVE_AHEAD = "MoveAhead"
ROTATE_LEFT = "RotateLeft"
ROTATE_RIGHT = "RotateRight"
LOOK_DOWN = "LookDown"
LOOK_UP = "LookUp"
END = "End"
VISIBILITY_DISTANCE = 1.25
FOV = 90.0
ORDERED_SCENE_TYPES = ("kitchens", "livingrooms", "bedrooms", "bathrooms")
NUM_SCENE_TYPES = len(ORDERED_SCENE_TYPES)
def make_scene_name(type_ind, scene_num):
    if type_ind == 1:
        # Kitchens (type 1) are numbered FloorPlan1-30 with no scene-type prefix.
        return "FloorPlan" + str(scene_num) + "_physics"
    elif scene_num < 10:
        # Zero-pad single-digit scene numbers, e.g. FloorPlan203_physics.
        return "FloorPlan" + str(type_ind) + "0" + str(scene_num) + "_physics"
    else:
        return "FloorPlan" + str(type_ind) + str(scene_num) + "_physics"
SCENES_TYPE_TO_SCENE_NAMES = OrderedDict(
[
(
ORDERED_SCENE_TYPES[type_ind - 1],
tuple(
make_scene_name(type_ind=type_ind, scene_num=scene_num)
for scene_num in range(1, 31)
),
)
for type_ind in range(1, NUM_SCENE_TYPES + 1)
]
)
SCENES_TYPE_TO_TRAIN_SCENE_NAMES = OrderedDict(
(key, scenes[:20]) for key, scenes in SCENES_TYPE_TO_SCENE_NAMES.items()
)
SCENES_TYPE_TO_VALID_SCENE_NAMES = OrderedDict(
(key, scenes[20:25]) for key, scenes in SCENES_TYPE_TO_SCENE_NAMES.items()
)
SCENES_TYPE_TO_TEST_SCENE_NAMES = OrderedDict(
(key, scenes[25:30]) for key, scenes in SCENES_TYPE_TO_SCENE_NAMES.items()
)
ALL_SCENE_NAMES = sum(SCENES_TYPE_TO_SCENE_NAMES.values(), tuple())
TRAIN_SCENE_NAMES = sum(
(scenes for scenes in SCENES_TYPE_TO_TRAIN_SCENE_NAMES.values()), tuple()
)
VALID_SCENE_NAMES = sum(
(scenes for scenes in SCENES_TYPE_TO_VALID_SCENE_NAMES.values()), tuple()
)
TEST_SCENE_NAMES = sum(
(scenes for scenes in SCENES_TYPE_TO_TEST_SCENE_NAMES.values()), tuple()
)
TRAIN_SCENE_NAMES_SET = set(TRAIN_SCENE_NAMES)
VALID_SCENE_NAMES_SET = set(VALID_SCENE_NAMES)
TEST_SCENE_NAMES_SET = set(TEST_SCENE_NAMES)
_object_type_and_location_tsv = """
AlarmClock bedrooms
Apple kitchens
ArmChair livingrooms,bedrooms
BaseballBat bedrooms
BasketBall bedrooms
Bathtub bathrooms
BathtubBasin bathrooms
Bed bedrooms
Blinds kitchens,bedrooms
Book kitchens,livingrooms,bedrooms
Boots livingrooms,bedrooms
Bottle kitchens
Bowl kitchens,livingrooms,bedrooms
Box livingrooms,bedrooms
Bread kitchens
ButterKnife kitchens
Cabinet kitchens,livingrooms,bedrooms,bathrooms
Candle livingrooms,bathrooms
Cart bathrooms
CD bedrooms
CellPhone kitchens,livingrooms,bedrooms
Chair kitchens,livingrooms,bedrooms
Cloth bedrooms,bathrooms
CoffeeMachine kitchens
CoffeeTable livingrooms,bedrooms
CounterTop kitchens,livingrooms,bedrooms,bathrooms
CreditCard kitchens,livingrooms,bedrooms
Cup kitchens
Curtains kitchens,livingrooms,bedrooms
Desk bedrooms
DeskLamp livingrooms,bedrooms
DiningTable kitchens,livingrooms,bedrooms
DishSponge kitchens,bathrooms
Drawer kitchens,livingrooms,bedrooms,bathrooms
Dresser livingrooms,bedrooms,bathrooms
Egg kitchens
Faucet kitchens,bathrooms
FloorLamp livingrooms,bedrooms
Footstool bedrooms
Fork kitchens
Fridge kitchens
GarbageCan kitchens,livingrooms,bedrooms,bathrooms
HandTowel bathrooms
HandTowelHolder bathrooms
HousePlant kitchens,livingrooms,bedrooms,bathrooms
Kettle kitchens
KeyChain livingrooms,bedrooms
Knife kitchens
Ladle kitchens
Laptop kitchens,livingrooms,bedrooms
LaundryHamper bedrooms
LaundryHamperLid bedrooms
Lettuce kitchens
LightSwitch kitchens,livingrooms,bedrooms,bathrooms
Microwave kitchens
Mirror kitchens,livingrooms,bedrooms,bathrooms
Mug kitchens,bedrooms
Newspaper livingrooms
Ottoman livingrooms,bedrooms
Painting kitchens,livingrooms,bedrooms,bathrooms
Pan kitchens
PaperTowel kitchens,bathrooms
Pen kitchens,livingrooms,bedrooms
Pencil kitchens,livingrooms,bedrooms
PepperShaker kitchens
Pillow livingrooms,bedrooms
Plate kitchens,livingrooms
Plunger bathrooms
Poster bedrooms
Pot kitchens
Potato kitchens
RemoteControl livingrooms,bedrooms
Safe kitchens,livingrooms,bedrooms
SaltShaker kitchens
ScrubBrush bathrooms
Shelf kitchens,livingrooms,bedrooms,bathrooms
ShowerCurtain bathrooms
ShowerDoor bathrooms
ShowerGlass bathrooms
ShowerHead bathrooms
SideTable livingrooms,bedrooms
Sink kitchens,bathrooms
SinkBasin kitchens,bathrooms
SoapBar bathrooms
SoapBottle kitchens,bathrooms
Sofa livingrooms,bedrooms
Spatula kitchens
Spoon kitchens
SprayBottle bathrooms
Statue kitchens,livingrooms,bedrooms
StoveBurner kitchens
StoveKnob kitchens
TeddyBear bedrooms
Television livingrooms,bedrooms
TennisRacket bedrooms
TissueBox livingrooms,bedrooms,bathrooms
Toaster kitchens
Toilet bathrooms
ToiletPaper bathrooms
ToiletPaperHanger bathrooms
Tomato kitchens
Towel bathrooms
TowelHolder bathrooms
TVStand livingrooms
Vase kitchens,livingrooms,bedrooms
Watch livingrooms,bedrooms
WateringCan livingrooms
Window kitchens,livingrooms,bedrooms,bathrooms
WineBottle kitchens
"""
OBJECT_TYPE_TO_SCENE_TYPES = OrderedDict()
for ot_tab_scene_types in _object_type_and_location_tsv.split("\n"):
if ot_tab_scene_types != "":
ot, scene_types_csv = ot_tab_scene_types.split("\t")
OBJECT_TYPE_TO_SCENE_TYPES[ot] = tuple(sorted(scene_types_csv.split(",")))
SCENE_TYPE_TO_OBJECT_TYPES: Dict[str, Set[str]] = OrderedDict(
((k, set()) for k in ORDERED_SCENE_TYPES)
)
for ot_tab_scene_types in _object_type_and_location_tsv.split("\n"):
if ot_tab_scene_types != "":
ot, scene_types_csv = ot_tab_scene_types.split("\t")
for scene_type in scene_types_csv.split(","):
SCENE_TYPE_TO_OBJECT_TYPES[scene_type].add(ot)
| allenact-main | allenact_plugins/ithor_plugin/ithor_constants.py |
import glob
import math
import os
import platform
import traceback
import warnings
from contextlib import contextmanager
from typing import Sequence
import Xlib
import Xlib.display
import ai2thor.controller
@contextmanager
def include_object_data(controller: ai2thor.controller.Controller):
needs_reset = len(controller.last_event.metadata["objects"]) == 0
try:
if needs_reset:
controller.step("ResetObjectFilter")
assert controller.last_event.metadata["lastActionSuccess"]
yield None
finally:
if needs_reset:
controller.step("SetObjectFilter", objectIds=[])
assert controller.last_event.metadata["lastActionSuccess"]
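# A minimal usage sketch for `include_object_data` (the `_count_objects_example`
# helper below is illustrative, not part of the plugin): full object metadata is
# temporarily restored inside the `with` block and the empty object filter is
# re-applied afterwards.
def _count_objects_example(controller: ai2thor.controller.Controller) -> int:
    with include_object_data(controller):
        return len(controller.last_event.metadata["objects"])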
def vertical_to_horizontal_fov(
vertical_fov_in_degrees: float, height: float, width: float
):
assert 0 < vertical_fov_in_degrees < 180
aspect_ratio = width / height
vertical_fov_in_rads = (math.pi / 180) * vertical_fov_in_degrees
return (
(180 / math.pi)
* math.atan(math.tan(vertical_fov_in_rads * 0.5) * aspect_ratio)
* 2
)
def horizontal_to_vertical_fov(
horizontal_fov_in_degrees: float, height: float, width: float
):
return vertical_to_horizontal_fov(
vertical_fov_in_degrees=horizontal_fov_in_degrees, height=width, width=height,
)
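# Sanity check (a hedged example, not part of the plugin): a 90 degree vertical FOV
# on a 4:3 frame corresponds to a horizontal FOV of 2 * atan(tan(45 deg) * 4 / 3)
# ~= 106.26 degrees, and converting back recovers the original value.
def _fov_conversion_example() -> None:
    h_fov = vertical_to_horizontal_fov(90.0, height=300, width=400)
    assert abs(h_fov - 106.26) < 0.01
    assert abs(horizontal_to_vertical_fov(h_fov, height=300, width=400) - 90.0) < 1e-6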
def round_to_factor(num: float, base: int) -> int:
"""Rounds floating point number to the nearest integer multiple of the
given base. E.g., for floating number 90.1 and integer base 45, the result
is 90.
# Attributes
num : floating point number to be rounded.
base: integer base
"""
return round(num / base) * base
def get_open_x_displays(throw_error_if_empty: bool = False) -> Sequence[str]:
assert platform.system() == "Linux", "Can only get X-displays for Linux systems."
displays = []
open_display_strs = [
os.path.basename(s)[1:] for s in glob.glob("/tmp/.X11-unix/X*")
]
for open_display_str in sorted(open_display_strs):
try:
open_display_str = str(int(open_display_str))
display = Xlib.display.Display(f":{open_display_str}")
except Exception:
warnings.warn(
f"Encountered error when attempting to open display :{open_display_str},"
f" error message:\n{traceback.format_exc()}"
)
continue
displays.extend(
[f"{open_display_str}.{i}" for i in range(display.screen_count())]
)
if throw_error_if_empty and len(displays) == 0:
raise IOError(
"Could not find any open X-displays on which to run AI2-THOR processes. "
" Please see the AI2-THOR installation instructions at"
" https://allenact.org/installation/installation-framework/#installation-of-ithor-ithor-plugin"
" for information as to how to start such displays."
)
return displays
| allenact-main | allenact_plugins/ithor_plugin/ithor_util.py |
import copy
from functools import reduce
from typing import Any, Dict, Optional, Union, Sequence
import ai2thor.controller
import gym
import gym.spaces
import numpy as np
import torch
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import Task
from allenact.embodiedai.mapping.mapping_utils.map_builders import (
BinnedPointCloudMapBuilder,
SemanticMapBuilder,
ObjectHull2d,
)
from allenact.embodiedai.sensors.vision_sensors import RGBSensor
from allenact.utils.misc_utils import prepare_locals_for_super
from allenact_plugins.ithor_plugin.ithor_environment import IThorEnvironment
from allenact_plugins.ithor_plugin.ithor_tasks import ObjectNaviThorGridTask
from allenact_plugins.ithor_plugin.ithor_util import include_object_data
from allenact_plugins.robothor_plugin.robothor_environment import RoboThorEnvironment
from allenact_plugins.robothor_plugin.robothor_tasks import PointNavTask, ObjectNavTask
THOR_ENV_TYPE = Union[
ai2thor.controller.Controller, IThorEnvironment, RoboThorEnvironment
]
THOR_TASK_TYPE = Union[
Task[ai2thor.controller.Controller],
Task[IThorEnvironment],
Task[RoboThorEnvironment],
]
class RGBSensorThor(RGBSensor[THOR_ENV_TYPE, THOR_TASK_TYPE]):
"""Sensor for RGB images in THOR.
Returns from a running IThorEnvironment instance, the current RGB
frame corresponding to the agent's egocentric view.
"""
def frame_from_env(
self, env: THOR_ENV_TYPE, task: Optional[THOR_TASK_TYPE],
) -> np.ndarray: # type:ignore
if isinstance(env, ai2thor.controller.Controller):
return env.last_event.frame.copy()
else:
return env.current_frame.copy()
class GoalObjectTypeThorSensor(Sensor):
def __init__(
self,
object_types: Sequence[str],
target_to_detector_map: Optional[Dict[str, str]] = None,
detector_types: Optional[Sequence[str]] = None,
uuid: str = "goal_object_type_ind",
**kwargs: Any,
):
self.ordered_object_types = list(object_types)
assert self.ordered_object_types == sorted(
self.ordered_object_types
), "object types input to goal object type sensor must be ordered"
self.target_to_detector_map = target_to_detector_map
if target_to_detector_map is None:
self.object_type_to_ind = {
ot: i for i, ot in enumerate(self.ordered_object_types)
}
else:
assert (
detector_types is not None
), "Missing detector_types for map {}".format(target_to_detector_map)
self.target_to_detector = target_to_detector_map
self.detector_types = detector_types
detector_index = {ot: i for i, ot in enumerate(self.detector_types)}
self.object_type_to_ind = {
ot: detector_index[self.target_to_detector[ot]]
for ot in self.ordered_object_types
}
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self):
if self.target_to_detector_map is None:
return gym.spaces.Discrete(len(self.ordered_object_types))
else:
return gym.spaces.Discrete(len(self.detector_types))
def get_observation(
self,
env: IThorEnvironment,
task: Optional[ObjectNaviThorGridTask],
*args: Any,
**kwargs: Any,
) -> Any:
return self.object_type_to_ind[task.task_info["object_type"]]
class TakeEndActionThorNavSensor(
Sensor[
Union[RoboThorEnvironment, IThorEnvironment],
Union[ObjectNaviThorGridTask, ObjectNavTask, PointNavTask],
]
):
def __init__(self, nactions: int, uuid: str, **kwargs: Any) -> None:
self.nactions = nactions
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self) -> gym.spaces.Discrete:
"""The observation space.
Equals `gym.spaces.Discrete(2)` where a 0 indicates that the agent
**should not** take the `End` action and a 1 indicates that the agent
**should** take the end action.
"""
return gym.spaces.Discrete(2)
def get_observation( # type:ignore
self,
env: IThorEnvironment,
task: Union[ObjectNaviThorGridTask, ObjectNavTask, PointNavTask],
*args,
**kwargs,
) -> np.ndarray:
if isinstance(task, ObjectNaviThorGridTask):
should_end = task.is_goal_object_visible()
elif isinstance(task, ObjectNavTask):
should_end = task._is_goal_in_range()
elif isinstance(task, PointNavTask):
should_end = task._is_goal_in_range()
else:
raise NotImplementedError
if should_end is None:
should_end = False
return np.array([1 * should_end], dtype=np.int64)
class RelativePositionChangeTHORSensor(
Sensor[RoboThorEnvironment, Task[RoboThorEnvironment]]
):
def __init__(self, uuid: str = "rel_position_change", **kwargs: Any):
observation_space = gym.spaces.Dict(
{
"last_allocentric_position": gym.spaces.Box(
low=np.array([-np.inf, -np.inf, 0], dtype=np.float32),
high=np.array([np.inf, np.inf, 360], dtype=np.float32),
shape=(3,),
dtype=np.float32,
),
"dx_dz_dr": gym.spaces.Box(
low=np.array([-np.inf, -np.inf, -360], dtype=np.float32),
high=np.array([-np.inf, -np.inf, 360], dtype=np.float32),
shape=(3,),
dtype=np.float32,
),
}
)
super().__init__(**prepare_locals_for_super(locals()))
self.last_xzr: Optional[np.ndarray] = None
@staticmethod
def get_relative_position_change(from_xzr: np.ndarray, to_xzr: np.ndarray):
dx_dz_dr = to_xzr - from_xzr
        # Transform dx, dz (in global coordinates) into the relative coordinates
        # given by rotation r0 = from_xzr[-1]. This requires rotating everything so
        # that r0 is facing in the positive z direction. Since THOR rotations are
        # negated relative to the usual (counterclockwise) convention, this means
        # we want to rotate by r0 degrees.
theta = np.pi * from_xzr[-1] / 180
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
dx_dz_dr = (
np.array(
[
[cos_theta, -sin_theta, 0],
[sin_theta, cos_theta, 0],
[0, 0, 1], # Don't change dr
]
)
@ dx_dz_dr.reshape(-1, 1)
).reshape(-1)
dx_dz_dr[-1] = dx_dz_dr[-1] % 360
return dx_dz_dr
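    # Worked example: with from_xzr = (0, 0, 90) and to_xzr = (1, 0, 90), the global
    # displacement (dx, dz) = (1, 0) is rotated into the agent frame as (0, 1): an
    # agent facing 90 degrees (i.e. along +x) that moved one unit along x moved one
    # unit "forward" relative to its own heading.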
def get_observation(
self,
env: RoboThorEnvironment,
task: Optional[Task[RoboThorEnvironment]],
*args: Any,
**kwargs: Any,
) -> Any:
if task.num_steps_taken() == 0:
p = env.controller.last_event.metadata["agent"]["position"]
r = env.controller.last_event.metadata["agent"]["rotation"]["y"]
self.last_xzr = np.array([p["x"], p["z"], r % 360])
p = env.controller.last_event.metadata["agent"]["position"]
r = env.controller.last_event.metadata["agent"]["rotation"]["y"]
current_xzr = np.array([p["x"], p["z"], r % 360])
dx_dz_dr = self.get_relative_position_change(
from_xzr=self.last_xzr, to_xzr=current_xzr
)
to_return = {"last_allocentric_position": self.last_xzr, "dx_dz_dr": dx_dz_dr}
self.last_xzr = current_xzr
return to_return
class ReachableBoundsTHORSensor(Sensor[RoboThorEnvironment, Task[RoboThorEnvironment]]):
def __init__(self, margin: float, uuid: str = "scene_bounds", **kwargs: Any):
observation_space = gym.spaces.Dict(
{
"x_range": gym.spaces.Box(
low=np.array([-np.inf, -np.inf], dtype=np.float32),
high=np.array([np.inf, np.inf], dtype=np.float32),
shape=(2,),
dtype=np.float32,
),
"z_range": gym.spaces.Box(
low=np.array([-np.inf, -np.inf], dtype=np.float32),
high=np.array([np.inf, np.inf], dtype=np.float32),
shape=(2,),
dtype=np.float32,
),
}
)
super().__init__(**prepare_locals_for_super(locals()))
self.margin = margin
self._bounds_cache = {}
@staticmethod
def get_bounds(
controller: ai2thor.controller.Controller, margin: float,
) -> Dict[str, np.ndarray]:
positions = controller.step("GetReachablePositions").metadata["actionReturn"]
min_x = min(p["x"] for p in positions)
max_x = max(p["x"] for p in positions)
min_z = min(p["z"] for p in positions)
max_z = max(p["z"] for p in positions)
return {
"x_range": np.array([min_x - margin, max_x + margin]),
"z_range": np.array([min_z - margin, max_z + margin]),
}
def get_observation(
self,
env: RoboThorEnvironment,
task: Optional[Task[RoboThorEnvironment]],
*args: Any,
**kwargs: Any,
) -> Any:
if isinstance(env, ai2thor.controller.Controller):
controller = env
else:
controller = env.controller
scene_name = controller.last_event.metadata["sceneName"]
if scene_name not in self._bounds_cache:
self._bounds_cache[scene_name] = self.get_bounds(
controller=controller, margin=self.margin
)
return copy.deepcopy(self._bounds_cache[scene_name])
class SceneBoundsTHORSensor(Sensor[RoboThorEnvironment, Task[RoboThorEnvironment]]):
def __init__(self, uuid: str = "scene_bounds", **kwargs: Any):
observation_space = gym.spaces.Dict(
{
"x_range": gym.spaces.Box(
low=np.array([-np.inf, -np.inf]),
high=np.array([np.inf, np.inf]),
shape=(2,),
dtype=np.float32,
),
"z_range": gym.spaces.Box(
low=np.array([-np.inf, -np.inf]),
high=np.array([np.inf, np.inf]),
shape=(2,),
dtype=np.float32,
),
}
)
super().__init__(**prepare_locals_for_super(locals()))
def get_observation(
self,
env: RoboThorEnvironment,
task: Optional[Task[RoboThorEnvironment]],
*args: Any,
**kwargs: Any,
) -> Any:
scene_bounds = env.controller.last_event.metadata["sceneBounds"]
center = scene_bounds["center"]
size = scene_bounds["size"]
return {
"x_range": np.array(
[center["x"] - size["x"] / 2, center["x"] + size["x"] / 2]
),
"z_range": np.array(
[center["z"] - size["z"] / 2, center["z"] + size["z"] / 2]
),
}
class BinnedPointCloudMapTHORSensor(
Sensor[RoboThorEnvironment, Task[RoboThorEnvironment]]
):
observation_space = gym.spaces.Dict
def __init__(
self,
fov: Optional[float],
vision_range_in_cm: int,
map_size_in_cm: int,
resolution_in_cm: int,
map_range_sensor: Sensor,
return_egocentric_local_context: bool = False,
height_bins: Sequence[float] = (0.02, 2),
ego_only: bool = True,
exclude_agent: bool = False,
uuid: str = "binned_pc_map",
device: torch.device = torch.device("cpu"),
**kwargs: Any,
):
self.fov = fov
self.vision_range_in_cm = vision_range_in_cm
self.map_size_in_cm = map_size_in_cm
self.resolution_in_cm = resolution_in_cm
self.height_bins = height_bins
self.ego_only = ego_only
self.return_egocentric_local_context = return_egocentric_local_context
self.exclude_agent = exclude_agent
self.binned_pc_map_builder = BinnedPointCloudMapBuilder(
fov=fov,
vision_range_in_cm=vision_range_in_cm,
map_size_in_cm=map_size_in_cm,
resolution_in_cm=resolution_in_cm,
height_bins=height_bins,
return_egocentric_local_context=return_egocentric_local_context,
)
self.device = device
big_map_space = gym.spaces.Box(
low=0,
high=np.inf,
shape=self.binned_pc_map_builder.binned_point_cloud_map.shape,
dtype=np.float32,
)
local_map_space = gym.spaces.Box(
low=0,
high=np.inf,
shape=(self.binned_pc_map_builder.vision_range_in_map_units,) * 2
+ self.binned_pc_map_builder.binned_point_cloud_map.shape[-1:],
dtype=np.float32,
)
space_dict = {
"egocentric_update": local_map_space,
}
        if self.return_egocentric_local_context:
            # Add (rather than replace) so "egocentric_update" is always present.
            space_dict["egocentric_local_context"] = copy.deepcopy(local_map_space)
if not ego_only:
space_dict["allocentric_update"] = copy.deepcopy(big_map_space)
space_dict["map"] = copy.deepcopy(big_map_space)
observation_space = gym.spaces.Dict(space_dict)
super().__init__(**prepare_locals_for_super(locals()))
self.map_range_sensor = map_range_sensor
@property
def device(self):
return self.binned_pc_map_builder.device
@device.setter
def device(self, val: torch.device):
self.binned_pc_map_builder.device = torch.device(val)
def get_observation(
self,
env: RoboThorEnvironment,
task: Optional[Task[RoboThorEnvironment]],
*args: Any,
**kwargs: Any,
) -> Any:
if isinstance(env, ai2thor.controller.Controller):
controller = env
else:
controller = env.controller
e = controller.last_event
metadata = e.metadata
if task.num_steps_taken() == 0:
xz_ranges_dict = self.map_range_sensor.get_observation(env=env, task=task)
if self.fov is None:
self.binned_pc_map_builder.fov = e.metadata["fov"]
self.binned_pc_map_builder.reset(
min_xyz=np.array(
[
xz_ranges_dict["x_range"][0],
0, # TODO: Should y be different per scene?
xz_ranges_dict["z_range"][0],
]
)
)
depth_frame = e.depth_frame
if self.exclude_agent:
depth_frame = depth_frame.copy()
assert len(e.instance_masks) > 0
depth_frame[~reduce(np.logical_or, e.instance_masks.values())] = np.nan
map_dict = self.binned_pc_map_builder.update(
depth_frame=depth_frame,
camera_xyz=np.array(
[metadata["cameraPosition"][k] for k in ["x", "y", "z"]]
),
camera_rotation=metadata["agent"]["rotation"]["y"],
camera_horizon=metadata["agent"]["cameraHorizon"],
)
return {k: map_dict[k] for k in self.observation_space.spaces.keys()}
class SemanticMapTHORSensor(Sensor[RoboThorEnvironment, Task[RoboThorEnvironment]]):
observation_space = gym.spaces.Dict
def __init__(
self,
fov: float,
vision_range_in_cm: int,
map_size_in_cm: int,
resolution_in_cm: int,
ordered_object_types: Sequence[str],
map_range_sensor: Sensor,
ego_only: bool = True,
uuid: str = "semantic_map",
device: torch.device = torch.device("cpu"),
**kwargs: Any,
):
self.fov = fov
self.vision_range_in_cm = vision_range_in_cm
self.map_size_in_cm = map_size_in_cm
self.resolution_in_cm = resolution_in_cm
self.ordered_object_types = ordered_object_types
self.map_range_sensor = map_range_sensor
self.ego_only = ego_only
self.semantic_map_builder = SemanticMapBuilder(
fov=fov,
vision_range_in_cm=vision_range_in_cm,
map_size_in_cm=map_size_in_cm,
resolution_in_cm=resolution_in_cm,
ordered_object_types=ordered_object_types,
device=device,
)
def get_map_space(nchannels: int, size: int):
return gym.spaces.Box(
low=0, high=1, shape=(size, size, nchannels), dtype=np.bool_,
)
n = len(self.ordered_object_types)
small = self.vision_range_in_cm // self.resolution_in_cm
big = self.semantic_map_builder.ground_truth_semantic_map.shape[0]
space_dict = {
"egocentric_update": get_map_space(nchannels=n, size=small,),
"egocentric_mask": get_map_space(nchannels=1, size=small,),
}
if not ego_only:
space_dict["explored_mask"] = get_map_space(nchannels=1, size=big,)
space_dict["map"] = get_map_space(nchannels=n, size=big,)
observation_space = gym.spaces.Dict(space_dict)
super().__init__(**prepare_locals_for_super(locals()))
@property
def device(self):
return self.semantic_map_builder.device
@device.setter
def device(self, val: torch.device):
self.semantic_map_builder.device = torch.device(val)
def get_observation(
self,
env: RoboThorEnvironment,
task: Optional[Task[RoboThorEnvironment]],
*args: Any,
**kwargs: Any,
) -> Any:
with include_object_data(env.controller):
last_event = env.controller.last_event
metadata = last_event.metadata
if task.num_steps_taken() == 0:
env.controller.step(
"Get2DSemanticHulls", objectTypes=self.ordered_object_types
)
                assert env.last_event.metadata[
                    "lastActionSuccess"
                ], f"Get2DSemanticHulls failed with error '{env.last_event.metadata['errorMessage']}'"
object_id_to_hull = env.controller.last_event.metadata["actionReturn"]
xz_ranges_dict = self.map_range_sensor.get_observation(
env=env, task=task
)
self.semantic_map_builder.reset(
min_xyz=np.array(
[
xz_ranges_dict["x_range"][0],
0, # TODO: Should y be different per scene?
xz_ranges_dict["z_range"][0],
]
),
object_hulls=[
ObjectHull2d(
object_id=o["objectId"],
object_type=o["objectType"],
hull_points=object_id_to_hull[o["objectId"]],
)
for o in metadata["objects"]
if o["objectId"] in object_id_to_hull
],
)
map_dict = self.semantic_map_builder.update(
depth_frame=last_event.depth_frame,
camera_xyz=np.array(
[metadata["cameraPosition"][k] for k in ["x", "y", "z"]]
),
camera_rotation=metadata["agent"]["rotation"]["y"],
camera_horizon=metadata["agent"]["cameraHorizon"],
)
return {
k: map_dict[k] > 0.001 if map_dict[k].dtype != np.bool_ else map_dict[k]
for k in self.observation_space.spaces.keys()
}
| allenact-main | allenact_plugins/ithor_plugin/ithor_sensors.py |
import copy
import json
import math
import os
from typing import Tuple, Sequence, Union, Dict, Optional, Any, cast, Generator, List
import colour as col
import cv2
import numpy as np
from PIL import Image, ImageDraw
from ai2thor.controller import Controller
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
from allenact.utils.system import get_logger
from allenact.utils.viz_utils import TrajectoryViz
ITHOR_VIZ_CACHED_TOPDOWN_VIEWS_DIR = os.path.join(
os.path.expanduser("~"), ".allenact", "ithor", "top_down_viz_cache"
)
class ThorPositionTo2DFrameTranslator(object):
def __init__(
self,
frame_shape_rows_cols: Tuple[int, int],
cam_position: Sequence[float],
orth_size: float,
):
self.frame_shape = frame_shape_rows_cols
self.lower_left = np.array((cam_position[0], cam_position[2])) - orth_size
self.span = 2 * orth_size
def __call__(self, position: Sequence[float]):
if len(position) == 3:
x, _, z = position
else:
x, z = position
camera_position = (np.array((x, z)) - self.lower_left) / self.span
return np.array(
(
round(self.frame_shape[0] * (1.0 - camera_position[1])),
round(self.frame_shape[1] * camera_position[0]),
),
dtype=int,
)
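# Note on the mapping above: world (x, z) is normalized against the orthographic
# camera extent; z maps to a vertically flipped row index and x to the column
# index, so positions with larger z render nearer the top of the frame.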
class ThorViz(TrajectoryViz):
def __init__(
self,
path_to_trajectory: Sequence[str] = ("task_info", "followed_path"),
label: str = "thor_trajectory",
figsize: Tuple[float, float] = (8, 8), # width, height
fontsize: float = 10,
scenes: Union[Tuple[str, int, int], Sequence[Tuple[str, int, int]]] = (
("FloorPlan{}_physics", 1, 30),
("FloorPlan{}_physics", 201, 230),
("FloorPlan{}_physics", 301, 330),
("FloorPlan{}_physics", 401, 430),
),
viz_rows_cols: Tuple[int, int] = (448, 448),
single_color: bool = False,
view_triangle_only_on_last: bool = True,
disable_view_triangle: bool = False,
line_opacity: float = 1.0,
path_to_rot_degrees: Sequence[str] = ("rotation",),
**kwargs,
):
super().__init__(
path_to_trajectory=path_to_trajectory,
label=label,
figsize=figsize,
fontsize=fontsize,
path_to_rot_degrees=path_to_rot_degrees,
**kwargs,
)
if isinstance(scenes[0], str):
scenes = [cast(Tuple[str, int, int], scenes)] # make it list of tuples
self.scenes = cast(List[Tuple[str, int, int]], scenes)
self.room_path = ITHOR_VIZ_CACHED_TOPDOWN_VIEWS_DIR
os.makedirs(self.room_path, exist_ok=True)
self.viz_rows_cols = viz_rows_cols
self.single_color = single_color
self.view_triangle_only_on_last = view_triangle_only_on_last
self.disable_view_triangle = disable_view_triangle
self.line_opacity = line_opacity
# Only needed for rendering
self.map_data: Optional[Dict[str, Any]] = None
self.thor_top_downs: Optional[Dict[str, np.ndarray]] = None
self.controller: Optional[Controller] = None
def init_top_down_render(self):
self.map_data = self.get_translator()
self.thor_top_downs = self.make_top_down_views()
# No controller needed after this point
if self.controller is not None:
self.controller.stop()
self.controller = None
@staticmethod
def iterate_scenes(
all_scenes: Sequence[Tuple[str, int, int]]
) -> Generator[str, None, None]:
        for scenes in all_scenes:
            for scene_num in range(scenes[1], scenes[2] + 1):
                roomname = scenes[0].format(scene_num)
                yield roomname
def cached_map_data_path(self, roomname: str) -> str:
return os.path.join(self.room_path, "map_data__{}.json".format(roomname))
def get_translator(self) -> Dict[str, Any]:
# roomname = list(ThorViz.iterate_scenes(self.scenes))[0]
all_map_data = {}
for roomname in ThorViz.iterate_scenes(self.scenes):
json_file = self.cached_map_data_path(roomname)
if not os.path.exists(json_file):
self.make_controller()
self.controller.reset(roomname)
map_data = self.get_agent_map_data()
get_logger().info("Dumping {}".format(json_file))
with open(json_file, "w") as f:
json.dump(map_data, f, indent=4, sort_keys=True)
else:
with open(json_file, "r") as f:
map_data = json.load(f)
pos_translator = ThorPositionTo2DFrameTranslator(
self.viz_rows_cols,
self.position_to_tuple(map_data["cam_position"]),
map_data["cam_orth_size"],
)
map_data["pos_translator"] = pos_translator
all_map_data[roomname] = map_data
get_logger().debug("Using map_data {}".format(all_map_data))
return all_map_data
def cached_image_path(self, roomname: str) -> str:
return os.path.join(
self.room_path, "{}__r{}_c{}.png".format(roomname, *self.viz_rows_cols)
)
def make_top_down_views(self) -> Dict[str, np.ndarray]:
top_downs = {}
for roomname in self.iterate_scenes(self.scenes):
fname = self.cached_image_path(roomname)
if not os.path.exists(fname):
self.make_controller()
self.dump_top_down_view(roomname, fname)
top_downs[roomname] = cv2.imread(fname)
return top_downs
def crop_viz_image(self, viz_image: np.ndarray) -> np.ndarray:
        # The crop keeps the full frame height (the 0 and 1 factors are fractional
        # crop bounds, left as placeholders in case trimming is ever needed):
        y_min = int(self.viz_rows_cols[0] * 0)
        y_max = int(self.viz_rows_cols[0] * 1)
        # And it covers the entire width:
x_min = 0
x_max = self.viz_rows_cols[1]
cropped_viz_image = viz_image[y_min:y_max, x_min:x_max, :]
return cropped_viz_image
def make_controller(self):
if self.controller is None:
self.controller = Controller()
self.controller.step({"action": "ChangeQuality", "quality": "Very High"})
self.controller.step(
{
"action": "ChangeResolution",
"x": self.viz_rows_cols[1],
"y": self.viz_rows_cols[0],
}
)
def get_agent_map_data(self):
self.controller.step({"action": "ToggleMapView"})
cam_position = self.controller.last_event.metadata["cameraPosition"]
cam_orth_size = self.controller.last_event.metadata["cameraOrthSize"]
to_return = {
"cam_position": cam_position,
"cam_orth_size": cam_orth_size,
}
self.controller.step({"action": "ToggleMapView"})
return to_return
@staticmethod
def position_to_tuple(position: Dict[str, float]) -> Tuple[float, float, float]:
return position["x"], position["y"], position["z"]
@staticmethod
def add_lines_to_map(
ps: Sequence[Any],
frame: np.ndarray,
pos_translator: ThorPositionTo2DFrameTranslator,
opacity: float,
color: Optional[Tuple[int, ...]] = None,
) -> np.ndarray:
if len(ps) <= 1:
return frame
if color is None:
color = (255, 0, 0)
img1 = Image.fromarray(frame.astype("uint8"), "RGB").convert("RGBA")
img2 = Image.new("RGBA", frame.shape[:-1]) # Use RGBA
        opacity = int(round(255 * opacity))  # Define transparency for the lines.
draw = ImageDraw.Draw(img2)
for i in range(len(ps) - 1):
draw.line(
tuple(reversed(pos_translator(ps[i])))
+ tuple(reversed(pos_translator(ps[i + 1]))),
fill=color + (opacity,),
width=int(frame.shape[0] / 100),
)
img = Image.alpha_composite(img1, img2)
return np.array(img.convert("RGB"))
@staticmethod
def add_line_to_map(
p0: Any,
p1: Any,
frame: np.ndarray,
pos_translator: ThorPositionTo2DFrameTranslator,
opacity: float,
color: Optional[Tuple[int, ...]] = None,
) -> np.ndarray:
if p0 == p1:
return frame
if color is None:
color = (255, 0, 0)
img1 = Image.fromarray(frame.astype("uint8"), "RGB").convert("RGBA")
img2 = Image.new("RGBA", frame.shape[:-1]) # Use RGBA
        opacity = int(round(255 * opacity))  # Define transparency for the line.
draw = ImageDraw.Draw(img2)
draw.line(
tuple(reversed(pos_translator(p0))) + tuple(reversed(pos_translator(p1))),
fill=color + (opacity,),
width=int(frame.shape[0] / 100),
)
img = Image.alpha_composite(img1, img2)
return np.array(img.convert("RGB"))
@staticmethod
def add_agent_view_triangle(
position: Any,
rotation: float,
frame: np.ndarray,
pos_translator: ThorPositionTo2DFrameTranslator,
scale: float = 1.0,
opacity: float = 0.1,
) -> np.ndarray:
p0 = np.array((position[0], position[2]))
p1 = copy.copy(p0)
p2 = copy.copy(p0)
theta = -2 * math.pi * (rotation / 360.0)
rotation_mat = np.array(
[[math.cos(theta), -math.sin(theta)], [math.sin(theta), math.cos(theta)]]
)
offset1 = scale * np.array([-1 / 2.0, 1])
offset2 = scale * np.array([1 / 2.0, 1])
p1 += np.matmul(rotation_mat, offset1)
p2 += np.matmul(rotation_mat, offset2)
img1 = Image.fromarray(frame.astype("uint8"), "RGB").convert("RGBA")
img2 = Image.new("RGBA", frame.shape[:-1]) # Use RGBA
opacity = int(round(255 * opacity)) # Define transparency for the triangle.
points = [tuple(reversed(pos_translator(p))) for p in [p0, p1, p2]]
draw = ImageDraw.Draw(img2)
draw.polygon(points, fill=(255, 255, 255, opacity))
img = Image.alpha_composite(img1, img2)
return np.array(img.convert("RGB"))
@staticmethod
def visualize_agent_path(
positions: Sequence[Any],
frame: np.ndarray,
pos_translator: ThorPositionTo2DFrameTranslator,
single_color: bool = False,
view_triangle_only_on_last: bool = False,
disable_view_triangle: bool = False,
line_opacity: float = 1.0,
trajectory_start_end_color_str: Tuple[str, str] = ("red", "green"),
) -> np.ndarray:
if single_color:
frame = ThorViz.add_lines_to_map(
list(map(ThorViz.position_to_tuple, positions)),
frame,
pos_translator,
line_opacity,
tuple(
map(
lambda x: int(round(255 * x)),
col.Color(trajectory_start_end_color_str[0]).rgb,
)
),
)
else:
if len(positions) > 1:
colors = list(
col.Color(trajectory_start_end_color_str[0]).range_to(
col.Color(trajectory_start_end_color_str[1]), len(positions) - 1
)
)
for i in range(len(positions) - 1):
frame = ThorViz.add_line_to_map(
ThorViz.position_to_tuple(positions[i]),
ThorViz.position_to_tuple(positions[i + 1]),
frame,
pos_translator,
opacity=line_opacity,
color=tuple(map(lambda x: int(round(255 * x)), colors[i].rgb)),
)
if view_triangle_only_on_last:
positions = [positions[-1]]
if disable_view_triangle:
positions = []
for position in positions:
frame = ThorViz.add_agent_view_triangle(
ThorViz.position_to_tuple(position),
rotation=position["rotation"],
frame=frame,
pos_translator=pos_translator,
opacity=0.05 + view_triangle_only_on_last * 0.2,
)
return frame
def dump_top_down_view(self, room_name: str, image_path: str):
get_logger().debug("Dumping {}".format(image_path))
self.controller.reset(room_name)
self.controller.step(
{"action": "Initialize", "gridSize": 0.1, "makeAgentsVisible": False}
)
self.controller.step({"action": "ToggleMapView"})
top_down_view = self.controller.last_event.cv2img
cv2.imwrite(image_path, top_down_view)
def make_fig(self, episode: Any, episode_id: str) -> Figure:
trajectory: Sequence[Dict[str, Any]] = self._access(
episode, self.path_to_trajectory
)
if self.thor_top_downs is None:
self.init_top_down_render()
roomname = "_".join(episode_id.split("_")[:2])
im = self.visualize_agent_path(
trajectory,
self.thor_top_downs[roomname],
self.map_data[roomname]["pos_translator"],
single_color=self.single_color,
view_triangle_only_on_last=self.view_triangle_only_on_last,
disable_view_triangle=self.disable_view_triangle,
line_opacity=self.line_opacity,
)
fig, ax = plt.subplots(figsize=self.figsize)
ax.set_title(episode_id, fontsize=self.fontsize)
ax.imshow(self.crop_viz_image(im)[:, :, ::-1])
ax.axis("off")
return fig
class ThorMultiViz(ThorViz):
def __init__(
self,
path_to_trajectory_prefix: Sequence[str] = ("task_info", "followed_path"),
agent_suffixes: Sequence[str] = ("1", "2"),
label: str = "thor_trajectories",
trajectory_start_end_color_strs: Sequence[Tuple[str, str]] = (
("red", "green"),
("cyan", "purple"),
),
**kwargs,
):
super().__init__(label=label, **kwargs)
self.path_to_trajectory_prefix = list(path_to_trajectory_prefix)
self.agent_suffixes = list(agent_suffixes)
self.trajectory_start_end_color_strs = list(trajectory_start_end_color_strs)
def make_fig(self, episode: Any, episode_id: str) -> Figure:
if self.thor_top_downs is None:
self.init_top_down_render()
roomname = "_".join(episode_id.split("_")[:2])
im = self.thor_top_downs[roomname]
for agent, start_end_color in zip(
self.agent_suffixes, self.trajectory_start_end_color_strs
):
path = self.path_to_trajectory_prefix[:]
path[-1] = path[-1] + agent
trajectory = self._access(episode, path)
im = self.visualize_agent_path(
trajectory,
im,
self.map_data[roomname]["pos_translator"],
single_color=self.single_color,
view_triangle_only_on_last=self.view_triangle_only_on_last,
disable_view_triangle=self.disable_view_triangle,
line_opacity=self.line_opacity,
trajectory_start_end_color_str=start_end_color,
)
fig, ax = plt.subplots(figsize=self.figsize)
ax.set_title(episode_id, fontsize=self.fontsize)
ax.imshow(self.crop_viz_image(im)[:, :, ::-1])
ax.axis("off")
return fig
| allenact-main | allenact_plugins/ithor_plugin/ithor_viz.py |
import copy
import random
from typing import List, Dict, Optional, Any, Union, cast
import gym
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import TaskSampler
from allenact.utils.experiment_utils import set_deterministic_cudnn, set_seed
from allenact.utils.system import get_logger
from allenact_plugins.ithor_plugin.ithor_environment import IThorEnvironment
from allenact_plugins.ithor_plugin.ithor_tasks import ObjectNaviThorGridTask
class ObjectNavTaskSampler(TaskSampler):
def __init__(
self,
scenes: List[str],
        object_types: List[str],
sensors: List[Sensor],
max_steps: int,
env_args: Dict[str, Any],
action_space: gym.Space,
scene_period: Optional[Union[int, str]] = None,
max_tasks: Optional[int] = None,
seed: Optional[int] = None,
deterministic_cudnn: bool = False,
**kwargs,
) -> None:
self.env_args = env_args
self.scenes = scenes
self.object_types = object_types
self.grid_size = 0.25
self.env: Optional[IThorEnvironment] = None
self.sensors = sensors
self.max_steps = max_steps
self._action_space = action_space
self.scene_counter: Optional[int] = None
self.scene_order: Optional[List[str]] = None
self.scene_id: Optional[int] = None
self.scene_period: Optional[
Union[str, int]
] = scene_period # default makes a random choice
self.max_tasks: Optional[int] = None
self.reset_tasks = max_tasks
self._last_sampled_task: Optional[ObjectNaviThorGridTask] = None
self.seed: Optional[int] = None
self.set_seed(seed)
if deterministic_cudnn:
set_deterministic_cudnn()
self.reset()
def _create_environment(self) -> IThorEnvironment:
env = IThorEnvironment(
make_agents_visible=False,
object_open_speed=0.05,
restrict_to_initially_reachable_points=True,
**self.env_args,
)
return env
@property
def length(self) -> Union[int, float]:
"""Length.
# Returns
Number of total tasks remaining that can be sampled. Can be float('inf').
"""
return float("inf") if self.max_tasks is None else self.max_tasks
@property
def total_unique(self) -> Optional[Union[int, float]]:
return None
@property
def last_sampled_task(self) -> Optional[ObjectNaviThorGridTask]:
return self._last_sampled_task
def close(self) -> None:
if self.env is not None:
self.env.stop()
@property
def all_observation_spaces_equal(self) -> bool:
"""Check if observation spaces equal.
# Returns
True if all Tasks that can be sampled by this sampler have the
same observation space. Otherwise False.
"""
return True
def sample_scene(self, force_advance_scene: bool):
if force_advance_scene:
if self.scene_period != "manual":
get_logger().warning(
"When sampling scene, have `force_advance_scene == True`"
"but `self.scene_period` is not equal to 'manual',"
"this may cause unexpected behavior."
)
self.scene_id = (1 + self.scene_id) % len(self.scenes)
if self.scene_id == 0:
random.shuffle(self.scene_order)
if self.scene_period is None:
# Random scene
self.scene_id = random.randint(0, len(self.scenes) - 1)
elif self.scene_period == "manual":
pass
elif self.scene_counter >= cast(int, self.scene_period):
if self.scene_id == len(self.scene_order) - 1:
# Randomize scene order for next iteration
random.shuffle(self.scene_order)
                    # Wrap around to the first scene
                    self.scene_id = 0
else:
# Move to next scene
self.scene_id += 1
# Reset scene counter
self.scene_counter = 1
elif isinstance(self.scene_period, int):
# Stay in current scene
self.scene_counter += 1
else:
raise NotImplementedError(
"Invalid scene_period {}".format(self.scene_period)
)
if self.max_tasks is not None:
self.max_tasks -= 1
return self.scenes[int(self.scene_order[self.scene_id])]
def next_task(
self, force_advance_scene: bool = False
) -> Optional[ObjectNaviThorGridTask]:
if self.max_tasks is not None and self.max_tasks <= 0:
return None
scene = self.sample_scene(force_advance_scene)
if self.env is not None:
if scene.replace("_physics", "") != self.env.scene_name.replace(
"_physics", ""
):
self.env.reset(scene)
else:
self.env = self._create_environment()
self.env.reset(scene_name=scene)
pose = self.env.randomize_agent_location()
object_types_in_scene = set(
[o["objectType"] for o in self.env.last_event.metadata["objects"]]
)
task_info: Dict[str, Any] = {}
for ot in random.sample(self.object_types, len(self.object_types)):
if ot in object_types_in_scene:
task_info["object_type"] = ot
break
if len(task_info) == 0:
get_logger().warning(
"Scene {} does not contain any"
" objects of any of the types {}.".format(scene, self.object_types)
)
task_info["start_pose"] = copy.copy(pose)
task_info[
"id"
] = f"{scene}__{'_'.join(list(map(str, self.env.get_key(pose))))}__{task_info['object_type']}"
self._last_sampled_task = ObjectNaviThorGridTask(
env=self.env,
sensors=self.sensors,
task_info=task_info,
max_steps=self.max_steps,
action_space=self._action_space,
)
return self._last_sampled_task
def reset(self):
self.scene_counter = 0
self.scene_order = list(range(len(self.scenes)))
random.shuffle(self.scene_order)
self.scene_id = 0
self.max_tasks = self.reset_tasks
def set_seed(self, seed: int):
self.seed = seed
if seed is not None:
set_seed(seed)
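# A hedged usage sketch (the argument values below are illustrative, not canonical
# defaults): a sampler is typically created once per worker process and queried for
# tasks until it is exhausted.
#
#     sampler = ObjectNavTaskSampler(
#         scenes=["FloorPlan1_physics"],
#         object_types=["Apple"],
#         sensors=[],
#         max_steps=128,
#         env_args={},
#         action_space=gym.spaces.Discrete(6),
#         max_tasks=10,
#     )
#     task = sampler.next_task()
#     while task is not None:
#         ...  # step a policy through `task`
#         task = sampler.next_task()
#     sampler.close()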
| allenact-main | allenact_plugins/ithor_plugin/ithor_task_samplers.py |
import os
from allenact_plugins.robothor_plugin.scripts.make_objectnav_debug_dataset import (
create_debug_dataset_from_train_dataset,
)
if __name__ == "__main__":
CURRENT_PATH = os.getcwd()
SCENE = "FloorPlan1"
TARGET = "Apple"
EPISODES = [0, 7, 11, 12]
BASE_OUT = os.path.join(CURRENT_PATH, "datasets", "ithor-objectnav", "debug")
create_debug_dataset_from_train_dataset(
scene=SCENE,
target_object_type=TARGET,
episodes_subset=EPISODES,
train_dataset_path=os.path.join(
CURRENT_PATH, "datasets", "ithor-objectnav", "train"
),
base_debug_output_path=BASE_OUT,
)
| allenact-main | allenact_plugins/ithor_plugin/scripts/make_objectnav_debug_dataset.py |
allenact-main | allenact_plugins/ithor_plugin/scripts/__init__.py |
|
import os
from allenact_plugins.robothor_plugin.scripts.make_objectnav_debug_dataset import (
create_debug_dataset_from_train_dataset,
)
if __name__ == "__main__":
CURRENT_PATH = os.getcwd()
SCENE = "FloorPlan1"
EPISODES = [0, 7, 11, 12]
BASE_OUT = os.path.join(CURRENT_PATH, "datasets", "ithor-pointnav", "debug")
create_debug_dataset_from_train_dataset(
scene=SCENE,
target_object_type=None,
episodes_subset=EPISODES,
train_dataset_path=os.path.join(
CURRENT_PATH, "datasets", "ithor-pointnav", "train"
),
base_debug_output_path=BASE_OUT,
)
| allenact-main | allenact_plugins/ithor_plugin/scripts/make_pointnav_debug_dataset.py |
from collections import OrderedDict
from typing import Dict, Any, Optional, List, cast
import gym
import numpy as np
import torch
from gym.spaces.dict import Dict as SpaceDict
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.utils.cacheless_frcnn import fasterrcnn_resnet50_fpn
from allenact.utils.misc_utils import prepare_locals_for_super
class BatchedFasterRCNN(torch.nn.Module):
# fmt: off
COCO_INSTANCE_CATEGORY_NAMES = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
# fmt: on
def __init__(self, thres=0.12, maxdets=3, res=7):
super().__init__()
self.model = fasterrcnn_resnet50_fpn(pretrained=True)
self.eval()
self.min_score = thres
self.maxdets = maxdets
self.res = res
def detector_tensor(self, boxes, classes, scores, aspect_ratio=1.0):
res, maxdets = self.res, self.maxdets
bins = np.array(list(range(res + 1)))[1:-1] / res
res_classes = torch.zeros(
res, res, maxdets, dtype=torch.int64
) # 0 is background
res_boxes = -1 * torch.ones(
res, res, maxdets, 5
) # regular range is [0, 1] (vert) or [0, aspect_ratio] (horiz)
temp = [[[] for _ in range(res)] for _ in range(res)] # grid of arrays
# # TODO Debug
# print('NEW IMAGE')
for it in range(classes.shape[0]):
cx = (boxes[it, 0].item() + boxes[it, 2].item()) / 2
cy = (boxes[it, 1].item() + boxes[it, 3].item()) / 2
px = np.digitize(cx, bins=aspect_ratio * bins).item()
py = np.digitize(cy, bins=bins).item()
temp[py][px].append(
(
scores[it][classes[it]].item(), # prob
(boxes[it, 2] - boxes[it, 0]).item() / aspect_ratio, # width
(boxes[it, 3] - boxes[it, 1]).item(), # height
boxes[it, 0].item() / aspect_ratio, # x
boxes[it, 1].item(), # y
classes[it].item(), # class
)
)
# # TODO Debug:
# print(self.COCO_INSTANCE_CATEGORY_NAMES[classes[it].item()])
for py in range(res):
for px in range(res):
order = sorted(temp[py][px], reverse=True)[:maxdets]
for it, data in enumerate(order):
res_classes[py, px, it] = data[-1]
res_boxes[py, px, it, :] = torch.tensor(
list(data[:-1])
) # prob, size, top left
res_classes = res_classes.permute(2, 0, 1).unsqueeze(0).contiguous()
res_boxes = (
res_boxes.view(res, res, -1).permute(2, 0, 1).unsqueeze(0).contiguous()
)
return res_classes, res_boxes
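    # Illustrative note (not in the original source): for the defaults res=7 and
    # maxdets=3, `detector_tensor` returns
    #   res_classes: int64 tensor of shape (1, 3, 7, 7), one class id per
    #       detection slot in every spatial cell (0 denotes background), and
    #   res_boxes: float tensor of shape (1, 15, 7, 7), i.e. three
    #       (prob, width, height, x, y) tuples stacked channel-wise, with -1
    #       padding for empty detection slots.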
def forward(self, imbatch):
with torch.no_grad():
imglist = [im_in.squeeze(0) for im_in in imbatch.split(split_size=1, dim=0)]
# # TODO Debug
# import cv2
# for it, im_in in enumerate(imglist):
# cvim = 255.0 * im_in.to('cpu').permute(1, 2, 0).numpy()[:, :, ::-1]
# cv2.imwrite('test_highres{}.png'.format(it), cvim)
preds = self.model(imglist)
keeps = [
pred["scores"] > self.min_score for pred in preds
] # already after nms
# [0, 1] for rows, [0, aspect_ratio] for cols (im_in is C x H x W), with all images of same size (batch)
all_boxes = [
pred["boxes"][keep] / imbatch.shape[-2]
for pred, keep in zip(preds, keeps)
]
all_classes = [pred["labels"][keep] for pred, keep in zip(preds, keeps)]
all_pred_scores = [pred["scores"][keep] for pred, keep in zip(preds, keeps)]
# hack: fill in a full prob score (all classes, 0 score if undetected) for each box, for backwards compatibility
all_scores = [
torch.zeros(pred_scores.shape[0], 91, device=pred_scores.device)
for pred_scores in all_pred_scores
]
all_scores = [
torch.where(
torch.arange(91, device=pred_scores.device).unsqueeze(0)
== merged_classes.unsqueeze(1),
pred_scores.unsqueeze(1),
scores,
)
for merged_classes, pred_scores, scores in zip(
all_classes, all_pred_scores, all_scores
)
]
all_classes_boxes = [
self.detector_tensor(
boxes,
classes,
scores,
aspect_ratio=imbatch.shape[-1] / imbatch.shape[-2],
)
for boxes, classes, scores in zip(all_boxes, all_classes, all_scores)
]
classes = torch.cat(
[classes_boxes[0] for classes_boxes in all_classes_boxes], dim=0
).to(imbatch.device)
boxes = torch.cat(
[classes_boxes[1] for classes_boxes in all_classes_boxes], dim=0
).to(imbatch.device)
return classes, boxes
class FasterRCNNPreProcessorRoboThor(Preprocessor):
"""Preprocess RGB image using a ResNet model."""
COCO_INSTANCE_CATEGORY_NAMES = BatchedFasterRCNN.COCO_INSTANCE_CATEGORY_NAMES
def __init__(
self,
input_uuids: List[str],
output_uuid: str,
input_height: int,
input_width: int,
max_dets: int,
detector_spatial_res: int,
detector_thres: float,
device: Optional[torch.device] = None,
device_ids: Optional[List[torch.device]] = None,
**kwargs: Any,
):
self.input_height = input_height
self.input_width = input_width
self.max_dets = max_dets
self.detector_spatial_res = detector_spatial_res
self.detector_thres = detector_thres
self.device = torch.device("cpu") if device is None else device
self.device_ids = device_ids or cast(
List[torch.device], list(range(torch.cuda.device_count()))
)
self.frcnn: BatchedFasterRCNN = BatchedFasterRCNN(
thres=self.detector_thres,
maxdets=self.max_dets,
res=self.detector_spatial_res,
)
spaces: OrderedDict[str, gym.Space] = OrderedDict()
shape = (self.max_dets, self.detector_spatial_res, self.detector_spatial_res)
spaces["frcnn_classes"] = gym.spaces.Box(
low=0, # 0 is bg
high=len(self.COCO_INSTANCE_CATEGORY_NAMES) - 1,
shape=shape,
dtype=np.int64,
)
shape = (
self.max_dets * 5,
self.detector_spatial_res,
self.detector_spatial_res,
)
spaces["frcnn_boxes"] = gym.spaces.Box(low=-np.inf, high=np.inf, shape=shape)
assert (
len(input_uuids) == 1
), "fasterrcnn preprocessor can only consume one observation type"
observation_space = SpaceDict(spaces=spaces)
super().__init__(**prepare_locals_for_super(locals()))
def to(self, device: torch.device) -> "FasterRCNNPreProcessorRoboThor":
self.frcnn = self.frcnn.to(device)
self.device = device
return self
def process(self, obs: Dict[str, Any], *args: Any, **kwargs: Any) -> Any:
frames_tensor = (
obs[self.input_uuids[0]].to(self.device).permute(0, 3, 1, 2)
) # bhwc -> bchw (unnormalized)
classes, boxes = self.frcnn(frames_tensor)
return {"frcnn_classes": classes, "frcnn_boxes": boxes}
| allenact-main | allenact_plugins/robothor_plugin/robothor_preprocessors.py |
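A minimal usage sketch for the preprocessor above (not part of the plugin; the uuids, image sizes, and thresholds below are illustrative placeholders, and the first call downloads pretrained Faster R-CNN weights):
import torch
from allenact_plugins.robothor_plugin.robothor_preprocessors import (
    FasterRCNNPreProcessorRoboThor,
)
preprocessor = FasterRCNNPreProcessorRoboThor(
    input_uuids=["rgb_lowres"],  # hypothetical sensor uuid
    output_uuid="object_detections",
    input_height=224,
    input_width=224,
    max_dets=3,
    detector_spatial_res=7,
    detector_thres=0.12,
)
# Observations are expected in BHWC layout with RGB values in [0, 1].
observations = {"rgb_lowres": torch.rand(2, 224, 224, 3)}
output = preprocessor.process(observations)
assert output["frcnn_classes"].shape == (2, 3, 7, 7)  # max_dets x res x res
assert output["frcnn_boxes"].shape == (2, 15, 7, 7)  # (max_dets * 5) x res x res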
import copy
import gzip
import json
import random
from typing import List, Optional, Union, Dict, Any, cast, Tuple
import gym
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import TaskSampler
from allenact.utils.cache_utils import str_to_pos_for_cache
from allenact.utils.experiment_utils import set_seed, set_deterministic_cudnn
from allenact.utils.system import get_logger
from allenact_plugins.robothor_plugin.robothor_environment import RoboThorEnvironment
from allenact_plugins.robothor_plugin.robothor_tasks import (
ObjectNavTask,
PointNavTask,
NavToPartnerTask,
)
class ObjectNavTaskSampler(TaskSampler):
def __init__(
self,
scenes: Union[List[str], str],
object_types: List[str],
sensors: List[Sensor],
max_steps: int,
env_args: Dict[str, Any],
action_space: gym.Space,
rewards_config: Dict,
scene_period: Optional[Union[int, str]] = None,
max_tasks: Optional[int] = None,
seed: Optional[int] = None,
deterministic_cudnn: bool = False,
allow_flipping: bool = False,
dataset_first: int = -1,
dataset_last: int = -1,
**kwargs,
) -> None:
self.rewards_config = rewards_config
self.env_args = env_args
self.scenes = scenes
self.object_types = object_types
self.env: Optional[RoboThorEnvironment] = None
self.sensors = sensors
self.max_steps = max_steps
self._action_space = action_space
self.allow_flipping = allow_flipping
self.scenes_is_dataset = (dataset_first >= 0) or (dataset_last >= 0)
if not self.scenes_is_dataset:
assert isinstance(
self.scenes, List
), "When not using a dataset, scenes ({}) must be a list".format(
self.scenes
)
self.scene_counter: Optional[int] = None
self.scene_order: Optional[List[str]] = None
self.scene_id: Optional[int] = None
self.scene_period: Optional[
Union[str, int]
] = scene_period # default makes a random choice
self.max_tasks: Optional[int] = None
self.reset_tasks = max_tasks
else:
assert isinstance(
self.scenes, str
), "When using a dataset, scenes ({}) must be a json file name string".format(
self.scenes
)
with open(self.scenes, "r") as f:
self.dataset_episodes = json.load(f)
# get_logger().debug("Loaded {} object nav episodes".format(len(self.dataset_episodes)))
self.dataset_first = dataset_first if dataset_first >= 0 else 0
self.dataset_last = (
dataset_last if dataset_last >= 0 else len(self.dataset_episodes) - 1
)
assert (
0 <= self.dataset_first <= self.dataset_last
), "dataset_last {} must be >= dataset_first {} >= 0".format(
dataset_last, dataset_first
)
self.reset_tasks = self.dataset_last - self.dataset_first + 1
# get_logger().debug("{} tasks ({}, {}) in sampler".format(self.reset_tasks, self.dataset_first, self.dataset_last))
self._last_sampled_task: Optional[ObjectNavTask] = None
self.seed: Optional[int] = None
self.set_seed(seed)
if deterministic_cudnn:
set_deterministic_cudnn()
self.reset()
def _create_environment(self) -> RoboThorEnvironment:
env = RoboThorEnvironment(**self.env_args)
return env
@property
def length(self) -> Union[int, float]:
"""Length.
# Returns
Number of total tasks remaining that can be sampled. Can be float('inf').
"""
return float("inf") if self.max_tasks is None else self.max_tasks
@property
def total_unique(self) -> Optional[Union[int, float]]:
return self.reset_tasks
@property
def last_sampled_task(self) -> Optional[ObjectNavTask]:
return self._last_sampled_task
def close(self) -> None:
if self.env is not None:
self.env.stop()
@property
def all_observation_spaces_equal(self) -> bool:
"""Check if observation spaces equal.
# Returns
True if all Tasks that can be sampled by this sampler have the
same observation space. Otherwise False.
"""
return True
def sample_scene(self, force_advance_scene: bool):
if force_advance_scene:
if self.scene_period != "manual":
                get_logger().warning(
                    "When sampling scene, have `force_advance_scene == True` "
                    "but `self.scene_period` is not equal to 'manual', "
                    "this may cause unexpected behavior."
                )
self.scene_id = (1 + self.scene_id) % len(self.scenes)
if self.scene_id == 0:
random.shuffle(self.scene_order)
if self.scene_period is None:
# Random scene
self.scene_id = random.randint(0, len(self.scenes) - 1)
elif self.scene_period == "manual":
pass
elif self.scene_counter >= cast(int, self.scene_period):
if self.scene_id == len(self.scene_order) - 1:
# Randomize scene order for next iteration
random.shuffle(self.scene_order)
# Move to next scene
self.scene_id = 0
else:
# Move to next scene
self.scene_id += 1
# Reset scene counter
self.scene_counter = 1
elif isinstance(self.scene_period, int):
# Stay in current scene
self.scene_counter += 1
else:
raise NotImplementedError(
"Invalid scene_period {}".format(self.scene_period)
)
if self.max_tasks is not None:
self.max_tasks -= 1
return self.scenes[int(self.scene_order[self.scene_id])]
# def sample_episode(self, scene):
# self.scene_counters[scene] = (self.scene_counters[scene] + 1) % len(self.scene_to_episodes[scene])
# if self.scene_counters[scene] == 0:
# random.shuffle(self.scene_to_episodes[scene])
# return self.scene_to_episodes[scene][self.scene_counters[scene]]
def next_task(self, force_advance_scene: bool = False) -> Optional[ObjectNavTask]:
if self.max_tasks is not None and self.max_tasks <= 0:
# get_logger().debug("max_tasks {}".format(self.max_tasks))
return None
if not self.scenes_is_dataset:
scene = self.sample_scene(force_advance_scene)
if self.env is not None:
if scene.replace("_physics", "") != self.env.scene_name.replace(
"_physics", ""
):
self.env.reset(scene)
else:
self.env = self._create_environment()
self.env.reset(scene_name=scene)
pose = self.env.randomize_agent_location()
object_types_in_scene = set(
[o["objectType"] for o in self.env.last_event.metadata["objects"]]
)
task_info = {"scene": scene}
for ot in random.sample(self.object_types, len(self.object_types)):
if ot in object_types_in_scene:
task_info["object_type"] = ot
break
            if "object_type" not in task_info:
get_logger().warning(
"Scene {} does not contain any"
" objects of any of the types {}.".format(scene, self.object_types)
)
task_info["initial_position"] = {k: pose[k] for k in ["x", "y", "z"]}
task_info["initial_orientation"] = cast(Dict[str, float], pose["rotation"])[
"y"
]
else:
assert self.max_tasks is not None
next_task_id = self.dataset_first + self.max_tasks - 1
# get_logger().debug("task {}".format(next_task_id))
assert (
self.dataset_first <= next_task_id <= self.dataset_last
), "wrong task_id {} for min {} max {}".format(
next_task_id, self.dataset_first, self.dataset_last
)
task_info = copy.deepcopy(self.dataset_episodes[next_task_id])
scene = task_info["scene"]
if self.env is not None:
if scene.replace("_physics", "") != self.env.scene_name.replace(
"_physics", ""
):
self.env.reset(scene_name=scene)
else:
self.env = self._create_environment()
self.env.reset(scene_name=scene)
self.env.step(
{
"action": "TeleportFull",
**{k: float(v) for k, v in task_info["initial_position"].items()},
"rotation": {
"x": 0.0,
"y": float(task_info["initial_orientation"]),
"z": 0.0,
},
"horizon": 0.0,
"standing": True,
}
)
assert self.env.last_action_success, "Failed to reset agent for {}".format(
task_info
)
self.max_tasks -= 1
# task_info["actions"] = [] # TODO populated by Task(Generic[EnvType]).step(...) but unused
if self.allow_flipping and random.random() > 0.5:
task_info["mirrored"] = True
else:
task_info["mirrored"] = False
self._last_sampled_task = ObjectNavTask(
env=self.env,
sensors=self.sensors,
task_info=task_info,
max_steps=self.max_steps,
action_space=self._action_space,
reward_configs=self.rewards_config,
)
return self._last_sampled_task
def reset(self):
if not self.scenes_is_dataset:
self.scene_counter = 0
self.scene_order = list(range(len(self.scenes)))
random.shuffle(self.scene_order)
self.scene_id = 0
self.max_tasks = self.reset_tasks
def set_seed(self, seed: int):
self.seed = seed
if seed is not None:
set_seed(seed)
class ObjectNavDatasetTaskSampler(TaskSampler):
def __init__(
self,
scenes: List[str],
scene_directory: str,
sensors: List[Sensor],
max_steps: int,
env_args: Dict[str, Any],
action_space: gym.Space,
rewards_config: Dict,
seed: Optional[int] = None,
deterministic_cudnn: bool = False,
loop_dataset: bool = True,
allow_flipping=False,
env_class=RoboThorEnvironment,
randomize_materials_in_training: bool = False,
**kwargs,
) -> None:
self.rewards_config = rewards_config
self.env_args = env_args
self.scenes = scenes
self.episodes = {
scene: ObjectNavDatasetTaskSampler.load_dataset(
scene, scene_directory + "/episodes"
)
for scene in scenes
}
# Only keep episodes containing desired objects
if "object_types" in kwargs:
self.episodes = {
scene: [
ep for ep in episodes if ep["object_type"] in kwargs["object_types"]
]
for scene, episodes in self.episodes.items()
}
self.episodes = {
scene: episodes
for scene, episodes in self.episodes.items()
if len(episodes) > 0
}
self.scenes = [scene for scene in self.scenes if scene in self.episodes]
self.env_class = env_class
self.object_types = [
ep["object_type"] for scene in self.episodes for ep in self.episodes[scene]
]
self.env: Optional[RoboThorEnvironment] = None
self.sensors = sensors
self.max_steps = max_steps
self._action_space = action_space
self.allow_flipping = allow_flipping
self.scene_counter: Optional[int] = None
self.scene_order: Optional[List[str]] = None
self.scene_id: Optional[int] = None
# get the total number of tasks assigned to this process
if loop_dataset:
self.max_tasks = None
else:
self.max_tasks = sum(len(self.episodes[scene]) for scene in self.episodes)
self.reset_tasks = self.max_tasks
self.scene_index = 0
self.episode_index = 0
self.randomize_materials_in_training = randomize_materials_in_training
self._last_sampled_task: Optional[ObjectNavTask] = None
self.seed: Optional[int] = None
self.set_seed(seed)
if deterministic_cudnn:
set_deterministic_cudnn()
self.reset()
def _create_environment(self) -> RoboThorEnvironment:
env = self.env_class(**self.env_args)
return env
@staticmethod
def load_dataset(scene: str, base_directory: str) -> List[Dict]:
filename = (
"/".join([base_directory, scene])
if base_directory[-1] != "/"
else "".join([base_directory, scene])
)
filename += ".json.gz"
fin = gzip.GzipFile(filename, "r")
json_bytes = fin.read()
fin.close()
json_str = json_bytes.decode("utf-8")
data = json.loads(json_str)
random.shuffle(data)
return data
@staticmethod
def load_distance_cache_from_file(scene: str, base_directory: str) -> Dict:
filename = (
"/".join([base_directory, scene])
if base_directory[-1] != "/"
else "".join([base_directory, scene])
)
filename += ".json.gz"
fin = gzip.GzipFile(filename, "r")
json_bytes = fin.read()
fin.close()
json_str = json_bytes.decode("utf-8")
data = json.loads(json_str)
return data
    def __len__(self) -> Union[int, float]:
"""Length.
# Returns
Number of total tasks remaining that can be sampled. Can be float('inf').
"""
return float("inf") if self.max_tasks is None else self.max_tasks
@property
def total_unique(self) -> Optional[Union[int, float]]:
return self.reset_tasks
@property
def last_sampled_task(self) -> Optional[ObjectNavTask]:
return self._last_sampled_task
def close(self) -> None:
if self.env is not None:
self.env.stop()
@property
def all_observation_spaces_equal(self) -> bool:
"""Check if observation spaces equal.
# Returns
True if all Tasks that can be sampled by this sampler have the
same observation space. Otherwise False.
"""
return True
@property
def length(self) -> Union[int, float]:
"""Length.
# Returns
Number of total tasks remaining that can be sampled. Can be float('inf').
"""
return float("inf") if self.max_tasks is None else self.max_tasks
def next_task(self, force_advance_scene: bool = False) -> Optional[ObjectNavTask]:
if self.max_tasks is not None and self.max_tasks <= 0:
return None
if self.episode_index >= len(self.episodes[self.scenes[self.scene_index]]):
self.scene_index = (self.scene_index + 1) % len(self.scenes)
# shuffle the new list of episodes to train on
random.shuffle(self.episodes[self.scenes[self.scene_index]])
self.episode_index = 0
scene = self.scenes[self.scene_index]
episode = self.episodes[scene][self.episode_index]
if self.env is None:
self.env = self._create_environment()
if scene.replace("_physics", "") != self.env.scene_name.replace("_physics", ""):
self.env.reset(scene_name=scene)
else:
self.env.reset_object_filter()
self.env.set_object_filter(
object_ids=[
o["objectId"]
for o in self.env.last_event.metadata["objects"]
if o["objectType"] == episode["object_type"]
]
)
# only randomize materials in train scenes
were_materials_randomized = False
if self.randomize_materials_in_training:
if (
"Train" in scene
or int(scene.replace("FloorPlan", "").replace("_physics", "")) % 100
< 21
):
were_materials_randomized = True
self.env.controller.step(action="RandomizeMaterials")
task_info = {
"scene": scene,
"object_type": episode["object_type"],
"materials_randomized": were_materials_randomized,
}
        # Note: episodes are pre-filtered by object type in `__init__`, so every
        # sampled episode's target object type is guaranteed to be present.
task_info["initial_position"] = episode["initial_position"]
task_info["initial_orientation"] = episode["initial_orientation"]
task_info["initial_horizon"] = episode.get("initial_horizon", 0)
task_info["distance_to_target"] = episode.get("shortest_path_length")
task_info["path_to_target"] = episode.get("shortest_path")
task_info["object_type"] = episode["object_type"]
task_info["id"] = episode["id"]
if self.allow_flipping and random.random() > 0.5:
task_info["mirrored"] = True
else:
task_info["mirrored"] = False
self.episode_index += 1
if self.max_tasks is not None:
self.max_tasks -= 1
if not self.env.teleport(
pose=episode["initial_position"],
rotation=episode["initial_orientation"],
horizon=episode.get("initial_horizon", 0),
):
return self.next_task()
self._last_sampled_task = ObjectNavTask(
env=self.env,
sensors=self.sensors,
task_info=task_info,
max_steps=self.max_steps,
action_space=self._action_space,
reward_configs=self.rewards_config,
)
return self._last_sampled_task
def reset(self):
self.episode_index = 0
self.scene_index = 0
self.max_tasks = self.reset_tasks
def set_seed(self, seed: int):
self.seed = seed
if seed is not None:
set_seed(seed)
class PointNavTaskSampler(TaskSampler):
def __init__(
self,
scenes: List[str],
# object_types: List[str],
# scene_to_episodes: List[Dict[str, Any]],
sensors: List[Sensor],
max_steps: int,
env_args: Dict[str, Any],
action_space: gym.Space,
rewards_config: Dict,
scene_period: Optional[Union[int, str]] = None,
max_tasks: Optional[int] = None,
seed: Optional[int] = None,
deterministic_cudnn: bool = False,
**kwargs,
) -> None:
self.rewards_config = rewards_config
self.env_args = env_args
self.scenes = scenes
# self.object_types = object_types
# self.scene_to_episodes = scene_to_episodes
# self.scene_counters = {scene: -1 for scene in self.scene_to_episodes}
# self.scenes = list(self.scene_to_episodes.keys())
self.env: Optional[RoboThorEnvironment] = None
self.sensors = sensors
self.max_steps = max_steps
self._action_space = action_space
self.scene_counter: Optional[int] = None
self.scene_order: Optional[List[str]] = None
self.scene_id: Optional[int] = None
self.scene_period: Optional[
Union[str, int]
] = scene_period # default makes a random choice
self.max_tasks: Optional[int] = None
self.reset_tasks = max_tasks
self._last_sampled_task: Optional[PointNavTask] = None
self.seed: Optional[int] = None
self.set_seed(seed)
if deterministic_cudnn:
set_deterministic_cudnn()
self.reset()
def _create_environment(self) -> RoboThorEnvironment:
env = RoboThorEnvironment(**self.env_args)
return env
@property
def length(self) -> Union[int, float]:
"""Length.
# Returns
Number of total tasks remaining that can be sampled.
Can be float('inf').
"""
return float("inf") if self.max_tasks is None else self.max_tasks
@property
def total_unique(self) -> Optional[Union[int, float]]:
# total = 0
# for scene in self.scene_to_episodes:
# total += len(self.scene_to_episodes[scene])
# return total
return self.reset_tasks
@property
def last_sampled_task(self) -> Optional[PointNavTask]:
return self._last_sampled_task
def close(self) -> None:
if self.env is not None:
self.env.stop()
@property
def all_observation_spaces_equal(self) -> bool:
"""Check if observation spaces equal.
# Returns
True if all Tasks that can be sampled by this sampler
have the same observation space. Otherwise False.
"""
return True
def sample_scene(self, force_advance_scene: bool):
if force_advance_scene:
if self.scene_period != "manual":
                get_logger().warning(
                    "When sampling scene, have `force_advance_scene == True` "
                    "but `self.scene_period` is not equal to 'manual', "
                    "this may cause unexpected behavior."
                )
self.scene_id = (1 + self.scene_id) % len(self.scenes)
if self.scene_id == 0:
random.shuffle(self.scene_order)
if self.scene_period is None:
# Random scene
self.scene_id = random.randint(0, len(self.scenes) - 1)
elif self.scene_period == "manual":
pass
elif self.scene_counter >= cast(int, self.scene_period):
if self.scene_id == len(self.scene_order) - 1:
# Randomize scene order for next iteration
random.shuffle(self.scene_order)
# Move to next scene
self.scene_id = 0
else:
# Move to next scene
self.scene_id += 1
# Reset scene counter
self.scene_counter = 1
elif isinstance(self.scene_period, int):
# Stay in current scene
self.scene_counter += 1
else:
raise NotImplementedError(
"Invalid scene_period {}".format(self.scene_period)
)
if self.max_tasks is not None:
self.max_tasks -= 1
return self.scenes[int(self.scene_order[self.scene_id])]
# def sample_episode(self, scene):
# self.scene_counters[scene] = (self.scene_counters[scene] + 1) % len(self.scene_to_episodes[scene])
# if self.scene_counters[scene] == 0:
# random.shuffle(self.scene_to_episodes[scene])
# return self.scene_to_episodes[scene][self.scene_counters[scene]]
def next_task(self, force_advance_scene: bool = False) -> Optional[PointNavTask]:
if self.max_tasks is not None and self.max_tasks <= 0:
return None
scene = self.sample_scene(force_advance_scene)
if self.env is not None:
if scene.replace("_physics", "") != self.env.scene_name.replace(
"_physics", ""
):
self.env.reset(scene_name=scene)
else:
self.env = self._create_environment()
self.env.reset(scene_name=scene)
# task_info = copy.deepcopy(self.sample_episode(scene))
# task_info['target'] = task_info['target_position']
# task_info['actions'] = []
locs = self.env.known_good_locations_list()
# get_logger().debug("locs[0] {} locs[-1] {}".format(locs[0], locs[-1]))
ys = [loc["y"] for loc in locs]
miny = min(ys)
maxy = max(ys)
assert maxy - miny < 1e-6, "miny {} maxy {} for scene {}".format(
miny, maxy, scene
)
too_close_to_target = True
target: Optional[Dict[str, float]] = None
for _ in range(10):
self.env.randomize_agent_location()
target = copy.copy(random.choice(locs))
too_close_to_target = self.env.distance_to_point(target) <= 0
if not too_close_to_target:
break
pose = self.env.agent_state()
task_info = {
"scene": scene,
"initial_position": {k: pose[k] for k in ["x", "y", "z"]},
"initial_orientation": pose["rotation"]["y"],
"target": target,
"actions": [],
}
if too_close_to_target:
get_logger().warning("No path for sampled episode {}".format(task_info))
# else:
# get_logger().debug("Path found for sampled episode {}".format(task_info))
# pose = {**task_info['initial_position'], 'rotation': {'x': 0.0, 'y': task_info['initial_orientation'], 'z': 0.0}, 'horizon': 0.0}
# self.env.step({"action": "TeleportFull", **pose})
        # assert self.env.last_action_success, "Failed to initialize agent to {} in {} for episode {}".format(pose, scene, task_info)
self._last_sampled_task = PointNavTask(
env=self.env,
sensors=self.sensors,
task_info=task_info,
max_steps=self.max_steps,
action_space=self._action_space,
reward_configs=self.rewards_config,
)
return self._last_sampled_task
def reset(self):
self.scene_counter = 0
self.scene_order = list(range(len(self.scenes)))
random.shuffle(self.scene_order)
self.scene_id = 0
self.max_tasks = self.reset_tasks
# for scene in self.scene_to_episodes:
# random.shuffle(self.scene_to_episodes[scene])
# for scene in self.scene_counters:
# self.scene_counters[scene] = -1
def set_seed(self, seed: int):
self.seed = seed
if seed is not None:
set_seed(seed)
class PointNavDatasetTaskSampler(TaskSampler):
def __init__(
self,
scenes: List[str],
scene_directory: str,
sensors: List[Sensor],
max_steps: int,
env_args: Dict[str, Any],
action_space: gym.Space,
rewards_config: Dict,
seed: Optional[int] = None,
deterministic_cudnn: bool = False,
loop_dataset: bool = True,
shuffle_dataset: bool = True,
allow_flipping=False,
env_class=RoboThorEnvironment,
**kwargs,
) -> None:
self.rewards_config = rewards_config
self.env_args = env_args
self.scenes = scenes
self.shuffle_dataset: bool = shuffle_dataset
self.episodes = {
scene: ObjectNavDatasetTaskSampler.load_dataset(
scene, scene_directory + "/episodes"
)
for scene in scenes
}
self.env_class = env_class
self.env: Optional[RoboThorEnvironment] = None
self.sensors = sensors
self.max_steps = max_steps
self._action_space = action_space
self.allow_flipping = allow_flipping
self.scene_counter: Optional[int] = None
self.scene_order: Optional[List[str]] = None
self.scene_id: Optional[int] = None
# get the total number of tasks assigned to this process
if loop_dataset:
self.max_tasks = None
else:
self.max_tasks = sum(len(self.episodes[scene]) for scene in self.episodes)
self.reset_tasks = self.max_tasks
self.scene_index = 0
self.episode_index = 0
self._last_sampled_task: Optional[PointNavTask] = None
self.seed: Optional[int] = None
self.set_seed(seed)
if deterministic_cudnn:
set_deterministic_cudnn()
self.reset()
def _create_environment(self) -> RoboThorEnvironment:
env = self.env_class(**self.env_args)
return env
    def __len__(self) -> Union[int, float]:
"""Length.
# Returns
Number of total tasks remaining that can be sampled. Can be float('inf').
"""
return float("inf") if self.max_tasks is None else self.max_tasks
@property
def total_unique(self) -> Optional[Union[int, float]]:
return self.reset_tasks
@property
def last_sampled_task(self) -> Optional[PointNavTask]:
return self._last_sampled_task
def close(self) -> None:
if self.env is not None:
self.env.stop()
@property
def all_observation_spaces_equal(self) -> bool:
"""Check if observation spaces equal.
# Returns
True if all Tasks that can be sampled by this sampler have the
same observation space. Otherwise False.
"""
return True
def next_task(self, force_advance_scene: bool = False) -> Optional[PointNavTask]:
if self.max_tasks is not None and self.max_tasks <= 0:
return None
if self.episode_index >= len(self.episodes[self.scenes[self.scene_index]]):
self.scene_index = (self.scene_index + 1) % len(self.scenes)
# shuffle the new list of episodes to train on
if self.shuffle_dataset:
random.shuffle(self.episodes[self.scenes[self.scene_index]])
self.episode_index = 0
scene = self.scenes[self.scene_index]
episode = self.episodes[scene][self.episode_index]
if self.env is not None:
if scene.replace("_physics", "") != self.env.scene_name.replace(
"_physics", ""
):
self.env.reset(scene_name=scene, filtered_objects=[])
else:
self.env = self._create_environment()
self.env.reset(scene_name=scene, filtered_objects=[])
def to_pos(s):
if isinstance(s, (Dict, Tuple)):
return s
if isinstance(s, float):
return {"x": 0, "y": s, "z": 0}
return str_to_pos_for_cache(s)
for k in ["initial_position", "initial_orientation", "target_position"]:
episode[k] = to_pos(episode[k])
task_info = {
"scene": scene,
"initial_position": episode["initial_position"],
"initial_orientation": episode["initial_orientation"],
"target": episode["target_position"],
"shortest_path": episode["shortest_path"],
"distance_to_target": episode["shortest_path_length"],
"id": episode["id"],
}
if self.allow_flipping and random.random() > 0.5:
task_info["mirrored"] = True
else:
task_info["mirrored"] = False
self.episode_index += 1
if self.max_tasks is not None:
self.max_tasks -= 1
if not self.env.teleport(
pose=episode["initial_position"], rotation=episode["initial_orientation"]
):
return self.next_task()
self._last_sampled_task = PointNavTask(
env=self.env,
sensors=self.sensors,
task_info=task_info,
max_steps=self.max_steps,
action_space=self._action_space,
reward_configs=self.rewards_config,
)
return self._last_sampled_task
def reset(self):
self.episode_index = 0
self.scene_index = 0
self.max_tasks = self.reset_tasks
def set_seed(self, seed: int):
self.seed = seed
if seed is not None:
set_seed(seed)
@property
def length(self) -> Union[int, float]:
"""Length.
# Returns
Number of total tasks remaining that can be sampled.
Can be float('inf').
"""
return float("inf") if self.max_tasks is None else self.max_tasks
class NavToPartnerTaskSampler(TaskSampler):
def __init__(
self,
scenes: List[str],
sensors: List[Sensor],
max_steps: int,
env_args: Dict[str, Any],
action_space: gym.Space,
rewards_config: Dict,
scene_period: Optional[Union[int, str]] = None,
max_tasks: Optional[int] = None,
seed: Optional[int] = None,
deterministic_cudnn: bool = False,
**kwargs,
) -> None:
self.rewards_config = rewards_config
self.env_args = env_args
self.scenes = scenes
self.env: Optional[RoboThorEnvironment] = None
self.sensors = sensors
self.max_steps = max_steps
self._action_space = action_space
self.scene_counter: Optional[int] = None
self.scene_order: Optional[List[str]] = None
self.scene_id: Optional[int] = None
self.scene_period: Optional[
Union[str, int]
] = scene_period # default makes a random choice
self.max_tasks: Optional[int] = None
self.reset_tasks = max_tasks
self._last_sampled_task: Optional[NavToPartnerTask] = None
self.seed: Optional[int] = None
self.set_seed(seed)
if deterministic_cudnn:
set_deterministic_cudnn()
self.reset()
def _create_environment(self) -> RoboThorEnvironment:
assert (
self.env_args["agentCount"] == 2
), "NavToPartner is only defined for 2 agents!"
env = RoboThorEnvironment(**self.env_args)
return env
@property
def length(self) -> Union[int, float]:
"""Length.
# Returns
Number of total tasks remaining that can be sampled.
Can be float('inf').
"""
return float("inf") if self.max_tasks is None else self.max_tasks
@property
def total_unique(self) -> Optional[Union[int, float]]:
return self.reset_tasks
@property
def last_sampled_task(self) -> Optional[NavToPartnerTask]:
return self._last_sampled_task
def close(self) -> None:
if self.env is not None:
self.env.stop()
@property
def all_observation_spaces_equal(self) -> bool:
"""Check if observation spaces equal.
# Returns
True if all Tasks that can be sampled by this sampler
have the same observation space. Otherwise False.
"""
return True
def sample_scene(self, force_advance_scene: bool):
if force_advance_scene:
if self.scene_period != "manual":
                get_logger().warning(
                    "When sampling scene, have `force_advance_scene == True` "
                    "but `self.scene_period` is not equal to 'manual', "
                    "this may cause unexpected behavior."
                )
self.scene_id = (1 + self.scene_id) % len(self.scenes)
if self.scene_id == 0:
random.shuffle(self.scene_order)
if self.scene_period is None:
# Random scene
self.scene_id = random.randint(0, len(self.scenes) - 1)
elif self.scene_period == "manual":
pass
elif self.scene_counter >= cast(int, self.scene_period):
if self.scene_id == len(self.scene_order) - 1:
# Randomize scene order for next iteration
random.shuffle(self.scene_order)
# Move to next scene
self.scene_id = 0
else:
# Move to next scene
self.scene_id += 1
# Reset scene counter
self.scene_counter = 1
elif isinstance(self.scene_period, int):
# Stay in current scene
self.scene_counter += 1
else:
raise NotImplementedError(
"Invalid scene_period {}".format(self.scene_period)
)
if self.max_tasks is not None:
self.max_tasks -= 1
return self.scenes[int(self.scene_order[self.scene_id])]
def next_task(
self, force_advance_scene: bool = False
) -> Optional[NavToPartnerTask]:
if self.max_tasks is not None and self.max_tasks <= 0:
return None
scene = self.sample_scene(force_advance_scene)
if self.env is not None:
if scene.replace("_physics", "") != self.env.scene_name.replace(
"_physics", ""
):
self.env.reset(scene_name=scene)
else:
self.env = self._create_environment()
self.env.reset(scene_name=scene)
too_close_to_target = True
for _ in range(10):
self.env.randomize_agent_location(agent_id=0)
self.env.randomize_agent_location(agent_id=1)
pose1 = self.env.agent_state(0)
pose2 = self.env.agent_state(1)
dist = self.env.distance_cache.find_distance(
self.env.scene_name,
{k: pose1[k] for k in ["x", "y", "z"]},
{k: pose2[k] for k in ["x", "y", "z"]},
self.env.distance_from_point_to_point,
)
too_close_to_target = (
dist <= 1.25 * self.rewards_config["max_success_distance"]
)
if not too_close_to_target:
break
task_info = {
"scene": scene,
"initial_position1": {k: pose1[k] for k in ["x", "y", "z"]},
"initial_position2": {k: pose2[k] for k in ["x", "y", "z"]},
"initial_orientation1": pose1["rotation"]["y"],
"initial_orientation2": pose2["rotation"]["y"],
"id": "_".join(
[scene]
# + ["%4.2f" % pose1[k] for k in ["x", "y", "z"]]
# + ["%4.2f" % pose1["rotation"]["y"]]
# + ["%4.2f" % pose2[k] for k in ["x", "y", "z"]]
# + ["%4.2f" % pose2["rotation"]["y"]]
+ ["%d" % random.randint(0, 2 ** 63 - 1)]
),
}
if too_close_to_target:
get_logger().warning("Bad sampled episode {}".format(task_info))
self._last_sampled_task = NavToPartnerTask(
env=self.env,
sensors=self.sensors,
task_info=task_info,
max_steps=self.max_steps,
action_space=self._action_space,
reward_configs=self.rewards_config,
)
return self._last_sampled_task
def reset(self):
self.scene_counter = 0
self.scene_order = list(range(len(self.scenes)))
random.shuffle(self.scene_order)
self.scene_id = 0
self.max_tasks = self.reset_tasks
def set_seed(self, seed: int):
self.seed = seed
if seed is not None:
set_seed(seed)
| allenact-main | allenact_plugins/robothor_plugin/robothor_task_samplers.py |
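A usage sketch for the dataset-backed sampler above (not from the repo; the scene name, dataset directory, and reward values are placeholders, and a working AI2-THOR installation plus a downloaded dataset are assumed):
import gym
from allenact_plugins.robothor_plugin.robothor_task_samplers import (
    ObjectNavDatasetTaskSampler,
)
from allenact_plugins.robothor_plugin.robothor_tasks import ObjectNavTask
task_sampler = ObjectNavDatasetTaskSampler(
    scenes=["FloorPlan_Train1_1"],
    scene_directory="datasets/robothor-objectnav/train",  # expects <dir>/episodes/<scene>.json.gz
    sensors=[],  # normally RGB, GPS-compass, and goal-object sensors
    max_steps=500,
    env_args={},  # forwarded to RoboThorEnvironment
    action_space=gym.spaces.Discrete(len(ObjectNavTask.class_action_names())),
    rewards_config={
        "step_penalty": -0.01,
        "goal_success_reward": 10.0,
        "failed_stop_reward": 0.0,
        "shaping_weight": 1.0,
    },
    loop_dataset=False,  # len(task_sampler) is then finite
)
task = task_sampler.next_task()  # None once the non-looping dataset is exhausted
task_sampler.close()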
import copy
import math
import random
import warnings
from typing import Any, Optional, Dict, List, Union, Tuple, Collection
import ai2thor.server
import numpy as np
from ai2thor.controller import Controller
from ai2thor.fifo_server import FifoServer
from ai2thor.util import metrics
from allenact.utils.cache_utils import DynamicDistanceCache
from allenact.utils.experiment_utils import recursive_update
from allenact.utils.system import get_logger
class RoboThorEnvironment:
"""Wrapper for the robo2thor controller providing additional functionality
and bookkeeping.
See [here](https://ai2thor.allenai.org/robothor/documentation) for comprehensive
documentation on RoboTHOR.
# Attributes
controller : The AI2-THOR controller.
config : The AI2-THOR controller configuration
"""
def __init__(self, all_metadata_available: bool = True, **kwargs):
self.config = dict(
rotateStepDegrees=30.0,
visibilityDistance=1.0,
gridSize=0.25,
continuousMode=True,
snapToGrid=False,
agentMode="locobot",
width=640,
height=480,
agentCount=1,
server_class=FifoServer,
)
if "agentCount" in kwargs:
assert kwargs["agentCount"] > 0
kwargs["agentMode"] = kwargs.get("agentMode", "locobot")
if kwargs["agentMode"] not in ["bot", "locobot"]:
warnings.warn(
f"The RoboTHOR environment has not been tested using"
f" an agent of mode '{kwargs['agentMode']}'."
)
recursive_update(self.config, kwargs)
        self.controller = Controller(**self.config)
self.all_metadata_available = all_metadata_available
self.scene_to_reachable_positions: Optional[Dict[str, Any]] = None
self.distance_cache: Optional[DynamicDistanceCache] = None
if self.all_metadata_available:
self.scene_to_reachable_positions = {
self.scene_name: copy.deepcopy(self.currently_reachable_points)
}
assert len(self.scene_to_reachable_positions[self.scene_name]) > 10
self.distance_cache = DynamicDistanceCache(rounding=1)
self.agent_count = self.config["agentCount"]
self._extra_teleport_kwargs: Dict[
str, Any
        ] = {}  # Used for backwards compatibility with the teleport action
def initialize_grid_dimensions(
self, reachable_points: Collection[Dict[str, float]]
) -> Tuple[int, int, int, int]:
"""Computes bounding box for reachable points quantized with the
current gridSize."""
points = {
(
round(p["x"] / self.config["gridSize"]),
round(p["z"] / self.config["gridSize"]),
): p
for p in reachable_points
}
assert len(reachable_points) == len(points)
xmin, xmax = min([p[0] for p in points]), max([p[0] for p in points])
zmin, zmax = min([p[1] for p in points]), max([p[1] for p in points])
return xmin, xmax, zmin, zmax
def set_object_filter(self, object_ids: List[str]):
self.controller.step("SetObjectFilter", objectIds=object_ids, renderImage=False)
def reset_object_filter(self):
self.controller.step("ResetObjectFilter", renderImage=False)
def path_from_point_to_object_type(
self, point: Dict[str, float], object_type: str, allowed_error: float
) -> Optional[List[Dict[str, float]]]:
event = self.controller.step(
action="GetShortestPath",
objectType=object_type,
position=point,
allowedError=allowed_error,
)
if event.metadata["lastActionSuccess"]:
return event.metadata["actionReturn"]["corners"]
else:
get_logger().debug(
"Failed to find path for {} in {}. Start point {}, agent state {}.".format(
object_type,
self.controller.last_event.metadata["sceneName"],
point,
self.agent_state(),
)
)
return None
def distance_from_point_to_object_type(
self, point: Dict[str, float], object_type: str, allowed_error: float
) -> float:
"""Minimal geodesic distance from a point to an object of the given
type.
It might return -1.0 for unreachable targets.
"""
path = self.path_from_point_to_object_type(point, object_type, allowed_error)
if path:
# Because `allowed_error != 0` means that the path returned above might not start
# at `point`, we explicitly add any offset there is.
s_dist = math.sqrt(
(point["x"] - path[0]["x"]) ** 2 + (point["z"] - path[0]["z"]) ** 2
)
return metrics.path_distance(path) + s_dist
return -1.0
def distance_to_object_type(self, object_type: str, agent_id: int = 0) -> float:
"""Minimal geodesic distance to object of given type from agent's
current location.
It might return -1.0 for unreachable targets.
"""
assert 0 <= agent_id < self.agent_count
assert (
self.all_metadata_available
), "`distance_to_object_type` cannot be called when `self.all_metadata_available` is `False`."
def retry_dist(position: Dict[str, float], object_type: str):
allowed_error = 0.05
debug_log = ""
d = -1.0
while allowed_error < 2.5:
d = self.distance_from_point_to_object_type(
position, object_type, allowed_error
)
if d < 0:
debug_log = (
f"In scene {self.scene_name}, could not find a path from {position} to {object_type} with"
f" {allowed_error} error tolerance. Increasing this tolerance to"
f" {2 * allowed_error} any trying again."
)
allowed_error *= 2
else:
break
if d < 0:
get_logger().debug(
f"In scene {self.scene_name}, could not find a path from {position} to {object_type}"
f" with {allowed_error} error tolerance. Returning a distance of -1."
)
elif debug_log != "":
get_logger().debug(debug_log)
return d
return self.distance_cache.find_distance(
self.scene_name,
self.controller.last_event.events[agent_id].metadata["agent"]["position"],
object_type,
retry_dist,
)
def path_from_point_to_point(
self, position: Dict[str, float], target: Dict[str, float], allowedError: float
) -> Optional[List[Dict[str, float]]]:
try:
return self.controller.step(
action="GetShortestPathToPoint",
position=position,
target=target,
allowedError=allowedError,
).metadata["actionReturn"]["corners"]
except ValueError:
raise
except Exception:
get_logger().debug(
"Failed to find path for {} in {}. Start point {}, agent state {}.".format(
target,
self.controller.last_event.metadata["sceneName"],
position,
self.agent_state(),
)
)
return None
def distance_from_point_to_point(
self, position: Dict[str, float], target: Dict[str, float], allowed_error: float
) -> float:
path = self.path_from_point_to_point(position, target, allowed_error)
if path:
# Because `allowed_error != 0` means that the path returned above might not start
# or end exactly at the position/target points, we explictly add any offset there is.
s_dist = math.sqrt(
(position["x"] - path[0]["x"]) ** 2
+ (position["z"] - path[0]["z"]) ** 2
)
t_dist = math.sqrt(
(target["x"] - path[-1]["x"]) ** 2 + (target["z"] - path[-1]["z"]) ** 2
)
return metrics.path_distance(path) + s_dist + t_dist
return -1.0
def distance_to_point(self, target: Dict[str, float], agent_id: int = 0) -> float:
"""Minimal geodesic distance to end point from agent's current
location.
It might return -1.0 for unreachable targets.
"""
assert 0 <= agent_id < self.agent_count
assert (
self.all_metadata_available
), "`distance_to_object_type` cannot be called when `self.all_metadata_available` is `False`."
def retry_dist(position: Dict[str, float], target: Dict[str, float]):
allowed_error = 0.05
debug_log = ""
d = -1.0
while allowed_error < 2.5:
d = self.distance_from_point_to_point(position, target, allowed_error)
if d < 0:
debug_log = (
f"In scene {self.scene_name}, could not find a path from {position} to {target} with"
f" {allowed_error} error tolerance. Increasing this tolerance to"
f" {2 * allowed_error} any trying again."
)
allowed_error *= 2
else:
break
if d < 0:
get_logger().debug(
f"In scene {self.scene_name}, could not find a path from {position} to {target}"
f" with {allowed_error} error tolerance. Returning a distance of -1."
)
elif debug_log != "":
get_logger().debug(debug_log)
return d
return self.distance_cache.find_distance(
self.scene_name,
self.controller.last_event.events[agent_id].metadata["agent"]["position"],
target,
retry_dist,
)
def agent_state(self, agent_id: int = 0) -> Dict:
"""Return agent position, rotation and horizon."""
assert 0 <= agent_id < self.agent_count
agent_meta = self.last_event.events[agent_id].metadata["agent"]
return {
**{k: float(v) for k, v in agent_meta["position"].items()},
"rotation": {k: float(v) for k, v in agent_meta["rotation"].items()},
"horizon": round(float(agent_meta["cameraHorizon"]), 1),
}
def teleport(
self,
pose: Dict[str, float],
rotation: Dict[str, float],
horizon: float = 0.0,
agent_id: int = 0,
):
assert 0 <= agent_id < self.agent_count
try:
e = self.controller.step(
action="TeleportFull",
x=pose["x"],
y=pose["y"],
z=pose["z"],
rotation=rotation,
horizon=horizon,
agentId=agent_id,
**self._extra_teleport_kwargs,
)
except ValueError as e:
if len(self._extra_teleport_kwargs) == 0:
self._extra_teleport_kwargs["standing"] = True
else:
raise e
return self.teleport(
pose=pose, rotation=rotation, horizon=horizon, agent_id=agent_id
)
return e.metadata["lastActionSuccess"]
def reset(
self, scene_name: str = None, filtered_objects: Optional[List[str]] = None
) -> None:
"""Resets scene to a known initial state."""
if scene_name is not None and scene_name != self.scene_name:
self.controller.reset(scene_name)
assert self.last_action_success, "Could not reset to new scene"
if (
self.all_metadata_available
and scene_name not in self.scene_to_reachable_positions
):
self.scene_to_reachable_positions[scene_name] = copy.deepcopy(
self.currently_reachable_points
)
assert len(self.scene_to_reachable_positions[scene_name]) > 10
if filtered_objects:
self.set_object_filter(filtered_objects)
else:
self.reset_object_filter()
def random_reachable_state(
self, seed: Optional[int] = None
) -> Dict[str, Union[Dict[str, float], float]]:
"""Returns a random reachable location in the scene."""
assert (
self.all_metadata_available
), "`random_reachable_state` cannot be called when `self.all_metadata_available` is `False`."
if seed is not None:
random.seed(seed)
# xyz = random.choice(self.currently_reachable_points)
assert len(self.scene_to_reachable_positions[self.scene_name]) > 10
xyz = copy.deepcopy(
random.choice(self.scene_to_reachable_positions[self.scene_name])
)
rotation = random.choice(
np.arange(0.0, 360.0, self.config["rotateStepDegrees"])
)
horizon = 0.0 # random.choice([0.0, 30.0, 330.0])
return {
**{k: float(v) for k, v in xyz.items()},
"rotation": {"x": 0.0, "y": float(rotation), "z": 0.0},
"horizon": float(horizon),
}
def randomize_agent_location(
self,
seed: int = None,
partial_position: Optional[Dict[str, float]] = None,
agent_id: int = 0,
) -> Dict[str, Union[Dict[str, float], float]]:
"""Teleports the agent to a random reachable location in the scene."""
assert 0 <= agent_id < self.agent_count
if partial_position is None:
partial_position = {}
k = 0
state: Optional[Dict] = None
while k == 0 or (not self.last_action_success and k < 10):
# self.reset()
state = {**self.random_reachable_state(seed=seed), **partial_position}
# get_logger().debug("picked target location {}".format(state))
self.controller.step("TeleportFull", **state, agentId=agent_id)
k += 1
if not self.last_action_success:
get_logger().warning(
(
"Randomize agent location in scene {} and current random state {}"
" with seed {} and partial position {} failed in "
"10 attempts. Forcing the action."
).format(self.scene_name, state, seed, partial_position)
)
self.controller.step("TeleportFull", **state, force_action=True, agentId=agent_id) # type: ignore
assert self.last_action_success, "Force action failed with {}".format(state)
# get_logger().debug("location after teleport full {}".format(self.agent_state()))
# self.controller.step("TeleportFull", **self.agent_state()) # TODO only for debug
# get_logger().debug("location after re-teleport full {}".format(self.agent_state()))
return self.agent_state(agent_id=agent_id)
def known_good_locations_list(self):
assert (
self.all_metadata_available
), "`known_good_locations_list` cannot be called when `self.all_metadata_available` is `False`."
return self.scene_to_reachable_positions[self.scene_name]
@property
def currently_reachable_points(self) -> List[Dict[str, float]]:
"""List of {"x": x, "y": y, "z": z} locations in the scene that are
currently reachable."""
self.controller.step(action="GetReachablePositions")
assert (
self.last_action_success
), f"Could not get reachable positions for reason {self.last_event.metadata['errorMessage']}."
return self.last_action_return
@property
def scene_name(self) -> str:
"""Current ai2thor scene."""
return self.controller.last_event.metadata["sceneName"].replace("_physics", "")
@property
def current_frame(self) -> np.ndarray:
"""Returns rgb image corresponding to the agent's egocentric view."""
return self.controller.last_event.frame
@property
def current_depth(self) -> np.ndarray:
"""Returns depth image corresponding to the agent's egocentric view."""
return self.controller.last_event.depth_frame
@property
def current_frames(self) -> List[np.ndarray]:
"""Returns rgb images corresponding to the agents' egocentric views."""
return [
self.controller.last_event.events[agent_id].frame
for agent_id in range(self.agent_count)
]
@property
def current_depths(self) -> List[np.ndarray]:
"""Returns depth images corresponding to the agents' egocentric
views."""
return [
self.controller.last_event.events[agent_id].depth_frame
for agent_id in range(self.agent_count)
]
@property
def last_event(self) -> ai2thor.server.Event:
"""Last event returned by the controller."""
return self.controller.last_event
@property
def last_action(self) -> str:
"""Last action, as a string, taken by the agent."""
return self.controller.last_event.metadata["lastAction"]
@property
def last_action_success(self) -> bool:
"""Was the last action taken by the agent a success?"""
return self.controller.last_event.metadata["lastActionSuccess"]
@property
def last_action_return(self) -> Any:
"""Get the value returned by the last action (if applicable).
For an example of an action that returns a value, see
`"GetReachablePositions"`.
"""
return self.controller.last_event.metadata["actionReturn"]
def step(
self,
action_dict: Optional[Dict[str, Union[str, int, float, Dict]]] = None,
**kwargs: Union[str, int, float, Dict],
) -> ai2thor.server.Event:
"""Take a step in the ai2thor environment."""
if action_dict is None:
action_dict = dict()
action_dict.update(kwargs)
return self.controller.step(**action_dict)
def stop(self):
"""Stops the ai2thor controller."""
try:
self.controller.stop()
except Exception as e:
get_logger().warning(str(e))
def all_objects(self) -> List[Dict[str, Any]]:
"""Return all object metadata."""
return self.controller.last_event.metadata["objects"]
def all_objects_with_properties(
self, properties: Dict[str, Any]
) -> List[Dict[str, Any]]:
"""Find all objects with the given properties."""
objects = []
for o in self.all_objects():
satisfies_all = True
for k, v in properties.items():
if o[k] != v:
satisfies_all = False
break
if satisfies_all:
objects.append(o)
return objects
def visible_objects(self) -> List[Dict[str, Any]]:
"""Return all visible objects."""
return self.all_objects_with_properties({"visible": True})
| allenact-main | allenact_plugins/robothor_plugin/robothor_environment.py |
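A basic usage sketch for the environment wrapper above (not from the repo; it requires a working AI2-THOR installation, and the scene name is a placeholder):
from allenact_plugins.robothor_plugin.robothor_environment import RoboThorEnvironment
env = RoboThorEnvironment(width=400, height=300)
env.reset(scene_name="FloorPlan_Train1_1")
state = env.randomize_agent_location()  # teleport to a random reachable pose
env.step({"action": "MoveAhead"})  # same calling convention as controller.step
print(env.last_action_success, env.agent_state()["rotation"]["y"])
frame = env.current_frame  # (H, W, 3) uint8 egocentric RGB image
env.stop()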
MOVE_AHEAD = "MoveAhead"
ROTATE_LEFT = "RotateLeft"
ROTATE_RIGHT = "RotateRight"
LOOK_DOWN = "LookDown"
LOOK_UP = "LookUp"
END = "End"
PASS = "Pass"
| allenact-main | allenact_plugins/robothor_plugin/robothor_constants.py |
from typing import Tuple
import torch
from allenact.base_abstractions.distributions import CategoricalDistr, Distr
class TupleCategoricalDistr(Distr):
def __init__(self, probs=None, logits=None, validate_args=None):
self.dists = CategoricalDistr(
probs=probs, logits=logits, validate_args=validate_args
)
def log_prob(self, actions: Tuple[torch.LongTensor, ...]) -> torch.FloatTensor:
# flattened output [steps, samplers, num_agents]
return self.dists.log_prob(torch.stack(actions, dim=-1))
def entropy(self) -> torch.FloatTensor:
# flattened output [steps, samplers, num_agents]
return self.dists.entropy()
def sample(self, sample_shape=torch.Size()) -> Tuple[torch.LongTensor, ...]:
# split and remove trailing singleton dim
res = self.dists.sample(sample_shape).split(1, dim=-1)
return tuple([r.view(r.shape[:2]) for r in res])
def mode(self) -> Tuple[torch.LongTensor, ...]:
# split and remove trailing singleton dim
res = self.dists.mode().split(1, dim=-1)
return tuple([r.view(r.shape[:2]) for r in res])
| allenact-main | allenact_plugins/robothor_plugin/robothor_distributions.py |
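A shape-convention sketch for the tuple distribution above (not from the repo): logits are laid out as [steps, samplers, num_agents, num_actions], samples come back as a tuple with one [steps, samplers] tensor per agent, and log-probs recover the [steps, samplers, num_agents] layout.
import torch
from allenact_plugins.robothor_plugin.robothor_distributions import (
    TupleCategoricalDistr,
)
steps, samplers, num_agents, num_actions = 1, 4, 2, 3
dist = TupleCategoricalDistr(
    logits=torch.randn(steps, samplers, num_agents, num_actions)
)
actions = dist.sample()  # tuple of num_agents tensors, each [steps, samplers]
assert len(actions) == num_agents and actions[0].shape == (steps, samplers)
log_probs = dist.log_prob(actions)  # [steps, samplers, num_agents]
assert log_probs.shape == (steps, samplers, num_agents)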
from typing import Tuple, Optional
import gym
import torch
from gym.spaces import Dict as SpaceDict
from allenact.algorithms.onpolicy_sync.policy import (
ActorCriticModel,
LinearActorCriticHead,
DistributionType,
Memory,
ObservationType,
)
from allenact.base_abstractions.misc import ActorCriticOutput
from allenact.embodiedai.models.basic_models import RNNStateEncoder, SimpleCNN
from allenact_plugins.robothor_plugin.robothor_distributions import (
TupleCategoricalDistr,
)
class TupleLinearActorCriticHead(LinearActorCriticHead):
def forward(self, x):
out = self.actor_and_critic(x)
logits = out[..., :-1]
values = out[..., -1:]
# noinspection PyArgumentList
return (
TupleCategoricalDistr(logits=logits), # [steps, samplers, ...]
values.view(*values.shape[:2], -1), # [steps, samplers, flattened]
)
class NavToPartnerActorCriticSimpleConvRNN(ActorCriticModel[TupleCategoricalDistr]):
action_space: gym.spaces.Tuple
def __init__(
self,
action_space: gym.spaces.Tuple,
observation_space: SpaceDict,
rgb_uuid: Optional[str] = "rgb",
hidden_size=512,
num_rnn_layers=1,
rnn_type="GRU",
):
super().__init__(action_space=action_space, observation_space=observation_space)
self._hidden_size = hidden_size
self.rgb_uuid = rgb_uuid
self.visual_encoder = SimpleCNN(
observation_space=observation_space,
output_size=hidden_size,
rgb_uuid=self.rgb_uuid,
depth_uuid=None,
)
self.state_encoder = RNNStateEncoder(
0 if self.is_blind else self.recurrent_hidden_state_size,
self._hidden_size,
num_layers=num_rnn_layers,
rnn_type=rnn_type,
)
self.actor_critic = TupleLinearActorCriticHead(
self._hidden_size, action_space[0].n
)
self.train()
@property
def output_size(self):
return self._hidden_size
@property
def is_blind(self):
return self.visual_encoder.is_blind
@property
def num_recurrent_layers(self):
return self.state_encoder.num_recurrent_layers
@property
def recurrent_hidden_state_size(self):
return self._hidden_size
@property
def num_agents(self):
return len(self.action_space)
def _recurrent_memory_specification(self):
return dict(
rnn=(
(
("layer", self.num_recurrent_layers),
("sampler", None),
("agent", self.num_agents),
("hidden", self.recurrent_hidden_state_size),
),
torch.float32,
)
)
def forward( # type:ignore
self,
observations: ObservationType,
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
if not self.is_blind:
perception_embed = self.visual_encoder(observations)
else:
# TODO manage blindness for all agents simultaneously or separate?
raise NotImplementedError()
# TODO alternative where all agents consume all observations
x, rnn_hidden_states = self.state_encoder(
perception_embed, memory.tensor("rnn"), masks
)
dists, vals = self.actor_critic(x)
return (
ActorCriticOutput(distributions=dists, values=vals, extras={},),
memory.set_tensor("rnn", rnn_hidden_states),
)
| allenact-main | allenact_plugins/robothor_plugin/robothor_models.py |
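An instantiation sketch for the two-agent model above (not from the repo; the action and observation spaces are illustrative placeholders):
import gym
from gym.spaces import Dict as SpaceDict
from allenact_plugins.robothor_plugin.robothor_models import (
    NavToPartnerActorCriticSimpleConvRNN,
)
model = NavToPartnerActorCriticSimpleConvRNN(
    action_space=gym.spaces.Tuple(
        [gym.spaces.Discrete(4), gym.spaces.Discrete(4)]  # one Discrete space per agent
    ),
    observation_space=SpaceDict(
        {"rgb": gym.spaces.Box(low=0.0, high=1.0, shape=(224, 224, 3))}
    ),
    rgb_uuid="rgb",
    hidden_size=512,
)
assert model.num_agents == 2
assert model.recurrent_hidden_state_size == 512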
from allenact.utils.system import ImportChecker
with ImportChecker(
"Cannot `import ai2thor`, please install `ai2thor` (`pip install ai2thor`)."
):
# noinspection PyUnresolvedReferences
import ai2thor
| allenact-main | allenact_plugins/robothor_plugin/__init__.py |
import math
from typing import Tuple, List, Dict, Any, Optional, Union, Sequence, cast
import gym
import numpy as np
from allenact.base_abstractions.misc import RLStepResult
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import Task
from allenact.utils.system import get_logger
from allenact.utils.tensor_utils import tile_images
from allenact_plugins.ithor_plugin.ithor_environment import IThorEnvironment
from allenact_plugins.robothor_plugin.robothor_constants import (
MOVE_AHEAD,
ROTATE_LEFT,
ROTATE_RIGHT,
END,
LOOK_UP,
LOOK_DOWN,
)
from allenact_plugins.robothor_plugin.robothor_environment import RoboThorEnvironment
def spl_metric(
success: bool, optimal_distance: float, travelled_distance: float
) -> Optional[float]:
if not success:
return 0.0
elif optimal_distance < 0:
return None
elif optimal_distance == 0:
if travelled_distance == 0:
return 1.0
else:
return 0.0
else:
travelled_distance = max(travelled_distance, optimal_distance)
return optimal_distance / travelled_distance
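# Illustrative values (not in the original source): with a 2.0m shortest path
# and 4.0m actually travelled, spl_metric(True, 2.0, 4.0) == 0.5; any failed
# episode scores 0.0, and an unreachable target (optimal_distance < 0) yields
# None so callers can treat the metric as undefined.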
class PointNavTask(Task[RoboThorEnvironment]):
_actions = (MOVE_AHEAD, ROTATE_LEFT, ROTATE_RIGHT, END)
def __init__(
self,
env: RoboThorEnvironment,
sensors: List[Sensor],
task_info: Dict[str, Any],
max_steps: int,
reward_configs: Dict[str, Any],
**kwargs,
) -> None:
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self.reward_configs = reward_configs
self._took_end_action: bool = False
self._success: Optional[bool] = False
self.last_geodesic_distance = self.env.distance_to_point(
self.task_info["target"]
)
self.optimal_distance = self.last_geodesic_distance
self._rewards: List[float] = []
self._distance_to_goal: List[float] = []
self._metrics = None
self.path: List[
Any
] = [] # the initial coordinate will be directly taken from the optimal path
self.travelled_distance = 0.0
self.task_info["followed_path"] = [self.env.agent_state()]
self.task_info["action_names"] = self.action_names()
@property
def action_space(self):
return gym.spaces.Discrete(len(self._actions))
def reached_terminal_state(self) -> bool:
return self._took_end_action
@classmethod
def class_action_names(cls, **kwargs) -> Tuple[str, ...]:
return cls._actions
def close(self) -> None:
self.env.stop()
def _step(self, action: Union[int, Sequence[int]]) -> RLStepResult:
assert isinstance(action, int)
action = cast(int, action)
action_str = self.class_action_names()[action]
if action_str == END:
self._took_end_action = True
self._success = self._is_goal_in_range()
self.last_action_success = self._success
else:
self.env.step({"action": action_str})
self.last_action_success = self.env.last_action_success
pose = self.env.agent_state()
self.path.append({k: pose[k] for k in ["x", "y", "z"]})
self.task_info["followed_path"].append(pose)
if len(self.path) > 1:
self.travelled_distance += IThorEnvironment.position_dist(
p0=self.path[-1], p1=self.path[-2], ignore_y=True
)
step_result = RLStepResult(
observation=self.get_observations(),
reward=self.judge(),
done=self.is_done(),
info={"last_action_success": self.last_action_success, "action": action},
)
return step_result
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
assert mode in ["rgb", "depth"], "only rgb and depth rendering is implemented"
if mode == "rgb":
return self.env.current_frame
elif mode == "depth":
return self.env.current_depth
def _is_goal_in_range(self) -> Optional[bool]:
tget = self.task_info["target"]
dist = self.dist_to_target()
if -0.5 < dist <= 0.2:
return True
elif dist > 0.2:
return False
else:
get_logger().debug(
"No path for {} from {} to {}".format(
self.env.scene_name, self.env.agent_state(), tget
)
)
return None
def shaping(self) -> float:
rew = 0.0
if self.reward_configs["shaping_weight"] == 0.0:
return rew
geodesic_distance = self.dist_to_target()
if geodesic_distance == -1.0:
geodesic_distance = self.last_geodesic_distance
if (
self.last_geodesic_distance > -0.5 and geodesic_distance > -0.5
): # (robothor limits)
rew += self.last_geodesic_distance - geodesic_distance
self.last_geodesic_distance = geodesic_distance
return rew * self.reward_configs["shaping_weight"]
def judge(self) -> float:
"""Judge the last event."""
reward = self.reward_configs["step_penalty"]
reward += self.shaping()
if self._took_end_action:
if self._success is not None:
reward += (
self.reward_configs["goal_success_reward"]
if self._success
else self.reward_configs["failed_stop_reward"]
)
elif self.num_steps_taken() + 1 >= self.max_steps:
reward += self.reward_configs.get("reached_max_steps_reward", 0.0)
self._rewards.append(float(reward))
return float(reward)
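    # A hedged numeric sketch of `judge` (the reward-config values are made
    # up): with step_penalty=-0.01 and shaping_weight=1.0, a step that reduces
    # the geodesic distance to the target by 0.2m yields -0.01 + 0.2 = 0.19;
    # a successful END action additionally earns goal_success_reward, while a
    # premature END earns failed_stop_reward.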
def dist_to_target(self):
return self.env.distance_to_point(self.task_info["target"])
def metrics(self) -> Dict[str, Any]:
if not self.is_done():
return {}
total_reward = float(np.sum(self._rewards))
self._rewards = []
if self._success is None:
return {}
dist2tget = self.dist_to_target()
spl = spl_metric(
success=self._success,
optimal_distance=self.optimal_distance,
travelled_distance=self.travelled_distance,
)
metrics = {
**super(PointNavTask, self).metrics(),
"success": self._success, # False also if no path to target
"total_reward": total_reward,
"dist_to_target": dist2tget,
"spl": 0 if spl is None else spl,
}
return metrics
class ObjectNavTask(Task[RoboThorEnvironment]):
_actions = (MOVE_AHEAD, ROTATE_LEFT, ROTATE_RIGHT, END, LOOK_UP, LOOK_DOWN)
def __init__(
self,
env: RoboThorEnvironment,
sensors: List[Sensor],
task_info: Dict[str, Any],
max_steps: int,
reward_configs: Dict[str, Any],
**kwargs,
) -> None:
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self.reward_configs = reward_configs
self._took_end_action: bool = False
self._success: Optional[bool] = False
self.mirror = task_info["mirrored"]
self._all_metadata_available = env.all_metadata_available
self._rewards: List[float] = []
self._distance_to_goal: List[float] = []
self._metrics = None
self.path: List = (
[]
) # the initial coordinate will be directly taken from the optimal path
self.travelled_distance = 0.0
self.task_info["followed_path"] = [self.env.agent_state()]
self.task_info["taken_actions"] = []
self.task_info["action_names"] = self.class_action_names()
if self._all_metadata_available:
self.last_geodesic_distance = self.env.distance_to_object_type(
self.task_info["object_type"]
)
self.optimal_distance = self.last_geodesic_distance
self.closest_geo_distance = self.last_geodesic_distance
self.last_expert_action: Optional[int] = None
self.last_action_success = False
@property
def action_space(self):
return gym.spaces.Discrete(len(self._actions))
def reached_terminal_state(self) -> bool:
return self._took_end_action
@classmethod
def class_action_names(cls, **kwargs) -> Tuple[str, ...]:
return cls._actions
def close(self) -> None:
self.env.stop()
def _step(self, action: Union[int, Sequence[int]]) -> RLStepResult:
assert isinstance(action, int)
action = cast(int, action)
action_str = self.class_action_names()[action]
if self.mirror:
if action_str == ROTATE_RIGHT:
action_str = ROTATE_LEFT
elif action_str == ROTATE_LEFT:
action_str = ROTATE_RIGHT
self.task_info["taken_actions"].append(action_str)
if action_str == END:
self._took_end_action = True
self._success = self._is_goal_in_range()
self.last_action_success = self._success
else:
self.env.step({"action": action_str})
self.last_action_success = self.env.last_action_success
pose = self.env.agent_state()
self.path.append({k: pose[k] for k in ["x", "y", "z"]})
self.task_info["followed_path"].append(pose)
if len(self.path) > 1:
self.travelled_distance += IThorEnvironment.position_dist(
p0=self.path[-1], p1=self.path[-2], ignore_y=True
)
step_result = RLStepResult(
observation=self.get_observations(),
reward=self.judge(),
done=self.is_done(),
info={"last_action_success": self.last_action_success, "action": action},
)
return step_result
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
assert mode in ["rgb", "depth"], "only rgb and depth rendering is implemented"
if mode == "rgb":
frame = self.env.current_frame.copy()
elif mode == "depth":
frame = self.env.current_depth.copy()
else:
raise NotImplementedError(f"Mode '{mode}' is not supported.")
if self.mirror:
frame = frame[:, ::-1, :].copy() # horizontal flip
# print("mirrored render")
return frame
def _is_goal_in_range(self) -> bool:
return any(
o["objectType"] == self.task_info["object_type"]
for o in self.env.visible_objects()
)
def shaping(self) -> float:
rew = 0.0
if self.reward_configs["shaping_weight"] == 0.0:
return rew
geodesic_distance = self.env.distance_to_object_type(
self.task_info["object_type"]
)
# Ensuring the reward magnitude is not greater than the total distance moved
max_reward_mag = 0.0
if len(self.path) >= 2:
p0, p1 = self.path[-2:]
max_reward_mag = math.sqrt(
(p0["x"] - p1["x"]) ** 2 + (p0["z"] - p1["z"]) ** 2
)
if self.reward_configs.get("positive_only_reward", False):
if geodesic_distance > 0.5:
rew = max(self.closest_geo_distance - geodesic_distance, 0)
else:
if (
self.last_geodesic_distance > -0.5 and geodesic_distance > -0.5
): # (robothor limits)
rew += self.last_geodesic_distance - geodesic_distance
self.last_geodesic_distance = geodesic_distance
self.closest_geo_distance = min(self.closest_geo_distance, geodesic_distance)
return (
max(min(rew, max_reward_mag), -max_reward_mag,)
* self.reward_configs["shaping_weight"]
)
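    # A hedged numeric sketch of the clamping above (distances made up): if
    # the agent physically moved 0.25m between its last two poses while the
    # geodesic distance to the target dropped by 0.4m, the shaping term is
    # clipped to +0.25 before being scaled by shaping_weight, so a single step
    # is never rewarded by more than the distance it actually covered.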
def judge(self) -> float:
"""Judge the last event."""
reward = self.reward_configs["step_penalty"]
reward += self.shaping()
if self._took_end_action:
if self._success:
reward += self.reward_configs["goal_success_reward"]
else:
reward += self.reward_configs["failed_stop_reward"]
elif self.num_steps_taken() + 1 >= self.max_steps:
reward += self.reward_configs.get("reached_max_steps_reward", 0.0)
self._rewards.append(float(reward))
return float(reward)
def get_observations(self, **kwargs) -> Any:
obs = self.sensor_suite.get_observations(env=self.env, task=self)
if self.mirror:
for o in obs:
if ("rgb" in o or "depth" in o) and isinstance(obs[o], np.ndarray):
if (
len(obs[o].shape) == 3
): # heuristic to determine this is a visual sensor
obs[o] = obs[o][:, ::-1, :].copy() # horizontal flip
elif len(obs[o].shape) == 2: # perhaps only two axes for depth?
obs[o] = obs[o][:, ::-1].copy() # horizontal flip
return obs
def metrics(self) -> Dict[str, Any]:
if not self.is_done():
return {}
metrics = super(ObjectNavTask, self).metrics()
if self._all_metadata_available:
dist2tget = self.env.distance_to_object_type(self.task_info["object_type"])
spl = spl_metric(
success=self._success,
optimal_distance=self.optimal_distance,
travelled_distance=self.travelled_distance,
)
metrics = {
**metrics,
"success": self._success,
"total_reward": np.sum(self._rewards),
"dist_to_target": dist2tget,
"spl": 0 if spl is None else spl,
}
return metrics
def query_expert(self, end_action_only: bool = False, **kwargs) -> Tuple[int, bool]:
if self._is_goal_in_range():
return self.class_action_names().index(END), True
if end_action_only:
return 0, False
else:
try:
self.env.step(
{
"action": "ObjectNavExpertAction",
"objectType": self.task_info["object_type"],
}
)
except ValueError:
raise RuntimeError(
"Attempting to use the action `ObjectNavExpertAction` which is not supported by your version of"
" AI2-THOR. The action `ObjectNavExpertAction` is experimental. In order"
" to enable this action, please install the (in development) version of AI2-THOR. Through pip"
" this can be done with the command"
" `pip install -e git+https://github.com/allenai/ai2thor.git@7d914cec13aae62298f5a6a816adb8ac6946c61f#egg=ai2thor`."
)
if self.env.last_action_success:
expert_action: Optional[str] = self.env.last_event.metadata[
"actionReturn"
]
if isinstance(expert_action, str):
if self.mirror:
if expert_action == "RotateLeft":
expert_action = "RotateRight"
elif expert_action == "RotateRight":
expert_action = "RotateLeft"
return self.class_action_names().index(expert_action), True
else:
# This should have been caught by self._is_goal_in_range()...
return 0, False
else:
return 0, False
class NavToPartnerTask(Task[RoboThorEnvironment]):
_actions = (MOVE_AHEAD, ROTATE_LEFT, ROTATE_RIGHT)
def __init__(
self,
env: RoboThorEnvironment,
sensors: List[Sensor],
task_info: Dict[str, Any],
max_steps: int,
reward_configs: Dict[str, Any],
**kwargs,
) -> None:
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self.reward_configs = reward_configs
assert self.env.agent_count == 2, "NavToPartnerTask only defined for 2 agents!"
pose1 = self.env.agent_state(0)
pose2 = self.env.agent_state(1)
self.last_geodesic_distance = self.env.distance_cache.find_distance(
self.env.scene_name,
{k: pose1[k] for k in ["x", "y", "z"]},
{k: pose2[k] for k in ["x", "y", "z"]},
self.env.distance_from_point_to_point,
)
self.task_info["followed_path1"] = [pose1]
self.task_info["followed_path2"] = [pose2]
self.task_info["action_names"] = self.class_action_names()
@property
def action_space(self):
return gym.spaces.Tuple(
[
gym.spaces.Discrete(len(self._actions)),
gym.spaces.Discrete(len(self._actions)),
]
)
def reached_terminal_state(self) -> bool:
return (
self.last_geodesic_distance <= self.reward_configs["max_success_distance"]
)
@classmethod
def class_action_names(cls, **kwargs) -> Tuple[str, ...]:
return cls._actions
def close(self) -> None:
self.env.stop()
def _step(self, action: Tuple[int, int]) -> RLStepResult:
assert isinstance(action, tuple)
action_str1 = self.class_action_names()[action[0]]
action_str2 = self.class_action_names()[action[1]]
self.env.step({"action": action_str1, "agentId": 0})
self.last_action_success1 = self.env.last_action_success
self.env.step({"action": action_str2, "agentId": 1})
self.last_action_success2 = self.env.last_action_success
pose1 = self.env.agent_state(0)
self.task_info["followed_path1"].append(pose1)
pose2 = self.env.agent_state(1)
self.task_info["followed_path2"].append(pose2)
self.last_geodesic_distance = self.env.distance_cache.find_distance(
self.env.scene_name,
{k: pose1[k] for k in ["x", "y", "z"]},
{k: pose2[k] for k in ["x", "y", "z"]},
self.env.distance_from_point_to_point,
)
step_result = RLStepResult(
observation=self.get_observations(),
reward=self.judge(),
done=self.is_done(),
info={
"last_action_success": [
self.last_action_success1,
self.last_action_success2,
],
"action": action,
},
)
return step_result
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
assert mode in ["rgb", "depth"], "only rgb and depth rendering is implemented"
if mode == "rgb":
return tile_images(self.env.current_frames)
elif mode == "depth":
return tile_images(self.env.current_depths)
def judge(self) -> float:
"""Judge the last event."""
reward = self.reward_configs["step_penalty"]
if self.reached_terminal_state():
reward += self.reward_configs["success_reward"]
return reward # reward shared by both agents (no shaping)
def metrics(self) -> Dict[str, Any]:
if not self.is_done():
return {}
return {
**super().metrics(),
"success": self.reached_terminal_state(),
}
| allenact-main | allenact_plugins/robothor_plugin/robothor_tasks.py |
from typing import Any, Tuple, Optional
import ai2thor.controller
import gym
import numpy as np
import quaternion # noqa # pylint: disable=unused-import
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import Task
from allenact.embodiedai.sensors.vision_sensors import RGBSensor, DepthSensor
from allenact.utils.misc_utils import prepare_locals_for_super
from allenact.utils.system import get_logger
from allenact_plugins.ithor_plugin.ithor_sensors import (
RGBSensorThor,
THOR_ENV_TYPE,
THOR_TASK_TYPE,
)
from allenact_plugins.robothor_plugin.robothor_environment import RoboThorEnvironment
from allenact_plugins.robothor_plugin.robothor_tasks import PointNavTask
class RGBSensorRoboThor(RGBSensorThor):
"""Sensor for RGB images in RoboTHOR.
    Returns, from a running RoboThorEnvironment instance, the current RGB
    frame corresponding to the agent's egocentric view.
"""
def __init__(self, *args: Any, **kwargs: Any):
get_logger().warning(
"`RGBSensorRoboThor` is deprecated, use `RGBSensorThor` instead."
)
super().__init__(*args, **kwargs)
class RGBSensorMultiRoboThor(RGBSensor[RoboThorEnvironment, Task[RoboThorEnvironment]]):
"""Sensor for RGB images in RoboTHOR.
    Returns, from a running RoboThorEnvironment instance, the current RGB
    frames corresponding to each agent's egocentric view, stacked along a
    leading agent dimension.
"""
def __init__(self, agent_count: int = 2, **kwargs):
# TODO take all named args from superclass and pass with super().__init__(**prepare_locals_for_super(locals()))
super().__init__(**kwargs)
self.agent_count = agent_count
self.agent_id = 0
def frame_from_env(
self, env: RoboThorEnvironment, task: Optional[Task[RoboThorEnvironment]]
) -> np.ndarray:
return env.current_frames[self.agent_id].copy()
def get_observation(
self,
env: RoboThorEnvironment,
task: Task[RoboThorEnvironment],
*args: Any,
**kwargs: Any
) -> Any:
obs = []
for self.agent_id in range(self.agent_count):
obs.append(super().get_observation(env, task, *args, **kwargs))
return np.stack(obs, axis=0) # agents x width x height x channels
class GPSCompassSensorRoboThor(Sensor[RoboThorEnvironment, PointNavTask]):
def __init__(self, uuid: str = "target_coordinates_ind", **kwargs: Any):
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self):
return gym.spaces.Box(
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
shape=(2,),
dtype=np.float32,
)
@staticmethod
def _compute_pointgoal(
source_position: np.ndarray,
source_rotation: np.quaternion,
goal_position: np.ndarray,
):
direction_vector = goal_position - source_position
direction_vector_agent = GPSCompassSensorRoboThor.quaternion_rotate_vector(
source_rotation.inverse(), direction_vector
)
rho, phi = GPSCompassSensorRoboThor.cartesian_to_polar(
direction_vector_agent[2], -direction_vector_agent[0]
)
return np.array([rho, phi], dtype=np.float32)
@staticmethod
def quaternion_from_y_angle(angle: float) -> np.quaternion:
r"""Creates a quaternion from rotation angle around y axis"""
return GPSCompassSensorRoboThor.quaternion_from_coeff(
np.array(
[0.0, np.sin(np.pi * angle / 360.0), 0.0, np.cos(np.pi * angle / 360.0)]
)
)
@staticmethod
def quaternion_from_coeff(coeffs: np.ndarray) -> np.quaternion:
r"""Creates a quaternions from coeffs in [x, y, z, w] format"""
quat = np.quaternion(0, 0, 0, 0)
quat.real = coeffs[3]
quat.imag = coeffs[0:3]
return quat
@staticmethod
def cartesian_to_polar(x, y):
rho = np.sqrt(x ** 2 + y ** 2)
phi = np.arctan2(y, x)
return rho, phi
@staticmethod
def quaternion_rotate_vector(quat: np.quaternion, v: np.array) -> np.array:
r"""Rotates a vector by a quaternion
Args:
quat: The quaternion to rotate by
v: The vector to rotate
Returns:
np.array: The rotated vector
"""
vq = np.quaternion(0, 0, 0, 0)
vq.imag = v
return (quat * vq * quat.inverse()).imag
def get_observation(
self,
env: RoboThorEnvironment,
task: Optional[PointNavTask],
*args: Any,
**kwargs: Any
) -> Any:
agent_state = env.agent_state()
agent_position = np.array([agent_state[k] for k in ["x", "y", "z"]])
rotation_world_agent = self.quaternion_from_y_angle(
agent_state["rotation"]["y"]
)
goal_position = np.array([task.task_info["target"][k] for k in ["x", "y", "z"]])
return self._compute_pointgoal(
agent_position, rotation_world_agent, goal_position
)
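# A minimal sketch of the pointgoal computation above, using made-up poses
# (`_pointgoal_example` is illustrative and not part of the plugin): an agent
# at the origin facing 0 degrees with a goal 2m straight ahead along +z
# observes rho = 2 (distance) and phi = 0 (relative heading).
def _pointgoal_example() -> np.ndarray:
    rotation = GPSCompassSensorRoboThor.quaternion_from_y_angle(0.0)
    return GPSCompassSensorRoboThor._compute_pointgoal(
        source_position=np.array([0.0, 0.0, 0.0]),
        source_rotation=rotation,
        goal_position=np.array([0.0, 0.0, 2.0]),
    )  # -> array([2., 0.], dtype=float32)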
class DepthSensorThor(DepthSensor[THOR_ENV_TYPE, THOR_TASK_TYPE]):
def __init__(
self,
use_resnet_normalization: Optional[bool] = None,
use_normalization: Optional[bool] = None,
mean: Optional[np.ndarray] = np.array([[0.5]], dtype=np.float32),
stdev: Optional[np.ndarray] = np.array([[0.25]], dtype=np.float32),
height: Optional[int] = None,
width: Optional[int] = None,
uuid: str = "depth",
output_shape: Optional[Tuple[int, ...]] = None,
output_channels: int = 1,
unnormalized_infimum: float = 0.0,
unnormalized_supremum: float = 5.0,
scale_first: bool = False,
**kwargs: Any
):
# Give priority to use_normalization, but use_resnet_normalization for backward compat. if not set
if use_resnet_normalization is not None and use_normalization is None:
use_normalization = use_resnet_normalization
elif use_normalization is None:
use_normalization = False
super().__init__(**prepare_locals_for_super(locals()))
def frame_from_env(
self, env: THOR_ENV_TYPE, task: Optional[THOR_TASK_TYPE]
) -> np.ndarray:
if not isinstance(env, ai2thor.controller.Controller):
return env.controller.last_event.depth_frame
return env.last_event.depth_frame
class DepthSensorRoboThor(DepthSensorThor):
# For backwards compatibility
def __init__(self, *args: Any, **kwargs: Any):
get_logger().warning(
"`DepthSensorRoboThor` is deprecated, use `DepthSensorThor` instead."
)
super().__init__(*args, **kwargs)
| allenact-main | allenact_plugins/robothor_plugin/robothor_sensors.py |
import copy
import json
import math
import os
from typing import Tuple, Sequence, Union, Dict, Optional, Any, cast, Generator, List
import cv2
import numpy as np
from PIL import Image, ImageDraw
from ai2thor.controller import Controller
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
import colour as col
from allenact.utils.system import get_logger
from allenact.utils.viz_utils import TrajectoryViz
ROBOTHOR_VIZ_CACHED_TOPDOWN_VIEWS_DIR = os.path.join(
os.path.expanduser("~"), ".allenact", "robothor", "top_down_viz_cache"
)
class ThorPositionTo2DFrameTranslator(object):
def __init__(
self,
frame_shape_rows_cols: Tuple[int, int],
cam_position: Sequence[float],
orth_size: float,
):
self.frame_shape = frame_shape_rows_cols
self.lower_left = np.array((cam_position[0], cam_position[2])) - orth_size
self.span = 2 * orth_size
def __call__(self, position: Sequence[float]):
if len(position) == 3:
x, _, z = position
else:
x, z = position
camera_position = (np.array((x, z)) - self.lower_left) / self.span
return np.array(
(
round(self.frame_shape[0] * (1.0 - camera_position[1])),
round(self.frame_shape[1] * camera_position[0]),
),
dtype=int,
)
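# A minimal usage sketch for `ThorPositionTo2DFrameTranslator` with made-up
# camera parameters (a 100x100 top-down frame whose orthographic camera sits
# over the origin with half-extent 5): a world (x, z) position is normalized
# into [0, 1]^2 relative to the camera's lower-left corner and then flipped
# vertically into (row, col) pixel coordinates.
def _translator_example() -> np.ndarray:
    translator = ThorPositionTo2DFrameTranslator(
        frame_shape_rows_cols=(100, 100),
        cam_position=(0.0, 10.0, 0.0),
        orth_size=5.0,
    )
    return translator((0.0, 0.0, 0.0))  # camera center -> array([50, 50])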
class ThorViz(TrajectoryViz):
def __init__(
self,
path_to_trajectory: Sequence[str] = ("task_info", "followed_path"),
label: str = "thor_trajectory",
figsize: Tuple[float, float] = (8, 4), # width, height
fontsize: float = 10,
scenes: Union[
Tuple[str, int, int, int, int], Sequence[Tuple[str, int, int, int, int]]
] = ("FloorPlan_Val{}_{}", 1, 3, 1, 5),
viz_rows_cols: Tuple[int, int] = (448, 448),
single_color: bool = False,
view_triangle_only_on_last: bool = True,
disable_view_triangle: bool = False,
line_opacity: float = 1.0,
**kwargs
):
super().__init__(
path_to_trajectory=path_to_trajectory,
label=label,
figsize=figsize,
fontsize=fontsize,
**kwargs
)
if isinstance(scenes[0], str):
scenes = [
cast(Tuple[str, int, int, int, int], scenes)
] # make it list of tuples
self.scenes = cast(List[Tuple[str, int, int, int, int]], scenes)
self.room_path = ROBOTHOR_VIZ_CACHED_TOPDOWN_VIEWS_DIR
os.makedirs(self.room_path, exist_ok=True)
self.viz_rows_cols = viz_rows_cols
self.single_color = single_color
self.view_triangle_only_on_last = view_triangle_only_on_last
self.disable_view_triangle = disable_view_triangle
self.line_opacity = line_opacity
# Only needed for rendering
self.map_data: Optional[Dict[str, Any]] = None
self.thor_top_downs: Optional[Dict[str, np.ndarray]] = None
self.controller: Optional[Controller] = None
def init_top_down_render(self):
self.map_data = self.get_translator()
self.thor_top_downs = self.make_top_down_views()
# No controller needed after this point
if self.controller is not None:
self.controller.stop()
self.controller = None
@staticmethod
def iterate_scenes(
all_scenes: Sequence[Tuple[str, int, int, int, int]]
) -> Generator[str, None, None]:
for scenes in all_scenes:
for wall in range(scenes[1], scenes[2] + 1):
for furniture in range(scenes[3], scenes[4] + 1):
roomname = scenes[0].format(wall, furniture)
yield roomname
def cached_map_data_path(self, roomname: str) -> str:
return os.path.join(self.room_path, "map_data__{}.json".format(roomname))
def get_translator(self) -> Dict[str, Any]:
roomname = list(ThorViz.iterate_scenes(self.scenes))[0]
json_file = self.cached_map_data_path(roomname)
if not os.path.exists(json_file):
self.make_controller()
self.controller.reset(roomname)
map_data = self.get_agent_map_data()
get_logger().info("Dumping {}".format(json_file))
with open(json_file, "w") as f:
json.dump(map_data, f, indent=4, sort_keys=True)
else:
with open(json_file, "r") as f:
map_data = json.load(f)
pos_translator = ThorPositionTo2DFrameTranslator(
self.viz_rows_cols,
self.position_to_tuple(map_data["cam_position"]),
map_data["cam_orth_size"],
)
map_data["pos_translator"] = pos_translator
get_logger().debug("Using map_data {}".format(map_data))
return map_data
def cached_image_path(self, roomname: str) -> str:
return os.path.join(
self.room_path, "{}__r{}_c{}.png".format(roomname, *self.viz_rows_cols)
)
def make_top_down_views(self) -> Dict[str, np.ndarray]:
top_downs = {}
for roomname in self.iterate_scenes(self.scenes):
fname = self.cached_image_path(roomname)
if not os.path.exists(fname):
self.make_controller()
self.dump_top_down_view(roomname, fname)
top_downs[roomname] = cv2.imread(fname)
return top_downs
def crop_viz_image(self, viz_image: np.ndarray) -> np.ndarray:
# Top-down view of room spans vertically near the center of the frame in RoboTHOR:
y_min = int(self.viz_rows_cols[0] * 0.3)
y_max = int(self.viz_rows_cols[0] * 0.8)
# But it covers approximately the entire width:
x_min = 0
x_max = self.viz_rows_cols[1]
cropped_viz_image = viz_image[y_min:y_max, x_min:x_max, :]
return cropped_viz_image
def make_controller(self):
if self.controller is None:
self.controller = Controller()
self.controller.step({"action": "ChangeQuality", "quality": "Very High"})
self.controller.step(
{
"action": "ChangeResolution",
"x": self.viz_rows_cols[1],
"y": self.viz_rows_cols[0],
}
)
def get_agent_map_data(self):
self.controller.step({"action": "ToggleMapView"})
cam_position = self.controller.last_event.metadata["cameraPosition"]
cam_orth_size = self.controller.last_event.metadata["cameraOrthSize"]
to_return = {
"cam_position": cam_position,
"cam_orth_size": cam_orth_size,
}
self.controller.step({"action": "ToggleMapView"})
return to_return
@staticmethod
def position_to_tuple(position: Dict[str, float]) -> Tuple[float, float, float]:
return position["x"], position["y"], position["z"]
@staticmethod
def add_lines_to_map(
ps: Sequence[Any],
frame: np.ndarray,
pos_translator: ThorPositionTo2DFrameTranslator,
opacity: float,
color: Optional[Tuple[int, ...]] = None,
) -> np.ndarray:
if len(ps) <= 1:
return frame
if color is None:
color = (255, 0, 0)
img1 = Image.fromarray(frame.astype("uint8"), "RGB").convert("RGBA")
img2 = Image.new("RGBA", frame.shape[:-1]) # Use RGBA
        opacity = int(round(255 * opacity))  # Define transparency for the lines.
draw = ImageDraw.Draw(img2)
for i in range(len(ps) - 1):
draw.line(
tuple(reversed(pos_translator(ps[i])))
+ tuple(reversed(pos_translator(ps[i + 1]))),
fill=color + (opacity,),
width=int(frame.shape[0] / 100),
)
img = Image.alpha_composite(img1, img2)
return np.array(img.convert("RGB"))
@staticmethod
def add_line_to_map(
p0: Any,
p1: Any,
frame: np.ndarray,
pos_translator: ThorPositionTo2DFrameTranslator,
opacity: float,
color: Optional[Tuple[int, ...]] = None,
) -> np.ndarray:
if p0 == p1:
return frame
if color is None:
color = (255, 0, 0)
img1 = Image.fromarray(frame.astype("uint8"), "RGB").convert("RGBA")
img2 = Image.new("RGBA", frame.shape[:-1]) # Use RGBA
        opacity = int(round(255 * opacity))  # Define transparency for the line.
draw = ImageDraw.Draw(img2)
draw.line(
tuple(reversed(pos_translator(p0))) + tuple(reversed(pos_translator(p1))),
fill=color + (opacity,),
width=int(frame.shape[0] / 100),
)
img = Image.alpha_composite(img1, img2)
return np.array(img.convert("RGB"))
@staticmethod
def add_agent_view_triangle(
position: Any,
rotation: Dict[str, float],
frame: np.ndarray,
pos_translator: ThorPositionTo2DFrameTranslator,
scale: float = 1.0,
opacity: float = 0.1,
) -> np.ndarray:
p0 = np.array((position[0], position[2]))
p1 = copy.copy(p0)
p2 = copy.copy(p0)
theta = -2 * math.pi * (rotation["y"] / 360.0)
rotation_mat = np.array(
[[math.cos(theta), -math.sin(theta)], [math.sin(theta), math.cos(theta)]]
)
offset1 = scale * np.array([-1 / 2.0, 1])
offset2 = scale * np.array([1 / 2.0, 1])
p1 += np.matmul(rotation_mat, offset1)
p2 += np.matmul(rotation_mat, offset2)
img1 = Image.fromarray(frame.astype("uint8"), "RGB").convert("RGBA")
img2 = Image.new("RGBA", frame.shape[:-1]) # Use RGBA
opacity = int(round(255 * opacity)) # Define transparency for the triangle.
points = [tuple(reversed(pos_translator(p))) for p in [p0, p1, p2]]
draw = ImageDraw.Draw(img2)
draw.polygon(points, fill=(255, 255, 255, opacity))
img = Image.alpha_composite(img1, img2)
return np.array(img.convert("RGB"))
@staticmethod
def visualize_agent_path(
positions: Sequence[Any],
frame: np.ndarray,
pos_translator: ThorPositionTo2DFrameTranslator,
single_color: bool = False,
view_triangle_only_on_last: bool = False,
disable_view_triangle: bool = False,
line_opacity: float = 1.0,
trajectory_start_end_color_str: Tuple[str, str] = ("red", "green"),
) -> np.ndarray:
if single_color:
frame = ThorViz.add_lines_to_map(
list(map(ThorViz.position_to_tuple, positions)),
frame,
pos_translator,
line_opacity,
tuple(
map(
lambda x: int(round(255 * x)),
col.Color(trajectory_start_end_color_str[0]).rgb,
)
),
)
else:
if len(positions) > 1:
colors = list(
col.Color(trajectory_start_end_color_str[0]).range_to(
col.Color(trajectory_start_end_color_str[1]), len(positions) - 1
)
)
for i in range(len(positions) - 1):
frame = ThorViz.add_line_to_map(
ThorViz.position_to_tuple(positions[i]),
ThorViz.position_to_tuple(positions[i + 1]),
frame,
pos_translator,
opacity=line_opacity,
color=tuple(map(lambda x: int(round(255 * x)), colors[i].rgb)),
)
if view_triangle_only_on_last:
positions = [positions[-1]]
if disable_view_triangle:
positions = []
for position in positions:
frame = ThorViz.add_agent_view_triangle(
ThorViz.position_to_tuple(position),
rotation=position["rotation"],
frame=frame,
pos_translator=pos_translator,
opacity=0.05 + view_triangle_only_on_last * 0.2,
)
return frame
def dump_top_down_view(self, room_name: str, image_path: str):
get_logger().debug("Dumping {}".format(image_path))
self.controller.reset(room_name)
self.controller.step(
{"action": "Initialize", "gridSize": 0.1, "makeAgentsVisible": False}
)
self.controller.step({"action": "ToggleMapView"})
top_down_view = self.controller.last_event.cv2img
cv2.imwrite(image_path, top_down_view)
def make_fig(self, episode: Any, episode_id: str) -> Figure:
trajectory: Sequence[Dict[str, Any]] = self._access(
episode, self.path_to_trajectory
)
if self.thor_top_downs is None:
self.init_top_down_render()
roomname = "_".join(episode_id.split("_")[:3])
im = self.visualize_agent_path(
trajectory,
self.thor_top_downs[roomname],
self.map_data["pos_translator"],
single_color=self.single_color,
view_triangle_only_on_last=self.view_triangle_only_on_last,
disable_view_triangle=self.disable_view_triangle,
line_opacity=self.line_opacity,
)
fig, ax = plt.subplots(figsize=self.figsize)
ax.set_title(episode_id, fontsize=self.fontsize)
ax.imshow(self.crop_viz_image(im)[:, :, ::-1])
ax.axis("off")
return fig
class ThorMultiViz(ThorViz):
def __init__(
self,
path_to_trajectory_prefix: Sequence[str] = ("task_info", "followed_path"),
agent_suffixes: Sequence[str] = ("1", "2"),
label: str = "thor_trajectories",
trajectory_start_end_color_strs: Sequence[Tuple[str, str]] = (
("red", "green"),
("cyan", "purple"),
),
**kwargs
):
super().__init__(label=label, **kwargs)
self.path_to_trajectory_prefix = list(path_to_trajectory_prefix)
self.agent_suffixes = list(agent_suffixes)
self.trajectory_start_end_color_strs = list(trajectory_start_end_color_strs)
def make_fig(self, episode: Any, episode_id: str) -> Figure:
if self.thor_top_downs is None:
self.init_top_down_render()
roomname = "_".join(episode_id.split("_")[:3])
im = self.thor_top_downs[roomname]
for agent, start_end_color in zip(
self.agent_suffixes, self.trajectory_start_end_color_strs
):
path = self.path_to_trajectory_prefix[:]
path[-1] = path[-1] + agent
trajectory = self._access(episode, path)
im = self.visualize_agent_path(
trajectory,
im,
self.map_data["pos_translator"],
single_color=self.single_color,
view_triangle_only_on_last=self.view_triangle_only_on_last,
disable_view_triangle=self.disable_view_triangle,
line_opacity=self.line_opacity,
trajectory_start_end_color_str=start_end_color,
)
fig, ax = plt.subplots(figsize=self.figsize)
ax.set_title(episode_id, fontsize=self.fontsize)
ax.imshow(self.crop_viz_image(im)[:, :, ::-1])
ax.axis("off")
return fig
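# A hedged usage sketch (the episode structure is an assumption based on
# `path_to_trajectory` above, and rendering needs a working AI2-THOR install
# the first time top-down maps are cached): given a test-episode record whose
# task_info["followed_path"] holds agent poses, `ThorViz` returns a matplotlib
# Figure with the trajectory drawn over a cached top-down map.
def _thor_viz_example(episode: Dict[str, Any]) -> Figure:
    viz = ThorViz()
    return viz.make_fig(episode, episode_id="FloorPlan_Val1_1_0")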
| allenact-main | allenact_plugins/robothor_plugin/robothor_viz.py |
allenact-main | allenact_plugins/robothor_plugin/configs/__init__.py |
|
import gzip
import json
import os
from typing import Sequence, Optional
from allenact_plugins.robothor_plugin.robothor_task_samplers import (
ObjectNavDatasetTaskSampler,
)
def create_debug_dataset_from_train_dataset(
scene: str,
target_object_type: Optional[str],
episodes_subset: Sequence[int],
train_dataset_path: str,
base_debug_output_path: str,
):
downloaded_episodes = os.path.join(
train_dataset_path, "episodes", scene + ".json.gz"
)
assert os.path.exists(downloaded_episodes), (
"'{}' doesn't seem to exist or is empty. Make sure you've downloaded to download the appropriate"
" training dataset with"
" datasets/download_navigation_datasets.sh".format(downloaded_episodes)
)
# episodes
episodes = ObjectNavDatasetTaskSampler.load_dataset(
scene=scene, base_directory=os.path.join(train_dataset_path, "episodes")
)
if target_object_type is not None:
ids = {
"{}_{}_{}".format(scene, target_object_type, epit)
for epit in episodes_subset
}
else:
ids = {"{}_{}".format(scene, epit) for epit in episodes_subset}
debug_episodes = [ep for ep in episodes if ep["id"] in ids]
assert len(ids) == len(debug_episodes), (
f"Number of input ids ({len(ids)}) does not equal"
f" number of output debug tasks ({len(debug_episodes)})"
)
# sort by episode_ids
debug_episodes = [
idep[1]
for idep in sorted(
[(int(ep["id"].split("_")[-1]), ep) for ep in debug_episodes],
key=lambda x: x[0],
)
]
assert len(debug_episodes) == len(episodes_subset)
episodes_dir = os.path.join(base_debug_output_path, "episodes")
os.makedirs(episodes_dir, exist_ok=True)
episodes_file = os.path.join(episodes_dir, scene + ".json.gz")
json_str = json.dumps(debug_episodes)
json_bytes = json_str.encode("utf-8")
with gzip.GzipFile(episodes_file, "w") as fout:
fout.write(json_bytes)
assert os.path.exists(episodes_file)
if __name__ == "__main__":
CURRENT_PATH = os.getcwd()
SCENE = "FloorPlan_Train1_1"
TARGET = "Television"
EPISODES = [0, 7, 11, 12]
BASE_OUT = os.path.join(CURRENT_PATH, "datasets", "robothor-objectnav", "debug")
create_debug_dataset_from_train_dataset(
scene=SCENE,
target_object_type=TARGET,
episodes_subset=EPISODES,
train_dataset_path=os.path.join(
CURRENT_PATH, "datasets", "robothor-objectnav", "train"
),
base_debug_output_path=BASE_OUT,
)
| allenact-main | allenact_plugins/robothor_plugin/scripts/make_objectnav_debug_dataset.py |
allenact-main | allenact_plugins/robothor_plugin/scripts/__init__.py |
|
import os
from allenact_plugins.robothor_plugin.scripts.make_objectnav_debug_dataset import (
create_debug_dataset_from_train_dataset,
)
if __name__ == "__main__":
CURRENT_PATH = os.getcwd()
SCENE = "FloorPlan_Train1_1"
EPISODES = [3, 4, 5, 6]
BASE_OUT = os.path.join(CURRENT_PATH, "datasets", "robothor-pointnav", "debug")
create_debug_dataset_from_train_dataset(
scene=SCENE,
target_object_type=None,
episodes_subset=EPISODES,
train_dataset_path=os.path.join(
CURRENT_PATH, "datasets", "robothor-pointnav", "train"
),
base_debug_output_path=BASE_OUT,
)
| allenact-main | allenact_plugins/robothor_plugin/scripts/make_pointnav_debug_dataset.py |
import random
from typing import Tuple, Any, List, Dict, Optional, Union, Callable, Sequence, cast
import gym
import networkx as nx
import numpy as np
from gym.utils import seeding
from gym_minigrid.envs import CrossingEnv
from gym_minigrid.minigrid import (
DIR_TO_VEC,
IDX_TO_OBJECT,
MiniGridEnv,
OBJECT_TO_IDX,
)
from allenact.base_abstractions.misc import RLStepResult
from allenact.base_abstractions.sensor import Sensor, SensorSuite
from allenact.base_abstractions.task import Task, TaskSampler
from allenact.utils.system import get_logger
from allenact_plugins.minigrid_plugin.minigrid_environments import (
AskForHelpSimpleCrossing,
)
class MiniGridTask(Task[CrossingEnv]):
    """Task wrapped around a MiniGrid environment (currently focused on
    LavaCrossing), allowing AllenAct to interface with MiniGrid tasks."""
    _ACTION_NAMES: Tuple[str, ...] = ("left", "right", "forward")
    _ACTION_IND_TO_MINIGRID_IND = tuple(
        MiniGridEnv.Actions.__members__[name].value for name in _ACTION_NAMES
    )
    _CACHED_GRAPHS: Dict[str, nx.DiGraph] = {}
    _NEIGHBOR_OFFSETS = tuple(
        [(-1, 0, 0), (0, -1, 0), (0, 0, -1), (1, 0, 0), (0, 1, 0), (0, 0, 1)]
    )
    _XY_DIFF_TO_AGENT_DIR = {
        tuple(vec): dir_ind for dir_ind, vec in enumerate(DIR_TO_VEC)
    }
def __init__(
self,
env: Union[CrossingEnv],
sensors: Union[SensorSuite, List[Sensor]],
task_info: Dict[str, Any],
max_steps: int,
task_cache_uid: Optional[str] = None,
corrupt_expert_within_actions_of_goal: Optional[int] = None,
**kwargs,
):
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self._graph: Optional[nx.DiGraph] = None
self._minigrid_done = False
self._task_cache_uid = task_cache_uid
self.corrupt_expert_within_actions_of_goal = (
corrupt_expert_within_actions_of_goal
)
self.closest_agent_has_been_to_goal: Optional[float] = None
@property
def action_space(self) -> gym.spaces.Discrete:
return gym.spaces.Discrete(len(self._ACTION_NAMES))
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
return self.env.render(mode=mode)
def _step(self, action: int) -> RLStepResult:
assert isinstance(action, int)
action = cast(int, action)
minigrid_obs, reward, self._minigrid_done, info = self.env.step(
action=self._ACTION_IND_TO_MINIGRID_IND[action]
)
# self.env.render()
return RLStepResult(
observation=self.get_observations(minigrid_output_obs=minigrid_obs),
reward=reward,
done=self.is_done(),
info=info,
)
def get_observations(
self, *args, minigrid_output_obs: Optional[Dict[str, Any]] = None, **kwargs
) -> Any:
return self.sensor_suite.get_observations(
env=self.env, task=self, minigrid_output_obs=minigrid_output_obs
)
def reached_terminal_state(self) -> bool:
return self._minigrid_done
@classmethod
def class_action_names(cls, **kwargs) -> Tuple[str, ...]:
return cls._ACTION_NAMES
def close(self) -> None:
pass
def metrics(self) -> Dict[str, Any]:
# noinspection PyUnresolvedReferences,PyCallingNonCallable
env_metrics = self.env.metrics() if hasattr(self.env, "metrics") else {}
return {
**super(MiniGridTask, self).metrics(),
**{k: float(v) for k, v in env_metrics.items()},
"success": int(
self.env.was_successful
if hasattr(self.env, "was_successful")
else self.cumulative_reward > 0
),
}
@property
def graph_created(self):
return self._graph is not None
@property
def graph(self):
if self._graph is None:
if self._task_cache_uid is not None:
if self._task_cache_uid not in self._CACHED_GRAPHS:
self._CACHED_GRAPHS[self._task_cache_uid] = self.generate_graph()
self._graph = self._CACHED_GRAPHS[self._task_cache_uid]
else:
self._graph = self.generate_graph()
return self._graph
@graph.setter
def graph(self, graph: nx.DiGraph):
self._graph = graph
@classmethod
def possible_neighbor_offsets(cls) -> Tuple[Tuple[int, int, int], ...]:
# Tuples of format:
# (X translation, Y translation, rotation by 90 degrees)
# A constant is returned, this function can be changed if anything
# more complex needs to be done.
# offsets_superset = itertools.product(
# [-1, 0, 1], [-1, 0, 1], [-1, 0, 1]
# )
#
# valid_offsets = []
# for off in offsets_superset:
# if (int(off[0] != 0) + int(off[1] != 0) + int(off[2] != 0)) == 1:
# valid_offsets.append(off)
#
# return tuple(valid_offsets)
return cls._NEIGHBOR_OFFSETS
@classmethod
def _add_from_to_edge(
cls, g: nx.DiGraph, s: Tuple[int, int, int], t: Tuple[int, int, int],
):
"""Adds nodes and corresponding edges to existing nodes.
This approach avoids adding the same edge multiple times.
Pre-requisite knowledge about MiniGrid:
DIR_TO_VEC = [
# Pointing right (positive X)
np.array((1, 0)),
# Down (positive Y)
np.array((0, 1)),
# Pointing left (negative X)
np.array((-1, 0)),
# Up (negative Y)
np.array((0, -1)),
]
or
AGENT_DIR_TO_STR = {
0: '>',
1: 'V',
2: '<',
3: '^'
}
This also implies turning right (clockwise) means:
agent_dir += 1
"""
s_x, s_y, s_rot = s
t_x, t_y, t_rot = t
x_diff = t_x - s_x
y_diff = t_y - s_y
angle_diff = (t_rot - s_rot) % 4
# If source and target differ by more than one action, continue
if (x_diff != 0) + (y_diff != 0) + (angle_diff != 0) != 1 or angle_diff == 2:
return
action = None
if angle_diff == 1:
action = "right"
elif angle_diff == 3:
action = "left"
elif cls._XY_DIFF_TO_AGENT_DIR[(x_diff, y_diff)] == s_rot:
# if translation is the same direction as source
# orientation, then it's a valid forward action
action = "forward"
else:
# This is when the source and target aren't one action
# apart, despite having dx=1 or dy=1
pass
if action is not None:
g.add_edge(s, t, action=action)
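    # A worked example of the edge rule above (coordinates made up): with
    # s = (3, 3, 0) the agent faces +X (direction 0), so t = (4, 3, 0) differs
    # only by x_diff = 1 and _XY_DIFF_TO_AGENT_DIR[(1, 0)] == 0 == s_rot,
    # producing a "forward" edge; t = (3, 3, 1) differs only by
    # angle_diff = 1, producing a "right" edge. An angle_diff of 2 (a 180
    # degree turn) is never reachable in one action, so no edge is added.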
def _add_node_to_graph(
self,
graph: nx.DiGraph,
s: Tuple[int, int, int],
valid_node_types: Tuple[str, ...],
attr_dict: Dict[Any, Any] = None,
include_rotation_free_leaves: bool = False,
):
if s in graph:
return
if attr_dict is None:
get_logger().warning("adding a node with neighbor checks and no attributes")
graph.add_node(s, **attr_dict)
if include_rotation_free_leaves:
rot_free_leaf = (*s[:-1], None)
if rot_free_leaf not in graph:
graph.add_node(rot_free_leaf)
graph.add_edge(s, rot_free_leaf, action="NA")
if attr_dict["type"] in valid_node_types:
for o in self.possible_neighbor_offsets():
t = (s[0] + o[0], s[1] + o[1], (s[2] + o[2]) % 4)
if t in graph and graph.nodes[t]["type"] in valid_node_types:
self._add_from_to_edge(graph, s, t)
self._add_from_to_edge(graph, t, s)
    def generate_graph(self) -> nx.DiGraph:
        """Generates a navigation graph over the fully observable grid of
        `self.env` (the expert is assumed to see the entire grid)."""
image = self.env.grid.encode()
width, height, _ = image.shape
graph = nx.DiGraph()
# In fully observable grid, there shouldn't be any "unseen"
# Currently dealing with "empty", "wall", "goal", "lava"
valid_object_ids = np.sort(
[OBJECT_TO_IDX[o] for o in ["empty", "wall", "lava", "goal"]]
)
assert np.all(np.union1d(image[:, :, 0], valid_object_ids) == valid_object_ids)
# Grid to nodes
for x in range(width):
for y in range(height):
for rotation in range(4):
type, color, state = image[x, y]
self._add_node_to_graph(
graph,
(x, y, rotation),
attr_dict={
"type": IDX_TO_OBJECT[type],
"color": color,
"state": state,
},
valid_node_types=("empty", "goal"),
)
if IDX_TO_OBJECT[type] == "goal":
if not graph.has_node("unified_goal"):
graph.add_node("unified_goal")
graph.add_edge((x, y, rotation), "unified_goal")
return graph
def query_expert(self, **kwargs) -> Tuple[int, bool]:
if self._minigrid_done:
get_logger().warning("Episode is completed, but expert is still queried.")
return -1, False
paths = []
agent_x, agent_y = self.env.agent_pos
agent_rot = self.env.agent_dir
source_state_key = (agent_x, agent_y, agent_rot)
assert source_state_key in self.graph
paths.append(nx.shortest_path(self.graph, source_state_key, "unified_goal"))
if len(paths) == 0:
return -1, False
shortest_path_ind = int(np.argmin([len(p) for p in paths]))
if self.closest_agent_has_been_to_goal is None:
self.closest_agent_has_been_to_goal = len(paths[shortest_path_ind]) - 1
else:
self.closest_agent_has_been_to_goal = min(
len(paths[shortest_path_ind]) - 1, self.closest_agent_has_been_to_goal
)
if (
self.corrupt_expert_within_actions_of_goal is not None
and self.corrupt_expert_within_actions_of_goal
>= self.closest_agent_has_been_to_goal
):
return (
int(self.env.np_random.randint(0, len(self.class_action_names()))),
True,
)
if len(paths[shortest_path_ind]) == 2:
# Since "unified_goal" is 1 step away from actual goals
# if a path like [actual_goal, unified_goal] exists, then
# you are already at a goal.
get_logger().warning(
"Shortest path computations suggest we are at"
" the target but episode does not think so."
)
return -1, False
next_key_on_shortest_path = paths[shortest_path_ind][1]
return (
self.class_action_names().index(
self.graph.get_edge_data(source_state_key, next_key_on_shortest_path)[
"action"
]
),
True,
)
class AskForHelpSimpleCrossingTask(MiniGridTask):
_ACTION_NAMES = ("left", "right", "forward", "toggle")
_ACTION_IND_TO_MINIGRID_IND = tuple(
MiniGridEnv.Actions.__members__[name].value for name in _ACTION_NAMES
)
_CACHED_GRAPHS: Dict[str, nx.DiGraph] = {}
def __init__(
self,
env: AskForHelpSimpleCrossing,
sensors: Union[SensorSuite, List[Sensor]],
task_info: Dict[str, Any],
max_steps: int,
**kwargs,
):
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self.did_toggle: List[bool] = []
def _step(self, action: Union[int, Sequence[int]]) -> RLStepResult:
assert isinstance(action, int)
action = cast(int, action)
self.did_toggle.append(self._ACTION_NAMES[action] == "toggle")
return super(AskForHelpSimpleCrossingTask, self)._step(action=action)
def metrics(self) -> Dict[str, Any]:
return {
**super(AskForHelpSimpleCrossingTask, self).metrics(),
"toggle_percent": float(
sum(self.did_toggle) / max(len(self.did_toggle), 1)
),
}
class MiniGridTaskSampler(TaskSampler):
def __init__(
self,
env_class: Callable[..., Union[MiniGridEnv]],
sensors: Union[SensorSuite, List[Sensor]],
env_info: Optional[Dict[str, Any]] = None,
max_tasks: Optional[int] = None,
num_unique_seeds: Optional[int] = None,
task_seeds_list: Optional[List[int]] = None,
deterministic_sampling: bool = False,
cache_graphs: Optional[bool] = False,
task_class: Callable[..., MiniGridTask] = MiniGridTask,
repeat_failed_task_for_min_steps: int = 0,
extra_task_kwargs: Optional[Dict] = None,
**kwargs,
):
super(MiniGridTaskSampler, self).__init__()
self.sensors = (
SensorSuite(sensors) if not isinstance(sensors, SensorSuite) else sensors
)
self.max_tasks = max_tasks
self.num_unique_seeds = num_unique_seeds
self.cache_graphs = cache_graphs
self.deterministic_sampling = deterministic_sampling
self.repeat_failed_task_for_min_steps = repeat_failed_task_for_min_steps
self.extra_task_kwargs = (
extra_task_kwargs if extra_task_kwargs is not None else {}
)
self._last_env_seed: Optional[int] = None
self._last_task: Optional[MiniGridTask] = None
self._number_of_steps_taken_with_task_seed = 0
assert (not deterministic_sampling) or repeat_failed_task_for_min_steps <= 0, (
"If `deterministic_sampling` is True then we require"
" `repeat_failed_task_for_min_steps <= 0`"
)
assert (not self.cache_graphs) or self.num_unique_seeds is not None, (
"When caching graphs you must specify"
" a number of unique tasks to sample from."
)
assert (self.num_unique_seeds is None) or (
0 < self.num_unique_seeds
), "`num_unique_seeds` must be a positive integer."
self.num_unique_seeds = num_unique_seeds
self.task_seeds_list = task_seeds_list
if self.task_seeds_list is not None:
if self.num_unique_seeds is not None:
assert self.num_unique_seeds == len(
self.task_seeds_list
), "`num_unique_seeds` must equal the length of `task_seeds_list` if both specified."
self.num_unique_seeds = len(self.task_seeds_list)
elif self.num_unique_seeds is not None:
self.task_seeds_list = list(range(self.num_unique_seeds))
if num_unique_seeds is not None and repeat_failed_task_for_min_steps > 0:
raise NotImplementedError(
"`repeat_failed_task_for_min_steps` must be <=0 if number"
" of unique seeds is not None."
)
assert (
not self.cache_graphs
) or self.num_unique_seeds <= 1000, "Too many tasks (graphs) to cache"
assert (not deterministic_sampling) or (
self.num_unique_seeds is not None
), "Cannot use deterministic sampling when `num_unique_seeds` is `None`."
if (not deterministic_sampling) and self.max_tasks:
get_logger().warning(
"`deterministic_sampling` is `False` but you have specified `max_tasks < inf`,"
" this might be a mistake when running testing."
)
self.env = env_class(**env_info)
self.task_class = task_class
self.np_seeded_random_gen, _ = seeding.np_random(random.randint(0, 2 ** 31 - 1))
self.num_tasks_generated = 0
@property
def length(self) -> Union[int, float]:
return (
float("inf")
if self.max_tasks is None
else self.max_tasks - self.num_tasks_generated
)
@property
def total_unique(self) -> Optional[Union[int, float]]:
return None if self.num_unique_seeds is None else self.num_unique_seeds
@property
def last_sampled_task(self) -> Optional[Task]:
raise NotImplementedError
def next_task(self, force_advance_scene: bool = False) -> Optional[MiniGridTask]:
if self.length <= 0:
return None
task_cache_uid = None
repeating = False
if self.num_unique_seeds is not None:
if self.deterministic_sampling:
self._last_env_seed = self.task_seeds_list[
self.num_tasks_generated % len(self.task_seeds_list)
]
else:
self._last_env_seed = self.np_seeded_random_gen.choice(
self.task_seeds_list
)
else:
if self._last_task is not None:
self._number_of_steps_taken_with_task_seed += (
self._last_task.num_steps_taken()
)
if (
self._last_env_seed is not None
and self._number_of_steps_taken_with_task_seed
< self.repeat_failed_task_for_min_steps
and self._last_task.cumulative_reward == 0
):
repeating = True
else:
self._number_of_steps_taken_with_task_seed = 0
self._last_env_seed = self.np_seeded_random_gen.randint(0, 2 ** 31 - 1)
task_has_same_seed_reset = hasattr(self.env, "same_seed_reset")
if self.cache_graphs:
task_cache_uid = str(self._last_env_seed)
if repeating and task_has_same_seed_reset:
# noinspection PyUnresolvedReferences
self.env.same_seed_reset()
else:
self.env.seed(self._last_env_seed)
self.env.saved_seed = self._last_env_seed
self.env.reset()
self.num_tasks_generated += 1
task = self.task_class(
**dict(
env=self.env,
sensors=self.sensors,
task_info={},
max_steps=self.env.max_steps,
task_cache_uid=task_cache_uid,
),
**self.extra_task_kwargs,
)
if repeating and self._last_task.graph_created:
task.graph = self._last_task.graph
self._last_task = task
return task
def close(self) -> None:
self.env.close()
@property
def all_observation_spaces_equal(self) -> bool:
return True
def reset(self) -> None:
self.num_tasks_generated = 0
self.env.reset()
def set_seed(self, seed: int) -> None:
self.np_seeded_random_gen, _ = seeding.np_random(seed)
| allenact-main | allenact_plugins/minigrid_plugin/minigrid_tasks.py |
import copy
from typing import Optional, Set
import numpy as np
from gym import register
from gym_minigrid.envs import CrossingEnv
from gym_minigrid.minigrid import Lava, Wall
class FastCrossing(CrossingEnv):
"""Similar to `CrossingEnv`, but to support faster task sampling as per
`repeat_failed_task_for_min_steps` flag in MiniGridTaskSampler."""
def __init__(self, size=9, num_crossings=1, obstacle_type=Lava, seed=None):
self.init_agent_pos: Optional[np.ndarray] = None
self.init_agent_dir: Optional[int] = None
self.step_count: Optional[int] = None
super(FastCrossing, self).__init__(
size=size,
num_crossings=num_crossings,
obstacle_type=obstacle_type,
seed=seed,
)
def same_seed_reset(self):
assert self.init_agent_pos is not None
# Current position and direction of the agent
self.agent_pos = self.init_agent_pos
self.agent_dir = self.init_agent_dir
# Check that the agent doesn't overlap with an object
start_cell = self.grid.get(*self.agent_pos)
assert start_cell is None or start_cell.can_overlap()
assert self.carrying is None
# Step count since episode start
self.step_count = 0
# Return first observation
obs = self.gen_obs()
return obs
def reset(self, partial_reset: bool = False):
super(FastCrossing, self).reset()
self.init_agent_pos = copy.deepcopy(self.agent_pos)
self.init_agent_dir = self.agent_dir
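# A minimal usage sketch for `FastCrossing` (the seed value is arbitrary):
# after a failed episode, `same_seed_reset` restores the cached initial agent
# pose without regenerating the grid, which is what makes repeating a failed
# task cheap for the sampler.
def _fast_crossing_example():
    env = FastCrossing(size=9, num_crossings=1)
    env.seed(7)
    env.reset()  # generates the layout and caches the initial pose
    return env.same_seed_reset()  # replays the identical episode start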
class AskForHelpSimpleCrossing(CrossingEnv):
"""Corresponds to WC FAULTY SWITCH environment."""
def __init__(
self,
size=9,
num_crossings=1,
obstacle_type=Wall,
seed=None,
exploration_reward: Optional[float] = None,
death_penalty: Optional[float] = None,
        toggle_is_permanent: bool = False,
):
self.init_agent_pos: Optional[np.ndarray] = None
self.init_agent_dir: Optional[int] = None
self.should_reveal_image: bool = False
self.exploration_reward = exploration_reward
self.death_penalty = death_penalty
self.explored_points: Set = set()
self._was_successful = False
        self.toggle_is_permanent = toggle_is_permanent
self.step_count: Optional[int] = None
super(AskForHelpSimpleCrossing, self).__init__(
size=size,
num_crossings=num_crossings,
obstacle_type=obstacle_type,
seed=seed,
)
@property
def was_successful(self) -> bool:
return self._was_successful
def gen_obs(self):
obs = super(AskForHelpSimpleCrossing, self).gen_obs()
if not self.should_reveal_image:
obs["image"] *= 0
return obs
def metrics(self):
return {
"explored_count": len(self.explored_points),
"final_distance": float(
min(
abs(x - (self.width - 2)) + abs(y - (self.height - 2))
for x, y in self.explored_points
)
),
}
def step(self, action: int):
"""Reveal the observation only if the `toggle` action is executed."""
if action == self.actions.toggle:
self.should_reveal_image = True
else:
self.should_reveal_image = (
self.should_reveal_image and self.toggle_is_permanent
)
minigrid_obs, reward, done, info = super(AskForHelpSimpleCrossing, self).step(
action=action
)
assert not self._was_successful, "Called step after done."
self._was_successful = self._was_successful or (reward > 0)
if (
done
and self.steps_remaining != 0
and (not self._was_successful)
and self.death_penalty is not None
):
reward += self.death_penalty
t = tuple(self.agent_pos)
if self.exploration_reward is not None:
if t not in self.explored_points:
reward += self.exploration_reward
self.explored_points.add(t)
return minigrid_obs, reward, done, info
def same_seed_reset(self):
assert self.init_agent_pos is not None
self._was_successful = False
# Current position and direction of the agent
self.agent_pos = self.init_agent_pos
self.agent_dir = self.init_agent_dir
self.explored_points.clear()
self.explored_points.add(tuple(self.agent_pos))
self.should_reveal_image = False
# Check that the agent doesn't overlap with an object
start_cell = self.grid.get(*self.agent_pos)
assert start_cell is None or start_cell.can_overlap()
assert self.carrying is None
# Step count since episode start
self.step_count = 0
# Return first observation
obs = self.gen_obs()
return obs
def reset(self, partial_reset: bool = False):
super(AskForHelpSimpleCrossing, self).reset()
self.explored_points.clear()
self.explored_points.add(tuple(self.agent_pos))
self.init_agent_pos = copy.deepcopy(self.agent_pos)
self.init_agent_dir = self.agent_dir
self._was_successful = False
self.should_reveal_image = False
class LavaCrossingS25N10(CrossingEnv):
def __init__(self):
super(LavaCrossingS25N10, self).__init__(size=25, num_crossings=10)
class LavaCrossingS15N7(CrossingEnv):
def __init__(self):
super(LavaCrossingS15N7, self).__init__(size=15, num_crossings=7)
class LavaCrossingS11N7(CrossingEnv):
def __init__(self):
        super(LavaCrossingS11N7, self).__init__(size=11, num_crossings=7)
register(
id="MiniGrid-LavaCrossingS25N10-v0",
entry_point="allenact_plugins.minigrid_plugin.minigrid_environments:LavaCrossingS25N10",
)
register(
id="MiniGrid-LavaCrossingS15N7-v0",
entry_point="allenact_plugins.minigrid_plugin.minigrid_environments:LavaCrossingS15N7",
)
register(
id="MiniGrid-LavaCrossingS11N7-v0",
entry_point="allenact_plugins.minigrid_plugin.minigrid_environments:LavaCrossingS11N7",
)
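# A minimal usage sketch (assuming `gym` and `gym_minigrid` are importable at
# runtime): once registered above, the environments are constructed through
# the standard gym factory.
def _make_registered_env_example():
    import gym
    env = gym.make("MiniGrid-LavaCrossingS15N7-v0")
    return env.reset()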
| allenact-main | allenact_plugins/minigrid_plugin/minigrid_environments.py |
from allenact.utils.system import ImportChecker
with ImportChecker(
"\n\nPlease install babyai with:\n\n"
"pip install -e git+https://github.com/Lucaweihs/babyai.git@0b450eeb3a2dc7116c67900d51391986bdbb84cd#egg=babyai\n",
):
import babyai
| allenact-main | allenact_plugins/minigrid_plugin/__init__.py |
import math
import queue
import random
from collections import defaultdict
from typing import Dict, Tuple, Any, cast, List, Union, Optional
import babyai
import blosc
import numpy as np
import pickle5 as pickle
import torch
from gym_minigrid.minigrid import MiniGridEnv
from allenact.algorithms.offpolicy_sync.losses.abstract_offpolicy_loss import Memory
from allenact.algorithms.onpolicy_sync.policy import ObservationType
from allenact.algorithms.onpolicy_sync.storage import (
ExperienceStorage,
StreamingStorageMixin,
)
from allenact.base_abstractions.misc import GenericAbstractLoss, LossOutput, ModelType
from allenact.utils.misc_utils import partition_limits
from allenact.utils.system import get_logger
from allenact_plugins.minigrid_plugin.minigrid_sensors import MiniGridMissionSensor
_DATASET_CACHE: Dict[str, Any] = {}
class MiniGridOffPolicyExpertCELoss(GenericAbstractLoss):
def __init__(self, total_episodes_in_epoch: Optional[int] = None):
super().__init__()
self.total_episodes_in_epoch = total_episodes_in_epoch
def loss( # type: ignore
self,
*, # No positional arguments
model: ModelType,
batch: ObservationType,
batch_memory: Memory,
stream_memory: Memory,
) -> LossOutput:
rollout_len, nrollouts = cast(torch.Tensor, batch["minigrid_ego_image"]).shape[
:2
]
# Initialize Memory if empty
if len(stream_memory) == 0:
spec = model.recurrent_memory_specification
for key in spec:
dims_template, dtype = spec[key]
# get sampler_dim and all_dims from dims_template (and nrollouts)
dim_names = [d[0] for d in dims_template]
sampler_dim = dim_names.index("sampler")
all_dims = [d[1] for d in dims_template]
all_dims[sampler_dim] = nrollouts
stream_memory.check_append(
key=key,
tensor=torch.zeros(
*all_dims,
dtype=dtype,
device=cast(torch.Tensor, batch["minigrid_ego_image"]).device,
),
sampler_dim=sampler_dim,
)
# Forward data (through the actor and critic)
ac_out, stream_memory = model.forward(
observations=batch,
memory=stream_memory,
prev_actions=None, # type:ignore
masks=cast(torch.FloatTensor, batch["masks"]),
)
# Compute the loss from the actor's output and expert action
expert_ce_loss = -ac_out.distributions.log_prob(batch["expert_action"]).mean()
info = {"expert_ce": expert_ce_loss.item()}
return LossOutput(
value=expert_ce_loss,
info=info,
per_epoch_info={},
batch_memory=batch_memory,
stream_memory=stream_memory,
bsize=rollout_len * nrollouts,
)
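# A hedged sketch of the batch layout the loss above expects (keys inferred
# from the indexing in `loss`): batch["minigrid_ego_image"] is shaped
# [rollout_len, nrollouts, ...]; batch["masks"] zeroes the recurrent state at
# episode boundaries; and batch["expert_action"] holds the demonstrator's
# action at each step, making the loss a plain cross-entropy against the
# expert policy.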
def transform_demos(demos):
# A modified version of babyai.utils.demos.transform_demos
# where we use pickle 5 instead of standard pickle
new_demos = []
for demo in demos:
new_demo = []
mission = demo[0]
all_images = demo[1]
directions = demo[2]
actions = demo[3]
# First decompress the pickle
pickled_array = blosc.blosc_extension.decompress(all_images, False)
# ... and unpickle
all_images = pickle.loads(pickled_array)
n_observations = all_images.shape[0]
assert (
len(directions) == len(actions) == n_observations
), "error transforming demos"
for i in range(n_observations):
obs = {
"image": all_images[i],
"direction": directions[i],
"mission": mission,
}
action = actions[i]
done = i == n_observations - 1
new_demo.append((obs, action, done))
new_demos.append(new_demo)
return new_demos
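# A minimal round-trip sketch for `transform_demos` (the on-disk demo layout
# of (mission, compressed_images, directions, actions) is inferred from the
# unpacking above; all values here are fabricated):
def _transform_demos_example():
    images = np.zeros((2, 7, 7, 3), dtype=np.uint8)
    packed = blosc.compress(pickle.dumps(images, protocol=5), typesize=8)
    demo = ("go to the goal", packed, [0, 1], [2, 2])
    return transform_demos([demo])  # -> [[(obs0, 2, False), (obs1, 2, True)]]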
class MiniGridExpertTrajectoryStorage(ExperienceStorage, StreamingStorageMixin):
def __init__(
self,
data_path: str,
num_samplers: int,
rollout_len: int,
instr_len: Optional[int],
restrict_max_steps_in_dataset: Optional[int] = None,
device: torch.device = torch.device("cpu"),
):
super(MiniGridExpertTrajectoryStorage, self).__init__()
self.data_path = data_path
self._data: Optional[
List[Tuple[str, bytes, List[int], MiniGridEnv.Actions]]
] = None
self.restrict_max_steps_in_dataset = restrict_max_steps_in_dataset
self.original_num_samplers = num_samplers
self.num_samplers = num_samplers
self.rollout_len = rollout_len
self.instr_len = instr_len
self.current_worker = 0
self.num_workers = 1
self.minigrid_mission_sensor: Optional[MiniGridMissionSensor] = None
if instr_len is not None:
self.minigrid_mission_sensor = MiniGridMissionSensor(instr_len)
self.rollout_queues = []
self._remaining_inds = []
self.sampler_to_num_steps_in_queue = []
self._total_experiences = 0
self.device = device
@property
def data(self) -> List[Tuple[str, bytes, List[int], MiniGridEnv.Actions]]:
if self._data is None:
if self.data_path not in _DATASET_CACHE:
get_logger().info(
f"Loading minigrid dataset from {self.data_path} for first time..."
)
_DATASET_CACHE[self.data_path] = babyai.utils.load_demos(self.data_path)
assert (
_DATASET_CACHE[self.data_path] is not None
and len(_DATASET_CACHE[self.data_path]) != 0
)
get_logger().info(
"Loading minigrid dataset complete, it contains {} trajectories".format(
len(_DATASET_CACHE[self.data_path])
)
)
self._data = _DATASET_CACHE[self.data_path]
if self.restrict_max_steps_in_dataset is not None:
restricted_data = []
cur_len = 0
for i, d in enumerate(self._data):
if cur_len >= self.restrict_max_steps_in_dataset:
break
restricted_data.append(d)
cur_len += len(d[2])
self._data = restricted_data
parts = partition_limits(len(self._data), self.num_workers)
self._data = self._data[
parts[self.current_worker] : parts[self.current_worker + 1]
]
self.rollout_queues = [queue.Queue() for _ in range(self.num_samplers)]
self.sampler_to_num_steps_in_queue = [0 for _ in range(self.num_samplers)]
for it, q in enumerate(self.rollout_queues):
self._fill_rollout_queue(q, it)
return self._data
def set_partition(self, index: int, num_parts: int):
self.current_worker = index
self.num_workers = num_parts
self.num_samplers = int(math.ceil(self.original_num_samplers / num_parts))
self._data = None
for q in self.rollout_queues:
try:
while True:
q.get_nowait()
except queue.Empty:
pass
self.rollout_queues = []
def initialize(self, *, observations: ObservationType, **kwargs):
self.reset_stream()
assert len(self.data) != 0
def add(
self,
observations: ObservationType,
memory: Optional[Memory],
actions: torch.Tensor,
action_log_probs: torch.Tensor,
value_preds: torch.Tensor,
rewards: torch.Tensor,
masks: torch.Tensor,
):
pass
def to(self, device: torch.device):
self.device = device
@property
def total_experiences(self) -> int:
return self._total_experiences
def reset_stream(self):
self.set_partition(index=self.current_worker, num_parts=self.num_workers)
def empty(self) -> bool:
return False
def _get_next_ind(self):
if len(self._remaining_inds) == 0:
self._remaining_inds = list(range(len(self.data)))
random.shuffle(self._remaining_inds)
return self._remaining_inds.pop()
def _fill_rollout_queue(self, q: queue.Queue, sampler: int):
assert q.empty()
while self.sampler_to_num_steps_in_queue[sampler] < self.rollout_len:
next_ind = self._get_next_ind()
for i, step in enumerate(transform_demos([self.data[next_ind]])[0]):
q.put((*step, i == 0))
self.sampler_to_num_steps_in_queue[sampler] += 1
return True
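    # Each queue entry is a 4-tuple (observation, expert_action, done, is_first_obs);
    # `is_first_obs` marks episode starts so that step masks can be derived below.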
def get_data_for_rollout_ind(self, sampler_ind: int) -> Dict[str, np.ndarray]:
masks: List[bool] = []
minigrid_ego_image = []
minigrid_mission = []
expert_actions = []
q = self.rollout_queues[sampler_ind]
while len(masks) != self.rollout_len:
if q.empty():
assert self.sampler_to_num_steps_in_queue[sampler_ind] == 0
self._fill_rollout_queue(q, sampler_ind)
obs, expert_action, _, is_first_obs = cast(
Tuple[
                    Dict[str, Union[np.ndarray, int, str]],
MiniGridEnv.Actions,
bool,
bool,
],
q.get_nowait(),
)
self.sampler_to_num_steps_in_queue[sampler_ind] -= 1
masks.append(not is_first_obs)
minigrid_ego_image.append(obs["image"])
if self.minigrid_mission_sensor is not None:
# noinspection PyTypeChecker
minigrid_mission.append(
self.minigrid_mission_sensor.get_observation(
env=None, task=None, minigrid_output_obs=obs
)
)
expert_actions.append([expert_action])
to_return = {
"masks": torch.tensor(masks, device=self.device, dtype=torch.float32).view(
self.rollout_len, 1 # steps x mask
),
"minigrid_ego_image": torch.stack(
[torch.tensor(img, device=self.device) for img in minigrid_ego_image],
dim=0,
), # steps x height x width x channels
"expert_action": torch.tensor(
expert_actions, device=self.device, dtype=torch.int64
).view(
self.rollout_len # steps
),
}
if self.minigrid_mission_sensor is not None:
to_return["minigrid_mission"] = torch.stack(
[torch.tensor(m, device=self.device) for m in minigrid_mission], dim=0
) # steps x mission_dims
return to_return
def next_batch(self) -> Dict[str, torch.Tensor]:
all_data = defaultdict(lambda: [])
for rollout_ind in range(self.num_samplers):
data_for_ind = self.get_data_for_rollout_ind(sampler_ind=rollout_ind)
for key in data_for_ind:
all_data[key].append(data_for_ind[key])
self._total_experiences += self.num_samplers * self.rollout_len
return {
key: torch.stack(all_data[key], dim=1,) # new sampler dim
for key in all_data
}
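# Construction sketch (the demo path below is illustrative, not shipped with the repo):
#   storage = MiniGridExpertTrajectoryStorage(
#       data_path="datasets/babyai/demos.pkl",
#       num_samplers=8, rollout_len=128, instr_len=5,
#   )
#   batch = storage.next_batch()  # keys: "masks", "minigrid_ego_image", "expert_action", ...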
| allenact-main | allenact_plugins/minigrid_plugin/minigrid_offpolicy.py |
from typing import Dict, Optional, Any, cast
import gym
import gym_minigrid.minigrid
import numpy as np
import torch
from babyai.utils.format import InstructionsPreprocessor
from gym_minigrid.minigrid import MiniGridEnv
from allenact.base_abstractions.sensor import Sensor, prepare_locals_for_super
from allenact.base_abstractions.task import Task, SubTaskType
# fmt: off
ALL_VOCAB_TOKENS = [
"a", "after", "and", "ball", "behind", "blue", "box",
"door", "front", "go", "green", "grey", "in", "key",
"left", "next", "of", "on", "open", "pick", "purple",
"put", "red", "right", "the", "then", "to", "up", "yellow",
"you", "your",
]
# fmt: on
class EgocentricMiniGridSensor(Sensor[MiniGridEnv, Task[MiniGridEnv]]):
def __init__(
self,
agent_view_size: int,
view_channels: int = 1,
uuid: str = "minigrid_ego_image",
**kwargs: Any
):
self.agent_view_size = agent_view_size
self.view_channels = view_channels
self.num_objects = (
cast(
int, max(map(abs, gym_minigrid.minigrid.OBJECT_TO_IDX.values())) # type: ignore
)
+ 1
)
self.num_colors = (
cast(int, max(map(abs, gym_minigrid.minigrid.COLOR_TO_IDX.values()))) # type: ignore
+ 1
)
self.num_states = (
cast(int, max(map(abs, gym_minigrid.minigrid.STATE_TO_IDX.values()))) # type: ignore
+ 1
)
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self) -> gym.Space:
return gym.spaces.Box(
low=0,
high=max(self.num_objects, self.num_colors, self.num_states) - 1,
shape=(self.agent_view_size, self.agent_view_size, self.view_channels),
dtype=int,
)
def get_observation(
self,
env: MiniGridEnv,
task: Optional[SubTaskType],
*args,
        minigrid_output_obs: Optional[Dict[str, Any]] = None,
**kwargs: Any
) -> Any:
        # The minigrid image always has a trailing channel dimension, so we compare
        # only its spatial dimensions against the expected view size.
        if minigrid_output_obs is not None and minigrid_output_obs["image"].shape[:2] == (
            self.agent_view_size,
            self.agent_view_size,
        ):
img = minigrid_output_obs["image"][:, :, : self.view_channels]
else:
env.agent_view_size = self.agent_view_size
img = env.gen_obs()["image"][:, :, : self.view_channels]
assert img.dtype == np.uint8
return img
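# Example (sketch): with agent_view_size=7 and view_channels=3, the sensor above
# returns a (7, 7, 3) uint8 array of per-cell object/color/state indices.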
class MiniGridMissionSensor(Sensor[MiniGridEnv, Task[MiniGridEnv]]):
def __init__(self, instr_len: int, uuid: str = "minigrid_mission", **kwargs: Any):
self.instr_preprocessor = InstructionsPreprocessor(
model_name="TMP_SENSOR", load_vocab_from=None
)
# We initialize the vocabulary with a fixed collection of tokens
# and then ensure that the size cannot exceed this number. This
# guarantees that sensors on all processes will produce the same
# values.
for token in ALL_VOCAB_TOKENS:
_ = self.instr_preprocessor.vocab[token]
self.instr_preprocessor.vocab.max_size = len(ALL_VOCAB_TOKENS)
self.instr_len = instr_len
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self) -> gym.Space:
return gym.spaces.Box(
low=0,
high=self.instr_preprocessor.vocab.max_size,
shape=(self.instr_len,),
dtype=int,
)
def get_observation(
self,
env: MiniGridEnv,
task: Optional[SubTaskType],
*args,
        minigrid_output_obs: Optional[Dict[str, Any]] = None,
**kwargs: Any
) -> Any:
if minigrid_output_obs is None:
minigrid_output_obs = env.gen_obs()
out = self.instr_preprocessor([minigrid_output_obs]).view(-1)
n: int = out.shape[0]
if n > self.instr_len:
out = out[: self.instr_len]
elif n < self.instr_len:
out = torch.nn.functional.pad(
input=out, pad=[0, self.instr_len - n], value=0,
)
return out.long().numpy()
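# Padding/truncation sketch: with instr_len=5, a 5-token mission is returned as-is,
# shorter missions are right-padded with zeros, and longer ones are truncated to the
# first 5 token ids.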
| allenact-main | allenact_plugins/minigrid_plugin/minigrid_sensors.py |
import abc
from typing import Callable, Dict, Optional, Tuple, cast
import gym
import numpy as np
import torch
from gym.spaces.dict import Dict as SpaceDict
import torch.nn as nn
from allenact.algorithms.onpolicy_sync.policy import (
ActorCriticModel,
Memory,
DistributionType,
ActorCriticOutput,
ObservationType,
)
from allenact.base_abstractions.distributions import Distr, CategoricalDistr
from allenact.embodiedai.models.basic_models import LinearActorCritic, RNNActorCritic
from allenact.utils.misc_utils import prepare_locals_for_super
class MiniGridSimpleConvBase(ActorCriticModel[Distr], abc.ABC):
actor_critic: ActorCriticModel
def __init__(
self,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
num_objects: int,
num_colors: int,
num_states: int,
object_embedding_dim: int = 8,
**kwargs,
):
super().__init__(action_space=action_space, observation_space=observation_space)
self.num_objects = num_objects
self.object_embedding_dim = object_embedding_dim
vis_input_shape = observation_space["minigrid_ego_image"].shape
agent_view_x, agent_view_y, view_channels = vis_input_shape
assert agent_view_x == agent_view_y
self.agent_view = agent_view_x
self.view_channels = view_channels
        assert (np.array(vis_input_shape[:2]) >= 3).all(), (
            "MiniGridSimpleConvRNN requires that the input size be at least 3x3."
        )
self.num_channels = 0
if self.num_objects > 0:
# Object embedding
self.object_embedding = nn.Embedding(
num_embeddings=num_objects, embedding_dim=self.object_embedding_dim
)
self.object_channel = self.num_channels
self.num_channels += 1
self.num_colors = num_colors
if self.num_colors > 0:
# Same dimensionality used for colors and states
self.color_embedding = nn.Embedding(
num_embeddings=num_colors, embedding_dim=self.object_embedding_dim
)
self.color_channel = self.num_channels
self.num_channels += 1
self.num_states = num_states
if self.num_states > 0:
self.state_embedding = nn.Embedding(
num_embeddings=num_states, embedding_dim=self.object_embedding_dim
)
self.state_channel = self.num_channels
self.num_channels += 1
assert self.num_channels == self.view_channels > 0
self.ac_key = "enc"
self.observations_for_ac: Dict[str, Optional[torch.Tensor]] = {
self.ac_key: None
}
self.num_agents = 1
def forward( # type:ignore
self,
observations: ObservationType,
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
minigrid_ego_image = cast(torch.Tensor, observations["minigrid_ego_image"])
        use_agent = len(minigrid_ego_image.shape) == 6
nrow, ncol, nchannels = minigrid_ego_image.shape[-3:]
nsteps, nsamplers, nagents = masks.shape[:3]
assert nrow == ncol == self.agent_view
assert nchannels == self.view_channels == self.num_channels
embed_list = []
if self.num_objects > 0:
ego_object_embeds = self.object_embedding(
minigrid_ego_image[..., self.object_channel].long()
)
embed_list.append(ego_object_embeds)
if self.num_colors > 0:
ego_color_embeds = self.color_embedding(
minigrid_ego_image[..., self.color_channel].long()
)
embed_list.append(ego_color_embeds)
if self.num_states > 0:
ego_state_embeds = self.state_embedding(
minigrid_ego_image[..., self.state_channel].long()
)
embed_list.append(ego_state_embeds)
ego_embeds = torch.cat(embed_list, dim=-1)
if use_agent:
self.observations_for_ac[self.ac_key] = ego_embeds.view(
nsteps, nsamplers, nagents, -1
)
else:
self.observations_for_ac[self.ac_key] = ego_embeds.view(
nsteps, nsamplers * nagents, -1
)
# noinspection PyCallingNonCallable
out, mem_return = self.actor_critic(
observations=self.observations_for_ac,
memory=memory,
prev_actions=prev_actions,
masks=masks,
)
self.observations_for_ac[self.ac_key] = None
return out, mem_return
class MiniGridSimpleConvRNN(MiniGridSimpleConvBase):
def __init__(
self,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
num_objects: int,
num_colors: int,
num_states: int,
object_embedding_dim: int = 8,
hidden_size=512,
num_layers=1,
rnn_type="GRU",
head_type: Callable[
..., ActorCriticModel[CategoricalDistr]
] = LinearActorCritic,
**kwargs,
):
super().__init__(**prepare_locals_for_super(locals()))
self._hidden_size = hidden_size
agent_view_x, agent_view_y, view_channels = observation_space[
"minigrid_ego_image"
].shape
self.actor_critic = RNNActorCritic(
input_uuid=self.ac_key,
action_space=action_space,
observation_space=SpaceDict(
{
self.ac_key: gym.spaces.Box(
low=np.float32(-1.0),
high=np.float32(1.0),
shape=(
self.object_embedding_dim
* agent_view_x
* agent_view_y
* view_channels,
),
)
}
),
hidden_size=hidden_size,
num_layers=num_layers,
rnn_type=rnn_type,
head_type=head_type,
)
self.memory_key = "rnn"
self.train()
@property
def num_recurrent_layers(self):
return self.actor_critic.num_recurrent_layers
@property
def recurrent_hidden_state_size(self):
return self._hidden_size
def _recurrent_memory_specification(self):
return {
self.memory_key: (
(
("layer", self.num_recurrent_layers),
("sampler", None),
("hidden", self.recurrent_hidden_state_size),
),
torch.float32,
)
}
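# The memory specification above declares a single "rnn" tensor of shape
# (num_layers, num_samplers, hidden_size); the "sampler" dimension is left as None so
# the engine can substitute the actual number of samplers at runtime.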
class MiniGridSimpleConv(MiniGridSimpleConvBase):
def __init__(
self,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
num_objects: int,
num_colors: int,
num_states: int,
object_embedding_dim: int = 8,
**kwargs,
):
super().__init__(**prepare_locals_for_super(locals()))
agent_view_x, agent_view_y, view_channels = observation_space[
"minigrid_ego_image"
].shape
self.actor_critic = LinearActorCritic(
self.ac_key,
action_space=action_space,
observation_space=SpaceDict(
{
self.ac_key: gym.spaces.Box(
low=np.float32(-1.0),
high=np.float32(1.0),
shape=(
self.object_embedding_dim
* agent_view_x
* agent_view_y
* view_channels,
),
)
}
),
)
self.memory_key = None
self.train()
@property
def num_recurrent_layers(self):
return 0
@property
def recurrent_hidden_state_size(self):
return 0
# noinspection PyMethodMayBeStatic
def _recurrent_memory_specification(self):
return None
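# Dimensionality sketch: each of the view_channels channels is embedded to
# object_embedding_dim features and concatenated, so a 7x7x3 egocentric view with the
# default embedding size of 8 becomes a flat vector of 7 * 7 * 3 * 8 = 1176 features
# fed to the linear (or recurrent) actor-critic head.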
| allenact-main | allenact_plugins/minigrid_plugin/minigrid_models.py |
allenact-main | allenact_plugins/minigrid_plugin/configs/__init__.py |
|
"""Experiment Config for MiniGrid tutorial."""
import gym
import torch.nn as nn
from allenact.base_abstractions.sensor import SensorSuite
from allenact_plugins.minigrid_plugin.minigrid_models import MiniGridSimpleConv
from allenact_plugins.minigrid_plugin.minigrid_tasks import MiniGridTask
from projects.tutorials.minigrid_tutorial import MiniGridTutorialExperimentConfig
class MiniGridNoMemoryExperimentConfig(MiniGridTutorialExperimentConfig):
@classmethod
def tag(cls) -> str:
return "MiniGridNoMemory"
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return MiniGridSimpleConv(
action_space=gym.spaces.Discrete(len(MiniGridTask.class_action_names())),
observation_space=SensorSuite(cls.SENSORS).observation_spaces,
num_objects=cls.SENSORS[0].num_objects,
num_colors=cls.SENSORS[0].num_colors,
num_states=cls.SENSORS[0].num_states,
)
| allenact-main | allenact_plugins/minigrid_plugin/configs/minigrid_nomemory.py |
allenact-main | allenact_plugins/minigrid_plugin/scripts/__init__.py |
|
allenact-main | allenact_plugins/minigrid_plugin/data/__init__.py |
|
"""Utility functions and classes for visualization and logging."""
import os
from datetime import datetime
import cv2
import imageio
import matplotlib
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
from allenact_plugins.manipulathor_plugin.manipulathor_utils import (
    initialize_arm,
    reset_environment_and_additional_commands,
    transport_wrapper,
)
class LoggerVisualizer:
def __init__(self, exp_name="", log_dir=""):
if log_dir == "":
log_dir = self.__class__.__name__
if exp_name == "":
exp_name = "NoNameExp"
self.exp_name = exp_name
log_dir = os.path.join(exp_name, log_dir,)
self.log_dir = log_dir
os.makedirs(self.log_dir, exist_ok=True)
self.log_queue = []
self.action_queue = []
self.logger_index = 0
def log(self, environment, action_str):
raise Exception("Not Implemented")
def is_empty(self):
return len(self.log_queue) == 0
def finish_episode_metrics(self, episode_info, task_info, metric_results):
pass
def finish_episode(self, environment, episode_info, task_info):
pass
class TestMetricLogger(LoggerVisualizer):
def __init__(self, exp_name="", log_dir="", **kwargs):
super().__init__(exp_name=exp_name, log_dir=log_dir)
self.total_metric_dict = {}
log_file_name = os.path.join(self.log_dir, "test_metric.txt")
self.metric_log_file = open(log_file_name, "w")
self.disturbance_distance_queue = []
def average_dict(self):
result = {}
for (k, v) in self.total_metric_dict.items():
result[k] = sum(v) / len(v)
return result
def finish_episode_metrics(self, episode_info, task_info, metric_results=None):
if metric_results is None:
print("had to reset")
self.action_queue = []
self.disturbance_distance_queue = []
return
for k in metric_results.keys():
if "metric" in k or k in ["ep_length", "reward", "success"]:
self.total_metric_dict.setdefault(k, [])
self.total_metric_dict[k].append(metric_results[k])
print(
"total",
len(self.total_metric_dict["success"]),
"average test metric",
self.average_dict(),
)
# save the task info and all the action queue and results
log_dict = {
"logger_number": self.logger_index,
"action_sequence": self.action_queue,
"disturbance_sequence": self.disturbance_distance_queue,
"task_info_metrics": metric_results,
}
self.logger_index += 1
self.metric_log_file.write(str(log_dict))
self.metric_log_file.write("\n")
self.metric_log_file.flush()
print("Logging to", self.metric_log_file.name)
self.action_queue = []
self.disturbance_distance_queue = []
def log(self, environment, action_str="", disturbance_str=""):
# We can add agent arm and state location if needed
self.action_queue.append(action_str)
self.disturbance_distance_queue.append(disturbance_str)
class BringObjImageVisualizer(LoggerVisualizer):
def finish_episode(self, environment, episode_info, task_info):
now = datetime.now()
time_to_write = now.strftime("%Y_%m_%d_%H_%M_%S_%f")
time_to_write += "log_ind_{}".format(self.logger_index)
self.logger_index += 1
print("Loggigng", time_to_write, "len", len(self.log_queue))
source_object_id = task_info["source_object_id"]
goal_object_id = task_info["goal_object_id"]
pickup_success = episode_info.object_picked_up
episode_success = episode_info._success
# Put back if you want the images
# for i, img in enumerate(self.log_queue):
# image_dir = os.path.join(self.log_dir, time_to_write + '_seq{}.png'.format(str(i)))
# cv2.imwrite(image_dir, img[:,:,[2,1,0]])
episode_success_offset = "succ" if episode_success else "fail"
pickup_success_offset = "succ" if pickup_success else "fail"
gif_name = (
time_to_write
+ "_from_"
+ source_object_id.split("|")[0]
+ "_to_"
+ goal_object_id.split("|")[0]
+ "_pickup_"
+ pickup_success_offset
+ "_episode_"
+ episode_success_offset
+ ".gif"
)
concat_all_images = np.expand_dims(np.stack(self.log_queue, axis=0), axis=1)
save_image_list_to_gif(concat_all_images, gif_name, self.log_dir)
this_controller = environment.controller
scene = this_controller.last_event.metadata["sceneName"]
reset_environment_and_additional_commands(this_controller, scene)
self.log_start_goal(
environment,
task_info["visualization_source"],
tag="start",
img_adr=os.path.join(self.log_dir, time_to_write),
)
self.log_start_goal(
environment,
task_info["visualization_target"],
tag="goal",
img_adr=os.path.join(self.log_dir, time_to_write),
)
self.log_queue = []
self.action_queue = []
def log(self, environment, action_str):
image_tensor = environment.current_frame
self.action_queue.append(action_str)
self.log_queue.append(image_tensor)
def log_start_goal(self, env, task_info, tag, img_adr):
object_location = task_info["object_location"]
object_id = task_info["object_id"]
agent_state = task_info["agent_pose"]
this_controller = env.controller
# We should not reset here
        # To start the arm from high up (a shortcut), this block is very important; never remove it
event1, event2, event3 = initialize_arm(this_controller)
if not (
event1.metadata["lastActionSuccess"]
and event2.metadata["lastActionSuccess"]
and event3.metadata["lastActionSuccess"]
):
print("ERROR: ARM MOVEMENT FAILED in logging! SHOULD NEVER HAPPEN")
event = transport_wrapper(this_controller, object_id, object_location)
if not event.metadata["lastActionSuccess"]:
print("ERROR: oh no could not transport in logging")
event = this_controller.step(
dict(
action="TeleportFull",
standing=True,
x=agent_state["position"]["x"],
y=agent_state["position"]["y"],
z=agent_state["position"]["z"],
rotation=dict(
x=agent_state["rotation"]["x"],
y=agent_state["rotation"]["y"],
z=agent_state["rotation"]["z"],
),
horizon=agent_state["cameraHorizon"],
)
)
if not event.metadata["lastActionSuccess"]:
print("ERROR: oh no could not teleport in logging")
image_tensor = this_controller.last_event.frame
image_dir = (
img_adr + "_obj_" + object_id.split("|")[0] + "_pickup_" + tag + ".png"
)
cv2.imwrite(image_dir, image_tensor[:, :, [2, 1, 0]])
# Saving the mask
target_object_id = task_info["object_id"]
all_visible_masks = this_controller.last_event.instance_masks
if target_object_id in all_visible_masks:
mask_frame = all_visible_masks[target_object_id]
else:
mask_frame = np.zeros(env.controller.last_event.frame[:, :, 0].shape)
mask_dir = (
img_adr + "_obj_" + object_id.split("|")[0] + "_pickup_" + tag + "_mask.png"
)
cv2.imwrite(mask_dir, mask_frame.astype(float) * 255.0)
class ImageVisualizer(LoggerVisualizer):
def __init__(
self,
exp_name="",
log_dir="",
add_top_down_view: bool = False,
add_depth_map: bool = False,
):
super().__init__(exp_name=exp_name, log_dir=log_dir)
self.add_top_down_view = add_top_down_view
self.add_depth_map = add_depth_map
if self.add_top_down_view:
self.top_down_queue = []
self.disturbance_distance_queue = []
def finish_episode(self, environment, episode_info, task_info):
time_to_write = "log_ind_{:03d}".format(self.logger_index)
self.logger_index += 1
print("Logging", time_to_write, "len", len(self.log_queue))
object_id = task_info["objectId"]
scene_name = task_info["source_location"]["scene_name"]
source_countertop = task_info["source_location"]["countertop_id"]
target_countertop = task_info["target_location"]["countertop_id"]
pickup_success = episode_info.object_picked_up
episode_success = episode_info._success
# Put back if you want the images
# for i, img in enumerate(self.log_queue):
# image_dir = os.path.join(self.log_dir, time_to_write + '_seq{}.png'.format(str(i)))
# cv2.imwrite(image_dir, img[:,:,[2,1,0]])
episode_success_offset = "succ" if episode_success else "fail"
pickup_success_offset = "succ" if pickup_success else "fail"
gif_name = (
time_to_write
+ "_pickup_"
+ pickup_success_offset
+ "_episode_"
+ episode_success_offset
+ "_"
+ scene_name.split("_")[0]
+ "_obj_"
+ object_id.split("|")[0]
+ "_from_"
+ source_countertop.split("|")[0]
+ "_to_"
+ target_countertop.split("|")[0]
+ ".gif"
)
self.log_queue = put_annotation_on_image(
self.log_queue, self.disturbance_distance_queue
)
concat_all_images = np.expand_dims(np.stack(self.log_queue, axis=0), axis=1)
if self.add_top_down_view:
topdown_all_images = np.expand_dims(
np.stack(self.top_down_queue, axis=0), axis=1
) # (T, 1, H, W, 3)
concat_all_images = np.concatenate(
[concat_all_images, topdown_all_images], axis=1
) # (T, 2, H, W, 3)
save_image_list_to_gif(concat_all_images, gif_name, self.log_dir)
self.log_start_goal(
environment,
task_info["visualization_source"],
tag="start",
img_adr=os.path.join(self.log_dir, time_to_write),
)
self.log_start_goal(
environment,
task_info["visualization_target"],
tag="goal",
img_adr=os.path.join(self.log_dir, time_to_write),
)
self.log_queue = []
self.action_queue = []
self.disturbance_distance_queue = []
if self.add_top_down_view:
self.top_down_queue = []
def log(self, environment, action_str="", disturbance_str=""):
self.action_queue.append(action_str)
self.disturbance_distance_queue.append(disturbance_str)
image_tensor = environment.current_frame
self.log_queue.append(image_tensor)
if self.add_top_down_view:
# Reference: https://github.com/allenai/ai2thor/pull/814
event = environment.controller.step(action="GetMapViewCameraProperties")
event = environment.controller.step(
action="AddThirdPartyCamera", **event.metadata["actionReturn"]
)
self.top_down_queue.append(event.third_party_camera_frames[0])
def log_start_goal(self, env, task_info, tag, img_adr):
object_location = task_info["object_location"]
object_id = task_info["object_id"]
agent_state = task_info["agent_pose"]
this_controller = env.controller
        scene = this_controller.last_event.metadata["sceneName"]
reset_environment_and_additional_commands(this_controller, scene)
        # To start the arm from high up (a shortcut), this block is very important; never remove it
event1, event2, event3 = initialize_arm(this_controller)
if not (
event1.metadata["lastActionSuccess"]
and event2.metadata["lastActionSuccess"]
and event3.metadata["lastActionSuccess"]
):
print("ERROR: ARM MOVEMENT FAILED in logging! SHOULD NEVER HAPPEN")
event = transport_wrapper(this_controller, object_id, object_location)
if not event.metadata["lastActionSuccess"]:
print("ERROR: oh no could not transport in logging")
event = this_controller.step(
dict(
action="TeleportFull",
standing=True,
x=agent_state["position"]["x"],
y=agent_state["position"]["y"],
z=agent_state["position"]["z"],
rotation=dict(
x=agent_state["rotation"]["x"],
y=agent_state["rotation"]["y"],
z=agent_state["rotation"]["z"],
),
horizon=agent_state["cameraHorizon"],
)
)
if not event.metadata["lastActionSuccess"]:
print("ERROR: oh no could not teleport in logging")
image_tensor = this_controller.last_event.frame
image_dir = img_adr + "_" + tag + ".png"
cv2.imwrite(image_dir, image_tensor[:, :, [2, 1, 0]])
if self.add_depth_map:
depth = this_controller.last_event.depth_frame.copy() # (H, W)
depth[depth > 5.0] = 5.0
norm = matplotlib.colors.Normalize(vmin=depth.min(), vmax=depth.max())
rgb = cm.get_cmap(plt.get_cmap("viridis"))(norm(depth))[:, :, :3] # [0,1]
rgb = (rgb * 255).astype(np.uint8)
depth_dir = img_adr + "_" + tag + "_depth.png"
cv2.imwrite(depth_dir, rgb[:, :, [2, 1, 0]])
def save_image_list_to_gif(image_list, gif_name, gif_dir):
gif_adr = os.path.join(gif_dir, gif_name)
seq_len, cols, w, h, c = image_list.shape
pallet = np.zeros(
(seq_len, w, h * cols, c)
) # to support multiple animations in one gif
for col_ind in range(cols):
pallet[:, :, col_ind * h : (col_ind + 1) * h, :] = image_list[:, col_ind]
if not os.path.exists(gif_dir):
os.makedirs(gif_dir)
imageio.mimsave(gif_adr, pallet.astype(np.uint8), format="GIF", duration=1 / 5)
print("Saved result in ", gif_adr)
def put_annotation_on_image(images, annotations):
    from PIL import Image, ImageDraw

    all_images = []
    for img, annot in zip(images, annotations):
        position = (10, 10)
        pil_img = Image.fromarray(img)
        draw = ImageDraw.Draw(pil_img)
        draw.text(position, annot, (0, 0, 0))
        all_images.append(np.array(pil_img))
    return all_images
| allenact-main | allenact_plugins/manipulathor_plugin/manipulathor_viz.py |
"""Task Definions for the task of ArmPointNav."""
import copy
from typing import Dict, Tuple, List, Any, Optional
import gym
import numpy as np
from allenact.base_abstractions.misc import RLStepResult
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import Task
from allenact_plugins.manipulathor_plugin.armpointnav_constants import (
MOVE_ARM_CONSTANT,
DISTANCE_EPS,
)
from allenact_plugins.manipulathor_plugin.manipulathor_constants import (
MOVE_ARM_HEIGHT_P,
MOVE_ARM_HEIGHT_M,
MOVE_ARM_X_P,
MOVE_ARM_X_M,
MOVE_ARM_Y_P,
MOVE_ARM_Y_M,
MOVE_ARM_Z_P,
MOVE_ARM_Z_M,
ROTATE_WRIST_PITCH_P,
ROTATE_WRIST_PITCH_M,
ROTATE_WRIST_YAW_P,
ROTATE_WRIST_YAW_M,
ROTATE_ELBOW_P,
ROTATE_ELBOW_M,
LOOK_UP,
LOOK_DOWN,
MOVE_AHEAD,
ROTATE_RIGHT,
ROTATE_LEFT,
PICKUP,
DONE,
)
from allenact_plugins.manipulathor_plugin.manipulathor_environment import (
ManipulaTHOREnvironment,
position_distance,
)
from allenact_plugins.manipulathor_plugin.manipulathor_viz import LoggerVisualizer
class AbstractPickUpDropOffTask(Task[ManipulaTHOREnvironment]):
_actions = (
MOVE_ARM_HEIGHT_P,
MOVE_ARM_HEIGHT_M,
MOVE_ARM_X_P,
MOVE_ARM_X_M,
MOVE_ARM_Y_P,
MOVE_ARM_Y_M,
MOVE_ARM_Z_P,
MOVE_ARM_Z_M,
MOVE_AHEAD,
ROTATE_RIGHT,
ROTATE_LEFT,
)
    # Newer AI2-THOR builds have an issue where objects vibrate slightly even without
    # any external force. To ignore such spurious movements when checking for
    # disturbance, per-object vibration thresholds (e.g. loaded from an external csv
    # file) can be provided via `_vibration_dist_dict`. By default it is None, i.e.
    # we assume there is no vibration.
def __init__(
self,
env: ManipulaTHOREnvironment,
sensors: List[Sensor],
task_info: Dict[str, Any],
max_steps: int,
visualizers: Optional[List[LoggerVisualizer]] = None,
**kwargs
) -> None:
"""Initializer.
See class documentation for parameter definitions.
"""
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self._took_end_action: bool = False
self._success: Optional[bool] = False
self._subsampled_locations_from_which_obj_visible: Optional[
List[Tuple[float, float, int, int]]
] = None
self.visualizers = visualizers if visualizers is not None else []
self.start_visualize()
self.action_sequence_and_success = []
        # (elsewhere in AllenAct these are initialized to 0.2; None means the first
        # distance-shaping term in `judge` is skipped)
self.last_obj_to_goal_distance = None
self.last_arm_to_obj_distance = None
self.object_picked_up = False
self.got_reward_for_pickup = False
self.reward_configs = kwargs["reward_configs"]
self.initial_object_locations = self.env.get_current_object_locations()
@property
def action_space(self):
return gym.spaces.Discrete(len(self._actions))
def reached_terminal_state(self) -> bool:
return self._took_end_action
@classmethod
def class_action_names(cls, **kwargs) -> Tuple[str, ...]:
return cls._actions
def close(self) -> None:
self.env.stop()
def obj_state_aproximity(self, s1, s2):
        # NOTE: rotation is ignored for now; only positions are compared
position1 = s1["position"]
position2 = s2["position"]
eps = MOVE_ARM_CONSTANT * 2
return (
abs(position1["x"] - position2["x"]) < eps
and abs(position1["y"] - position2["y"]) < eps
and abs(position1["z"] - position2["z"]) < eps
)
def start_visualize(self):
for visualizer in self.visualizers:
if not visualizer.is_empty():
print("OH NO VISUALIZER WAS NOT EMPTY")
visualizer.finish_episode(self.env, self, self.task_info)
visualizer.finish_episode_metrics(self, self.task_info, None)
visualizer.log(self.env)
def visualize(self, action_str):
        for visualizer in self.visualizers:
            visualizer.log(self.env, action_str)
def finish_visualizer(self):
for visualizer in self.visualizers:
visualizer.finish_episode(self.env, self, self.task_info)
def finish_visualizer_metrics(self, metric_results):
for visualizer in self.visualizers:
visualizer.finish_episode_metrics(self, self.task_info, metric_results)
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
assert mode == "rgb", "only rgb rendering is implemented"
return self.env.current_frame
def calc_action_stat_metrics(self) -> Dict[str, Any]:
action_stat = {"action_stat/" + action_str: 0.0 for action_str in self._actions}
action_success_stat = {
"action_success/" + action_str: 0.0 for action_str in self._actions
}
action_success_stat["action_success/total"] = 0.0
seq_len = len(self.action_sequence_and_success)
for (action_name, action_success) in self.action_sequence_and_success:
action_stat["action_stat/" + action_name] += 1.0
action_success_stat[
"action_success/{}".format(action_name)
] += action_success
action_success_stat["action_success/total"] += action_success
action_success_stat["action_success/total"] /= seq_len
for action_name in self._actions:
action_success_stat["action_success/{}".format(action_name)] /= max(
action_stat["action_stat/" + action_name], 1.0
)
action_stat["action_stat/" + action_name] /= seq_len
result = {**action_stat, **action_success_stat}
return result
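    # Sketch of the keys returned above: for each action A, "action_stat/A" is the
    # fraction of episode steps on which A was taken and "action_success/A" its
    # per-action success rate; "action_success/total" is the overall success rate.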
def metrics(self) -> Dict[str, Any]:
result = super(AbstractPickUpDropOffTask, self).metrics()
if self.is_done():
result = {**result, **self.calc_action_stat_metrics()}
# 1. goal object metrics
final_obj_distance_from_goal = self.obj_distance_from_goal()
result[
"average/final_obj_distance_from_goal"
] = final_obj_distance_from_goal
final_arm_distance_from_obj = self.arm_distance_from_obj()
result["average/final_arm_distance_from_obj"] = final_arm_distance_from_obj
final_obj_pickup = 1 if self.object_picked_up else 0
result["average/final_obj_pickup"] = final_obj_pickup
original_distance = self.get_original_object_distance() + DISTANCE_EPS
result["average/original_distance"] = original_distance
# this ratio can be more than 1
if self.object_picked_up:
ratio_distance_left = final_obj_distance_from_goal / original_distance
result["average/ratio_distance_left"] = ratio_distance_left
result["average/eplen_pickup"] = self.eplen_pickup
# 2. disturbance with other objects
current_object_locations = self.env.get_current_object_locations()
objects_moved = self.env.get_objects_moved(
self.initial_object_locations,
current_object_locations,
self.task_info["objectId"],
self._vibration_dist_dict,
)
result["disturbance/objects_moved_num"] = len(objects_moved)
# 3. conditioned on success
if self._success:
result["average/eplen_success"] = result["ep_length"]
result["average/success_wo_disturb"] = len(objects_moved) == 0
else:
result["average/success_wo_disturb"] = 0.0
result["success"] = self._success
self.finish_visualizer_metrics(result)
self.finish_visualizer()
self.action_sequence_and_success = []
return result
def _step(self, action: int) -> RLStepResult:
raise Exception("Not implemented")
def arm_distance_from_obj(self):
goal_obj_id = self.task_info["objectId"]
object_info = self.env.get_object_by_id(goal_obj_id)
hand_state = self.env.get_absolute_hand_state()
return position_distance(object_info, hand_state)
def obj_distance_from_goal(self):
goal_obj_id = self.task_info["objectId"]
object_info = self.env.get_object_by_id(goal_obj_id)
goal_state = self.task_info["target_location"]
return position_distance(object_info, goal_state)
def get_original_object_distance(self):
goal_obj_id = self.task_info["objectId"]
s_init = dict(position=self.task_info["source_location"]["object_location"])
current_location = self.env.get_object_by_id(goal_obj_id)
original_object_distance = position_distance(s_init, current_location)
return original_object_distance
def judge(self) -> float:
"""Compute the reward after having taken a step."""
raise Exception("Not implemented")
class ArmPointNavTask(AbstractPickUpDropOffTask):
_actions = (
MOVE_ARM_HEIGHT_P,
MOVE_ARM_HEIGHT_M,
MOVE_ARM_X_P,
MOVE_ARM_X_M,
MOVE_ARM_Y_P,
MOVE_ARM_Y_M,
MOVE_ARM_Z_P,
MOVE_ARM_Z_M,
MOVE_AHEAD,
ROTATE_RIGHT,
ROTATE_LEFT,
PICKUP,
DONE,
)
def __init__(
self,
env: ManipulaTHOREnvironment,
sensors: List[Sensor],
task_info: Dict[str, Any],
max_steps: int,
visualizers: Optional[List[LoggerVisualizer]] = None,
**kwargs
) -> None:
super().__init__(
env=env,
sensors=sensors,
task_info=task_info,
max_steps=max_steps,
visualizers=visualizers,
**kwargs
)
self.cumulated_disturb_distance_all = 0.0
self.cumulated_disturb_distance_visible = 0.0
        # NOTE: the visible-objects distance can be negative and has no deterministic
        # relation to the all-objects distance
self.previous_object_locations = copy.deepcopy(self.initial_object_locations)
self.current_penalized_distance = 0.0 # used in Sensor for auxiliary task
def metrics(self) -> Dict[str, Any]:
result = super(ArmPointNavTask, self).metrics()
if self.is_done():
# add disturbance distance metrics
result[
"disturbance/objects_moved_distance"
] = self.cumulated_disturb_distance_all
result[
"disturbance/objects_moved_distance_vis"
] = self.cumulated_disturb_distance_visible
return result
def visualize(self, **kwargs):
        for visualizer in self.visualizers:
            visualizer.log(self.env, **kwargs)
def _step(self, action: int) -> RLStepResult:
action_str = self.class_action_names()[action]
self._last_action_str = action_str
action_dict = {"action": action_str}
object_id = self.task_info["objectId"]
if action_str == PICKUP:
action_dict = {**action_dict, "object_id": object_id}
self.env.step(action_dict)
self.last_action_success = self.env.last_action_success
last_action_name = self._last_action_str
last_action_success = float(self.last_action_success)
self.action_sequence_and_success.append((last_action_name, last_action_success))
        # If the object had not yet been picked up and it entered the hand on this
        # step, record the pickup so it can be integrated into the reward
if not self.object_picked_up:
if self.env.is_object_at_low_level_hand(object_id):
self.object_picked_up = True
self.eplen_pickup = (
self._num_steps_taken + 1
) # plus one because this step has not been counted yet
if action_str == DONE:
self._took_end_action = True
object_state = self.env.get_object_by_id(object_id)
goal_state = self.task_info["target_location"]
goal_achieved = self.object_picked_up and self.obj_state_aproximity(
object_state, goal_state
)
self.last_action_success = goal_achieved
self._success = goal_achieved
step_result = RLStepResult(
observation=self.get_observations(),
reward=self.judge(),
done=self.is_done(),
info={"last_action_success": self.last_action_success},
)
return step_result
def judge(self) -> float:
"""Compute the reward after having taken a step."""
reward = self.reward_configs["step_penalty"]
if not self.last_action_success or (
self._last_action_str == PICKUP and not self.object_picked_up
):
reward += self.reward_configs["failed_action_penalty"]
if self._took_end_action:
reward += (
self.reward_configs["goal_success_reward"]
if self._success
else self.reward_configs["failed_stop_reward"]
)
# increase reward if object pickup and only do it once
if not self.got_reward_for_pickup and self.object_picked_up:
reward += self.reward_configs["pickup_success_reward"]
self.got_reward_for_pickup = True
current_obj_to_arm_distance = self.arm_distance_from_obj()
if self.last_arm_to_obj_distance is None:
delta_arm_to_obj_distance_reward = 0
else:
delta_arm_to_obj_distance_reward = (
self.last_arm_to_obj_distance - current_obj_to_arm_distance
)
self.last_arm_to_obj_distance = current_obj_to_arm_distance
reward += delta_arm_to_obj_distance_reward
current_obj_to_goal_distance = self.obj_distance_from_goal()
if self.last_obj_to_goal_distance is None:
delta_obj_to_goal_distance_reward = 0
else:
delta_obj_to_goal_distance_reward = (
self.last_obj_to_goal_distance - current_obj_to_goal_distance
)
self.last_obj_to_goal_distance = current_obj_to_goal_distance
reward += delta_obj_to_goal_distance_reward
        # Add disturbance cost: measured as the summed movement distance of all
        # non-target objects. Note that a collided object may keep moving for a while
        # without any external force due to inertia; object mass could also be
        # taken into account.
current_object_locations = self.env.get_current_object_locations()
disturb_distance_visible = self.env.get_objects_move_distance(
initial_object_locations=self.initial_object_locations,
previous_object_locations=self.previous_object_locations,
current_object_locations=current_object_locations,
target_object_id=self.task_info["objectId"],
only_visible=True,
thres_dict=self._vibration_dist_dict,
)
disturb_distance_all = self.env.get_objects_move_distance(
initial_object_locations=self.initial_object_locations,
previous_object_locations=self.previous_object_locations,
current_object_locations=current_object_locations,
target_object_id=self.task_info["objectId"],
only_visible=False,
thres_dict=self._vibration_dist_dict,
)
self.cumulated_disturb_distance_all += disturb_distance_all
self.cumulated_disturb_distance_visible += disturb_distance_visible
penalized_distance = (
disturb_distance_visible
if self.reward_configs["disturb_visible"]
else disturb_distance_all
)
reward += self.reward_configs["disturb_penalty"] * penalized_distance
self.current_penalized_distance = penalized_distance
self.previous_object_locations = current_object_locations
self.visualize(
action_str=self._last_action_str,
disturbance_str=str(round(penalized_distance, 4)),
)
return float(reward)
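    # Reward composition sketch for `judge` above: step penalty + failed-action
    # penalty + terminal success/failure reward + a one-time pickup bonus +
    # potential-based shaping on the arm-to-object and object-to-goal distances +
    # a penalty proportional to the disturbance distance incurred this step.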
class RotateArmPointNavTask(ArmPointNavTask):
_actions = (
MOVE_ARM_HEIGHT_P,
MOVE_ARM_HEIGHT_M,
MOVE_ARM_X_P,
MOVE_ARM_X_M,
MOVE_ARM_Y_P,
MOVE_ARM_Y_M,
MOVE_ARM_Z_P,
MOVE_ARM_Z_M,
ROTATE_WRIST_PITCH_P,
ROTATE_WRIST_PITCH_M,
ROTATE_WRIST_YAW_P,
ROTATE_WRIST_YAW_M,
ROTATE_ELBOW_P,
ROTATE_ELBOW_M,
MOVE_AHEAD,
ROTATE_RIGHT,
ROTATE_LEFT,
PICKUP,
DONE,
)
class CamRotateArmPointNavTask(ArmPointNavTask):
_actions = (
MOVE_ARM_HEIGHT_P,
MOVE_ARM_HEIGHT_M,
MOVE_ARM_X_P,
MOVE_ARM_X_M,
MOVE_ARM_Y_P,
MOVE_ARM_Y_M,
MOVE_ARM_Z_P,
MOVE_ARM_Z_M,
ROTATE_WRIST_PITCH_P,
ROTATE_WRIST_PITCH_M,
ROTATE_WRIST_YAW_P,
ROTATE_WRIST_YAW_M,
ROTATE_ELBOW_P,
ROTATE_ELBOW_M,
LOOK_UP,
LOOK_DOWN,
MOVE_AHEAD,
ROTATE_RIGHT,
ROTATE_LEFT,
PICKUP,
DONE,
)
class EasyArmPointNavTask(ArmPointNavTask):
_actions = (
MOVE_ARM_HEIGHT_P,
MOVE_ARM_HEIGHT_M,
MOVE_ARM_X_P,
MOVE_ARM_X_M,
MOVE_ARM_Y_P,
MOVE_ARM_Y_M,
MOVE_ARM_Z_P,
MOVE_ARM_Z_M,
MOVE_AHEAD,
ROTATE_RIGHT,
ROTATE_LEFT,
# PICKUP,
# DONE,
)
def _step(self, action: int) -> RLStepResult:
action_str = self.class_action_names()[action]
self._last_action_str = action_str
action_dict = {"action": action_str}
object_id = self.task_info["objectId"]
if action_str == PICKUP:
action_dict = {**action_dict, "object_id": object_id}
self.env.step(action_dict)
self.last_action_success = self.env.last_action_success
last_action_name = self._last_action_str
last_action_success = float(self.last_action_success)
self.action_sequence_and_success.append((last_action_name, last_action_success))
self.visualize(last_action_name)
        # If the object had not yet been picked up and it entered the hand on this
        # step, record the pickup so it can be integrated into the reward
if not self.object_picked_up:
if (
object_id
in self.env.controller.last_event.metadata["arm"]["pickupableObjects"]
):
self.env.step(dict(action="PickupObject"))
                # PickupObject may grab an object other than the target; if so we
                # release it below, at the cost of occasionally retrying a failed pickup
object_inventory = self.env.controller.last_event.metadata["arm"][
"heldObjects"
]
if len(object_inventory) > 0 and object_id not in object_inventory:
self.env.step(dict(action="ReleaseObject"))
if self.env.is_object_at_low_level_hand(object_id):
self.object_picked_up = True
self.eplen_pickup = (
self._num_steps_taken + 1
) # plus one because this step has not been counted yet
if self.object_picked_up:
object_state = self.env.get_object_by_id(object_id)
goal_state = self.task_info["target_location"]
goal_achieved = self.object_picked_up and self.obj_state_aproximity(
object_state, goal_state
)
if goal_achieved:
self._took_end_action = True
self.last_action_success = goal_achieved
self._success = goal_achieved
step_result = RLStepResult(
observation=self.get_observations(),
reward=self.judge(),
done=self.is_done(),
info={"last_action_success": self.last_action_success},
)
return step_result
    # `judge` is inherited unchanged from ArmPointNavTask.
| allenact-main | allenact_plugins/manipulathor_plugin/manipulathor_tasks.py |
"""Task Samplers for the task of ArmPointNav."""
import json
import random
from typing import List, Dict, Optional, Any, Union
import gym
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import Task
from allenact.base_abstractions.task import TaskSampler
from allenact.utils.experiment_utils import set_deterministic_cudnn, set_seed
from allenact_plugins.manipulathor_plugin.manipulathor_environment import (
ManipulaTHOREnvironment,
)
from allenact_plugins.manipulathor_plugin.manipulathor_tasks import (
AbstractPickUpDropOffTask,
ArmPointNavTask,
RotateArmPointNavTask,
CamRotateArmPointNavTask,
EasyArmPointNavTask,
)
from allenact_plugins.manipulathor_plugin.manipulathor_utils import (
transport_wrapper,
initialize_arm,
)
from allenact_plugins.manipulathor_plugin.manipulathor_viz import (
ImageVisualizer,
LoggerVisualizer,
)
class AbstractMidLevelArmTaskSampler(TaskSampler):
_TASK_TYPE = Task
def __init__(
self,
scenes: List[str],
sensors: List[Sensor],
max_steps: int,
env_args: Dict[str, Any],
action_space: gym.Space,
rewards_config: Dict,
objects: List[str],
scene_period: Optional[Union[int, str]] = None,
max_tasks: Optional[int] = None,
num_task_per_scene: Optional[int] = None,
seed: Optional[int] = None,
deterministic_cudnn: bool = False,
fixed_tasks: Optional[List[Dict[str, Any]]] = None,
visualizers: Optional[List[LoggerVisualizer]] = None,
*args,
**kwargs
) -> None:
self.rewards_config = rewards_config
self.env_args = env_args
self.scenes = scenes
self.grid_size = 0.25
self.env: Optional[ManipulaTHOREnvironment] = None
self.sensors = sensors
self.max_steps = max_steps
self._action_space = action_space
self.objects = objects
self.num_task_per_scene = num_task_per_scene
self.scene_counter: Optional[int] = None
self.scene_order: Optional[List[str]] = None
self.scene_id: Optional[int] = None
self.scene_period: Optional[
Union[str, int]
] = scene_period # default makes a random choice
self.max_tasks: Optional[int] = None
self.reset_tasks = max_tasks
self._last_sampled_task: Optional[Task] = None
self.seed: Optional[int] = None
self.set_seed(seed)
if deterministic_cudnn:
set_deterministic_cudnn()
self.reset()
self.visualizers = visualizers if visualizers is not None else []
self.sampler_mode = kwargs["sampler_mode"]
self.cap_training = kwargs["cap_training"]
def _create_environment(self, **kwargs) -> ManipulaTHOREnvironment:
env = ManipulaTHOREnvironment(
make_agents_visible=False, object_open_speed=0.05, env_args=self.env_args,
)
return env
@property
def last_sampled_task(self) -> Optional[Task]:
return self._last_sampled_task
def close(self) -> None:
if self.env is not None:
self.env.stop()
@property
def all_observation_spaces_equal(self) -> bool:
"""Check if observation spaces equal.
# Returns
True if all Tasks that can be sampled by this sampler have the
same observation space. Otherwise False.
"""
return True
def reset(self):
self.scene_counter = 0
self.scene_order = list(range(len(self.scenes)))
random.shuffle(self.scene_order)
self.scene_id = 0
self.sampler_index = 0
self.max_tasks = self.reset_tasks
def set_seed(self, seed: int):
self.seed = seed
if seed is not None:
set_seed(seed)
class SimpleArmPointNavGeneralSampler(AbstractMidLevelArmTaskSampler):
_TASK_TYPE = AbstractPickUpDropOffTask
def __init__(self, **kwargs) -> None:
super(SimpleArmPointNavGeneralSampler, self).__init__(**kwargs)
self.all_possible_points = []
for scene in self.scenes:
for object in self.objects:
valid_position_adr = "datasets/apnd-dataset/valid_object_positions/valid_{}_positions_in_{}.json".format(
object, scene
)
try:
with open(valid_position_adr) as f:
data_points = json.load(f)
except Exception:
print("Failed to load", valid_position_adr)
continue
visible_data = [
data for data in data_points[scene] if data["visibility"]
]
self.all_possible_points += visible_data
self.countertop_object_to_data_id = self.calc_possible_trajectories(
self.all_possible_points
)
scene_names = set(
[
self.all_possible_points[counter[0]]["scene_name"]
for counter in self.countertop_object_to_data_id.values()
if len(counter) > 1
]
)
if len(set(scene_names)) < len(self.scenes):
print("Not all scenes appear")
print(
"Len dataset",
len(self.all_possible_points),
"total_remained",
sum([len(v) for v in self.countertop_object_to_data_id.values()]),
)
        if (
            self.sampler_mode != "train"
        ):  # Note: this replaces the randomly sampled data above with a fixed, deterministic task list
self.deterministic_data_list = []
for scene in self.scenes:
for object in self.objects:
valid_position_adr = "datasets/apnd-dataset/deterministic_tasks/tasks_{}_positions_in_{}.json".format(
object, scene
)
try:
with open(valid_position_adr) as f:
data_points = json.load(f)
except Exception:
print("Failed to load", valid_position_adr)
continue
visible_data = [
dict(scene=scene, index=i, datapoint=data)
for (i, data) in enumerate(data_points[scene])
]
if self.num_task_per_scene is None:
self.deterministic_data_list += visible_data
else: # select a small number of data points for fast evaluation
self.deterministic_data_list += visible_data[
: min(self.num_task_per_scene, len(visible_data))
]
if self.sampler_mode == "test":
random.shuffle(self.deterministic_data_list)
self.max_tasks = self.reset_tasks = len(self.deterministic_data_list)
def next_task(
self, force_advance_scene: bool = False
) -> Optional[AbstractPickUpDropOffTask]:
if self.max_tasks is not None and self.max_tasks <= 0:
return None
if self.sampler_mode != "train" and self.length <= 0:
return None
source_data_point, target_data_point = self.get_source_target_indices()
scene = source_data_point["scene_name"]
assert source_data_point["object_id"] == target_data_point["object_id"]
assert source_data_point["scene_name"] == target_data_point["scene_name"]
if self.env is None:
self.env = self._create_environment()
self.env.reset(
scene_name=scene, agentMode="arm", agentControllerType="mid-level"
)
initialize_arm(self.env.controller)
source_location = source_data_point
target_location = dict(
position=target_data_point["object_location"],
rotation={"x": 0, "y": 0, "z": 0},
)
task_info = {
"objectId": source_location["object_id"],
"countertop_id": source_location["countertop_id"],
"source_location": source_location,
"target_location": target_location,
}
this_controller = self.env
transport_wrapper(
this_controller,
source_location["object_id"],
source_location["object_location"],
)
agent_state = source_location["agent_pose"]
this_controller.step(
dict(
action="TeleportFull",
standing=True,
x=agent_state["position"]["x"],
y=agent_state["position"]["y"],
z=agent_state["position"]["z"],
rotation=dict(
x=agent_state["rotation"]["x"],
y=agent_state["rotation"]["y"],
z=agent_state["rotation"]["z"],
),
horizon=agent_state["cameraHorizon"],
)
)
should_visualize_goal_start = [
x for x in self.visualizers if issubclass(type(x), ImageVisualizer)
]
if len(should_visualize_goal_start) > 0:
task_info["visualization_source"] = source_data_point
task_info["visualization_target"] = target_data_point
self._last_sampled_task = self._TASK_TYPE(
env=self.env,
sensors=self.sensors,
task_info=task_info,
max_steps=self.max_steps,
action_space=self._action_space,
visualizers=self.visualizers,
reward_configs=self.rewards_config,
)
return self._last_sampled_task
@property
def total_unique(self) -> Optional[Union[int, float]]:
if self.sampler_mode == "train":
return None
else:
return min(self.max_tasks, len(self.deterministic_data_list))
@property
def length(self) -> Union[int, float]:
"""Length.
# Returns
Number of total tasks remaining that can be sampled. Can be float('inf').
"""
return (
self.total_unique - self.sampler_index
if self.sampler_mode != "train"
else (float("inf") if self.max_tasks is None else self.max_tasks)
)
def get_source_target_indices(self):
if self.sampler_mode == "train":
valid_countertops = [
k for (k, v) in self.countertop_object_to_data_id.items() if len(v) > 1
]
countertop_id = random.choice(valid_countertops)
indices = random.sample(self.countertop_object_to_data_id[countertop_id], 2)
result = (
self.all_possible_points[indices[0]],
self.all_possible_points[indices[1]],
)
else:
result = self.deterministic_data_list[self.sampler_index]["datapoint"]
self.sampler_index += 1
return result
def calc_possible_trajectories(self, all_possible_points):
object_to_data_id = {}
for i in range(len(all_possible_points)):
object_id = all_possible_points[i]["object_id"]
object_to_data_id.setdefault(object_id, [])
object_to_data_id[object_id].append(i)
return object_to_data_id
class ArmPointNavTaskSampler(SimpleArmPointNavGeneralSampler):
_TASK_TYPE = ArmPointNavTask
def __init__(self, **kwargs) -> None:
super(ArmPointNavTaskSampler, self).__init__(**kwargs)
possible_initial_locations = (
"datasets/apnd-dataset/valid_agent_initial_locations.json"
)
if self.sampler_mode == "test":
possible_initial_locations = (
"datasets/apnd-dataset/deterministic_valid_agent_initial_locations.json"
)
with open(possible_initial_locations) as f:
self.possible_agent_reachable_poses = json.load(f)
def next_task(
self, force_advance_scene: bool = False
) -> Optional[AbstractPickUpDropOffTask]:
if self.max_tasks is not None and self.max_tasks <= 0:
return None
if self.sampler_mode != "train" and self.length <= 0:
return None
source_data_point, target_data_point = self.get_source_target_indices()
scene = source_data_point["scene_name"]
assert source_data_point["object_id"] == target_data_point["object_id"]
assert source_data_point["scene_name"] == target_data_point["scene_name"]
if self.env is None:
self.env = self._create_environment()
self.env.reset(
scene_name=scene, agentMode="arm", agentControllerType="mid-level"
)
initialize_arm(self.env.controller)
source_location = source_data_point
target_location = dict(
position=target_data_point["object_location"],
rotation={"x": 0, "y": 0, "z": 0},
countertop_id=target_data_point["countertop_id"],
)
this_controller = self.env
transport_wrapper(
this_controller,
source_location["object_id"],
source_location["object_location"],
)
        agent_state = source_location[
            "initial_agent_pose"
        ]  # the only line differing from the parent class's next_task
this_controller.step(
dict(
action="TeleportFull",
standing=True,
x=agent_state["position"]["x"],
y=agent_state["position"]["y"],
z=agent_state["position"]["z"],
rotation=dict(
x=agent_state["rotation"]["x"],
y=agent_state["rotation"]["y"],
z=agent_state["rotation"]["z"],
),
horizon=agent_state["cameraHorizon"],
)
)
should_visualize_goal_start = [
x for x in self.visualizers if issubclass(type(x), ImageVisualizer)
]
initial_object_info = self.env.get_object_by_id(source_location["object_id"])
initial_agent_location = self.env.controller.last_event.metadata["agent"]
initial_hand_state = self.env.get_absolute_hand_state()
task_info = {
"objectId": source_location["object_id"],
"source_location": source_location, # used in analysis
"target_location": target_location, # used in analysis
"agent_initial_state": initial_agent_location, # not used
"initial_object_location": initial_object_info, # not used
"initial_hand_state": initial_hand_state,
}
if len(should_visualize_goal_start) > 0:
task_info["visualization_source"] = source_data_point
task_info["visualization_target"] = target_data_point
self._last_sampled_task = self._TASK_TYPE(
env=self.env,
sensors=self.sensors,
task_info=task_info,
max_steps=self.max_steps,
action_space=self._action_space,
visualizers=self.visualizers,
reward_configs=self.rewards_config,
)
return self._last_sampled_task
def get_source_target_indices(self):
if self.sampler_mode == "train":
valid_countertops = [
k for (k, v) in self.countertop_object_to_data_id.items() if len(v) > 1
]
countertop_id = random.choice(valid_countertops)
indices = random.sample(self.countertop_object_to_data_id[countertop_id], 2)
result = (
self.all_possible_points[indices[0]],
self.all_possible_points[indices[1]],
)
scene_name = result[0]["scene_name"]
selected_agent_init_loc = random.choice(
self.possible_agent_reachable_poses[scene_name]
)
initial_agent_pose = {
"name": "agent",
"position": {
"x": selected_agent_init_loc["x"],
"y": selected_agent_init_loc["y"],
"z": selected_agent_init_loc["z"],
},
"rotation": {
"x": -0.0,
"y": selected_agent_init_loc["rotation"],
"z": 0.0,
},
"cameraHorizon": selected_agent_init_loc["horizon"],
"isStanding": True,
}
result[0]["initial_agent_pose"] = initial_agent_pose
        else:  # the agent's initial location must be fixed for evaluation, so we load a previously randomized, fixed valid pose
result = self.deterministic_data_list[self.sampler_index]["datapoint"]
scene_name = self.deterministic_data_list[self.sampler_index]["scene"]
datapoint_original_index = self.deterministic_data_list[self.sampler_index][
"index"
]
selected_agent_init_loc = self.possible_agent_reachable_poses[scene_name][
datapoint_original_index
]
initial_agent_pose = {
"name": "agent",
"position": {
"x": selected_agent_init_loc["x"],
"y": selected_agent_init_loc["y"],
"z": selected_agent_init_loc["z"],
},
"rotation": {
"x": -0.0,
"y": selected_agent_init_loc["rotation"],
"z": 0.0,
},
"cameraHorizon": selected_agent_init_loc["horizon"],
"isStanding": True,
}
result[0]["initial_agent_pose"] = initial_agent_pose
self.sampler_index += 1
return result
class RotateArmPointNavTaskSampler(ArmPointNavTaskSampler):
_TASK_TYPE = RotateArmPointNavTask
class CamRotateArmPointNavTaskSampler(ArmPointNavTaskSampler):
_TASK_TYPE = CamRotateArmPointNavTask
class EasyArmPointNavTaskSampler(ArmPointNavTaskSampler):
_TASK_TYPE = EasyArmPointNavTask
def get_all_tuples_from_list(lst):
    result = []
    for first_ind in range(len(lst) - 1):
        for second_ind in range(first_ind + 1, len(lst)):
            result.append([lst[first_ind], lst[second_ind]])
    return result
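# A tiny illustrative check of the helper above (hypothetical, not part of the
# original module): every unordered pair appears exactly once, in order.
if __name__ == "__main__":
    assert get_all_tuples_from_list([1, 2, 3]) == [[1, 2], [1, 3], [2, 3]]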
| allenact-main | allenact_plugins/manipulathor_plugin/manipulathor_task_samplers.py |
from allenact.utils.system import ImportChecker
with ImportChecker(
"Cannot `import ai2thor`, please install `ai2thor` (`pip install ai2thor`)."
):
# noinspection PyUnresolvedReferences
import ai2thor
| allenact-main | allenact_plugins/manipulathor_plugin/__init__.py |
"""Constant values and hyperparameters that are used by the environment."""
import ai2thor.fifo_server
ARM_MIN_HEIGHT = 0.450998873
ARM_MAX_HEIGHT = 1.8009994
ADDITIONAL_ARM_ARGS = {
"disableRendering": True,
"returnToStart": True,
"speed": 1,
}
MOVE_AHEAD = "MoveAheadContinuous"
MOVE_BACK = "MoveBackContinuous"
ROTATE_LEFT = "RotateLeftContinuous"
ROTATE_RIGHT = "RotateRightContinuous"
MOVE_ARM_HEIGHT_P = "MoveArmHeightP"
MOVE_ARM_HEIGHT_M = "MoveArmHeightM"
MOVE_ARM_X_P = "MoveArmXP"
MOVE_ARM_X_M = "MoveArmXM"
MOVE_ARM_Y_P = "MoveArmYP"
MOVE_ARM_Y_M = "MoveArmYM"
MOVE_ARM_Z_P = "MoveArmZP"
MOVE_ARM_Z_M = "MoveArmZM"
ROTATE_WRIST_PITCH_P = "RotateArmWristPitchP"
ROTATE_WRIST_PITCH_M = "RotateArmWristPitchM"
ROTATE_WRIST_YAW_P = "RotateArmWristYawP"
ROTATE_WRIST_YAW_M = "RotateArmWristYawM"
ROTATE_WRIST_ROLL_P = "RotateArmWristRollP"
ROTATE_WRIST_ROLL_M = "RotateArmWristRollM"
ROTATE_ELBOW_P = "RotateArmElbowP"
ROTATE_ELBOW_M = "RotateArmElbowM"
LOOK_UP = "LookUp"
LOOK_DOWN = "LookDown"
PICKUP = "PickUpMidLevel"
DROP = "DropMidLevel"
DONE = "DoneMidLevel"
ENV_ARGS = dict(
gridSize=0.25,
width=224,
height=224,
visibilityDistance=1.0,
agentMode="arm",
fieldOfView=100,
agentControllerType="mid-level",
server_class=ai2thor.fifo_server.FifoServer,
useMassThreshold=True,
massThreshold=10,
autoSimulation=False,
autoSyncTransforms=True,
)
VALID_OBJECT_LIST = [
"Knife",
"Bread",
"Fork",
"Potato",
"SoapBottle",
"Pan",
"Plate",
"Tomato",
"Egg",
"Pot",
"Spatula",
"Cup",
"Bowl",
"SaltShaker",
"PepperShaker",
"Lettuce",
"ButterKnife",
"Apple",
"DishSponge",
"Spoon",
"Mug",
]
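# A minimal sketch (not part of the original module) of how these constants are
# typically consumed: launch a controller with ENV_ARGS and issue one of the
# named arm actions. This assumes a machine that can render AI2-THOR and an
# ai2thor version compatible with the arguments above.
if __name__ == "__main__":
    from ai2thor.controller import Controller

    controller = Controller(**ENV_ARGS, scene="FloorPlan1")
    controller.step(
        action="MoveArm", position=dict(x=0.0, y=0.0, z=0.35), **ADDITIONAL_ARM_ARGS
    )
    controller.stop()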
| allenact-main | allenact_plugins/manipulathor_plugin/manipulathor_constants.py |
import json
import os
from typing import Dict, Optional, Any
from constants import ABS_PATH_OF_TOP_LEVEL_DIR
TRAIN_OBJECTS = ["Apple", "Bread", "Tomato", "Lettuce", "Pot", "Mug"]
TEST_OBJECTS = ["Potato", "SoapBottle", "Pan", "Egg", "Spatula", "Cup"]
MOVE_ARM_CONSTANT = 0.05
MOVE_ARM_HEIGHT_CONSTANT = MOVE_ARM_CONSTANT
UNWANTED_MOVE_THR = 0.01
DISTANCE_EPS = 1e-9
DISTANCE_MAX = 10.0
dataset_json_file = os.path.join(
ABS_PATH_OF_TOP_LEVEL_DIR, "datasets", "apnd-dataset", "starting_pose.json"
)
_ARM_START_POSITIONS: Optional[Dict[str, Any]] = None
def get_agent_start_positions():
    global _ARM_START_POSITIONS
    if _ARM_START_POSITIONS is None:  # lazily load and cache on first call
        try:
            with open(dataset_json_file) as f:
                _ARM_START_POSITIONS = json.load(f)
        except Exception:
            raise Exception(f"Dataset not found in {dataset_json_file}")
    return _ARM_START_POSITIONS
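# Example usage (a sketch; assumes the apnd-dataset has been downloaded so that
# the JSON file above exists): poses are loaded lazily on the first call and
# cached in the module-level `_ARM_START_POSITIONS` for subsequent calls.
if __name__ == "__main__":
    start_positions = get_agent_start_positions()
    print(f"Loaded start poses for {len(start_positions)} scenes")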
| allenact-main | allenact_plugins/manipulathor_plugin/armpointnav_constants.py |
"""Utility classes and functions for sensory inputs used by the models."""
from typing import Any, Union, Optional
import gym
import numpy as np
from allenact.base_abstractions.sensor import Sensor
from allenact.embodiedai.sensors.vision_sensors import DepthSensor, RGBSensor
from allenact.base_abstractions.task import Task
from allenact.utils.misc_utils import prepare_locals_for_super
from allenact_plugins.manipulathor_plugin.arm_calculation_utils import (
world_coords_to_agent_coords,
state_dict_to_tensor,
diff_position,
coord_system_transform,
)
from allenact_plugins.manipulathor_plugin.manipulathor_environment import (
ManipulaTHOREnvironment,
)
class DepthSensorThor(
DepthSensor[Union[ManipulaTHOREnvironment], Union[Task[ManipulaTHOREnvironment]],]
):
"""Sensor for Depth images in THOR.
Returns from a running ManipulaTHOREnvironment instance, the current
RGB frame corresponding to the agent's egocentric view.
"""
def frame_from_env(
self, env: ManipulaTHOREnvironment, task: Optional[Task]
) -> np.ndarray:
return env.controller.last_event.depth_frame.copy()
class NoVisionSensorThor(
RGBSensor[Union[ManipulaTHOREnvironment], Union[Task[ManipulaTHOREnvironment]],]
):
"""Sensor for RGB images in THOR.
Returns from a running ManipulaTHOREnvironment instance, the current
RGB frame corresponding to the agent's egocentric view.
"""
def frame_from_env(
self, env: ManipulaTHOREnvironment, task: Optional[Task]
) -> np.ndarray:
return np.zeros_like(env.current_frame)
class AgentRelativeCurrentObjectStateThorSensor(Sensor):
def __init__(self, uuid: str = "relative_current_obj_state", **kwargs: Any):
observation_space = gym.spaces.Box(
low=-100, high=100, shape=(6,), dtype=np.float32
) # (low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)
super().__init__(**prepare_locals_for_super(locals()))
def get_observation(
self, env: ManipulaTHOREnvironment, task: Task, *args: Any, **kwargs: Any
) -> Any:
object_id = task.task_info["objectId"]
current_object_state = env.get_object_by_id(object_id)
relative_current_obj = world_coords_to_agent_coords(
current_object_state, env.controller.last_event.metadata["agent"]
)
result = state_dict_to_tensor(
dict(
position=relative_current_obj["position"],
rotation=relative_current_obj["rotation"],
)
)
return result
class RelativeObjectToGoalSensor(Sensor):
def __init__(
self,
uuid: str = "relative_obj_to_goal",
coord_system: str = "xyz_unsigned",
**kwargs: Any
):
assert coord_system in [
"xyz_unsigned",
"xyz_signed",
"polar_radian",
"polar_trigo",
]
self.coord_system = coord_system
if coord_system == "polar_trigo":
obs_dim = 5
else:
obs_dim = 3
observation_space = gym.spaces.Box(
low=-100, high=100, shape=(obs_dim,), dtype=np.float32
)
super().__init__(**prepare_locals_for_super(locals()))
def get_observation(
self, env: ManipulaTHOREnvironment, task: Task, *args: Any, **kwargs: Any
) -> Any:
goal_obj_id = task.task_info["objectId"]
object_info = env.get_object_by_id(goal_obj_id)
target_state = task.task_info["target_location"]
agent_state = env.controller.last_event.metadata["agent"]
relative_current_obj = world_coords_to_agent_coords(object_info, agent_state)
relative_goal_state = world_coords_to_agent_coords(target_state, agent_state)
relative_distance = diff_position(
relative_current_obj, relative_goal_state, absolute=False,
)
result = coord_system_transform(relative_distance, self.coord_system)
return result
class InitialObjectToGoalSensor(Sensor):
def __init__(self, uuid: str = "initial_obj_to_goal", **kwargs: Any):
# observation_space = gym.spaces.Discrete(len(self.detector_types))
observation_space = gym.spaces.Box(
low=-100, high=100, shape=(3,), dtype=np.float32
) # (low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)
super().__init__(**prepare_locals_for_super(locals()))
def get_observation(
self, env: ManipulaTHOREnvironment, task: Task, *args: Any, **kwargs: Any
) -> Any:
object_source_location = task.task_info["initial_object_location"]
target_state = task.task_info["target_location"]
agent_state = task.task_info["agent_initial_state"]
relative_current_obj = world_coords_to_agent_coords(
object_source_location, agent_state
)
relative_goal_state = world_coords_to_agent_coords(target_state, agent_state)
relative_distance = diff_position(relative_current_obj, relative_goal_state)
result = state_dict_to_tensor(dict(position=relative_distance))
return result
class DistanceObjectToGoalSensor(Sensor):
def __init__(self, uuid: str = "distance_obj_to_goal", **kwargs: Any):
# observation_space = gym.spaces.Discrete(len(self.detector_types))
observation_space = gym.spaces.Box(
low=-100, high=100, shape=(3,), dtype=np.float32
) # (low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)
super().__init__(**prepare_locals_for_super(locals()))
def get_observation(
self, env: ManipulaTHOREnvironment, task: Task, *args: Any, **kwargs: Any
) -> Any:
goal_obj_id = task.task_info["objectId"]
object_info = env.get_object_by_id(goal_obj_id)
target_state = task.task_info["target_location"]
agent_state = env.controller.last_event.metadata["agent"]
relative_current_obj = world_coords_to_agent_coords(object_info, agent_state)
relative_goal_state = world_coords_to_agent_coords(target_state, agent_state)
relative_distance = diff_position(relative_current_obj, relative_goal_state)
result = state_dict_to_tensor(dict(position=relative_distance))
result = ((result ** 2).sum() ** 0.5).view(1)
return result
class RelativeAgentArmToObjectSensor(Sensor):
def __init__(
self,
uuid: str = "relative_agent_arm_to_obj",
coord_system: str = "xyz_unsigned",
**kwargs: Any
):
assert coord_system in [
"xyz_unsigned",
"xyz_signed",
"polar_radian",
"polar_trigo",
]
self.coord_system = coord_system
if coord_system == "polar_trigo":
obs_dim = 5
else:
obs_dim = 3
observation_space = gym.spaces.Box(
low=-100, high=100, shape=(obs_dim,), dtype=np.float32
)
super().__init__(**prepare_locals_for_super(locals()))
def get_observation(
self, env: ManipulaTHOREnvironment, task: Task, *args: Any, **kwargs: Any
) -> Any:
goal_obj_id = task.task_info["objectId"]
object_info = env.get_object_by_id(goal_obj_id)
hand_state = env.get_absolute_hand_state()
relative_goal_obj = world_coords_to_agent_coords(
object_info, env.controller.last_event.metadata["agent"]
)
relative_hand_state = world_coords_to_agent_coords(
hand_state, env.controller.last_event.metadata["agent"]
)
relative_distance = diff_position(
relative_goal_obj, relative_hand_state, absolute=False,
)
result = coord_system_transform(relative_distance, self.coord_system)
return result
class InitialAgentArmToObjectSensor(Sensor):
def __init__(self, uuid: str = "initial_agent_arm_to_obj", **kwargs: Any):
observation_space = gym.spaces.Box(
low=-100, high=100, shape=(3,), dtype=np.float32
) # (low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)
super().__init__(**prepare_locals_for_super(locals()))
def get_observation(
self, env: ManipulaTHOREnvironment, task: Task, *args: Any, **kwargs: Any
) -> Any:
object_source_location = task.task_info["initial_object_location"]
initial_hand_state = task.task_info["initial_hand_state"]
relative_goal_obj = world_coords_to_agent_coords(
object_source_location, env.controller.last_event.metadata["agent"]
)
relative_hand_state = world_coords_to_agent_coords(
initial_hand_state, env.controller.last_event.metadata["agent"]
)
relative_distance = diff_position(relative_goal_obj, relative_hand_state)
result = state_dict_to_tensor(dict(position=relative_distance))
return result
class DistanceAgentArmToObjectSensor(Sensor):
def __init__(self, uuid: str = "distance_agent_arm_to_obj", **kwargs: Any):
observation_space = gym.spaces.Box(
low=-100, high=100, shape=(3,), dtype=np.float32
) # (low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)
super().__init__(**prepare_locals_for_super(locals()))
def get_observation(
self, env: ManipulaTHOREnvironment, task: Task, *args: Any, **kwargs: Any
) -> Any:
goal_obj_id = task.task_info["objectId"]
object_info = env.get_object_by_id(goal_obj_id)
hand_state = env.get_absolute_hand_state()
relative_goal_obj = world_coords_to_agent_coords(
object_info, env.controller.last_event.metadata["agent"]
)
relative_hand_state = world_coords_to_agent_coords(
hand_state, env.controller.last_event.metadata["agent"]
)
relative_distance = diff_position(relative_goal_obj, relative_hand_state)
result = state_dict_to_tensor(dict(position=relative_distance))
result = ((result ** 2).sum() ** 0.5).view(1)
return result
class PickedUpObjSensor(Sensor):
def __init__(self, uuid: str = "pickedup_object", **kwargs: Any):
observation_space = gym.spaces.Box(
low=0, high=1, shape=(1,), dtype=np.float32
) # (low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)
super().__init__(**prepare_locals_for_super(locals()))
def get_observation(
self, env: ManipulaTHOREnvironment, task: Task, *args: Any, **kwargs: Any
) -> Any:
return task.object_picked_up
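# A small sanity check (not part of the original module): each sensor declares
# its observation dimensionality up front, so the polar-trigonometric encoding
# yields a 5-dimensional box while the xyz encodings yield a 3-dimensional one.
if __name__ == "__main__":
    assert RelativeObjectToGoalSensor(
        coord_system="polar_trigo"
    ).observation_space.shape == (5,)
    assert RelativeAgentArmToObjectSensor(
        coord_system="xyz_signed"
    ).observation_space.shape == (3,)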
| allenact-main | allenact_plugins/manipulathor_plugin/manipulathor_sensors.py |
"""Utility classes and functions for calculating the arm relative and absolute
position."""
from typing import Dict
import numpy as np
import torch
from scipy.spatial.transform import Rotation as R
from allenact.utils.system import get_logger
def state_dict_to_tensor(state: Dict):
result = []
if "position" in state:
result += [
state["position"]["x"],
state["position"]["y"],
state["position"]["z"],
]
if "rotation" in state:
result += [
state["rotation"]["x"],
state["rotation"]["y"],
state["rotation"]["z"],
]
return torch.Tensor(result)
def diff_position(state_goal, state_curr, absolute: bool = True):
p1 = state_goal["position"]
p2 = state_curr["position"]
if absolute:
result = {k: abs(p1[k] - p2[k]) for k in p1.keys()}
else:
result = {k: (p1[k] - p2[k]) for k in p1.keys()}
return result
def coord_system_transform(position: Dict, coord_system: str):
assert coord_system in [
"xyz_unsigned",
"xyz_signed",
"polar_radian",
"polar_trigo",
]
if "xyz" in coord_system:
result = [
position["x"],
position["y"],
position["z"],
]
result = torch.Tensor(result)
if coord_system == "xyz_unsigned":
return torch.abs(result)
else: # xyz_signed
return result
else:
hxy = np.hypot(position["x"], position["y"])
r = np.hypot(hxy, position["z"])
el = np.arctan2(position["z"], hxy) # elevation angle: [-pi/2, pi/2]
az = np.arctan2(position["y"], position["x"]) # azimuthal angle: [-pi, pi]
if coord_system == "polar_radian":
result = [
r,
el / (0.5 * np.pi),
az / np.pi,
] # normalize to [-1, 1]
return torch.Tensor(result)
else: # polar_trigo
result = [
r,
np.cos(el),
np.sin(el),
np.cos(az),
np.sin(az),
]
return torch.Tensor(result)
def position_rotation_to_matrix(position, rotation):
result = np.zeros((4, 4))
r = R.from_euler("xyz", [rotation["x"], rotation["y"], rotation["z"]], degrees=True)
result[:3, :3] = r.as_matrix()
result[3, 3] = 1
result[:3, 3] = [position["x"], position["y"], position["z"]]
return result
def inverse_rot_trans_matrix(mat):
mat = np.linalg.inv(mat)
return mat
def matrix_to_position_rotation(matrix):
result = {"position": None, "rotation": None}
rotation = R.from_matrix(matrix[:3, :3]).as_euler("xyz", degrees=True)
rotation_dict = {"x": rotation[0], "y": rotation[1], "z": rotation[2]}
result["rotation"] = rotation_dict
position = matrix[:3, 3]
result["position"] = {"x": position[0], "y": position[1], "z": position[2]}
return result
def find_closest_inverse(deg, use_cache):
if use_cache:
for k in _saved_inverse_rotation_mats.keys():
if abs(k - deg) < 5:
return _saved_inverse_rotation_mats[k]
# if it reaches here it means it had not calculated the degree before
rotation = R.from_euler("xyz", [0, deg, 0], degrees=True)
result = rotation.as_matrix()
inverse = inverse_rot_trans_matrix(result)
if use_cache:
get_logger().warning(f"Had to calculate the matrix for {deg}")
return inverse
def calc_inverse(deg):
rotation = R.from_euler("xyz", [0, deg, 0], degrees=True)
result = rotation.as_matrix()
inverse = inverse_rot_trans_matrix(result)
return inverse
_saved_inverse_rotation_mats = {i: calc_inverse(i) for i in range(0, 360, 45)}
_saved_inverse_rotation_mats[360] = _saved_inverse_rotation_mats[0]
def world_coords_to_agent_coords(world_obj, agent_state, use_cache=True):
position = agent_state["position"]
rotation = agent_state["rotation"]
agent_translation = [position["x"], position["y"], position["z"]]
assert abs(rotation["x"]) < 0.01 and abs(rotation["z"]) < 0.01
inverse_agent_rotation = find_closest_inverse(rotation["y"], use_cache=use_cache)
obj_matrix = position_rotation_to_matrix(
world_obj["position"], world_obj["rotation"]
)
obj_translation = np.matmul(
inverse_agent_rotation, (obj_matrix[:3, 3] - agent_translation)
)
# add rotation later
obj_matrix[:3, 3] = obj_translation
result = matrix_to_position_rotation(obj_matrix)
return result
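# A minimal sanity check (not part of the original module). With the agent at
# the origin facing +x (rotation y=90 in THOR conventions), a point one meter
# along the world x-axis should land one meter ahead of the agent, i.e. at +z.
if __name__ == "__main__":
    agent = {
        "position": {"x": 0.0, "y": 0.0, "z": 0.0},
        "rotation": {"x": 0.0, "y": 90.0, "z": 0.0},
    }
    obj = {
        "position": {"x": 1.0, "y": 0.0, "z": 0.0},
        "rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
    }
    relative = world_coords_to_agent_coords(obj, agent)
    assert abs(relative["position"]["z"] - 1.0) < 1e-6
    assert abs(relative["position"]["x"]) < 1e-6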
| allenact-main | allenact_plugins/manipulathor_plugin/arm_calculation_utils.py |
import ai2thor
from allenact_plugins.ithor_plugin.ithor_environment import IThorEnvironment
from allenact_plugins.manipulathor_plugin.armpointnav_constants import (
get_agent_start_positions,
)
from allenact_plugins.manipulathor_plugin.manipulathor_constants import (
ADDITIONAL_ARM_ARGS,
)
def make_all_objects_unbreakable(controller):
all_breakable_objects = [
o["objectType"]
for o in controller.last_event.metadata["objects"]
if o["breakable"] is True
]
all_breakable_objects = set(all_breakable_objects)
for obj_type in all_breakable_objects:
controller.step(action="MakeObjectsOfTypeUnbreakable", objectType=obj_type)
def reset_environment_and_additional_commands(controller, scene_name):
controller.reset(scene_name)
controller.step(action="MakeAllObjectsMoveable")
controller.step(action="MakeObjectsStaticKinematicMassThreshold")
make_all_objects_unbreakable(controller)
return
def transport_wrapper(controller, target_object, target_location):
transport_detail = dict(
action="PlaceObjectAtPoint",
objectId=target_object,
position=target_location,
forceKinematic=True,
)
advance_detail = dict(action="AdvancePhysicsStep", simSeconds=1.0)
if issubclass(type(controller), IThorEnvironment):
event = controller.step(transport_detail)
controller.step(advance_detail)
elif type(controller) == ai2thor.controller.Controller:
event = controller.step(**transport_detail)
controller.step(**advance_detail)
else:
raise NotImplementedError
return event
def initialize_arm(controller):
    # start the arm from high up
scene = controller.last_event.metadata["sceneName"]
initial_pose = get_agent_start_positions()[scene]
event1 = controller.step(
dict(
action="TeleportFull",
standing=True,
x=initial_pose["x"],
y=initial_pose["y"],
z=initial_pose["z"],
rotation=dict(x=0, y=initial_pose["rotation"], z=0),
horizon=initial_pose["horizon"],
)
)
event2 = controller.step(
dict(action="MoveArm", position=dict(x=0.0, y=0, z=0.35), **ADDITIONAL_ARM_ARGS)
)
event3 = controller.step(dict(action="MoveArmBase", y=0.8, **ADDITIONAL_ARM_ARGS))
return event1, event2, event3
| allenact-main | allenact_plugins/manipulathor_plugin/manipulathor_utils.py |
"""A wrapper for engaging with the ManipulaTHOR environment."""
import copy
import math
import warnings
from typing import Dict, Union, Any, Optional, cast
import ai2thor.server
import numpy as np
from ai2thor.controller import Controller
from allenact.utils.misc_utils import prepare_locals_for_super
from allenact_plugins.ithor_plugin.ithor_constants import VISIBILITY_DISTANCE, FOV
from allenact_plugins.ithor_plugin.ithor_environment import IThorEnvironment
from allenact_plugins.manipulathor_plugin.armpointnav_constants import (
MOVE_ARM_HEIGHT_CONSTANT,
MOVE_ARM_CONSTANT,
UNWANTED_MOVE_THR,
DISTANCE_MAX,
)
from allenact_plugins.manipulathor_plugin.manipulathor_constants import (
ADDITIONAL_ARM_ARGS,
ARM_MIN_HEIGHT,
ARM_MAX_HEIGHT,
)
from allenact_plugins.manipulathor_plugin.manipulathor_constants import (
ROTATE_WRIST_PITCH_P,
ROTATE_WRIST_PITCH_M,
ROTATE_WRIST_YAW_P,
ROTATE_WRIST_YAW_M,
ROTATE_ELBOW_P,
ROTATE_ELBOW_M,
LOOK_UP,
LOOK_DOWN,
MOVE_AHEAD,
ROTATE_RIGHT,
ROTATE_LEFT,
PICKUP,
DONE,
)
from allenact_plugins.manipulathor_plugin.manipulathor_utils import (
reset_environment_and_additional_commands,
)
def position_distance(s1, s2, filter_nan: bool = False):
position1 = s1["position"]
position2 = s2["position"]
dist = (
(position1["x"] - position2["x"]) ** 2
+ (position1["y"] - position2["y"]) ** 2
+ (position1["z"] - position2["z"]) ** 2
) ** 0.5
if filter_nan:
dist = DISTANCE_MAX if math.isnan(dist) or dist > DISTANCE_MAX else dist
return dist
def rotation_distance(s1: Dict[str, Dict[str, float]], s2: Dict[str, Dict[str, float]]):
"""Distance between rotations."""
rotation1 = s1["rotation"]
rotation2 = s2["rotation"]
def deg_dist(d0: float, d1: float):
dist = (d0 - d1) % 360
return min(dist, 360 - dist)
return sum(deg_dist(rotation1[k], rotation2[k]) for k in ["x", "y", "z"])
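# For example (illustrative, not in the original source):
#   rotation_distance({"rotation": {"x": 0, "y": 350, "z": 0}},
#                     {"rotation": {"x": 0, "y": 10, "z": 0}}) == 20
# since each axis is compared on the circle rather than by raw subtraction.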
class ManipulaTHOREnvironment(IThorEnvironment):
"""Wrapper for the manipulathor controller providing arm functionality and
bookkeeping.
See [here](https://ai2thor.allenai.org/documentation/installation) for comprehensive
documentation on AI2-THOR.
# Attributes
controller : The ai2thor controller.
"""
def __init__(
self,
x_display: Optional[str] = None,
docker_enabled: bool = False,
local_thor_build: Optional[str] = None,
visibility_distance: float = VISIBILITY_DISTANCE,
fov: float = FOV,
player_screen_width: int = 224,
player_screen_height: int = 224,
quality: str = "Very Low",
restrict_to_initially_reachable_points: bool = False,
make_agents_visible: bool = True,
object_open_speed: float = 1.0,
simplify_physics: bool = False,
verbose: bool = False,
env_args=None,
) -> None:
"""Initializer.
# Parameters
        x_display : The x display into which to launch ai2thor (possibly necessary if you are running on a server
without an attached display).
docker_enabled : Whether or not to run thor in a docker container (useful on a server without an attached
display so that you don't have to start an x display).
local_thor_build : The path to a local build of ai2thor. This is probably not necessary for your use case
and can be safely ignored.
visibility_distance : The distance (in meters) at which objects, in the viewport of the agent,
are considered visible by ai2thor and will have their "visible" flag be set to `True` in the metadata.
fov : The agent's camera's field of view.
        player_screen_width : The width resolution (in pixels) of the images returned by ai2thor.
        player_screen_height : The height resolution (in pixels) of the images returned by ai2thor.
quality : The quality at which to render. Possible quality settings can be found in
`ai2thor._quality_settings.QUALITY_SETTINGS`.
restrict_to_initially_reachable_points : Whether or not to restrict the agent to locations in ai2thor
that were found to be (initially) reachable by the agent (i.e. reachable by the agent after resetting
the scene). This can be useful if you want to ensure there are only a fixed set of locations where the
agent can go.
        make_agents_visible : Whether or not the agent should be visible. Most noticeable when there are multiple agents
or when quality settings are high so that the agent casts a shadow.
object_open_speed : How quickly objects should be opened. High speeds mean faster simulation but also mean
        that opened objects have a lot of kinetic energy and can, possibly, knock other objects away.
        simplify_physics : Whether or not to simplify physics when applicable. Currently this only simplifies object
interactions when opening drawers (when simplified, objects within a drawer do not slide around on
their own when the drawer is opened or closed, instead they are effectively glued down).
"""
self._verbose = verbose
self.env_args = env_args
del verbose
del env_args
super(ManipulaTHOREnvironment, self).__init__(
**prepare_locals_for_super(locals())
)
def create_controller(self):
controller = Controller(**self.env_args)
return controller
def start(
self, scene_name: Optional[str], move_mag: float = 0.25, **kwargs,
) -> None:
"""Starts the ai2thor controller if it was previously stopped.
After starting, `reset` will be called with the scene name and move magnitude.
# Parameters
scene_name : The scene to load.
move_mag : The amount of distance the agent moves in a single `MoveAhead` step.
kwargs : additional kwargs, passed to reset.
"""
if self._started:
raise RuntimeError(
"Trying to start the environment but it is already started."
)
self.controller = self.create_controller()
self._started = True
self.reset(scene_name=scene_name, move_mag=move_mag, **kwargs)
def reset(
self, scene_name: Optional[str], move_mag: float = 0.25, **kwargs,
):
self._move_mag = move_mag
self._grid_size = self._move_mag
if scene_name is None:
scene_name = self.controller.last_event.metadata["sceneName"]
        # self.reset_init_params()  # (**kwargs) -- removing this call fixed one of the crashing problems
        # TODO: why does resetting still crash occasionally?
try:
reset_environment_and_additional_commands(self.controller, scene_name)
except Exception as e:
print("RESETTING THE SCENE,", scene_name, "because of", str(e))
self.controller = ai2thor.controller.Controller(**self.env_args)
reset_environment_and_additional_commands(self.controller, scene_name)
if self.object_open_speed != 1.0:
self.controller.step(
{"action": "ChangeOpenSpeed", "x": self.object_open_speed}
)
self._initially_reachable_points = None
self._initially_reachable_points_set = None
self.controller.step({"action": "GetReachablePositions"})
if not self.controller.last_event.metadata["lastActionSuccess"]:
warnings.warn(
"Error when getting reachable points: {}".format(
self.controller.last_event.metadata["errorMessage"]
)
)
self._initially_reachable_points = self.last_action_return
self.list_of_actions_so_far = []
def randomize_agent_location(
self, seed: int = None, partial_position: Optional[Dict[str, float]] = None
) -> Dict:
raise NotImplementedError
def is_object_at_low_level_hand(self, object_id):
current_objects_in_hand = self.controller.last_event.metadata["arm"][
"heldObjects"
]
return object_id in current_objects_in_hand
def object_in_hand(self):
"""Object metadata for the object in the agent's hand."""
inv_objs = self.last_event.metadata["inventoryObjects"]
if len(inv_objs) == 0:
return None
elif len(inv_objs) == 1:
return self.get_object_by_id(
self.last_event.metadata["inventoryObjects"][0]["objectId"]
)
else:
raise AttributeError("Must be <= 1 inventory objects.")
@classmethod
def correct_nan_inf(cls, flawed_dict, extra_tag=""):
corrected_dict = copy.deepcopy(flawed_dict)
for (k, v) in corrected_dict.items():
if math.isnan(v) or math.isinf(v):
corrected_dict[k] = 0
return corrected_dict
def get_object_by_id(self, object_id: str) -> Optional[Dict[str, Any]]:
for o in self.last_event.metadata["objects"]:
if o["objectId"] == object_id:
o["position"] = self.correct_nan_inf(o["position"], "obj id")
return o
return None
def get_current_arm_state(self):
h_min = ARM_MIN_HEIGHT
h_max = ARM_MAX_HEIGHT
agent_base_location = 0.9009995460510254
event = self.controller.last_event
offset = event.metadata["agent"]["position"]["y"] - agent_base_location
h_max += offset
h_min += offset
joints = event.metadata["arm"]["joints"]
arm = joints[-1]
assert arm["name"] == "robot_arm_4_jnt"
xyz_dict = copy.deepcopy(arm["rootRelativePosition"])
height_arm = joints[0]["position"]["y"]
xyz_dict["h"] = (height_arm - h_min) / (h_max - h_min)
xyz_dict = self.correct_nan_inf(xyz_dict, "realtive hand")
return xyz_dict
def get_absolute_hand_state(self):
event = self.controller.last_event
joints = event.metadata["arm"]["joints"]
arm = copy.deepcopy(joints[-1])
assert arm["name"] == "robot_arm_4_jnt"
xyz_dict = arm["position"]
xyz_dict = self.correct_nan_inf(xyz_dict, "absolute hand")
return dict(position=xyz_dict, rotation={"x": 0, "y": 0, "z": 0})
def get_pickupable_objects(self):
event = self.controller.last_event
object_list = event.metadata["arm"]["pickupableObjects"]
return object_list
def get_current_object_locations(self):
obj_loc_dict = {}
metadata = self.controller.last_event.metadata["objects"]
for o in metadata:
obj_loc_dict[o["objectId"]] = dict(
position=o["position"], rotation=o["rotation"], visible=o["visible"],
)
return copy.deepcopy(obj_loc_dict)
def close_enough(self, current_obj_pose, init_obj_pose, threshold):
position_close = [
abs(current_obj_pose["position"][k] - init_obj_pose["position"][k])
<= threshold
for k in ["x", "y", "z"]
]
position_is_close = sum(position_close) == 3
return position_is_close
def get_objects_moved(
self,
previous_object_locations,
current_object_locations,
target_object_id,
thres_dict: Optional[Dict] = None,
):
moved_objects = []
scene_id = self.scene_name.split("_")[0]
for object_id in current_object_locations.keys():
if object_id == target_object_id:
continue
if object_id not in previous_object_locations:
continue
threshold = UNWANTED_MOVE_THR
if thres_dict is not None:
threshold = max(threshold, thres_dict[scene_id + "-" + object_id])
if not self.close_enough(
current_object_locations[object_id],
previous_object_locations[object_id],
threshold=threshold,
):
moved_objects.append(object_id)
return moved_objects
def get_objects_move_distance(
self,
initial_object_locations,
previous_object_locations,
current_object_locations,
target_object_id,
only_visible: bool = False,
thres_dict: Optional[Dict] = None,
):
moved_objects_position_distance = {}
scene_id = self.scene_name.split("_")[0]
for object_id in current_object_locations.keys():
if object_id == target_object_id:
continue
if object_id not in previous_object_locations:
continue
if only_visible:
# current is visible
if not current_object_locations[object_id]["visible"]:
continue
p_initial2current = position_distance(
current_object_locations[object_id],
initial_object_locations[object_id],
filter_nan=True,
)
p_initial2previous = position_distance(
previous_object_locations[object_id],
initial_object_locations[object_id],
filter_nan=True,
)
threshold = 0.0
if thres_dict is not None:
threshold = max(threshold, thres_dict[scene_id + "-" + object_id])
p_initial2current = max(0.0, p_initial2current - threshold)
p_initial2previous = max(0.0, p_initial2previous - threshold)
moved_objects_position_distance[object_id] = (
p_initial2current - p_initial2previous
)
return sum(moved_objects_position_distance.values())
def step(
self, action_dict: Dict[str, Union[str, int, float]]
) -> ai2thor.server.Event:
"""Take a step in the ai2thor environment."""
action = cast(str, action_dict["action"])
skip_render = "renderImage" in action_dict and not action_dict["renderImage"]
last_frame: Optional[np.ndarray] = None
if skip_render:
last_frame = self.current_frame
if self.simplify_physics:
action_dict["simplifyPhysics"] = True
if action in [PICKUP, DONE]:
if action == PICKUP:
object_id = action_dict["object_id"]
if not self.is_object_at_low_level_hand(object_id):
pickupable_objects = self.get_pickupable_objects()
                    if object_id in pickupable_objects:
                        # This version of the task is harder: failed pickup attempts are penalized.
                        self.step(dict(action="PickupObject"))
                        # Additional check: if an unintended object was grabbed, release it,
                        # so a wrong pickup is undone at the cost of an extra pass.
object_inventory = self.controller.last_event.metadata["arm"][
"heldObjects"
]
if (
len(object_inventory) > 0
and object_id not in object_inventory
):
self.step(dict(action="ReleaseObject"))
action_dict = {"action": "Pass"}
elif action in [MOVE_AHEAD, ROTATE_LEFT, ROTATE_RIGHT]:
copy_additions = copy.deepcopy(ADDITIONAL_ARM_ARGS)
action_dict = {**action_dict, **copy_additions}
if action in [MOVE_AHEAD]:
action_dict["action"] = "MoveAgent"
action_dict["ahead"] = 0.2
elif action in [ROTATE_RIGHT]:
action_dict["action"] = "RotateAgent"
action_dict["degrees"] = 45
elif action in [ROTATE_LEFT]:
action_dict["action"] = "RotateAgent"
action_dict["degrees"] = -45
elif "MoveArm" in action:
copy_additions = copy.deepcopy(ADDITIONAL_ARM_ARGS)
action_dict = {**action_dict, **copy_additions}
base_position = self.get_current_arm_state()
if "MoveArmHeight" in action:
action_dict["action"] = "MoveArmBase"
if action == "MoveArmHeightP":
base_position["h"] += MOVE_ARM_HEIGHT_CONSTANT
if action == "MoveArmHeightM":
base_position[
"h"
] -= MOVE_ARM_HEIGHT_CONSTANT # height is pretty big!
action_dict["y"] = base_position["h"]
else:
action_dict["action"] = "MoveArm"
if action == "MoveArmXP":
base_position["x"] += MOVE_ARM_CONSTANT
elif action == "MoveArmXM":
base_position["x"] -= MOVE_ARM_CONSTANT
elif action == "MoveArmYP":
base_position["y"] += MOVE_ARM_CONSTANT
elif action == "MoveArmYM":
base_position["y"] -= MOVE_ARM_CONSTANT
elif action == "MoveArmZP":
base_position["z"] += MOVE_ARM_CONSTANT
elif action == "MoveArmZM":
base_position["z"] -= MOVE_ARM_CONSTANT
action_dict["position"] = {
k: v for (k, v) in base_position.items() if k in ["x", "y", "z"]
}
elif "RotateArm" in action:
copy_additions = copy.deepcopy(ADDITIONAL_ARM_ARGS)
action_dict = {**action_dict, **copy_additions}
if action == ROTATE_WRIST_PITCH_P:
action_dict["action"] = "RotateWristRelative"
action_dict["pitch"] = 15
elif action == ROTATE_WRIST_PITCH_M:
action_dict["action"] = "RotateWristRelative"
action_dict["pitch"] = -15
elif action == ROTATE_WRIST_YAW_P:
action_dict["action"] = "RotateWristRelative"
action_dict["yaw"] = 15
elif action == ROTATE_WRIST_YAW_M:
action_dict["action"] = "RotateWristRelative"
action_dict["yaw"] = -15
elif action == ROTATE_ELBOW_P:
action_dict["action"] = "RotateElbowRelative"
action_dict["degrees"] = 15
elif action == ROTATE_ELBOW_M:
action_dict["action"] = "RotateElbowRelative"
action_dict["degrees"] = -15
else:
raise ValueError("invalid action " + str(action))
elif action in [LOOK_UP, LOOK_DOWN]:
copy_additions = copy.deepcopy(ADDITIONAL_ARM_ARGS)
action_dict = {**action_dict, **copy_additions}
if action == LOOK_UP:
action_dict["action"] = LOOK_UP
elif action == LOOK_DOWN:
action_dict["action"] = LOOK_DOWN
        # other actions also exist, e.g. "PlaceObjectAtPoint"
sr = self.controller.step(action_dict)
self.list_of_actions_so_far.append(action_dict)
if self._verbose:
print(self.controller.last_event)
if self.restrict_to_initially_reachable_points:
self._snap_agent_to_initially_reachable()
if skip_render:
assert last_frame is not None
self.last_event.frame = last_frame
return sr
| allenact-main | allenact_plugins/manipulathor_plugin/manipulathor_environment.py |
from typing import Optional
import gym
import numpy as np
class GymEnvironment(gym.Wrapper):
"""gym.Wrapper with minimal bookkeeping (initial observation)."""
def __init__(self, gym_env_name: str):
super().__init__(gym.make(gym_env_name))
self._initial_observation: Optional[np.ndarray] = None
self.reset() # generate initial observation
def reset(self) -> np.ndarray:
self._initial_observation = self.env.reset()
return self._initial_observation
@property
def initial_observation(self) -> np.ndarray:
assert (
self._initial_observation is not None
), "Attempted to read initial_observation without calling reset()"
res = self._initial_observation
self._initial_observation = None
return res
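# A minimal sketch of the single-read semantics above (not part of the original
# module); assumes a basic gym environment such as CartPole-v0 is registered.
if __name__ == "__main__":
    env = GymEnvironment("CartPole-v0")
    first = env.initial_observation  # the observation produced by reset()
    try:
        env.initial_observation  # a second read without reset() must fail
        raise RuntimeError("expected an AssertionError")
    except AssertionError:
        pass
    env.reset()
    assert env.initial_observation is not None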
| allenact-main | allenact_plugins/gym_plugin/gym_environment.py |
allenact-main | allenact_plugins/gym_plugin/__init__.py |
|
from typing import Optional, Any
import gym
import numpy as np
from allenact.base_abstractions.sensor import Sensor, prepare_locals_for_super
from allenact.base_abstractions.task import Task, SubTaskType
from allenact_plugins.gym_plugin.gym_environment import GymEnvironment
class GymBox2DSensor(Sensor[gym.Env, Task[gym.Env]]):
"""Wrapper for gym Box2D tasks' observations."""
def __init__(
self,
gym_env_name: str = "LunarLanderContinuous-v2",
uuid: str = "gym_box2d_sensor",
**kwargs: Any
):
self.gym_env_name = gym_env_name
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self) -> gym.Space:
if self.gym_env_name in ["LunarLanderContinuous-v2", "LunarLander-v2"]:
return gym.spaces.Box(-np.inf, np.inf, shape=(8,), dtype=np.float32)
elif self.gym_env_name in ["BipedalWalker-v2", "BipedalWalkerHardcore-v2"]:
high = np.array([np.inf] * 24)
return gym.spaces.Box(-high, high, dtype=np.float32)
elif self.gym_env_name == "CarRacing-v0":
state_w, state_h = 96, 96
return gym.spaces.Box(
low=0, high=255, shape=(state_h, state_w, 3), dtype=np.uint8
)
raise NotImplementedError()
def get_observation(
self,
env: GymEnvironment,
task: Optional[SubTaskType],
*args,
gym_obs: Optional[np.ndarray] = None,
**kwargs: Any
) -> np.ndarray:
if gym_obs is not None:
return gym_obs
else:
return env.initial_observation
class GymMuJoCoSensor(Sensor[gym.Env, Task[gym.Env]]):
"""Wrapper for gym MuJoCo and Robotics tasks observations."""
def __init__(self, gym_env_name: str, uuid: str, **kwargs: Any):
self.gym_env_name = gym_env_name
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self) -> gym.Space:
        # observation space for gym MuJoCo
if self.gym_env_name == "InvertedPendulum-v2":
return gym.spaces.Box(-np.inf, np.inf, shape=(4,), dtype="float32")
elif self.gym_env_name == "Ant-v2":
return gym.spaces.Box(-np.inf, np.inf, shape=(111,), dtype="float32")
elif self.gym_env_name in ["Reacher-v2", "Hopper-v2"]:
return gym.spaces.Box(-np.inf, np.inf, shape=(11,), dtype="float32")
elif self.gym_env_name == "InvertedDoublePendulum-v2":
return gym.spaces.Box(-np.inf, np.inf, (11,), "float32")
elif self.gym_env_name in ["HumanoidStandup-v2", "Humanoid-v2"]:
return gym.spaces.Box(-np.inf, np.inf, (376,), "float32")
elif self.gym_env_name in ["HalfCheetah-v2", "Walker2d-v2"]:
return gym.spaces.Box(-np.inf, np.inf, (17,), "float32")
elif self.gym_env_name == "Swimmer-v2":
return gym.spaces.Box(-np.inf, np.inf, (8,), "float32")
# TODO observation space for gym Robotics
elif self.gym_env_name == "HandManipulateBlock-v0":
return gym.spaces.Dict(
dict(
desired_goal=gym.spaces.Box(
-np.inf, np.inf, shape=(7,), dtype="float32"
),
achieved_goal=gym.spaces.Box(
-np.inf, np.inf, shape=(7,), dtype="float32"
),
observation=gym.spaces.Box(
-np.inf, np.inf, shape=(61,), dtype="float32"
),
)
)
else:
raise NotImplementedError
def get_observation(
self,
env: GymEnvironment,
task: Optional[SubTaskType],
*args,
gym_obs: Optional[np.ndarray] = None,
**kwargs: Any
) -> np.ndarray:
if gym_obs is not None:
return np.array(gym_obs, dtype=np.float32) # coerce to be float32
else:
return np.array(env.initial_observation, dtype=np.float32)
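# A brief sketch (not part of the original module): sensors can be constructed
# without a live environment since their observation spaces depend only on the
# environment name.
if __name__ == "__main__":
    box2d_sensor = GymBox2DSensor("LunarLanderContinuous-v2")
    assert box2d_sensor.observation_space.shape == (8,)
    mujoco_sensor = GymMuJoCoSensor(gym_env_name="Walker2d-v2", uuid="gym_mujoco_data")
    assert mujoco_sensor.observation_space.shape == (17,)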
| allenact-main | allenact_plugins/gym_plugin/gym_sensors.py |
import torch
from allenact.base_abstractions.distributions import Distr
class GaussianDistr(torch.distributions.Normal, Distr):
"""PyTorch's Normal distribution with a `mode` method."""
def mode(self) -> torch.FloatTensor:
return super().mean
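# A short illustration (not part of the original module): `mode` returns the
# distribution mean (useful for deterministic action selection), while `sample`
# draws stochastic actions.
if __name__ == "__main__":
    dist = GaussianDistr(loc=torch.zeros(1, 1, 2), scale=0.5 * torch.ones(1, 1, 2))
    assert torch.allclose(dist.mode(), torch.zeros(1, 1, 2))
    action = dist.sample()  # stochastic draw
    assert action.shape == (1, 1, 2)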
| allenact-main | allenact_plugins/gym_plugin/gym_distributions.py |
from typing import Dict, Union, Optional, Tuple, Any, Sequence, cast
import gym
import torch
import torch.nn as nn
from allenact.algorithms.onpolicy_sync.policy import (
ActorCriticModel,
DistributionType,
)
from allenact.base_abstractions.misc import ActorCriticOutput, Memory
from allenact_plugins.gym_plugin.gym_distributions import GaussianDistr
class MemorylessActorCritic(ActorCriticModel[GaussianDistr]):
"""ActorCriticModel for gym tasks with continuous control in the range [-1,
1]."""
def __init__(
self,
input_uuid: str,
action_space: gym.spaces.Box,
observation_space: gym.spaces.Dict,
action_std: float = 0.5,
mlp_hidden_dims: Sequence[int] = (64, 32),
):
super().__init__(action_space, observation_space)
self.input_uuid = input_uuid
assert len(observation_space[self.input_uuid].shape) == 1
state_dim = observation_space[self.input_uuid].shape[0]
assert len(action_space.shape) == 1
action_dim = action_space.shape[0]
mlp_hidden_dims = (state_dim,) + tuple(mlp_hidden_dims)
# action mean range -1 to 1
self.actor = nn.Sequential(
*self.make_mlp_hidden(nn.Tanh, *mlp_hidden_dims),
nn.Linear(32, action_dim),
nn.Tanh(),
)
# critic
self.critic = nn.Sequential(
*self.make_mlp_hidden(nn.Tanh, *mlp_hidden_dims), nn.Linear(32, 1),
)
# maximum standard deviation
self.register_buffer(
"action_std",
torch.tensor([action_std] * action_dim).view(1, 1, -1),
persistent=False,
)
@staticmethod
def make_mlp_hidden(nl, *dims):
res = []
for it, dim in enumerate(dims[:-1]):
res.append(nn.Linear(dim, dims[it + 1]),)
res.append(nl())
return res
def _recurrent_memory_specification(self):
return None
def forward( # type:ignore
self,
observations: Dict[str, Union[torch.FloatTensor, Dict[str, Any]]],
memory: Memory,
prev_actions: Any,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
means = self.actor(observations[self.input_uuid])
values = self.critic(observations[self.input_uuid])
return (
ActorCriticOutput(
cast(DistributionType, GaussianDistr(loc=means, scale=self.action_std)),
values,
{},
),
None, # no Memory
)
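# A minimal usage sketch (not part of the original module), assuming a 4-dim
# observation and a 2-dim action space. The model is memoryless, so an empty
# `Memory` suffices and the returned memory is None.
if __name__ == "__main__":
    obs_space = gym.spaces.Dict(
        {"gym_mujoco_data": gym.spaces.Box(-1.0, 1.0, shape=(4,))}
    )
    model = MemorylessActorCritic(
        input_uuid="gym_mujoco_data",
        action_space=gym.spaces.Box(-1.0, 1.0, shape=(2,)),
        observation_space=obs_space,
    )
    out, mem = model.forward(
        observations={"gym_mujoco_data": torch.rand(5, 1, 4)},  # steps x samplers x dim
        memory=Memory(),
        prev_actions=None,
        masks=torch.ones(5, 1, 1),
    )
    assert out.distributions.mean.shape == (5, 1, 2) and mem is None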
| allenact-main | allenact_plugins/gym_plugin/gym_models.py |
import random
from typing import Any, List, Dict, Optional, Union, Callable, Sequence, Tuple
import gym
import numpy as np
from gym.utils import seeding
from allenact.base_abstractions.misc import RLStepResult
from allenact.base_abstractions.sensor import Sensor, SensorSuite
from allenact.base_abstractions.task import Task, TaskSampler
from allenact.utils.experiment_utils import set_seed
from allenact.utils.system import get_logger
from allenact_plugins.gym_plugin.gym_environment import GymEnvironment
from allenact_plugins.gym_plugin.gym_sensors import GymBox2DSensor, GymMuJoCoSensor
class GymTask(Task[gym.Env]):
"""Abstract gym task.
Subclasses need to implement `class_action_names` and `_step`.
"""
def __init__(
self,
env: GymEnvironment,
sensors: Union[SensorSuite, List[Sensor]],
task_info: Dict[str, Any],
**kwargs,
):
max_steps = env.spec.max_episode_steps
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self._gym_done = False
self.task_name: str = self.env.spec.id
@property
def action_space(self) -> gym.spaces.Space:
return self.env.action_space
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
if mode == "rgb":
mode = "rgb_array"
return self.env.render(mode=mode)
def get_observations(
self, *args, gym_obs: Optional[Dict[str, Any]] = None, **kwargs
) -> Any:
return self.sensor_suite.get_observations(
env=self.env, task=self, gym_obs=gym_obs
)
def reached_terminal_state(self) -> bool:
return self._gym_done
def close(self) -> None:
pass
def metrics(self) -> Dict[str, Any]:
# noinspection PyUnresolvedReferences,PyCallingNonCallable
env_metrics = self.env.metrics() if hasattr(self.env, "metrics") else {}
return {
**super().metrics(),
**{k: float(v) for k, v in env_metrics.items()},
"success": int(
self.env.was_successful
if hasattr(self.env, "was_successful")
else self.cumulative_reward > 0
),
}
class GymContinuousTask(GymTask):
"""Task for a continuous-control gym Box2D & MuJoCo Env; it allows
interfacing allenact with gym tasks."""
@classmethod
def class_action_names(cls, **kwargs) -> Tuple[str, ...]:
return tuple()
def _step(self, action: Sequence[float]) -> RLStepResult:
action = np.array(action)
gym_obs, reward, self._gym_done, info = self.env.step(action=action)
return RLStepResult(
observation=self.get_observations(gym_obs=gym_obs),
reward=reward,
done=self.is_done(),
info=info,
)
def default_task_selector(env_name: str) -> type:
"""Helper function for `GymTaskSampler`."""
if env_name in [
# Box2d Env
"CarRacing-v0",
"LunarLanderContinuous-v2",
"BipedalWalker-v2",
"BipedalWalkerHardcore-v2",
# MuJoCo Env
"InvertedPendulum-v2",
"Ant-v2",
"InvertedDoublePendulum-v2",
"Humanoid-v2",
"Reacher-v2",
"Hopper-v2",
"HalfCheetah-v2",
"Swimmer-v2",
"Walker2d-v2",
]:
return GymContinuousTask
raise NotImplementedError()
def sensor_selector(env_name: str) -> Sensor:
"""Helper function for `GymTaskSampler`."""
if env_name in [
"CarRacing-v0",
"LunarLanderContinuous-v2",
"BipedalWalker-v2",
"BipedalWalkerHardcore-v2",
"LunarLander-v2",
]:
return GymBox2DSensor(env_name)
elif env_name in [
"InvertedPendulum-v2",
"Ant-v2",
"InvertedDoublePendulum-v2",
"Humanoid-v2",
"Reacher-v2",
"Hopper-v2",
"HalfCheetah-v2",
"Swimmer-v2",
"Walker2d-v2",
]:
return GymMuJoCoSensor(gym_env_name=env_name, uuid="gym_mujoco_data")
raise NotImplementedError()
class GymTaskSampler(TaskSampler):
"""TaskSampler for gym environments."""
def __init__(
self,
gym_env_type: str = "LunarLanderContinuous-v2",
sensors: Optional[Union[SensorSuite, List[Sensor]]] = None,
max_tasks: Optional[int] = None,
num_unique_seeds: Optional[int] = None,
task_seeds_list: Optional[List[int]] = None,
deterministic_sampling: bool = False,
task_selector: Callable[[str], type] = default_task_selector,
repeat_failed_task_for_min_steps: int = 0,
extra_task_kwargs: Optional[Dict] = None,
seed: Optional[int] = None,
**kwargs,
):
super().__init__()
self.gym_env_type = gym_env_type
self.sensors: SensorSuite
if sensors is None:
self.sensors = SensorSuite([sensor_selector(self.gym_env_type)])
else:
self.sensors = (
SensorSuite(sensors)
if not isinstance(sensors, SensorSuite)
else sensors
)
self.max_tasks = max_tasks
self.num_unique_seeds = num_unique_seeds
self.deterministic_sampling = deterministic_sampling
self.repeat_failed_task_for_min_steps = repeat_failed_task_for_min_steps
self.extra_task_kwargs = (
extra_task_kwargs if extra_task_kwargs is not None else {}
)
self._last_env_seed: Optional[int] = None
self._last_task: Optional[GymTask] = None
self._number_of_steps_taken_with_task_seed = 0
assert (not deterministic_sampling) or repeat_failed_task_for_min_steps <= 0, (
"If `deterministic_sampling` is True then we require"
" `repeat_failed_task_for_min_steps <= 0`"
)
assert (self.num_unique_seeds is None) or (
0 < self.num_unique_seeds
), "`num_unique_seeds` must be a positive integer."
self.num_unique_seeds = num_unique_seeds
self.task_seeds_list = task_seeds_list
if self.task_seeds_list is not None:
if self.num_unique_seeds is not None:
assert self.num_unique_seeds == len(
self.task_seeds_list
), "`num_unique_seeds` must equal the length of `task_seeds_list` if both specified."
self.num_unique_seeds = len(self.task_seeds_list)
elif self.num_unique_seeds is not None:
self.task_seeds_list = list(range(self.num_unique_seeds))
if num_unique_seeds is not None and repeat_failed_task_for_min_steps > 0:
raise NotImplementedError(
"`repeat_failed_task_for_min_steps` must be <=0 if number"
" of unique seeds is not None."
)
assert (not deterministic_sampling) or (
self.num_unique_seeds is not None
), "Cannot use deterministic sampling when `num_unique_seeds` is `None`."
if (not deterministic_sampling) and self.max_tasks:
get_logger().warning(
"`deterministic_sampling` is `False` but you have specified `max_tasks < inf`,"
" this might be a mistake when running testing."
)
if seed is not None:
self.set_seed(seed)
else:
self.np_seeded_random_gen, _ = seeding.np_random(
random.randint(0, 2 ** 31 - 1)
)
self.num_tasks_generated = 0
self.task_type = task_selector(self.gym_env_type)
self.env: GymEnvironment = GymEnvironment(self.gym_env_type)
@property
def length(self) -> Union[int, float]:
return (
float("inf")
if self.max_tasks is None
else self.max_tasks - self.num_tasks_generated
)
@property
def total_unique(self) -> Optional[Union[int, float]]:
return None if self.num_unique_seeds is None else self.num_unique_seeds
@property
def last_sampled_task(self) -> Optional[Task]:
raise NotImplementedError
def next_task(self, force_advance_scene: bool = False) -> Optional[GymTask]:
if self.length <= 0:
return None
repeating = False
if self.num_unique_seeds is not None:
if self.deterministic_sampling:
self._last_env_seed = self.task_seeds_list[
self.num_tasks_generated % len(self.task_seeds_list)
]
else:
self._last_env_seed = self.np_seeded_random_gen.choice(
self.task_seeds_list
)
else:
if self._last_task is not None:
self._number_of_steps_taken_with_task_seed += (
self._last_task.num_steps_taken()
)
if (
self._last_env_seed is not None
and self._number_of_steps_taken_with_task_seed
< self.repeat_failed_task_for_min_steps
and self._last_task.cumulative_reward == 0
):
repeating = True
else:
self._number_of_steps_taken_with_task_seed = 0
self._last_env_seed = self.np_seeded_random_gen.randint(0, 2 ** 31 - 1)
task_has_same_seed_reset = hasattr(self.env, "same_seed_reset")
if repeating and task_has_same_seed_reset:
# noinspection PyUnresolvedReferences
self.env.same_seed_reset()
else:
self.env.seed(self._last_env_seed)
self.env.saved_seed = self._last_env_seed
self.env.reset()
self.num_tasks_generated += 1
task_info = {"id": "random%d" % random.randint(0, 2 ** 63 - 1)}
self._last_task = self.task_type(
**dict(env=self.env, sensors=self.sensors, task_info=task_info),
**self.extra_task_kwargs,
)
return self._last_task
def close(self) -> None:
self.env.close()
@property
def all_observation_spaces_equal(self) -> bool:
return True
def reset(self) -> None:
self.num_tasks_generated = 0
self.env.reset()
def set_seed(self, seed: int) -> None:
self.np_seeded_random_gen, _ = seeding.np_random(seed)
if seed is not None:
set_seed(seed)
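# A minimal end-to-end sketch (not part of the original module), assuming gym's
# Box2D extras are installed: deterministically sample one task and advance it
# by a single random action.
if __name__ == "__main__":
    sampler = GymTaskSampler(
        gym_env_type="LunarLanderContinuous-v2",
        max_tasks=1,
        num_unique_seeds=1,
        deterministic_sampling=True,
        seed=1,
    )
    task = sampler.next_task()
    step_result = task.step(task.action_space.sample())
    print("reward after one step:", step_result.reward)
    sampler.close()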
| allenact-main | allenact_plugins/gym_plugin/gym_tasks.py |
allenact-main | allenact_plugins/navigation_plugin/__init__.py |
|
"""Baseline models for use in the point navigation task.
Point navigation is currently available as a Task in AI2-THOR and
Facebook's Habitat.
"""
from typing import Optional, List, Union, Sequence
import gym
import torch
import torch.nn as nn
from gym.spaces import Dict as SpaceDict
from allenact.algorithms.onpolicy_sync.policy import ObservationType
from allenact.embodiedai.models import resnet as resnet
from allenact.embodiedai.models.basic_models import SimpleCNN
from allenact.embodiedai.models.visual_nav_models import (
VisualNavActorCritic,
FusionType,
)
class PointNavActorCritic(VisualNavActorCritic):
"""Use raw image as observation to the agent."""
def __init__(
# base params
self,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
goal_sensor_uuid: str,
hidden_size=512,
num_rnn_layers=1,
rnn_type="GRU",
add_prev_actions=False,
add_prev_action_null_token=False,
action_embed_size=4,
multiple_beliefs=False,
beliefs_fusion: Optional[FusionType] = None,
auxiliary_uuids: Optional[Sequence[str]] = None,
# custom params
rgb_uuid: Optional[str] = None,
depth_uuid: Optional[str] = None,
embed_coordinates=False,
coordinate_embedding_dim=8,
coordinate_dims=2,
# perception backbone params,
backbone="gnresnet18",
resnet_baseplanes=32,
):
super().__init__(
action_space=action_space,
observation_space=observation_space,
hidden_size=hidden_size,
multiple_beliefs=multiple_beliefs,
beliefs_fusion=beliefs_fusion,
auxiliary_uuids=auxiliary_uuids,
)
self.goal_sensor_uuid = goal_sensor_uuid
self.embed_coordinates = embed_coordinates
if self.embed_coordinates:
self.coordinate_embedding_size = coordinate_embedding_dim
else:
self.coordinate_embedding_size = coordinate_dims
self.sensor_fusion = False
if rgb_uuid is not None and depth_uuid is not None:
self.sensor_fuser = nn.Linear(hidden_size * 2, hidden_size)
self.sensor_fusion = True
self.backbone = backbone
if backbone == "simple_cnn":
self.visual_encoder = SimpleCNN(
observation_space=observation_space,
output_size=hidden_size,
rgb_uuid=rgb_uuid,
depth_uuid=depth_uuid,
)
else: # resnet family
self.visual_encoder = resnet.GroupNormResNetEncoder(
observation_space=observation_space,
output_size=hidden_size,
rgb_uuid=rgb_uuid,
depth_uuid=depth_uuid,
baseplanes=resnet_baseplanes,
ngroups=resnet_baseplanes // 2,
make_backbone=getattr(resnet, backbone),
)
if self.embed_coordinates:
self.coordinate_embedding = nn.Linear(
coordinate_dims, coordinate_embedding_dim
)
self.create_state_encoders(
obs_embed_size=self.goal_visual_encoder_output_dims,
num_rnn_layers=num_rnn_layers,
rnn_type=rnn_type,
add_prev_actions=add_prev_actions,
add_prev_action_null_token=add_prev_action_null_token,
prev_action_embed_size=action_embed_size,
)
self.create_actorcritic_head()
self.create_aux_models(
obs_embed_size=self.goal_visual_encoder_output_dims,
action_embed_size=action_embed_size,
)
self.train()
@property
def is_blind(self):
return self.visual_encoder.is_blind
@property
def goal_visual_encoder_output_dims(self):
dims = self.coordinate_embedding_size
if self.is_blind:
return dims
return dims + self.recurrent_hidden_state_size
def get_target_coordinates_encoding(self, observations):
if self.embed_coordinates:
return self.coordinate_embedding(
observations[self.goal_sensor_uuid].to(torch.float32)
)
else:
return observations[self.goal_sensor_uuid].to(torch.float32)
def forward_encoder(self, observations: ObservationType) -> torch.FloatTensor:
target_encoding = self.get_target_coordinates_encoding(observations)
obs_embeds: Union[torch.Tensor, List[torch.Tensor]]
obs_embeds = [target_encoding]
if not self.is_blind:
perception_embed = self.visual_encoder(observations)
if self.sensor_fusion:
perception_embed = self.sensor_fuser(perception_embed)
obs_embeds = [perception_embed] + obs_embeds
obs_embeds = torch.cat(obs_embeds, dim=-1)
return obs_embeds
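# A minimal instantiation sketch (not part of the original module): a blind
# point-nav agent that only consumes goal coordinates. The sensor uuid
# "target_coordinates_ind" is an illustrative stand-in for whatever GPS/compass
# sensor an experiment config registers; no visual inputs are configured.
if __name__ == "__main__":
    model = PointNavActorCritic(
        action_space=gym.spaces.Discrete(4),
        observation_space=SpaceDict(
            {"target_coordinates_ind": gym.spaces.Box(-10.0, 10.0, shape=(2,))}
        ),
        goal_sensor_uuid="target_coordinates_ind",
        backbone="simple_cnn",
        rgb_uuid=None,
        depth_uuid=None,
    )
    print(sum(p.numel() for p in model.parameters()), "parameters")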
| allenact-main | allenact_plugins/navigation_plugin/pointnav/models.py |
allenact-main | allenact_plugins/navigation_plugin/pointnav/__init__.py |
|
"""Baseline models for use in the object navigation task.
Object navigation is currently available as a Task in AI2-THOR and
Facebook's Habitat.
"""
from typing import Optional, List, Dict, cast, Tuple, Sequence
import gym
import torch
import torch.nn as nn
from gym.spaces import Dict as SpaceDict
from allenact.algorithms.onpolicy_sync.policy import ObservationType
from allenact.embodiedai.models import resnet as resnet
from allenact.embodiedai.models.basic_models import SimpleCNN
from allenact.embodiedai.models.visual_nav_models import (
VisualNavActorCritic,
FusionType,
)
class CatObservations(nn.Module):
def __init__(self, ordered_uuids: Sequence[str], dim: int):
super().__init__()
assert len(ordered_uuids) != 0
self.ordered_uuids = ordered_uuids
self.dim = dim
def forward(self, observations: ObservationType):
if len(self.ordered_uuids) == 1:
return observations[self.ordered_uuids[0]]
return torch.cat(
[observations[uuid] for uuid in self.ordered_uuids], dim=self.dim
)
class ObjectNavActorCritic(VisualNavActorCritic):
"""Baseline recurrent actor critic model for object-navigation.
# Attributes
action_space : The space of actions available to the agent. Currently only discrete
actions are allowed (so this space will always be of type `gym.spaces.Discrete`).
observation_space : The observation space expected by the agent. This observation space
should include (optionally) 'rgb' images and 'depth' images and is required to
have a component corresponding to the goal `goal_sensor_uuid`.
goal_sensor_uuid : The uuid of the sensor of the goal object. See `GoalObjectTypeThorSensor`
as an example of such a sensor.
hidden_size : The hidden size of the GRU RNN.
object_type_embedding_dim: The dimensionality of the embedding corresponding to the goal
object type.
"""
def __init__(
self,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
goal_sensor_uuid: str,
# RNN
hidden_size=512,
num_rnn_layers=1,
rnn_type="GRU",
add_prev_actions=False,
add_prev_action_null_token=False,
action_embed_size=6,
# Aux loss
multiple_beliefs=False,
beliefs_fusion: Optional[FusionType] = None,
auxiliary_uuids: Optional[Sequence[str]] = None,
# below are custom params
rgb_uuid: Optional[str] = None,
depth_uuid: Optional[str] = None,
object_type_embedding_dim=8,
trainable_masked_hidden_state: bool = False,
# perception backbone params,
backbone="gnresnet18",
resnet_baseplanes=32,
):
"""Initializer.
See class documentation for parameter definitions.
"""
super().__init__(
action_space=action_space,
observation_space=observation_space,
hidden_size=hidden_size,
multiple_beliefs=multiple_beliefs,
beliefs_fusion=beliefs_fusion,
auxiliary_uuids=auxiliary_uuids,
)
self.rgb_uuid = rgb_uuid
self.depth_uuid = depth_uuid
self.goal_sensor_uuid = goal_sensor_uuid
self._n_object_types = self.observation_space.spaces[self.goal_sensor_uuid].n
self.object_type_embedding_size = object_type_embedding_dim
self.backbone = backbone
if backbone == "simple_cnn":
self.visual_encoder = SimpleCNN(
observation_space=observation_space,
output_size=hidden_size,
rgb_uuid=rgb_uuid,
depth_uuid=depth_uuid,
)
self.visual_encoder_output_size = hidden_size
assert self.is_blind == self.visual_encoder.is_blind
elif backbone == "gnresnet18": # resnet family
self.visual_encoder = resnet.GroupNormResNetEncoder(
observation_space=observation_space,
output_size=hidden_size,
rgb_uuid=rgb_uuid,
depth_uuid=depth_uuid,
baseplanes=resnet_baseplanes,
ngroups=resnet_baseplanes // 2,
make_backbone=getattr(resnet, backbone),
)
self.visual_encoder_output_size = hidden_size
assert self.is_blind == self.visual_encoder.is_blind
elif backbone in ["identity", "projection"]:
good_uuids = [
uuid for uuid in [self.rgb_uuid, self.depth_uuid] if uuid is not None
]
cat_model = CatObservations(ordered_uuids=good_uuids, dim=-1,)
after_cat_size = sum(
observation_space[uuid].shape[-1] for uuid in good_uuids
)
if backbone == "identity":
self.visual_encoder = cat_model
self.visual_encoder_output_size = after_cat_size
else:
self.visual_encoder = nn.Sequential(
cat_model, nn.Linear(after_cat_size, hidden_size), nn.ReLU(True)
)
self.visual_encoder_output_size = hidden_size
else:
raise NotImplementedError
self.create_state_encoders(
obs_embed_size=self.goal_visual_encoder_output_dims,
num_rnn_layers=num_rnn_layers,
rnn_type=rnn_type,
add_prev_actions=add_prev_actions,
add_prev_action_null_token=add_prev_action_null_token,
prev_action_embed_size=action_embed_size,
trainable_masked_hidden_state=trainable_masked_hidden_state,
)
self.create_actorcritic_head()
self.create_aux_models(
obs_embed_size=self.goal_visual_encoder_output_dims,
action_embed_size=action_embed_size,
)
self.object_type_embedding = nn.Embedding(
num_embeddings=self._n_object_types,
embedding_dim=object_type_embedding_dim,
)
self.train()
@property
def is_blind(self) -> bool:
"""True if the model is blind (e.g. neither 'depth' or 'rgb' is an
input observation type)."""
return self.rgb_uuid is None and self.depth_uuid is None
@property
def goal_visual_encoder_output_dims(self):
dims = self.object_type_embedding_size
if self.is_blind:
return dims
return dims + self.visual_encoder_output_size
def get_object_type_encoding(
self, observations: Dict[str, torch.Tensor]
) -> torch.Tensor:
"""Get the object type encoding from input batched observations."""
# noinspection PyTypeChecker
return self.object_type_embedding( # type:ignore
observations[self.goal_sensor_uuid].to(torch.int64)
)
def forward_encoder(self, observations: ObservationType) -> torch.Tensor:
target_encoding = self.get_object_type_encoding(
cast(Dict[str, torch.Tensor], observations)
)
obs_embeds = [target_encoding]
if not self.is_blind:
perception_embed = self.visual_encoder(observations)
obs_embeds = [perception_embed] + obs_embeds
obs_embeds = torch.cat(obs_embeds, dim=-1)
return obs_embeds
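# A minimal construction sketch for `ObjectNavActorCritic`. The uuids
# ("rgb", "goal_object_type_ind"), the image size, and the 6-action space are
# illustrative assumptions, not values fixed by this file:
#
#     model = ObjectNavActorCritic(
#         action_space=gym.spaces.Discrete(6),
#         observation_space=SpaceDict(
#             {
#                 "rgb": gym.spaces.Box(low=0.0, high=1.0, shape=(224, 224, 3)),
#                 "goal_object_type_ind": gym.spaces.Discrete(38),
#             }
#         ),
#         goal_sensor_uuid="goal_object_type_ind",
#         rgb_uuid="rgb",
#         backbone="gnresnet18",
#     )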
class ResnetTensorNavActorCritic(VisualNavActorCritic):
def __init__(
# base params
self,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
goal_sensor_uuid: str,
hidden_size=512,
num_rnn_layers=1,
rnn_type="GRU",
add_prev_actions=False,
add_prev_action_null_token=False,
action_embed_size=6,
multiple_beliefs=False,
beliefs_fusion: Optional[FusionType] = None,
auxiliary_uuids: Optional[List[str]] = None,
# custom params
rgb_resnet_preprocessor_uuid: Optional[str] = None,
depth_resnet_preprocessor_uuid: Optional[str] = None,
goal_dims: int = 32,
resnet_compressor_hidden_out_dims: Tuple[int, int] = (128, 32),
combiner_hidden_out_dims: Tuple[int, int] = (128, 32),
**kwargs,
):
super().__init__(
action_space=action_space,
observation_space=observation_space,
hidden_size=hidden_size,
multiple_beliefs=multiple_beliefs,
beliefs_fusion=beliefs_fusion,
auxiliary_uuids=auxiliary_uuids,
**kwargs,
)
if (
rgb_resnet_preprocessor_uuid is None
or depth_resnet_preprocessor_uuid is None
):
resnet_preprocessor_uuid = (
rgb_resnet_preprocessor_uuid
if rgb_resnet_preprocessor_uuid is not None
else depth_resnet_preprocessor_uuid
)
self.goal_visual_encoder = ResnetTensorGoalEncoder(
self.observation_space,
goal_sensor_uuid,
resnet_preprocessor_uuid,
goal_dims,
resnet_compressor_hidden_out_dims,
combiner_hidden_out_dims,
)
else:
self.goal_visual_encoder = ResnetDualTensorGoalEncoder( # type:ignore
self.observation_space,
goal_sensor_uuid,
rgb_resnet_preprocessor_uuid,
depth_resnet_preprocessor_uuid,
goal_dims,
resnet_compressor_hidden_out_dims,
combiner_hidden_out_dims,
)
self.create_state_encoders(
obs_embed_size=self.goal_visual_encoder.output_dims,
num_rnn_layers=num_rnn_layers,
rnn_type=rnn_type,
add_prev_actions=add_prev_actions,
add_prev_action_null_token=add_prev_action_null_token,
prev_action_embed_size=action_embed_size,
)
self.create_actorcritic_head()
self.create_aux_models(
obs_embed_size=self.goal_visual_encoder.output_dims,
action_embed_size=action_embed_size,
)
self.train()
@property
def is_blind(self) -> bool:
"""True if the model is blind (e.g. neither 'depth' or 'rgb' is an
input observation type)."""
return self.goal_visual_encoder.is_blind
def forward_encoder(self, observations: ObservationType) -> torch.FloatTensor:
return self.goal_visual_encoder(observations)
class ResnetTensorGoalEncoder(nn.Module):
def __init__(
self,
observation_spaces: SpaceDict,
goal_sensor_uuid: str,
resnet_preprocessor_uuid: str,
goal_embed_dims: int = 32,
resnet_compressor_hidden_out_dims: Tuple[int, int] = (128, 32),
combiner_hidden_out_dims: Tuple[int, int] = (128, 32),
) -> None:
super().__init__()
self.goal_uuid = goal_sensor_uuid
self.resnet_uuid = resnet_preprocessor_uuid
self.goal_embed_dims = goal_embed_dims
self.resnet_hid_out_dims = resnet_compressor_hidden_out_dims
self.combine_hid_out_dims = combiner_hidden_out_dims
self.goal_space = observation_spaces.spaces[self.goal_uuid]
if isinstance(self.goal_space, gym.spaces.Discrete):
self.embed_goal = nn.Embedding(
num_embeddings=self.goal_space.n, embedding_dim=self.goal_embed_dims,
)
elif isinstance(self.goal_space, gym.spaces.Box):
self.embed_goal = nn.Linear(self.goal_space.shape[-1], self.goal_embed_dims)
else:
raise NotImplementedError
self.blind = self.resnet_uuid not in observation_spaces.spaces
if not self.blind:
self.resnet_tensor_shape = observation_spaces.spaces[self.resnet_uuid].shape
self.resnet_compressor = nn.Sequential(
nn.Conv2d(self.resnet_tensor_shape[0], self.resnet_hid_out_dims[0], 1),
nn.ReLU(),
nn.Conv2d(*self.resnet_hid_out_dims[0:2], 1),
nn.ReLU(),
)
self.target_obs_combiner = nn.Sequential(
nn.Conv2d(
self.resnet_hid_out_dims[1] + self.goal_embed_dims,
self.combine_hid_out_dims[0],
1,
),
nn.ReLU(),
nn.Conv2d(*self.combine_hid_out_dims[0:2], 1),
)
@property
def is_blind(self):
return self.blind
@property
def output_dims(self):
if self.blind:
return self.goal_embed_dims
else:
return (
self.combine_hid_out_dims[-1]
* self.resnet_tensor_shape[1]
* self.resnet_tensor_shape[2]
)
def get_object_type_encoding(
self, observations: Dict[str, torch.FloatTensor]
) -> torch.FloatTensor:
"""Get the object type encoding from input batched observations."""
return cast(
torch.FloatTensor,
self.embed_goal(observations[self.goal_uuid].to(torch.int64)),
)
def compress_resnet(self, observations):
return self.resnet_compressor(observations[self.resnet_uuid])
def distribute_target(self, observations):
target_emb = self.embed_goal(observations[self.goal_uuid])
return target_emb.view(-1, self.goal_embed_dims, 1, 1).expand(
-1, -1, self.resnet_tensor_shape[-2], self.resnet_tensor_shape[-1]
)
def adapt_input(self, observations):
observations = {**observations}
resnet = observations[self.resnet_uuid]
goal = observations[self.goal_uuid]
use_agent = False
nagent = 1
if len(resnet.shape) == 6:
use_agent = True
nstep, nsampler, nagent = resnet.shape[:3]
else:
nstep, nsampler = resnet.shape[:2]
observations[self.resnet_uuid] = resnet.view(-1, *resnet.shape[-3:])
observations[self.goal_uuid] = goal.view(-1, goal.shape[-1])
return observations, use_agent, nstep, nsampler, nagent
@staticmethod
def adapt_output(x, use_agent, nstep, nsampler, nagent):
if use_agent:
return x.view(nstep, nsampler, nagent, -1)
return x.view(nstep, nsampler * nagent, -1)
def forward(self, observations):
observations, use_agent, nstep, nsampler, nagent = self.adapt_input(
observations
)
if self.blind:
return self.embed_goal(observations[self.goal_uuid])
embs = [
self.compress_resnet(observations),
self.distribute_target(observations),
]
x = self.target_obs_combiner(torch.cat(embs, dim=1,))
x = x.reshape(x.size(0), -1) # flatten
return self.adapt_output(x, use_agent, nstep, nsampler, nagent)
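# Shape walkthrough for `ResnetTensorGoalEncoder.forward`, assuming an
# illustrative ResNet tensor of shape [nstep, nsampler, 2048, 7, 7] and the
# default hyperparameters: `adapt_input` flattens the leading step/sampler
# dims; the compressor maps 2048 -> 128 -> 32 channels with 1x1 convolutions;
# `distribute_target` tiles the goal embedding to [*, 32, 7, 7]; the combiner
# maps the concatenated 64 channels to 128 -> 32; and `adapt_output` restores
# the result to [nstep, nsampler, 32 * 7 * 7].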
class ResnetDualTensorGoalEncoder(nn.Module):
def __init__(
self,
observation_spaces: SpaceDict,
goal_sensor_uuid: str,
rgb_resnet_preprocessor_uuid: str,
depth_resnet_preprocessor_uuid: str,
goal_embed_dims: int = 32,
resnet_compressor_hidden_out_dims: Tuple[int, int] = (128, 32),
combiner_hidden_out_dims: Tuple[int, int] = (128, 32),
) -> None:
super().__init__()
self.goal_uuid = goal_sensor_uuid
self.rgb_resnet_uuid = rgb_resnet_preprocessor_uuid
self.depth_resnet_uuid = depth_resnet_preprocessor_uuid
self.goal_embed_dims = goal_embed_dims
self.resnet_hid_out_dims = resnet_compressor_hidden_out_dims
self.combine_hid_out_dims = combiner_hidden_out_dims
self.goal_space = observation_spaces.spaces[self.goal_uuid]
if isinstance(self.goal_space, gym.spaces.Discrete):
self.embed_goal = nn.Embedding(
num_embeddings=self.goal_space.n, embedding_dim=self.goal_embed_dims,
)
elif isinstance(self.goal_space, gym.spaces.Box):
self.embed_goal = nn.Linear(self.goal_space.shape[-1], self.goal_embed_dims)
else:
raise NotImplementedError
self.blind = (
self.rgb_resnet_uuid not in observation_spaces.spaces
or self.depth_resnet_uuid not in observation_spaces.spaces
)
if not self.blind:
self.resnet_tensor_shape = observation_spaces.spaces[
self.rgb_resnet_uuid
].shape
self.rgb_resnet_compressor = nn.Sequential(
nn.Conv2d(self.resnet_tensor_shape[0], self.resnet_hid_out_dims[0], 1),
nn.ReLU(),
nn.Conv2d(*self.resnet_hid_out_dims[0:2], 1),
nn.ReLU(),
)
self.depth_resnet_compressor = nn.Sequential(
nn.Conv2d(self.resnet_tensor_shape[0], self.resnet_hid_out_dims[0], 1),
nn.ReLU(),
nn.Conv2d(*self.resnet_hid_out_dims[0:2], 1),
nn.ReLU(),
)
self.rgb_target_obs_combiner = nn.Sequential(
nn.Conv2d(
self.resnet_hid_out_dims[1] + self.goal_embed_dims,
self.combine_hid_out_dims[0],
1,
),
nn.ReLU(),
nn.Conv2d(*self.combine_hid_out_dims[0:2], 1),
)
self.depth_target_obs_combiner = nn.Sequential(
nn.Conv2d(
self.resnet_hid_out_dims[1] + self.goal_embed_dims,
self.combine_hid_out_dims[0],
1,
),
nn.ReLU(),
nn.Conv2d(*self.combine_hid_out_dims[0:2], 1),
)
@property
def is_blind(self):
return self.blind
@property
def output_dims(self):
if self.blind:
return self.goal_embed_dims
else:
return (
2
* self.combine_hid_out_dims[-1]
* self.resnet_tensor_shape[1]
* self.resnet_tensor_shape[2]
)
def get_object_type_encoding(
self, observations: Dict[str, torch.FloatTensor]
) -> torch.FloatTensor:
"""Get the object type encoding from input batched observations."""
return cast(
torch.FloatTensor,
self.embed_goal(observations[self.goal_uuid].to(torch.int64)),
)
def compress_rgb_resnet(self, observations):
return self.rgb_resnet_compressor(observations[self.rgb_resnet_uuid])
def compress_depth_resnet(self, observations):
return self.depth_resnet_compressor(observations[self.depth_resnet_uuid])
def distribute_target(self, observations):
target_emb = self.embed_goal(observations[self.goal_uuid])
return target_emb.view(-1, self.goal_embed_dims, 1, 1).expand(
-1, -1, self.resnet_tensor_shape[-2], self.resnet_tensor_shape[-1]
)
def adapt_input(self, observations):
rgb = observations[self.rgb_resnet_uuid]
depth = observations[self.depth_resnet_uuid]
use_agent = False
nagent = 1
if len(rgb.shape) == 6:
use_agent = True
nstep, nsampler, nagent = rgb.shape[:3]
else:
nstep, nsampler = rgb.shape[:2]
observations[self.rgb_resnet_uuid] = rgb.view(-1, *rgb.shape[-3:])
observations[self.depth_resnet_uuid] = depth.view(-1, *depth.shape[-3:])
observations[self.goal_uuid] = observations[self.goal_uuid].view(-1, 1)
return observations, use_agent, nstep, nsampler, nagent
@staticmethod
def adapt_output(x, use_agent, nstep, nsampler, nagent):
if use_agent:
return x.view(nstep, nsampler, nagent, -1)
return x.view(nstep, nsampler * nagent, -1)
def forward(self, observations):
observations, use_agent, nstep, nsampler, nagent = self.adapt_input(
observations
)
if self.blind:
return self.embed_goal(observations[self.goal_uuid])
rgb_embs = [
self.compress_rgb_resnet(observations),
self.distribute_target(observations),
]
rgb_x = self.rgb_target_obs_combiner(torch.cat(rgb_embs, dim=1,))
depth_embs = [
self.compress_depth_resnet(observations),
self.distribute_target(observations),
]
depth_x = self.depth_target_obs_combiner(torch.cat(depth_embs, dim=1,))
x = torch.cat([rgb_x, depth_x], dim=1)
x = x.reshape(x.shape[0], -1) # flatten
return self.adapt_output(x, use_agent, nstep, nsampler, nagent)
| allenact-main | allenact_plugins/navigation_plugin/objectnav/models.py |
allenact-main | allenact_plugins/navigation_plugin/objectnav/__init__.py |
|
from typing import List, Optional, Any, cast, Dict, Tuple
import clip
import gym
import numpy as np
import torch
import torch.nn as nn
from clip.model import CLIP
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.utils.misc_utils import prepare_locals_for_super
class ClipResNetEmbedder(nn.Module):
def __init__(self, resnet: CLIP, pool=True, pooling_type="avg"):
super().__init__()
self.model = resnet
self.pool = pool
self.pooling_type = pooling_type
if not pool:
self.model.visual.attnpool = nn.Identity()
elif self.pooling_type == "attn":
pass
elif self.pooling_type == "avg":
self.model.visual.attnpool = nn.Sequential(
nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten(start_dim=-3, end_dim=-1)
)
else:
raise NotImplementedError("`pooling_type` must be 'avg' or 'attn'.")
self.eval()
def forward(self, x):
with torch.no_grad():
return self.model.visual(x)
class ClipResNetPreprocessor(Preprocessor):
"""Preprocess RGB or depth image using a ResNet model with CLIP model
weights."""
CLIP_RGB_MEANS = (0.48145466, 0.4578275, 0.40821073)
CLIP_RGB_STDS = (0.26862954, 0.26130258, 0.27577711)
def __init__(
self,
rgb_input_uuid: str,
clip_model_type: str,
pool: bool,
device: Optional[torch.device] = None,
device_ids: Optional[List[torch.device]] = None,
input_img_height_width: Tuple[int, int] = (224, 224),
chunk_size: Optional[int] = None,
**kwargs: Any,
):
assert clip_model_type in clip.available_models()
        assert not pool or input_img_height_width == (224, 224)
assert all(iis % 32 == 0 for iis in input_img_height_width)
output_height_width = tuple(iis // 32 for iis in input_img_height_width)
if clip_model_type == "RN50":
output_shape = (2048,) + output_height_width
elif clip_model_type == "RN50x16":
output_shape = (3072,) + output_height_width
else:
raise NotImplementedError(
f"Currently `clip_model_type` must be one of 'RN50' or 'RN50x16'"
)
if pool:
output_shape = output_shape[:1]
self.clip_model_type = clip_model_type
self.pool = pool
self.device = torch.device("cpu") if device is None else device
self.device_ids = device_ids or cast(
List[torch.device], list(range(torch.cuda.device_count()))
)
self._resnet: Optional[ClipResNetEmbedder] = None
self.chunk_size = chunk_size
low = -np.inf
high = np.inf
shape = output_shape
input_uuids = [rgb_input_uuid]
assert (
len(input_uuids) == 1
), "resnet preprocessor can only consume one observation type"
observation_space = gym.spaces.Box(low=low, high=high, shape=shape)
super().__init__(**prepare_locals_for_super(locals()))
@property
def resnet(self) -> ClipResNetEmbedder:
if self._resnet is None:
self._resnet = ClipResNetEmbedder(
clip.load(self.clip_model_type, device=self.device)[0], pool=self.pool
).to(self.device)
for module in self._resnet.modules():
if "BatchNorm" in type(module).__name__:
module.momentum = 0.0
self._resnet.eval()
return self._resnet
def to(self, device: torch.device) -> "ClipResNetPreprocessor":
self._resnet = self.resnet.to(device)
self.device = device
return self
def process(self, obs: Dict[str, Any], *args: Any, **kwargs: Any) -> Any:
x = obs[self.input_uuids[0]].to(self.device).permute(0, 3, 1, 2) # bhwc -> bchw
# If the input is depth, repeat it across all 3 channels
if x.shape[1] == 1:
x = x.repeat(1, 3, 1, 1)
n = x.shape[0]
if self.chunk_size is not None and x.shape[0] > self.chunk_size:
processed_chunks = []
for idx in range(0, n, self.chunk_size):
processed_chunks.append(
self.resnet(
x[idx : min(idx + self.chunk_size, n)]
).float()
)
x = torch.cat(processed_chunks, dim=0)
else:
x = self.resnet(x).float()
return x
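# The `process` method above bounds peak GPU memory by embedding large batches
# in chunks of `chunk_size`. A minimal standalone sketch of the same pattern
# (`_chunked_apply_example` is illustrative and not part of the original API):
def _chunked_apply_example(fn, x: torch.Tensor, chunk_size: int) -> torch.Tensor:
    """Apply `fn` to `x` in chunks along dim 0 and concatenate the results."""
    n = x.shape[0]
    if n <= chunk_size:
        return fn(x)
    return torch.cat(
        [fn(x[idx : min(idx + chunk_size, n)]) for idx in range(0, n, chunk_size)],
        dim=0,
    )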
class ClipViTEmbedder(nn.Module):
def __init__(self, model: CLIP, class_emb_only: bool = False):
super().__init__()
self.model = model
self.model.visual.transformer.resblocks = nn.Sequential(
*list(self.model.visual.transformer.resblocks)[:-1]
)
self.class_emb_only = class_emb_only
self.eval()
def forward(self, x):
m = self.model.visual
with torch.no_grad():
x = m.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat(
[
m.class_embedding.to(x.dtype)
+ torch.zeros(
x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device
),
x,
],
dim=1,
) # shape = [*, grid ** 2 + 1, width]
x = x + m.positional_embedding.to(x.dtype)
x = m.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x = m.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
if self.class_emb_only:
return x[:, 0, :]
else:
return x
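# Note on the embedder above (a reading of the code, not original documentation):
# dropping the final transformer resblock and skipping CLIP's `ln_post`/`proj`
# means the output is a grid of patch-token features at the transformer width
# (768 for the ViT-B variants, 1024 for ViT-L/14) rather than the CLIP-aligned
# image embedding; with `class_emb_only=True`, only the class token's feature
# is returned.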
class ClipViTPreprocessor(Preprocessor):
"""Preprocess RGB or depth image using a ResNet model with CLIP model
weights."""
CLIP_RGB_MEANS = (0.48145466, 0.4578275, 0.40821073)
CLIP_RGB_STDS = (0.26862954, 0.26130258, 0.27577711)
def __init__(
self,
rgb_input_uuid: str,
clip_model_type: str,
class_emb_only: bool,
device: Optional[torch.device] = None,
device_ids: Optional[List[torch.device]] = None,
**kwargs: Any,
):
assert clip_model_type in clip.available_models()
if clip_model_type == "ViT-B/32":
output_shape = (7 * 7 + 1, 768)
elif clip_model_type == "ViT-B/16":
output_shape = (14 * 14 + 1, 768)
elif clip_model_type == "ViT-L/14":
output_shape = (16 * 16 + 1, 1024)
else:
            raise NotImplementedError(
                "Currently `clip_model_type` must be one of 'ViT-B/32', 'ViT-B/16', or 'ViT-L/14'"
            )
if class_emb_only:
output_shape = output_shape[1:]
self.clip_model_type = clip_model_type
self.class_emb_only = class_emb_only
self.device = torch.device("cpu") if device is None else device
self.device_ids = device_ids or cast(
List[torch.device], list(range(torch.cuda.device_count()))
)
self._vit: Optional[ClipViTEmbedder] = None
low = -np.inf
high = np.inf
shape = output_shape
input_uuids = [rgb_input_uuid]
assert (
len(input_uuids) == 1
), "resnet preprocessor can only consume one observation type"
observation_space = gym.spaces.Box(low=low, high=high, shape=shape)
super().__init__(**prepare_locals_for_super(locals()))
@property
def vit(self) -> ClipViTEmbedder:
if self._vit is None:
self._vit = ClipViTEmbedder(
model=clip.load(self.clip_model_type, device=self.device)[0],
class_emb_only=self.class_emb_only,
).to(self.device)
for module in self._vit.modules():
if "BatchNorm" in type(module).__name__:
module.momentum = 0.0
self._vit.eval()
return self._vit
def to(self, device: torch.device) -> "ClipViTPreprocessor":
self._vit = self.vit.to(device)
self.device = device
return self
def process(self, obs: Dict[str, Any], *args: Any, **kwargs: Any) -> Any:
x = obs[self.input_uuids[0]].to(self.device).permute(0, 3, 1, 2) # bhwc -> bchw
# If the input is depth, repeat it across all 3 channels
if x.shape[1] == 1:
x = x.repeat(1, 3, 1, 1)
x = self.vit(x).float()
return x
| allenact-main | allenact_plugins/clip_plugin/clip_preprocessors.py |
from allenact.utils.system import ImportChecker
with ImportChecker(
"Cannot `import clip`. Please install clip from the openai/CLIP git repository:"
"\n`pip install git+https://github.com/openai/CLIP.git@b46f5ac7587d2e1862f8b7b1573179d80dcdd620`"
):
# noinspection PyUnresolvedReferences
import clip
| allenact-main | allenact_plugins/clip_plugin/__init__.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on WikiText-2 (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
from __future__ import absolute_import, division, print_function
import argparse
import glob
import json
import logging
import os
import pickle
import random
import numpy as np
import torch
from comet.data.atomic import all_categories, make_attention_mask
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from pytorch_transformers import (WEIGHTS_NAME, AdamW,
BertConfig, BertForMaskedLM, BertTokenizer,
GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
RobertaConfig, RobertaForMaskedLM, RobertaTokenizer,
WarmupLinearSchedule
)
from anlg.models import GPT2CometLMHeadModel
from anlg.tokenizers import AnliGpt2Tokenizer, AnliCometGpt2Tokenizer
from utils.file_utils import read_jsonl_lines
import comet.interactive.functions as comet_interactive
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'gpt2_for_anli': (GPT2Config, GPT2CometLMHeadModel, AnliGpt2Tokenizer),
'gpt2_for_anli_comet': (GPT2Config, GPT2CometLMHeadModel, AnliCometGpt2Tokenizer)
}
restricted_comet_relations = {
"obs1": ["xEffect", "xWant", "xReact"],
"obs2": ["xIntent", "xNeed"]
}
class TextDataset(Dataset):
def __init__(self, tokenizer, file_path='train', block_size=512):
assert os.path.isfile(file_path)
directory, filename = os.path.split(file_path)
cached_features_file = os.path.join(directory, f'cached_lm_{block_size}_{filename}')
if os.path.exists(cached_features_file):
logger.info("Loading features from cached file %s", cached_features_file)
with open(cached_features_file, 'rb') as handle:
self.examples = pickle.load(handle)
else:
logger.info("Creating features from dataset file at %s", directory)
self.examples = []
with open(file_path, encoding="utf-8") as f:
text = f.read()
tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
while len(tokenized_text) >= block_size: # Truncate in block of block_size
self.examples.append(
tokenizer.add_special_tokens_single_sentence(tokenized_text[:block_size]))
tokenized_text = tokenized_text[block_size:]
            # Note that we are losing the last truncated example here for the sake of simplicity (no padding)
            # If your dataset is small, first you should look for a bigger one :-) and second you
# can change this behavior by adding (model specific) padding.
logger.info("Saving features into cached file %s", cached_features_file)
with open(cached_features_file, 'wb') as handle:
pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
def __len__(self):
return len(self.examples)
def __getitem__(self, item):
return torch.tensor(self.examples[item])
def anli_record_to_gpt_prompt(tokenizer: AnliGpt2Tokenizer, record: dict, is_eval: bool = False):
context = [
record['obs1'],
record['obs2'],
"Because, "
]
if is_eval:
return context
else:
training_instance = context + [
record['hyp' + record['label']],
]
return training_instance
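# A hedged example of the record format consumed above; the field names follow
# the accesses in this file, while the sentence values and label are invented:
#
#     record = {
#         "obs1": "Dotty was being very grumpy.",
#         "obs2": "Dotty was happy the rest of the day.",
#         "hyp1": "Dotty ate something bad.",
#         "hyp2": "Dotty called a close friend to chat.",
#         "label": "2",
#     }
#
# Since `label` is the string "1" or "2", `record['hyp' + record['label']]`
# selects the gold hypothesis.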
def record_to_text_tokens_with_comet_pred(tokenizer: AnliCometGpt2Tokenizer,
record: dict,
include_comet=False,
comet_text_encoder=None,
comet_data_loader=None,
comet_as_text=False,
is_eval: bool = False,
restrict_comet: bool = False,
sotw: bool = False
):
comet_event_inputs = None
comet_attention_masks = None
context = []
if include_comet:
for obs in ['obs1', 'obs2']:
for category in all_categories:
if restrict_comet:
if category not in restricted_comet_relations[obs]:
continue
if comet_as_text:
context.append(tokenizer.category_begin_tag(obs, category))
if category in record['comet_preds'][obs] and \
record['comet_preds'][obs][category]['beams'][0] != "none":
context.append(record['comet_preds'][obs][category]['beams'][0])
else:
context.append(tokenizer.comet_none)
context.append(tokenizer.category_end_tag(obs, category))
else:
if comet_event_inputs is None:
comet_event_inputs = []
if comet_attention_masks is None:
comet_attention_masks = []
XMB = np.zeros(25)
                    obs_comet_input = comet_text_encoder.encode([record[obs]], verbose=False)[0]
                    XMB[:len(obs_comet_input)] = obs_comet_input
XMB[-1] = comet_text_encoder.encoder["<{}>".format(category)]
attention_mask = [1 if item != 0 else 0 for item in XMB]
comet_event_inputs.append(XMB)
comet_attention_masks.append(attention_mask)
if sotw:
# only 9 placeholders if using the SOTW model
if obs == 'obs1':
context.append(tokenizer.unk_token)
else:
context.append(tokenizer.unk_token)
context.extend([
tokenizer.bo1_token,
record['obs1'],
tokenizer.eo1_token,
tokenizer.bo2_token,
record['obs2'],
tokenizer.eo2_token,
tokenizer.bexpl_token,
])
if is_eval:
return context, comet_event_inputs, comet_attention_masks
else:
training_instance = context + [
record['hyp' + record['label']],
tokenizer.eexpl_token
]
return training_instance, comet_event_inputs, comet_attention_masks
def _to_hyp_only_labels(tokenizer, tokenized_text):
hyp_start_token_idx = tokenizer.convert_tokens_to_ids([tokenizer.bexpl_token])[0]
hyp_end_token_idx = tokenizer.convert_tokens_to_ids([tokenizer.eexpl_token])[0]
start_idx = tokenized_text.index(hyp_start_token_idx)
end_idx = tokenized_text.index(hyp_end_token_idx)
labels = [-1] * len(tokenized_text)
labels[start_idx + 1: end_idx + 1] = tokenized_text[start_idx + 1:end_idx + 1]
assert len(tokenized_text) == len(labels)
return labels
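# A small worked example for `_to_hyp_only_labels`, using invented token ids
# where 90 stands for `bexpl_token` and 91 for `eexpl_token`:
#
#     tokenized_text = [5, 6, 90, 7, 8, 91, 0]
#     labels         = [-1, -1, -1, 7, 8, 91, -1]
#
# Only the hypothesis tokens (and the closing tag) keep their ids; every other
# position is set to -1, which the loss function ignores.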
class AnliDataset(Dataset):
def __init__(self, tokenizer, file_path="train", cache_dir=None, max_seq_len=256,
include_comet=False, comet_text_encoder=None, comet_data_loader=None,
comet_as_text=False, conditional_lm=False, restrict_comet=False, no_cache=False,
is_eval=False,
sotw=False):
assert os.path.isfile(file_path)
directory, filename = os.path.split(file_path)
if include_comet and not comet_as_text:
max_seq_len = max_seq_len + 18
logging.info("Increasing max length to {}.".format(max_seq_len))
if cache_dir is None:
cached_features_file = os.path.join(directory, f'cached_lm_{filename}')
else:
cached_features_file = os.path.join(cache_dir, f'cached_lm_{filename}')
if os.path.exists(cached_features_file):
logger.info("Loading features from cached file %s", cached_features_file)
with open(cached_features_file, 'rb') as handle:
self.examples, self.labels, self.comet_inputs, self.comet_masks = pickle.load(
handle)
else:
logger.info("Creating features from dataset file at %s", directory)
self.examples = []
self.labels = []
self.comet_inputs = []
self.comet_masks = []
records = read_jsonl_lines(file_path)
# with open(file_path, encoding="utf-8") as f:
# text = f.read()
idx = 0
for record in tqdm(records, "Encoding Data"):
text_tokens, comet_event_inputs, comet_attention_masks = \
record_to_text_tokens_with_comet_pred(
tokenizer=tokenizer,
record=record,
include_comet=include_comet,
comet_text_encoder=comet_text_encoder,
comet_data_loader=comet_data_loader,
comet_as_text=comet_as_text,
restrict_comet=restrict_comet,
sotw=sotw
)
text = " ".join(text_tokens)
tokens = tokenizer.tokenize(text)
if len(tokens) > max_seq_len:
tokens = tokens[:max_seq_len]
else:
tokens.extend([tokenizer.unk_token] * (max_seq_len - len(tokens)))
tokenized_text = tokenizer.convert_tokens_to_ids(tokens)
self.examples.append(tokenized_text)
if conditional_lm or is_eval:
labels = _to_hyp_only_labels(tokenizer, tokenized_text)
else:
unk_token_idx = tokenizer.convert_tokens_to_ids([tokenizer.unk_token])[0]
labels = [-1 if t == unk_token_idx else t for t in tokenized_text]
self.labels.append(labels)
self.comet_inputs.append(comet_event_inputs)
self.comet_masks.append(comet_attention_masks)
if idx < 5:
print("***** Example Instance *****")
print("Text: {}".format(text))
print("Tokenized Text: {}".format(tokenized_text))
if comet_event_inputs is not None:
print("Comet Event inputs: {}".format(comet_event_inputs))
print("Comet Mask: {}".format(comet_attention_masks))
print("Labels: {}".format(labels))
print("********\n")
idx += 1
if not no_cache:
logger.info("Saving features into cached file %s", cached_features_file)
with open(cached_features_file, 'wb') as handle:
pickle.dump((self.examples, self.labels, self.comet_inputs, self.comet_masks),
handle, protocol=pickle.HIGHEST_PROTOCOL)
def __len__(self):
return len(self.examples)
def __getitem__(self, item):
return torch.tensor(self.examples[item]), \
torch.tensor(self.labels[item]), \
torch.tensor(self.comet_inputs[item]) if self.comet_inputs[item] is not None else [], \
torch.tensor(self.comet_masks[item]) if self.comet_masks[item] is not None else []
def load_and_cache_examples(args, tokenizer, evaluate=False):
dataset = TextDataset(tokenizer,
file_path=args.eval_data_file if evaluate else args.train_data_file,
block_size=args.block_size)
return dataset
def load_and_cache_anli_examples(args, tokenizer, evaluate=False, include_comet=False,
comet_text_encoder=None, comet_data_loader=None, sotw=False):
dataset = AnliDataset(
tokenizer,
file_path=args.eval_data_file if evaluate else args.train_data_file,
cache_dir=args.cache_dir,
include_comet=include_comet,
comet_text_encoder=comet_text_encoder,
comet_data_loader=comet_data_loader,
comet_as_text=args.comet_as_text,
conditional_lm=args.conditional_lm,
restrict_comet=args.restrict_comet,
no_cache=args.no_cache,
is_eval=evaluate,
max_seq_len=args.block_size,
sotw=sotw
)
return dataset
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def mask_tokens(inputs, tokenizer, args):
""" Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. """
labels = inputs.clone()
# We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
masked_indices = torch.bernoulli(torch.full(labels.shape, args.mlm_probability)).bool()
labels[~masked_indices] = -1 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(
torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
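# Net per-token effect of `mask_tokens` with the default mlm_probability of
# 0.15: each token is selected with probability 0.15; of the selected tokens,
# 80% become [MASK], 10% are replaced by a random vocabulary token (0.5 of the
# remaining 20%), and 10% are left unchanged. Unselected tokens receive label
# -1 and contribute nothing to the loss.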
def train(args, train_dataset, model, tokenizer, comet_text_encoder=None, comet_data_loader=None):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter(os.path.join(args.tb_dir, "tb/"))
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(
train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (
len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch",
disable=args.local_rank not in [-1, 0])
set_seed(args) # Added here for reproducibility (even between python 2 and 3)
for epoch in train_iterator:
logging.info("\n\n*** Starting Epoch: {} ***\n\n".format(epoch))
epoch_iterator = tqdm(train_dataloader, desc="Iteration",
disable=args.local_rank not in [-1, 0])
        # Modified code to only work for ANLI now. Need to generalize later.
assert args.task == "anli"
for step, (inputs, labels, comet_input, comet_mask) in enumerate(epoch_iterator):
# inputs, labels = mask_tokens(batch, tokenizer, args) if args.mlm else (batch, torch.clone(batch))
inputs = inputs.to(args.device)
labels = labels.to(args.device)
if isinstance(comet_input, list) and len(comet_input) == 0:
comet_input = None
comet_mask = None
else:
comet_input = comet_input.to(args.device)
comet_mask = comet_mask.to(args.device)
model.train()
outputs = model(inputs, labels=labels, comet_input=comet_input, comet_mask=comet_mask)
loss = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1,
0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('loss', (tr_loss - logging_loss) / args.logging_steps,
global_step)
logging_loss = tr_loss
if args.local_rank in [-1,
0] and args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model,
'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info("Saving model checkpoint to %s", output_dir)
if 0 < args.max_steps < global_step:
epoch_iterator.close()
break
if 0 < args.max_steps < global_step:
train_iterator.close()
break
logging.info("Evaluate epoch ... {}".format(epoch))
results = evaluate(args, model, tokenizer, prefix=str(epoch), comet_text_encoder=comet_text_encoder, comet_data_loader=comet_data_loader)
for key, value in results.items():
tb_writer.add_scalar('eval_{}'.format(key.split("_")[0]), value, global_step)
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer,
evaluate=False,
comet_text_encoder=None,
comet_data_loader=None,
prefix=""):
    # Evaluate LM perplexity on the evaluation dataset.
eval_output_dir = args.eval_output_dir
results = {}
if args.task is None:
eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True)
elif args.task == "anli":
eval_dataset = load_and_cache_anli_examples(args, tokenizer,
evaluate=True,
include_comet=args.include_comet,
comet_text_encoder=comet_text_encoder,
comet_data_loader=comet_data_loader,
)
else:
raise Exception("Task Unsopported")
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(
eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler,
batch_size=args.eval_batch_size)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
for batch, labels, comet_input, comet_mask in tqdm(eval_dataloader, desc="Evaluating"):
batch = batch.to(args.device)
labels = labels.to(args.device)
with torch.no_grad():
# labels = torch.clone(batch)
# if args.task == "anli":
# labels[labels == tokenizer.convert_tokens_to_ids([tokenizer.unk_token])[0]] = -1
outputs = model(batch, labels=labels)
lm_loss = outputs[0]
eval_loss += lm_loss.mean().item()
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
perplexity = torch.exp(torch.tensor(eval_loss))
output_eval_file = os.path.join(eval_output_dir, "metrics.json")
if os.path.exists(output_eval_file):
results = json.load(open(output_eval_file))
else:
results = {}
if len(prefix) == 0:
results.update({
"perplexity": perplexity.item(),
"eval_loss": eval_loss
})
else:
results.update({
"perplexity_{}".format(prefix): perplexity.item(),
"loss_{}".format(prefix): eval_loss
})
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
writer.write(json.dumps(results))
writer.close()
return results
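# Note on the metric above: `eval_loss` is the mean per-batch LM loss (an
# average negative log-likelihood per token), so `perplexity = exp(eval_loss)`
# is the standard language-modeling perplexity.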
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--train_data_file", default=None, type=str, required=True,
help="The input training data file (a text file).")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--eval_output_dir", default=None, type=str, required=False,
help="Directory to write results to")
parser.add_argument("--tb_dir", default=None, type=str, required=False,
help="Directory to write tensorboard to")
## Other parameters
parser.add_argument("--task", default=None, type=str,
help="The task to finetune the LM on. Currently supports None / anli")
parser.add_argument("--include_comet", default=False, type=bool,
help="To include comet predictions or not")
parser.add_argument("--comet_model_path", default="comet-model/atomic_pretrained_model.th",
type=str, help="Comet model path")
parser.add_argument("--comet_vocab_path", default="comet-vocab/", type=str,
help="Comet model path")
parser.add_argument("--comet_as_text", default=False, type=bool,
help="Comet feature encoded using text")
parser.add_argument("--conditional_lm", default=False, type=bool,
help="Comet feature encoded using text")
parser.add_argument("--restrict_comet", default=False, type=bool,
help="Restrict comet features to only o1's effect and o2's causes")
parser.add_argument("--sotw", default=False, type=bool,
help="Use the state of the world model.")
parser.add_argument("--no_cache", default=False, type=bool,
help="Restrict comet features to only o1's effect and o2's causes")
parser.add_argument("--eval_data_file", default=None, type=str,
help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
parser.add_argument("--model_type", default="bert", type=str,
help="The model architecture to be fine-tuned.")
parser.add_argument("--model_name_or_path", default="bert-base-cased", type=str,
help="The model checkpoint for weights initialization.")
parser.add_argument("--mlm", action='store_true',
help="Train with masked-language modeling loss instead of language modeling.")
parser.add_argument("--mlm_probability", type=float, default=0.15,
help="Ratio of tokens to mask for masked language modeling loss")
parser.add_argument("--config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
parser.add_argument("--cache_dir", default="", type=str,
help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)")
parser.add_argument("--block_size", default=-1, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="Run evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--per_gpu_train_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=1.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument('--logging_steps', type=int, default=50,
help="Log every X updates steps.")
parser.add_argument('--save_steps', type=int, default=50,
help="Save checkpoint every X updates steps.")
parser.add_argument("--eval_all_checkpoints", action='store_true',
help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
args = parser.parse_args()
if args.eval_output_dir is None:
args.eval_output_dir = args.output_dir
if args.tb_dir is None:
args.tb_dir = args.output_dir
if args.model_type in ["bert", "roberta"] and not args.mlm:
raise ValueError(
"BERT and RoBERTa do not have LM heads but masked LM heads. They must be run using the --mlm "
"flag (masked language modeling).")
if args.eval_data_file is None and args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument.")
if os.path.exists(args.output_dir) and os.listdir(
args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir))
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case)
if args.block_size <= 0:
args.block_size = tokenizer.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer.max_len_single_sentence)
model = model_class.from_pretrained(args.model_name_or_path,
from_tf=bool('.ckpt' in args.model_name_or_path),
config=config)
model.resize_token_embeddings(len(tokenizer))
model.to(args.device)
comet_text_encoder = None
comet_data_loader = None
comet_model = None
if args.include_comet and not args.comet_as_text:
opt, state_dict, vocab = comet_interactive.load_model_file(args.comet_model_path)
# print(opt)
comet_data_loader, comet_text_encoder = \
comet_interactive.load_data("atomic", opt, vocab, args.comet_vocab_path)
n_ctx = comet_data_loader.max_event + comet_data_loader.max_effect
n_vocab = len(comet_text_encoder.encoder) + n_ctx
if not torch.cuda.is_available():
comet_interactive.set_compute_mode("cpu")
comet_model = comet_interactive.make_model(opt, n_vocab, n_ctx, state_dict)
comet_model.train()
model.set_comet_model(comet_model)
model.set_comet_encoder(comet_text_encoder)
if args.local_rank == 0:
torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache
if args.task is None:
train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False)
elif args.task == "anli":
train_dataset = load_and_cache_anli_examples(
args,
tokenizer,
evaluate=False,
include_comet=args.include_comet,
comet_text_encoder=comet_text_encoder,
comet_data_loader=comet_data_loader,
sotw=args.sotw
)
else:
raise Exception("Task Unsopported")
if args.local_rank == 0:
torch.distributed.barrier()
global_step, tr_loss = train(args, train_dataset, model, tokenizer, comet_text_encoder, comet_data_loader)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use save_pretrained for the model and tokenizer, you can reload them using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = model.module if hasattr(model,
'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir,
do_lower_case=args.do_lower_case)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(os.path.dirname(c) for c in sorted(
glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
logging.getLogger("pytorch_transformers.modeling_utils").setLevel(
logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
comet_model = None
comet_text_encoder = None
if args.include_comet and not args.comet_as_text:
logging.info("Setting comet model")
            opt, state_dict, vocab = comet_interactive.load_model_file(args.comet_model_path)
            # print(opt)
            comet_data_loader, comet_text_encoder = \
                comet_interactive.load_data("atomic", opt, vocab, args.comet_vocab_path)
            n_ctx = comet_data_loader.max_event + comet_data_loader.max_effect
            n_vocab = len(comet_text_encoder.encoder) + n_ctx
            if not torch.cuda.is_available():
                comet_interactive.set_compute_mode("cpu")
            comet_model = comet_interactive.make_model(opt, n_vocab, n_ctx, state_dict)
for checkpoint in checkpoints:
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
model = model_class.from_pretrained(checkpoint)
model.set_comet_model(comet_model)
model.set_comet_encoder(comet_text_encoder)
model.to(args.device)
result = evaluate(
args,
model,
tokenizer,
evaluate=False,
comet_text_encoder=comet_text_encoder,
comet_data_loader=comet_data_loader,
prefix=global_step
)
result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())
results.update(result)
return results
if __name__ == "__main__":
main()
| abductive-commonsense-reasoning-master | anlg/run_lm_finetuning.py |
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn as nn
from comet.models.utils import prepare_position_embeddings
from pytorch_transformers import GPT2PreTrainedModel
from pytorch_transformers.modeling_bert import BertLayerNorm as LayerNorm
from pytorch_transformers.modeling_gpt2 import Block
class GPT2CometAttentiveModel(GPT2PreTrainedModel):
def __init__(self, config):
super(GPT2CometAttentiveModel, self).__init__(config)
self.output_hidden_states = config.output_hidden_states
self.output_attentions = config.output_attentions
self.wte = nn.Embedding(config.vocab_size, config.n_embd)
self.wpe = nn.Embedding(config.n_positions, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList(
[Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
self.ln_f = LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.comet_model = None
self.comet_encoder = None
self.apply(self.init_weights)
def _resize_token_embeddings(self, new_num_tokens):
self.wte = self._get_resized_embeddings(self.wte, new_num_tokens)
return self.wte
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.h[layer].attn.prune_heads(heads)
def _comet_embs(self, comet_input, comet_mask):
batch_size, num_comet_rels = comet_input.size(0), comet_input.size(1)
comet_input = comet_input.view(batch_size * num_comet_rels, -1)
comet_mask = comet_mask.view(batch_size * num_comet_rels, -1).float()
comet_input_with_positions = prepare_position_embeddings(None, self.comet_encoder.encoder,
comet_input.unsqueeze(-1))
comet_embs = self.comet_model.transformer(comet_input_with_positions.unsqueeze(1),
sequence_mask=comet_mask)[:, -1, :]
return comet_embs.view(batch_size, num_comet_rels, -1)
def forward(self, input_ids, position_ids=None, token_type_ids=None, past=None, head_mask=None,
comet_input=None, comet_mask=None
):
if past is None:
past_length = 0
past = [None] * len(self.h)
else:
past_length = past[0][0].size(-2)
if position_ids is None:
position_ids = torch.arange(past_length, input_ids.size(-1) + past_length,
dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# head_mask has shape n_layer x batch x n_heads x N x N
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(
-1) # We can specify head_mask for each layer
head_mask = head_mask.to(
                dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.n_layer
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_ids.size(-1))
position_ids = position_ids.view(-1, position_ids.size(-1))
inputs_embeds = self.wte(input_ids)
if comet_input is not None:
comet_embs = self._comet_embs(comet_input.long(), comet_mask)
num_comet_rels = comet_input.size(1)
inputs_embeds[:, :num_comet_rels, :] = self.ln_f(comet_embs)
position_embeds = self.wpe(position_ids)
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
token_type_embeds = self.wte(token_type_ids)
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
presents = ()
all_attentions = []
all_hidden_states = ()
for i, (block, layer_past) in enumerate(zip(self.h, past)):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)
outputs = block(hidden_states, layer_past, head_mask[i])
hidden_states, present = outputs[:2]
presents = presents + (present,)
if self.output_attentions:
all_attentions.append(outputs[2])
hidden_states = self.ln_f(hidden_states)
hidden_states = hidden_states.view(*output_shape)
# Add last hidden state
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states, presents)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
# let the number of heads free (-1) so we can extract attention even after head pruning
attention_output_shape = input_shape[:-1] + (-1,) + all_attentions[0].shape[-2:]
all_attentions = tuple(t.view(*attention_output_shape) for t in all_attentions)
outputs = outputs + (all_attentions,)
return outputs # last hidden state, presents, (all hidden_states), (attentions)
def set_comet_model(self, comet_model):
self.comet_model = comet_model
def set_comet_encoder(self, comet_encoder):
self.comet_encoder = comet_encoder
class GPT2CometLMHeadModel(GPT2PreTrainedModel):
def __init__(self, config):
super(GPT2CometLMHeadModel, self).__init__(config)
self.transformer = GPT2CometAttentiveModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.apply(self.init_weights)
self.tie_weights()
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.lm_head,
self.transformer.wte)
def forward(self,
input_ids,
position_ids=None,
token_type_ids=None,
labels=None,
past=None,
head_mask=None,
comet_input=None,
comet_mask=None
):
transformer_outputs = self.transformer(input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
past=past,
head_mask=head_mask,
comet_input=comet_input,
comet_mask=comet_mask
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
outputs = (lm_logits,) + transformer_outputs[1:]
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
shift_labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), lm_logits, presents, (all hidden_states), (attentions)
def set_comet_model(self, comet_model):
self.transformer.set_comet_model(comet_model)
def set_comet_encoder(self, comet_encoder):
self.transformer.set_comet_encoder(comet_encoder)
    def _resize_token_embeddings(self, new_num_tokens):
        # Resize the shared input embeddings, re-tie the LM head to them, and
        # return the new embedding module (the base class expects a return value).
        new_embeddings = self.transformer._resize_token_embeddings(new_num_tokens)
        self.tie_weights()
        return new_embeddings
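    # Illustrative sketch (not part of the original repo): the shifted
    # cross-entropy used in forward() above. Tokens < n predict token n, so
    # logits drop the last position and labels drop the first before
    # flattening. All shapes below are toy assumptions.
    @staticmethod
    def _demo_shifted_lm_loss():
        lm_logits = torch.randn(2, 5, 11)        # (batch, seq_len, vocab)
        labels = torch.randint(0, 11, (2, 5))    # gold next-token ids
        shift_logits = lm_logits[..., :-1, :].contiguous()
        shift_labels = labels[..., 1:].contiguous()
        loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
        return loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                        shift_labels.view(-1))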
| abductive-commonsense-reasoning-master | anlg/models.py |
abductive-commonsense-reasoning-master | anlg/__init__.py |
|
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/Transformer-XL/XLNet)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import json
import logging
import comet.interactive.functions as comet_interactive
import numpy as np
import torch
import torch.nn.functional as F
import tqdm
from pytorch_transformers import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig
from pytorch_transformers import GPT2LMHeadModel, GPT2Tokenizer
from pytorch_transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
from pytorch_transformers import TransfoXLLMHeadModel, TransfoXLTokenizer
from pytorch_transformers import XLNetLMHeadModel, XLNetTokenizer
from anlg.models import GPT2CometLMHeadModel
from anlg.run_lm_finetuning import record_to_text_tokens_with_comet_pred, \
anli_record_to_gpt_prompt
from anlg.tokenizers import AnliGpt2Tokenizer, AnliCometGpt2Tokenizer
from utils.file_utils import read_lines, write_items, read_jsonl_lines
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
logging.getLogger("pytorch_transformers.tokenization_utils").setLevel(logging.CRITICAL)
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in
(GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig)), ())
MODEL_CLASSES = {
'gpt2': (GPT2LMHeadModel, GPT2Tokenizer),
'openai-gpt': (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
'xlnet': (XLNetLMHeadModel, XLNetTokenizer),
'transfo-xl': (TransfoXLLMHeadModel, TransfoXLTokenizer),
'gpt2_for_anli': (GPT2CometLMHeadModel, AnliGpt2Tokenizer),
'gpt2_for_anli_comet': (GPT2CometLMHeadModel, AnliCometGpt2Tokenizer)
}
# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
PADDING_TEXT = """ In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
def set_seed(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k > 0: keep only top k tokens with highest probability (top-k filtering).
top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
assert logits.dim() == 1 # batch size 1 for now - could be updated for more but the code would be less clear
top_k = min(top_k, logits.size(-1)) # Safety check
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
return logits
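# Illustrative sketch (an assumption, not part of the original script): nucleus
# sampling with the filter above on random toy logits.
def _demo_top_p_sampling():
    logits = torch.randn(50)                    # unnormalized scores over a toy vocab
    filtered = top_k_top_p_filtering(logits, top_k=10, top_p=0.9)
    probs = F.softmax(filtered, dim=-1)         # filtered entries get ~zero probability
    return torch.multinomial(probs, num_samples=1)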
def sample_sequence(model, length, context, num_samples=1, temperature=1, top_k=0, top_p=0.0,
is_xlnet=False, device='cpu', comet_input=None, comet_mask=None):
context = torch.tensor(context, dtype=torch.long, device=device)
context = context.unsqueeze(0).repeat(num_samples, 1)
if comet_input is not None:
comet_input = torch.tensor(comet_input, dtype=torch.long, device=device)
comet_input = comet_input.unsqueeze(0).repeat(num_samples, 1, 1)
comet_mask = torch.tensor(comet_mask, dtype=torch.float, device=device)
comet_mask = comet_mask.unsqueeze(0).repeat(num_samples, 1, 1)
generated = context
with torch.no_grad():
for _ in range(length):
inputs = {'input_ids': generated}
if comet_input is not None:
inputs['comet_input'] = comet_input
inputs['comet_mask'] = comet_mask
if is_xlnet:
# XLNet is a direct (predict same token, not next token) and bi-directional model by default
# => need one additional dummy token in the input (will be masked), attention mask and target mapping (see model docstring)
input_ids = torch.cat(
(generated, torch.zeros((1, 1), dtype=torch.long, device=device)), dim=1)
perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]),
dtype=torch.float, device=device)
perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token
target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float,
device=device)
target_mapping[0, 0, -1] = 1.0 # predict last token
inputs = {'input_ids': input_ids, 'perm_mask': perm_mask,
'target_mapping': target_mapping}
outputs = model(
**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)
next_token_logits = outputs[0][0, -1, :] / temperature
filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1),
num_samples=num_samples)
generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1)
return generated
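# Hedged usage sketch (not from the repo): decoding a short continuation with a
# plain GPT-2 checkpoint. The "gpt2" shortcut and the prompt are assumptions;
# weights are downloaded on first use.
def _demo_sample_sequence():
    tok = GPT2Tokenizer.from_pretrained("gpt2")
    mdl = GPT2LMHeadModel.from_pretrained("gpt2")
    mdl.eval()
    ctx = tok.encode("The storm knocked out the power, so")
    out = sample_sequence(model=mdl, length=20, context=ctx, top_p=0.9, device="cpu")
    return tok.decode(out[0, len(ctx):].tolist())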
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(
ALL_MODELS))
parser.add_argument("--input-file", type=str, default=None,
help="File to load instance prompts from")
parser.add_argument("--task", type=str, default=None,
help="Which task for file input. If None, prompt is read as raw text 1 prompt per line in input-file")
parser.add_argument("--output-file", type=str, default=None,
help="File to load instance prompts from")
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--padding_text", type=str, default="")
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--temperature", type=float, default=1.0)
parser.add_argument("--top_k", type=int, default=0)
parser.add_argument("--top_p", type=float, default=0.9)
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument("--include_comet", default=False, type=bool,
help="To include comet predictions or not")
parser.add_argument("--comet_model_path", default="comet-model/atomic_pretrained_model.th",
type=str, help="Comet model path")
parser.add_argument("--comet_vocab_path", default="comet-vocab/", type=str,
help="Comet model path")
parser.add_argument("--comet_as_text", default=False, type=bool,
help="Comet feature encoded using text")
parser.add_argument("--restrict_comet", default=False, type=bool,
help="Restrict comet features to only o1's effect and o2's causes")
parser.add_argument("--num_samples", default=1, type=int, help="No. of samples to obtain.")
args = parser.parse_args()
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
set_seed(args)
args.model_type = args.model_type.lower()
model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
model = model_class.from_pretrained(args.model_name_or_path)
model.to(args.device)
comet_text_encoder = None
if args.include_comet and not args.comet_as_text:
logging.info("Setting comet model")
opt, state_dict, vocab = comet_interactive.load_model_file(args.comet_model_path)
# print(opt)
comet_data_loader, comet_text_encoder = \
comet_interactive.load_data("atomic", opt, vocab, args.comet_vocab_path)
n_ctx = comet_data_loader.max_event + comet_data_loader.max_effect
n_vocab = len(comet_text_encoder.encoder) + n_ctx
if not torch.cuda.is_available():
comet_interactive.set_compute_mode("cpu")
comet_model = comet_interactive.make_model(opt, n_vocab, n_ctx, state_dict)
model.set_comet_model(comet_model)
model.set_comet_encoder(comet_text_encoder)
model.eval()
if args.length < 0 and model.config.max_position_embeddings > 0:
args.length = model.config.max_position_embeddings
elif 0 < model.config.max_position_embeddings < args.length:
args.length = model.config.max_position_embeddings # No generation bigger than model size
elif args.length < 0:
args.length = MAX_LENGTH # avoid infinite loop
print(args)
def _prompt_to_gen(txt, comet_event_inputs, comet_attention_masks):
if args.model_type in ["transfo-xl", "xlnet"]:
            # Models with memory like to have a long prompt for short inputs.
txt = (args.padding_text if args.padding_text else PADDING_TEXT) + txt
context_tokens = tokenizer.encode(txt)
out = sample_sequence(
model=model,
context=context_tokens,
length=args.length,
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
device=args.device,
is_xlnet=bool(args.model_type == "xlnet"),
comet_input=comet_event_inputs,
comet_mask=comet_attention_masks,
num_samples=args.num_samples
)
out = out[0, len(context_tokens):].tolist()
text = tokenizer.decode(out, clean_up_tokenization_spaces=True)
return text
if args.input_file is None:
while True:
raw_text = args.prompt if args.prompt else input("Model prompt >>> ")
            text = _prompt_to_gen(raw_text, None, None)
print(text)
if args.prompt:
break
else:
if args.task is None:
lines = read_lines(args.input_file)
generations = []
for l in lines:
                generations.append(_prompt_to_gen(l, None, None))
write_items(generations, args.output_file)
elif args.task == "anli":
records = read_jsonl_lines(args.input_file)
idx = 0
for record in tqdm.tqdm(records):
input_text_tokens = None
comet_event_inputs = None
comet_attention_masks = None
if args.model_type == "gpt2_for_anli_comet":
input_text_tokens, comet_event_inputs, comet_attention_masks = \
record_to_text_tokens_with_comet_pred(
tokenizer=tokenizer,
record=record,
is_eval=True,
comet_as_text=args.comet_as_text,
include_comet=args.include_comet,
comet_text_encoder=comet_text_encoder,
restrict_comet=args.restrict_comet
)
elif args.model_type == "gpt2_for_anli":
input_text_tokens = anli_record_to_gpt_prompt(tokenizer=tokenizer, record=record, is_eval=True)
input_text = " ".join(input_text_tokens)
gen = _prompt_to_gen(input_text, comet_event_inputs, comet_attention_masks)
if args.model_type == "gpt2_for_anli":
period_idx = gen.find(".")
if period_idx != -1:
gen = gen[: period_idx]
if 'generations' not in record:
record['generations'] = {}
record['generations'][args.model_type] = [gen]
if idx < 5:
print("Input context format: {}".format(input_text_tokens))
if comet_event_inputs is not None:
print("Comet event input format: {}".format(comet_event_inputs))
print("Comet mask: {}".format(comet_attention_masks))
idx += 1
write_items([json.dumps(r) for r in records], args.output_file)
if __name__ == '__main__':
main()
| abductive-commonsense-reasoning-master | anlg/run_generation.py |
from comet.data.atomic import all_categories
from pytorch_transformers import GPT2Tokenizer
class AnliGpt2Tokenizer(GPT2Tokenizer):
def __init__(self,
vocab_file,
merges_file,
errors='replace',
unk_token="<|endoftext|>",
bos_token="<|endoftext|>",
eos_token="<|endoftext|>",
bo1_token="<|beginobs1|>",
eo1_token="<|endobs1|>",
bo2_token="<|beginobs2|>",
eo2_token="<|endobs2|>",
bexpl_token="<|bexpl|>",
eexpl_token="<|eexpl|>",
**kwargs):
super(AnliGpt2Tokenizer, self).__init__(
vocab_file,
merges_file,
errors=errors,
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
**kwargs
)
self.bo1_token = bo1_token
self.eo1_token = eo1_token
self.bo2_token = bo2_token
self.eo2_token = eo2_token
self.bexpl_token = bexpl_token
self.eexpl_token = eexpl_token
self.add_special_tokens({
"additional_special_tokens": [self.bo1_token, self.eo1_token, self.bo2_token,
self.eo2_token, self.bexpl_token, self.eexpl_token]
})
def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
pass
def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
text = super().decode(token_ids, skip_special_tokens, clean_up_tokenization_spaces)
idx = text.find(self.eexpl_token)
if idx != -1:
text = text[:idx]
return text
class AnliCometGpt2Tokenizer(GPT2Tokenizer):
def add_special_tokens_single_sentence(self, token_ids):
pass
def __init__(self,
vocab_file,
merges_file,
errors='replace',
unk_token="<|endoftext|>",
bos_token="<|endoftext|>",
eos_token="<|endoftext|>",
bo1_token="<|beginobs1|>",
eo1_token="<|endobs1|>",
bo2_token="<|beginobs2|>",
eo2_token="<|endobs2|>",
bexpl_token="<|bexpl|>",
eexpl_token="<|eexpl|>",
comet_token_px="<|personx|>",
comet_token_py="<|persony|>",
comet_none="<|none|>",
**kwargs):
super(AnliCometGpt2Tokenizer, self).__init__(
vocab_file,
merges_file,
errors=errors,
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
**kwargs
)
self.bo1_token = bo1_token
self.eo1_token = eo1_token
self.bo2_token = bo2_token
self.eo2_token = eo2_token
self.bexpl_token = bexpl_token
self.eexpl_token = eexpl_token
self.comet_token_px = comet_token_px
self.comet_token_py = comet_token_py
self.begin_tags = {}
self.end_tags = {}
self.comet_none = comet_none
all_special_tokens = [self.bo1_token,
self.eo1_token,
self.bo2_token,
self.eo2_token,
self.bexpl_token,
self.eexpl_token,
self.comet_token_px,
self.comet_token_py,
self.comet_none
]
for obs in ['obs1', 'obs2']:
for category in all_categories:
self.begin_tags[(obs, category)] = "<{}{}>".format(obs, category)
self.end_tags[(obs, category)] = "</{}{}>".format(obs, category)
all_special_tokens.append("<{}{}>".format(obs, category))
all_special_tokens.append("</{}{}>".format(obs, category))
self.add_special_tokens({
"additional_special_tokens": all_special_tokens
})
def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
pass
def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
text = super().decode(token_ids, skip_special_tokens, clean_up_tokenization_spaces)
idx = text.find(self.eexpl_token)
if idx != -1:
text = text[:idx]
return text
def category_begin_tag(self, obs, category):
return self.begin_tags[(obs, category)]
def category_end_tag(self, obs, category):
return self.end_tags[(obs, category)]
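# Illustrative sketch (an assumption, not repo code): the "gpt2" shortcut name
# is assumed to resolve the vocab files for this GPT2Tokenizer subclass. The
# tags built in __init__ wrap COMET inferences per observation and ATOMIC
# category, e.g. "<obs1xEffect>" ... "</obs1xEffect>".
def _demo_category_tags():
    tok = AnliCometGpt2Tokenizer.from_pretrained("gpt2")
    return tok.category_begin_tag("obs1", "xEffect"), tok.category_end_tag("obs1", "xEffect")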
| abductive-commonsense-reasoning-master | anlg/tokenizers.py |
abductive-commonsense-reasoning-master | anlg/evaluation/__init__.py |
|
from anlg.evaluation.bleu.bleu import Bleu
from anlg.evaluation.meteor.meteor_nltk import Meteor
from anlg.evaluation.rouge.rouge import Rouge
from anlg.evaluation.cider.cider import Cider
from anlg.evaluation.bert_score.bert_score import BertScore
from collections import defaultdict
from argparse import ArgumentParser
import sys
import json
class QGEvalCap:
def __init__(self, model_key, gts, res, results_file):
self.gts = gts
self.res = res
self.results_file = results_file
self.model_key = model_key
def evaluate(self):
output = []
scorers = [
(Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
(Meteor(),"METEOR"),
(Rouge(), "ROUGE_L"),
(Cider(), "CIDEr"),
(BertScore(), "Bert Score")
]
# =================================================
# Compute scores
# =================================================
scores_dict = {}
scores_dict["model_key"] = self.model_key
for scorer, method in scorers:
# print 'computing %s score...'%(scorer.method())
score, scores = scorer.compute_score(self.gts, self.res)
if type(method) == list:
for sc, scs, m in zip(score, scores, method):
print("%s: %0.5f"%(m, sc))
output.append(sc)
scores_dict[m] = str(sc)
else:
print("%s: %0.5f"%(method, score))
output.append(score)
scores_dict[method] = score
with open(self.results_file, "a") as f:
f.write(json.dumps(scores_dict)+"\n")
return output
def eval(model_key, sources, references, predictions, results_file):
    """
    Compute metric scores for one model's predictions.
    `sources`, `references`, and `predictions` are aligned lists; the scores
    are appended to `results_file` as one JSON line per model key.
    """
pairs = []
for tup in sources:
pair = {}
pair['tokenized_sentence'] = tup
pairs.append(pair)
cnt = 0
for line in references:
pairs[cnt]['tokenized_question'] = line
cnt += 1
output = predictions
for idx, pair in enumerate(pairs):
pair['prediction'] = output[idx]
    ## eval
    # QGEvalCap is defined in this module and json is imported at the top,
    # so only the float-formatting hook is needed here.
    from json import encoder
    encoder.FLOAT_REPR = lambda o: format(o, '.4f')
res = defaultdict(lambda: [])
gts = defaultdict(lambda: [])
for pair in pairs[:]:
key = pair['tokenized_sentence']
#res[key] = [pair['prediction']]
res[key] = pair['prediction']
## gts
gts[key].append(pair['tokenized_question'])
QGEval = QGEvalCap(model_key, gts, res, results_file)
return QGEval.evaluate()
def preprocess(file_name, keys):
with open(file_name) as f:
data = f.readlines()
generations = [json.loads(elem) for elem in data]
predictions = {}
references = {}
sources = {}
keys_list = keys if keys!=None else generations[0]["generations"].keys()
for key in keys_list:
references[key] = []
predictions[key] = []
sources[key] = []
for elem in generations:
label = elem["label"]
hyp = elem["hyp"+label]
for key in keys_list:
if key in elem["generations"]:
references[key].append(hyp)
predictions[key].append(elem["generations"][key])
sources[key].append((elem["obs1"], elem["obs2"]))
return sources, references, predictions
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-gen_file", "--gen_file", dest="gen_file", help="generations file with gold/references")
parser.add_argument("--keys", type=str, default=None, help="comma-separated list of model keys")
parser.add_argument("--results_file", default="eval_results.jsonl")
args = parser.parse_args()
print("scores: \n")
keys=None
if args.keys:
keys = args.keys.split(",")
sources, references, predictions = preprocess(args.gen_file, keys)
for key in references.keys():
print("\nEvaluating %s" %key)
eval(key, sources[key], references[key], predictions[key], args.results_file)
| abductive-commonsense-reasoning-master | anlg/evaluation/eval.py |
# Filename: cider.py
#
# Description: Describes the class to compute the CIDEr (Consensus-Based Image Description Evaluation) Metric
# by Vedantam, Zitnick, and Parikh (http://arxiv.org/abs/1411.5726)
#
# Creation Date: Sun Feb 8 14:16:54 2015
#
# Authors: Ramakrishna Vedantam <[email protected]> and Tsung-Yi Lin <[email protected]>
from anlg.evaluation.cider.cider_scorer import CiderScorer
import pdb
class Cider:
"""
Main Class to compute the CIDEr metric
"""
def __init__(self, test=None, refs=None, n=4, sigma=6.0):
# set cider to sum over 1 to 4-grams
self._n = n
# set the standard deviation parameter for gaussian penalty
self._sigma = sigma
def compute_score(self, gts, res):
"""
Main function to compute CIDEr score
:param hypo_for_image (dict) : dictionary with key <image> and value <tokenized hypothesis / candidate sentence>
ref_for_image (dict) : dictionary with key <image> and value <tokenized reference sentence>
:return: cider (float) : computed CIDEr score for the corpus
"""
assert(gts.keys() == res.keys())
imgIds = gts.keys()
cider_scorer = CiderScorer(n=self._n, sigma=self._sigma)
for id in imgIds:
hypo = res[id]
ref = gts[id]
# Sanity check.
assert(type(hypo) is list)
assert(len(hypo) == 1)
assert(type(ref) is list)
assert(len(ref) > 0)
cider_scorer += (hypo[0], ref)
(score, scores) = cider_scorer.compute_score()
return score, scores
def method(self):
return "CIDEr"
| abductive-commonsense-reasoning-master | anlg/evaluation/cider/cider.py |
__author__ = 'tylin'
| abductive-commonsense-reasoning-master | anlg/evaluation/cider/__init__.py |
#!/usr/bin/env python
# Tsung-Yi Lin <[email protected]>
# Ramakrishna Vedantam <[email protected]>
import copy
from collections import defaultdict
import numpy as np
import pdb
import math
def precook(s, n=4, out=False):
"""
Takes a string as input and returns an object that can be given to
either cook_refs or cook_test. This is optional: cook_refs and cook_test
can take string arguments as well.
:param s: string : sentence to be converted into ngrams
:param n: int : number of ngrams for which representation is calculated
    :return: term frequency vector for occurring ngrams
"""
words = s.split()
counts = defaultdict(int)
for k in range(1,n+1):
for i in range(len(words)-k+1):
ngram = tuple(words[i:i+k])
counts[ngram] += 1
return counts
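# Illustrative sketch (toy input, not repo code): precook maps a sentence to
# n-gram counts; with n=2 below, ("the",) occurs twice and ("the", "cat") once.
def _demo_precook():
    counts = precook("the cat sat on the mat", n=2)
    return counts[("the",)], counts[("the", "cat")]   # -> (2, 1)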
def cook_refs(refs, n=4): ## lhuang: oracle will call with "average"
'''Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them.
:param refs: list of string : reference sentences for some image
:param n: int : number of ngrams for which (ngram) representation is calculated
:return: result (list of dict)
'''
return [precook(ref, n) for ref in refs]
def cook_test(test, n=4):
'''Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it.
:param test: list of string : hypothesis sentence for some image
:param n: int : number of ngrams for which (ngram) representation is calculated
:return: result (dict)
'''
return precook(test, n, True)
class CiderScorer(object):
"""CIDEr scorer.
"""
def copy(self):
''' copy the refs.'''
new = CiderScorer(n=self.n)
new.ctest = copy.copy(self.ctest)
new.crefs = copy.copy(self.crefs)
return new
def __init__(self, test=None, refs=None, n=4, sigma=6.0):
''' singular instance '''
self.n = n
self.sigma = sigma
self.crefs = []
self.ctest = []
self.document_frequency = defaultdict(float)
self.cook_append(test, refs)
self.ref_len = None
def cook_append(self, test, refs):
'''called by constructor and __iadd__ to avoid creating new instances.'''
if refs is not None:
self.crefs.append(cook_refs(refs))
if test is not None:
self.ctest.append(cook_test(test)) ## N.B.: -1
else:
self.ctest.append(None) # lens of crefs and ctest have to match
def size(self):
assert len(self.crefs) == len(self.ctest), "refs/test mismatch! %d<>%d" % (len(self.crefs), len(self.ctest))
return len(self.crefs)
def __iadd__(self, other):
'''add an instance (e.g., from another sentence).'''
if type(other) is tuple:
## avoid creating new CiderScorer instances
self.cook_append(other[0], other[1])
else:
self.ctest.extend(other.ctest)
self.crefs.extend(other.crefs)
return self
def compute_doc_freq(self):
'''
Compute term frequency for reference data.
This will be used to compute idf (inverse document frequency later)
The term frequency is stored in the object
:return: None
'''
for refs in self.crefs:
# refs, k ref captions of one image
for ngram in set([ngram for ref in refs for (ngram,count) in ref.items()]):
self.document_frequency[ngram] += 1
# maxcounts[ngram] = max(maxcounts.get(ngram,0), count)
def compute_cider(self):
def counts2vec(cnts):
"""
Function maps counts of ngram to vector of tfidf weights.
The function returns vec, an array of dictionary that store mapping of n-gram and tf-idf weights.
The n-th entry of array denotes length of n-grams.
:param cnts:
:return: vec (array of dict), norm (array of float), length (int)
"""
vec = [defaultdict(float) for _ in range(self.n)]
length = 0
norm = [0.0 for _ in range(self.n)]
for (ngram,term_freq) in cnts.items():
# give word count 1 if it doesn't appear in reference corpus
df = np.log(max(1.0, self.document_frequency[ngram]))
# ngram index
n = len(ngram)-1
# tf (term_freq) * idf (precomputed idf) for n-grams
vec[n][ngram] = float(term_freq)*(self.ref_len - df)
# compute norm for the vector. the norm will be used for computing similarity
norm[n] += pow(vec[n][ngram], 2)
if n == 1:
length += term_freq
norm = [np.sqrt(n) for n in norm]
return vec, norm, length
def sim(vec_hyp, vec_ref, norm_hyp, norm_ref, length_hyp, length_ref):
'''
Compute the cosine similarity of two vectors.
:param vec_hyp: array of dictionary for vector corresponding to hypothesis
:param vec_ref: array of dictionary for vector corresponding to reference
:param norm_hyp: array of float for vector corresponding to hypothesis
:param norm_ref: array of float for vector corresponding to reference
:param length_hyp: int containing length of hypothesis
:param length_ref: int containing length of reference
:return: array of score for each n-grams cosine similarity
'''
delta = float(length_hyp - length_ref)
            # measure cosine similarity
val = np.array([0.0 for _ in range(self.n)])
for n in range(self.n):
# ngram
for (ngram,count) in vec_hyp[n].items():
# vrama91 : added clipping
val[n] += min(vec_hyp[n][ngram], vec_ref[n][ngram]) * vec_ref[n][ngram]
if (norm_hyp[n] != 0) and (norm_ref[n] != 0):
val[n] /= (norm_hyp[n]*norm_ref[n])
assert(not math.isnan(val[n]))
# vrama91: added a length based gaussian penalty
val[n] *= np.e**(-(delta**2)/(2*self.sigma**2))
return val
# compute log reference length
self.ref_len = np.log(float(len(self.crefs)))
scores = []
for test, refs in zip(self.ctest, self.crefs):
# compute vector for test captions
vec, norm, length = counts2vec(test)
# compute vector for ref captions
score = np.array([0.0 for _ in range(self.n)])
for ref in refs:
vec_ref, norm_ref, length_ref = counts2vec(ref)
score += sim(vec, vec_ref, norm, norm_ref, length, length_ref)
# change by vrama91 - mean of ngram scores, instead of sum
score_avg = np.mean(score)
# divide by number of references
score_avg /= len(refs)
# multiply score by 10
score_avg *= 10.0
# append score of an image to the score list
scores.append(score_avg)
return scores
def compute_score(self, option=None, verbose=0):
# compute idf
self.compute_doc_freq()
# assert to check document frequency
assert(len(self.ctest) >= max(self.document_frequency.values()))
# compute cider score
score = self.compute_cider()
# debug
# print score
return np.mean(np.array(score)), np.array(score)
| abductive-commonsense-reasoning-master | anlg/evaluation/cider/cider_scorer.py |
#!/usr/bin/env python
# NLTK-based METEOR scorer, adapted from the Java METEOR wrapper by Xinlei Chen.
# Acknowledge Michael Denkowski for the generous discussion and help.
# This variant scores with nltk.translate.meteor_score, so meteor-1.5.jar is not needed.
from nltk.translate.meteor_score import meteor_score
class Meteor:
def __init__(self):
pass
def compute_score(self, gts, res):
assert(gts.keys() == res.keys())
imgIds = gts.keys()
scores = []
for i in imgIds:
assert(len(res[i]) == 1)
score = round(meteor_score(gts[i], res[i][0]), 4)
scores.append(score)
return sum(scores)/len(scores), scores
def method(self):
return "METEOR"
| abductive-commonsense-reasoning-master | anlg/evaluation/meteor/meteor_nltk.py |
__author__ = 'tylin'
| abductive-commonsense-reasoning-master | anlg/evaluation/meteor/__init__.py |
#!/usr/bin/env python
# Python wrapper for METEOR implementation, by Xinlei Chen
# Acknowledge Michael Denkowski for the generous discussion and help
import os
import sys
import subprocess
import threading
# Assumes meteor-1.5.jar is in the same directory as meteor.py. Change as needed.
METEOR_JAR = 'meteor-1.5.jar'
# print METEOR_JAR
class Meteor:
def __init__(self):
self.meteor_cmd = ['java', '-jar', '-Xmx2G', METEOR_JAR, \
'-', '-', '-stdio', '-l', 'en',
'-norm',
# '-t', 'adq'
# '-p', '0.85 0.2 0.6 0.75' # alpha beta gamma delta'',
# '-a', 'data/paraphrase-en.gz', '-m', 'exact stem paraphrase']
]
        self.meteor_p = subprocess.Popen(self.meteor_cmd,
                                         cwd=os.path.dirname(os.path.abspath(__file__)),
                                         stdin=subprocess.PIPE,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE,
                                         universal_newlines=True)  # text-mode pipes for Python 3
# Used to guarantee thread safety
self.lock = threading.Lock()
def compute_score(self, gts, res):
assert(gts.keys() == res.keys())
imgIds = gts.keys()
scores = []
eval_line = 'EVAL'
self.lock.acquire()
for i in imgIds:
assert(len(res[i]) == 1)
stat = self._stat(res[i][0], gts[i])
eval_line += ' ||| {}'.format(stat)
        self.meteor_p.stdin.write('{}\n'.format(eval_line))
        self.meteor_p.stdin.flush()
for i in range(0,len(imgIds)):
scores.append(float(self.meteor_p.stdout.readline().strip()))
score = float(self.meteor_p.stdout.readline().strip())
self.lock.release()
return score, scores
def method(self):
return "METEOR"
def _stat(self, hypothesis_str, reference_list):
# SCORE ||| reference 1 words ||| reference n words ||| hypothesis words
        hypothesis_str = hypothesis_str.replace('|||', '').replace('  ', ' ')  # collapse double spaces
score_line = ' ||| '.join(('SCORE', ' ||| '.join(reference_list), hypothesis_str))
# print score_line
        str_in = '{}\n'.format(score_line)
        self.meteor_p.stdin.write(str_in)
        self.meteor_p.stdin.flush()
        return self.meteor_p.stdout.readline().strip()
def _score(self, hypothesis_str, reference_list):
self.lock.acquire()
# SCORE ||| reference 1 words ||| reference n words ||| hypothesis words
        hypothesis_str = hypothesis_str.replace('|||', '').replace('  ', ' ')  # collapse double spaces
score_line = ' ||| '.join(('SCORE', ' ||| '.join(reference_list), hypothesis_str))
        self.meteor_p.stdin.write('{}\n'.format(score_line))
        self.meteor_p.stdin.flush()
stats = self.meteor_p.stdout.readline().strip()
eval_line = 'EVAL ||| {}'.format(stats)
# EVAL ||| stats
        self.meteor_p.stdin.write('{}\n'.format(eval_line))
        self.meteor_p.stdin.flush()
score = float(self.meteor_p.stdout.readline().strip())
# bug fix: there are two values returned by the jar file, one average, and one all, so do it twice
# thanks for Andrej for pointing this out
score = float(self.meteor_p.stdout.readline().strip())
self.lock.release()
return score
def __del__(self):
self.lock.acquire()
self.meteor_p.stdin.close()
self.meteor_p.kill()
self.meteor_p.wait()
self.lock.release()
| abductive-commonsense-reasoning-master | anlg/evaluation/meteor/meteor.py |
#!/usr/bin/env python
#
# File Name : bleu.py
#
# Description : Wrapper for BLEU scorer.
#
# Creation Date : 06-01-2015
# Last Modified : Thu 19 Mar 2015 09:13:28 PM PDT
# Authors : Hao Fang <[email protected]> and Tsung-Yi Lin <[email protected]>
from anlg.evaluation.bleu.bleu_scorer import BleuScorer
class Bleu:
def __init__(self, n=4):
        # default: compute BLEU score up to 4-grams
self._n = n
self._hypo_for_image = {}
self.ref_for_image = {}
def compute_score(self, gts, res):
assert(gts.keys() == res.keys())
imgIds = gts.keys()
bleu_scorer = BleuScorer(n=self._n)
for id in imgIds:
hypo = res[id]
ref = gts[id]
# Sanity check.
assert(type(hypo) is list)
assert(len(hypo) == 1)
assert(type(ref) is list)
assert(len(ref) >= 1)
bleu_scorer += (hypo[0], ref)
#score, scores = bleu_scorer.compute_score(option='shortest')
score, scores = bleu_scorer.compute_score(option='closest', verbose=0)
#score, scores = bleu_scorer.compute_score(option='average', verbose=1)
# return (bleu, bleu_info)
return score, scores
def method(self):
return "Bleu"
| abductive-commonsense-reasoning-master | anlg/evaluation/bleu/bleu.py |
__author__ = 'tylin'
| abductive-commonsense-reasoning-master | anlg/evaluation/bleu/__init__.py |
#!/usr/bin/env python
# bleu_scorer.py
# David Chiang <[email protected]>
# Copyright (c) 2004-2006 University of Maryland. All rights
# reserved. Do not redistribute without permission from the
# author. Not for commercial use.
# Modified by:
# Hao Fang <[email protected]>
# Tsung-Yi Lin <[email protected]>
'''Provides:
cook_refs(refs, n=4): Transform a list of reference sentences as strings into a form usable by cook_test().
cook_test(test, refs, n=4): Transform a test sentence as a string (together with the cooked reference sentences) into a form usable by score_cooked().
'''
import copy
import sys, math, re
from collections import defaultdict
def precook(s, n=4, out=False):
"""Takes a string as input and returns an object that can be given to
either cook_refs or cook_test. This is optional: cook_refs and cook_test
can take string arguments as well."""
words = s.split()
counts = defaultdict(int)
for k in range(1,n+1):
for i in range(len(words)-k+1):
ngram = tuple(words[i:i+k])
counts[ngram] += 1
return (len(words), counts)
def cook_refs(refs, eff=None, n=4): ## lhuang: oracle will call with "average"
'''Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them.'''
reflen = []
maxcounts = {}
for ref in refs:
rl, counts = precook(ref, n)
reflen.append(rl)
for (ngram,count) in counts.items():
maxcounts[ngram] = max(maxcounts.get(ngram,0), count)
# Calculate effective reference sentence length.
if eff == "shortest":
reflen = min(reflen)
elif eff == "average":
reflen = float(sum(reflen))/len(reflen)
    ## lhuang: N.B.: leave reflen computation to the very end!!
## lhuang: N.B.: in case of "closest", keep a list of reflens!! (bad design)
return (reflen, maxcounts)
def cook_test(test, tup, eff=None, n=4):
'''Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it.'''
(reflen, refmaxcounts) = tup
testlen, counts = precook(test, n, True)
result = {}
# Calculate effective reference sentence length.
if eff == "closest":
result["reflen"] = min((abs(l-testlen), l) for l in reflen)[1]
else: ## i.e., "average" or "shortest" or None
result["reflen"] = reflen
result["testlen"] = testlen
result["guess"] = [max(0,testlen-k+1) for k in range(1,n+1)]
result['correct'] = [0]*n
for (ngram, count) in counts.items():
result["correct"][len(ngram)-1] += min(refmaxcounts.get(ngram,0), count)
return result
class BleuScorer(object):
"""Bleu scorer.
"""
__slots__ = "n", "crefs", "ctest", "_score", "_ratio", "_testlen", "_reflen", "special_reflen"
# special_reflen is used in oracle (proportional effective ref len for a node).
def copy(self):
''' copy the refs.'''
new = BleuScorer(n=self.n)
new.ctest = copy.copy(self.ctest)
new.crefs = copy.copy(self.crefs)
new._score = None
return new
def __init__(self, test=None, refs=None, n=4, special_reflen=None):
''' singular instance '''
self.n = n
self.crefs = []
self.ctest = []
self.cook_append(test, refs)
self.special_reflen = special_reflen
def cook_append(self, test, refs):
'''called by constructor and __iadd__ to avoid creating new instances.'''
if refs is not None:
self.crefs.append(cook_refs(refs))
if test is not None:
cooked_test = cook_test(test, self.crefs[-1])
self.ctest.append(cooked_test) ## N.B.: -1
else:
self.ctest.append(None) # lens of crefs and ctest have to match
self._score = None ## need to recompute
def ratio(self, option=None):
self.compute_score(option=option)
return self._ratio
def score_ratio(self, option=None):
'''return (bleu, len_ratio) pair'''
return (self.fscore(option=option), self.ratio(option=option))
def score_ratio_str(self, option=None):
return "%.4f (%.2f)" % self.score_ratio(option)
def reflen(self, option=None):
self.compute_score(option=option)
return self._reflen
def testlen(self, option=None):
self.compute_score(option=option)
return self._testlen
def retest(self, new_test):
if type(new_test) is str:
new_test = [new_test]
assert len(new_test) == len(self.crefs), new_test
self.ctest = []
for t, rs in zip(new_test, self.crefs):
self.ctest.append(cook_test(t, rs))
self._score = None
return self
def rescore(self, new_test):
''' replace test(s) with new test(s), and returns the new score.'''
return self.retest(new_test).compute_score()
def size(self):
assert len(self.crefs) == len(self.ctest), "refs/test mismatch! %d<>%d" % (len(self.crefs), len(self.ctest))
return len(self.crefs)
def __iadd__(self, other):
'''add an instance (e.g., from another sentence).'''
if type(other) is tuple:
## avoid creating new BleuScorer instances
self.cook_append(other[0], other[1])
else:
assert self.compatible(other), "incompatible BLEUs."
self.ctest.extend(other.ctest)
self.crefs.extend(other.crefs)
self._score = None ## need to recompute
return self
def compatible(self, other):
return isinstance(other, BleuScorer) and self.n == other.n
def single_reflen(self, option="average"):
return self._single_reflen(self.crefs[0][0], option)
def _single_reflen(self, reflens, option=None, testlen=None):
if option == "shortest":
reflen = min(reflens)
elif option == "average":
reflen = float(sum(reflens))/len(reflens)
elif option == "closest":
reflen = min((abs(l-testlen), l) for l in reflens)[1]
else:
assert False, "unsupported reflen option %s" % option
return reflen
def recompute_score(self, option=None, verbose=0):
self._score = None
return self.compute_score(option, verbose)
def compute_score(self, option=None, verbose=0):
n = self.n
small = 1e-9
tiny = 1e-15 ## so that if guess is 0 still return 0
bleu_list = [[] for _ in range(n)]
if self._score is not None:
return self._score
if option is None:
option = "average" if len(self.crefs) == 1 else "closest"
self._testlen = 0
self._reflen = 0
totalcomps = {'testlen':0, 'reflen':0, 'guess':[0]*n, 'correct':[0]*n}
# for each sentence
for comps in self.ctest:
testlen = comps['testlen']
self._testlen += testlen
if self.special_reflen is None: ## need computation
reflen = self._single_reflen(comps['reflen'], option, testlen)
else:
reflen = self.special_reflen
self._reflen += reflen
for key in ['guess','correct']:
for k in range(n):
totalcomps[key][k] += comps[key][k]
# append per image bleu score
bleu = 1.
for k in range(n):
bleu *= (float(comps['correct'][k]) + tiny) \
/(float(comps['guess'][k]) + small)
bleu_list[k].append(bleu ** (1./(k+1)))
ratio = (testlen + tiny) / (reflen + small) ## N.B.: avoid zero division
if ratio < 1:
for k in range(n):
bleu_list[k][-1] *= math.exp(1 - 1/ratio)
if verbose > 1:
print(comps, reflen)
totalcomps['reflen'] = self._reflen
totalcomps['testlen'] = self._testlen
bleus = []
bleu = 1.
for k in range(n):
bleu *= float(totalcomps['correct'][k] + tiny) \
/ (totalcomps['guess'][k] + small)
bleus.append(bleu ** (1./(k+1)))
ratio = (self._testlen + tiny) / (self._reflen + small) ## N.B.: avoid zero division
if ratio < 1:
for k in range(n):
bleus[k] *= math.exp(1 - 1/ratio)
if verbose > 0:
print(totalcomps)
print("ratio:", ratio)
self._score = bleus
return self._score, bleu_list
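# Worked sketch (illustration, not repo code) of the brevity penalty applied
# above: a 4-token hypothesis against a 6-token reference gives ratio = 4/6 < 1,
# so every BLEU-n is scaled by exp(1 - 1/ratio) = exp(-0.5) ~= 0.607.
def _demo_brevity_penalty():
    ratio = 4.0 / 6.0
    return math.exp(1 - 1 / ratio)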
| abductive-commonsense-reasoning-master | anlg/evaluation/bleu/bleu_scorer.py |
from bert_score import score
# Code for BertScore reused from original implementation: https://github.com/Tiiiger/bert_score
class BertScore:
def __init__(self):
self._hypo_for_image = {}
self.ref_for_image = {}
def compute_score(self, gts, res):
assert(gts.keys() == res.keys())
imgIds = gts.keys()
hyp_input = []
ref_input = []
same_indices = []
for id in imgIds:
hypo = res[id]
ref = gts[id]
# Sanity check.
assert(type(hypo) is list)
assert(len(hypo) == 1)
assert(type(ref) is list)
assert(len(ref) >= 1)
hyp_input += [hypo[0]] * len(ref)
ref_input += ref
same_indices.append(len(ref_input))
p, r, f_scores = score.score(hyp_input, ref_input, bert="bert-base-uncased")
prev_idx = 0
aggreg_f1_scores = []
for idx in same_indices:
aggreg_f1_scores.append(f_scores[prev_idx: idx].mean().cpu().item())
prev_idx = idx
return sum(aggreg_f1_scores)/len(aggreg_f1_scores), aggreg_f1_scores
def method(self):
return "Bert Score"
| abductive-commonsense-reasoning-master | anlg/evaluation/bert_score/bert_score.py |
abductive-commonsense-reasoning-master | anlg/evaluation/bert_score/__init__.py |
|
import torch
from math import log
from itertools import chain
from collections import defaultdict, Counter
from multiprocessing import Pool
from functools import partial
from tqdm.auto import tqdm
__all__ = ['bert_types']
bert_types = [
'bert-base-uncased',
'bert-large-uncased',
'bert-base-cased',
'bert-large-cased',
'bert-base-multilingual-uncased',
'bert-base-multilingual-cased',
'bert-base-chinese',
]
def padding(arr, pad_token, dtype=torch.long):
lens = torch.LongTensor([len(a) for a in arr])
max_len = lens.max().item()
padded = torch.ones(len(arr), max_len, dtype=dtype) * pad_token
mask = torch.zeros(len(arr), max_len, dtype=torch.long)
for i, a in enumerate(arr):
padded[i, :lens[i]] = torch.tensor(a, dtype=dtype)
mask[i, :lens[i]] = 1
return padded, lens, mask
def bert_encode(model, x, attention_mask):
model.eval()
x_seg = torch.zeros_like(x, dtype=torch.long)
with torch.no_grad():
x_encoded_layers, pooled_output = model(x, x_seg, attention_mask=attention_mask, output_all_encoded_layers=False)
return x_encoded_layers
def process(a, tokenizer=None):
    if tokenizer is not None:
a = ["[CLS]"]+tokenizer.tokenize(a)+["[SEP]"]
a = tokenizer.convert_tokens_to_ids(a)
return set(a)
def get_idf_dict(arr, tokenizer, nthreads=4):
"""
Returns mapping from word piece index to its inverse document frequency.
Args:
- :param: `arr` (list of str) : sentences to process.
- :param: `tokenizer` : a BERT tokenizer corresponds to `model`.
- :param: `nthreads` (int) : number of CPU threads to use
"""
idf_count = Counter()
num_docs = len(arr)
process_partial = partial(process, tokenizer=tokenizer)
with Pool(nthreads) as p:
idf_count.update(chain.from_iterable(p.map(process_partial, arr)))
idf_dict = defaultdict(lambda : log((num_docs+1)/(1)))
idf_dict.update({idx:log((num_docs+1)/(c+1)) for (idx, c) in idf_count.items()})
return idf_dict
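# Illustrative sketch (toy numbers, not repo code): with num_docs documents, a
# word piece seen in c of them gets idf = log((num_docs + 1) / (c + 1)), and
# unseen pieces default to log(num_docs + 1).
def _demo_idf():
    num_docs, c = 100, 9
    return log((num_docs + 1) / (c + 1))   # ~2.31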
def collate_idf(arr, tokenize, numericalize, idf_dict,
pad="[PAD]", device='cuda:0'):
"""
    Helper function that pads a list of sentences to have the same length and
    loads idf scores for words in the sentences.
Args:
- :param: `arr` (list of str): sentences to process.
- :param: `tokenize` : a function that takes a string and return list
of tokens.
- :param: `numericalize` : a function that takes a list of tokens and
return list of token indexes.
- :param: `idf_dict` (dict): mapping a word piece index to its
inverse document frequency
- :param: `pad` (str): the padding token.
- :param: `device` (str): device to use, e.g. 'cpu' or 'cuda'
"""
arr = [["[CLS]"]+tokenize(a)+["[SEP]"] for a in arr]
arr = [numericalize(a) for a in arr]
idf_weights = [[idf_dict[i] for i in a] for a in arr]
pad_token = numericalize([pad])[0]
padded, lens, mask = padding(arr, pad_token, dtype=torch.long)
padded_idf, _, _ = padding(idf_weights, pad_token, dtype=torch.float)
padded = padded.to(device=device)
mask = mask.to(device=device)
lens = lens.to(device=device)
return padded, padded_idf, lens, mask
def get_bert_embedding(all_sens, model, tokenizer, idf_dict,
batch_size=-1, device='cuda:0'):
"""
Compute BERT embedding in batches.
Args:
- :param: `all_sens` (list of str) : sentences to encode.
- :param: `model` : a BERT model from `pytorch_pretrained_bert`.
- :param: `tokenizer` : a BERT tokenizer corresponds to `model`.
- :param: `idf_dict` (dict) : mapping a word piece index to its
inverse document frequency
- :param: `device` (str): device to use, e.g. 'cpu' or 'cuda'
"""
padded_sens, padded_idf, lens, mask = collate_idf(all_sens,
tokenizer.tokenize, tokenizer.convert_tokens_to_ids,
idf_dict,
device=device)
if batch_size == -1: batch_size = len(all_sens)
embeddings = []
with torch.no_grad():
for i in range(0, len(all_sens), batch_size):
batch_embedding = bert_encode(model, padded_sens[i:i+batch_size],
attention_mask=mask[i:i+batch_size])
# batch_embedding = torch.stack(batch_embedding)
embeddings.append(batch_embedding)
del batch_embedding
total_embedding = torch.cat(embeddings, dim=0)
return total_embedding, lens, mask, padded_idf
def greedy_cos_idf(ref_embedding, ref_lens, ref_masks, ref_idf,
hyp_embedding, hyp_lens, hyp_masks, hyp_idf):
"""
Compute greedy matching based on cosine similarity.
Args:
- :param: `ref_embedding` (torch.Tensor):
embeddings of reference sentences, BxKxd,
B: batch size, K: longest length, d: bert dimenison
- :param: `ref_lens` (list of int): list of reference sentence length.
- :param: `ref_masks` (torch.LongTensor): BxKxK, BERT attention mask for
reference sentences.
- :param: `ref_idf` (torch.Tensor): BxK, idf score of each word
piece in the reference setence
- :param: `hyp_embedding` (torch.Tensor):
embeddings of candidate sentences, BxKxd,
B: batch size, K: longest length, d: bert dimenison
- :param: `hyp_lens` (list of int): list of candidate sentence length.
- :param: `hyp_masks` (torch.LongTensor): BxKxK, BERT attention mask for
candidate sentences.
- :param: `hyp_idf` (torch.Tensor): BxK, idf score of each word
piece in the candidate setence
"""
ref_embedding.div_(torch.norm(ref_embedding, dim=-1).unsqueeze(-1))
hyp_embedding.div_(torch.norm(hyp_embedding, dim=-1).unsqueeze(-1))
batch_size = ref_embedding.size(0)
sim = torch.bmm(hyp_embedding, ref_embedding.transpose(1, 2))
masks = torch.bmm(hyp_masks.unsqueeze(2).float(), ref_masks.unsqueeze(1).float())
masks = masks.expand(batch_size, masks.size(1), masks.size(2))\
.contiguous().view_as(sim)
masks = masks.float().to(sim.device)
sim = sim * masks
word_precision = sim.max(dim=2)[0]
word_recall = sim.max(dim=1)[0]
hyp_idf.div_(hyp_idf.sum(dim=1, keepdim=True))
ref_idf.div_(ref_idf.sum(dim=1, keepdim=True))
precision_scale = hyp_idf.to(word_precision.device)
recall_scale = ref_idf.to(word_recall.device)
P = (word_precision * precision_scale).sum(dim=1)
R = (word_recall * recall_scale).sum(dim=1)
F = 2 * P * R / (P + R)
return P, R, F
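# Minimal sketch (toy tensors, not repo code) of the greedy matching above:
# precision takes each candidate token's best reference match (max over dim 2),
# recall takes each reference token's best candidate match (max over dim 1);
# idf weighting is dropped here for clarity.
def _demo_greedy_matching():
    sim = torch.tensor([[[0.9, 0.1, 0.3],
                         [0.2, 0.8, 0.4]]])   # 1 x |hyp| x |ref| cosine similarities
    word_precision = sim.max(dim=2)[0]        # best reference match per candidate token
    word_recall = sim.max(dim=1)[0]           # best candidate match per reference token
    P, R = word_precision.mean(), word_recall.mean()
    return 2 * P * R / (P + R)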
def bert_cos_score_idf(model, refs, hyps, tokenizer, idf_dict,
verbose=False, batch_size=64, device='cuda:0'):
"""
Compute BERTScore.
Args:
- :param: `model` : a BERT model in `pytorch_pretrained_bert`
- :param: `refs` (list of str): reference sentences
- :param: `hyps` (list of str): candidate sentences
        - :param: `tokenizer` : a BERT tokenizer corresponding to `model`
- :param: `idf_dict` : a dictionary mapping a word piece index to its
inverse document frequency
- :param: `verbose` (bool): turn on intermediate status update
- :param: `batch_size` (int): bert score processing batch size
- :param: `device` (str): device to use, e.g. 'cpu' or 'cuda'
"""
preds = []
iter_range = range(0, len(refs), batch_size)
if verbose: iter_range = tqdm(iter_range)
for batch_start in iter_range:
batch_refs = refs[batch_start:batch_start+batch_size]
batch_hyps = hyps[batch_start:batch_start+batch_size]
ref_stats = get_bert_embedding(batch_refs, model, tokenizer, idf_dict,
device=device)
hyp_stats = get_bert_embedding(batch_hyps, model, tokenizer, idf_dict,
device=device)
P, R, F1 = greedy_cos_idf(*ref_stats, *hyp_stats)
preds.append(torch.stack((P, R, F1), dim=1).cpu())
preds = torch.cat(preds, dim=0)
return preds
| abductive-commonsense-reasoning-master | anlg/evaluation/bert_score/utils.py |
import os
import time
import argparse
import torch
from collections import defaultdict
from pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from .utils import get_idf_dict, bert_cos_score_idf,\
get_bert_embedding, bert_types
__all__ = ['score', 'plot_example']
def score(cands, refs, bert="bert-base-multilingual-cased",
num_layers=8, verbose=False, no_idf=False, batch_size=64):
"""
BERTScore metric.
Args:
- :param: `cands` (list of str): candidate sentences
- :param: `refs` (list of str): reference sentences
- :param: `bert` (str): bert specification
- :param: `num_layers` (int): the layer of representation to use
- :param: `verbose` (bool): turn on intermediate status update
- :param: `no_idf` (bool): do not use idf weighting
- :param: `batch_size` (int): bert score processing batch size
"""
assert len(cands) == len(refs)
assert bert in bert_types
tokenizer = BertTokenizer.from_pretrained(bert)
model = BertModel.from_pretrained(bert)
model.eval()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)
# drop unused layers
model.encoder.layer = torch.nn.ModuleList([layer for layer in model.encoder.layer[:num_layers]])
if no_idf:
idf_dict = defaultdict(lambda: 1.)
        # zero out idf for [CLS] (id 101) and [SEP] (id 102)
        idf_dict[101] = 0  # [CLS]
        idf_dict[102] = 0  # [SEP]
else:
if verbose:
print('preparing IDF dict...')
start = time.perf_counter()
idf_dict = get_idf_dict(refs, tokenizer)
if verbose:
print('done in {:.2f} seconds'.format(time.perf_counter() - start))
if verbose:
print('calculating scores...')
start = time.perf_counter()
all_preds = bert_cos_score_idf(model, refs, cands, tokenizer, idf_dict,
verbose=verbose, device=device, batch_size=batch_size)
P = all_preds[:, 0].cpu()
R = all_preds[:, 1].cpu()
F1 = all_preds[:, 2].cpu()
if verbose:
print('done in {:.2f} seconds'.format(time.perf_counter() - start))
return P, R, F1
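# Hedged usage sketch (toy pair, not from the repo): BERT weights are downloaded
# on first use, so this is illustrative only.
def _demo_bertscore():
    P, R, F1 = score(["the weather is cold today"],
                     ["it is freezing today"],
                     no_idf=True)
    return F1.mean().item()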
def plot_example(h, r, verbose=False, bert="bert-base-multilingual-cased",
num_layers=8, fname=''):
"""
    Plot the BERTScore similarity matrix for a single candidate/reference pair.
    Args:
        - :param: `h` (str): a candidate sentence
        - :param: `r` (str): a reference sentence
        - :param: `verbose` (bool): turn on intermediate status update
        - :param: `bert` (str): bert specification
        - :param: `num_layers` (int): the layer of representation to use
        - :param: `fname` (str): if non-empty, save the heatmap to `fname`.png
    """
assert bert in bert_types
if verbose:
print('loading BERT model...')
tokenizer = BertTokenizer.from_pretrained(bert)
model = BertModel.from_pretrained(bert)
model.eval()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)
h_tokens = ['[CLS]'] + tokenizer.tokenize(h) + ['[SEP]']
r_tokens = ['[CLS]'] + tokenizer.tokenize(r) + ['[SEP]']
model.encoder.layer = torch.nn.ModuleList([layer for layer in model.encoder.layer[:num_layers]])
idf_dict = defaultdict(lambda: 1.)
ref_embedding, ref_lens, ref_masks, padded_idf = get_bert_embedding([r], model, tokenizer, idf_dict,
device=device)
hyp_embedding, ref_lens, ref_masks, padded_idf = get_bert_embedding([h], model, tokenizer, idf_dict,
device=device)
ref_embedding.div_(torch.norm(ref_embedding, dim=-1).unsqueeze(-1))
hyp_embedding.div_(torch.norm(hyp_embedding, dim=-1).unsqueeze(-1))
batch_size = ref_embedding.size(1)
sim = torch.bmm(hyp_embedding, ref_embedding.transpose(1, 2)).cpu()
sim = sim.squeeze(0).numpy()
# remove [CLS] and [SEP] tokens
r_tokens = r_tokens[1:-1]
h_tokens = h_tokens[1:-1]
sim = sim[1:-1,1:-1]
fig, ax = plt.subplots(figsize=(len(r_tokens)*0.8, len(h_tokens)*0.8))
im = ax.imshow(sim, cmap='Blues')
# We want to show all ticks...
ax.set_xticks(np.arange(len(r_tokens)))
ax.set_yticks(np.arange(len(h_tokens)))
# ... and label them with the respective list entries
ax.set_xticklabels(r_tokens, fontsize=10)
ax.set_yticklabels(h_tokens, fontsize=10)
plt.xlabel("Refernce", fontsize=10)
plt.ylabel("Candidate", fontsize=10)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
for i in range(len(h_tokens)):
for j in range(len(r_tokens)):
text = ax.text(j, i, '{:.3f}'.format(sim[i, j]),
ha="center", va="center", color="k" if sim[i, j] < 0.6 else "w")
# P = sim.max(1).mean()
# R = sim.max(0).mean()
# F1 = 2 * P * R / (P + R)
fig.tight_layout()
# plt.title("BERT-F1: {:.3f}".format(F1), fontsize=10)
if fname != "":
print("Saved figure to file: ", fname+".png")
plt.savefig(fname+'.png', dpi=100)
plt.show()
| abductive-commonsense-reasoning-master | anlg/evaluation/bert_score/score.py |
#!/usr/bin/env python
#
# File Name : rouge.py
#
# Description : Computes ROUGE-L metric as described by Lin and Hovey (2004)
#
# Creation Date : 2015-01-07 06:03
# Author : Ramakrishna Vedantam <[email protected]>
import numpy as np
import pdb
def my_lcs(string, sub):
"""
Calculates longest common subsequence for a pair of tokenized strings
:param string : list of str : tokens from a string split using whitespace
:param sub : list of str : shorter string, also split using whitespace
    :returns: length (int): length of the longest common subsequence between the two strings
Note: my_lcs only gives length of the longest common subsequence, not the actual LCS
"""
if(len(string)< len(sub)):
sub, string = string, sub
lengths = [[0 for i in range(0,len(sub)+1)] for j in range(0,len(string)+1)]
for j in range(1,len(sub)+1):
for i in range(1,len(string)+1):
if(string[i-1] == sub[j-1]):
lengths[i][j] = lengths[i-1][j-1] + 1
else:
lengths[i][j] = max(lengths[i-1][j] , lengths[i][j-1])
return lengths[len(string)][len(sub)]
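# Illustrative sketch (toy tokens, not repo code): "the cat sat" vs. "the sat cat"
# share a longest common subsequence of length 2 (e.g. "the cat" or "the sat").
def _demo_my_lcs():
    return my_lcs("the cat sat".split(), "the sat cat".split())   # -> 2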
class Rouge():
'''
Class for computing ROUGE-L score for a set of candidate sentences for the MS COCO test set
'''
def __init__(self):
        # vrama91: updated the value below based on discussion with Hovy
self.beta = 1.2
def calc_score(self, candidate, refs):
"""
Compute ROUGE-L score given one candidate and references for an image
:param candidate: str : candidate sentence to be evaluated
:param refs: list of str : COCO reference sentences for the particular image to be evaluated
        :returns score: float (ROUGE-L score for the candidate evaluated against references)
"""
assert(len(candidate)==1)
assert(len(refs)>0)
prec = []
rec = []
# split into tokens
token_c = candidate[0].split(" ")
for reference in refs:
# split into tokens
token_r = reference.split(" ")
# compute the longest common subsequence
lcs = my_lcs(token_r, token_c)
prec.append(lcs/float(len(token_c)))
rec.append(lcs/float(len(token_r)))
prec_max = max(prec)
rec_max = max(rec)
if(prec_max!=0 and rec_max !=0):
score = ((1 + self.beta**2)*prec_max*rec_max)/float(rec_max + self.beta**2*prec_max)
else:
score = 0.0
return score
def compute_score(self, gts, res):
"""
Computes Rouge-L score given a set of reference and candidate sentences for the dataset
Invoked by evaluate_captions.py
        :param gts: dict : reference MS-COCO sentences with "image name" key and "tokenized sentences" as values
        :param res: dict : candidate / test sentences with "image name" key and "tokenized sentences" as values
:returns: average_score: float (mean ROUGE-L score computed by averaging scores for all the images)
"""
assert(gts.keys() == res.keys())
imgIds = gts.keys()
score = []
for id in imgIds:
hypo = res[id]
ref = gts[id]
score.append(self.calc_score(hypo, ref))
# Sanity check.
assert(type(hypo) is list)
assert(len(hypo) == 1)
assert(type(ref) is list)
assert(len(ref) > 0)
average_score = np.mean(np.array(score))
return average_score, np.array(score)
def method(self):
return "Rouge"
| abductive-commonsense-reasoning-master | anlg/evaluation/rouge/rouge.py |
__author__ = 'vrama91'
| abductive-commonsense-reasoning-master | anlg/evaluation/rouge/__init__.py |
import argparse
import json
from transformers import BertTokenizer
import tqdm
from anli.data_processors import AnliProcessor
def data_processor_by_name(task_name):
if task_name == "anli":
        return AnliProcessor()
    raise ValueError("Unsupported task: {}".format(task_name))
def main(args):
data_dir = args.data_dir
bert_model = args.bert_model
task_name = args.task_name
threshold = args.threshold
data_processor = data_processor_by_name(task_name)
all_examples = data_processor.get_train_examples(data_dir) + \
data_processor.get_dev_examples(data_dir)
tokenizer = BertTokenizer.from_pretrained(bert_model)
segment_1_lengths = []
segment_2_lengths = []
for example in tqdm.tqdm(all_examples):
for option in example.get_option_segments():
context_tokens = tokenizer.tokenize(option['segment1'])
segment_1_lengths.append(len(context_tokens))
if "segment2" in option:
option_tokens = tokenizer.tokenize(option["segment2"])
segment_2_lengths.append(len(option_tokens))
m1 = max(segment_1_lengths)
m2 = 0
print("Max Segment 1: {}".format(m1))
if len(segment_2_lengths) > 0:
m2 = max(segment_2_lengths)
print("Max Segment 2: {}".format(m2))
s = [x + y for x, y in zip(segment_1_lengths, segment_2_lengths)]
print("Set max ctx >= {}".format(max(s) + 3))
num = sum([i > threshold for i in s])
print("No. more than {} = {}".format(threshold, num))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Finetune BERT model and save')
# Required Parameters
parser.add_argument('--data_dir', type=str, help='Location of data', default=None)
parser.add_argument('--bert_model', type=str, help='Bert model', default="bert-base-uncased")
    parser.add_argument('--task_name', type=str, help='Task name', default="anli")
parser.add_argument('--threshold', type=int, help='Threshold for truncation', default=256)
args = parser.parse_args()
print('====Input Arguments====')
print(json.dumps(vars(args), indent=2, sort_keys=True))
print("=======================")
main(args) | abductive-commonsense-reasoning-master | anli/max_ctx_for_dataset.py |
import argparse
import json
import numpy as np
from utils.file_utils import read_jsonl_lines, read_lines
def _key(r):
return r['obs1'] + '||' + r['obs2']
def correct_middle(r):
return r['hyp' + r['label']]
def incorrect_middle(r):
if r['label'] == "1":
return r['hyp2']
else:
return r['hyp1']
def mean_word_lens(lst):
return round(np.mean([len(s.split()) for s in lst]),2)
def main(args):
input_file = args.input_file
labels_file = args.label_file
stories = read_jsonl_lines(input_file)
labels = read_lines(labels_file)
all_begins = []
all_endings = []
stories_by_key = {}
for s, label in zip(stories, labels):
s['label'] = label
key = _key(s)
if key not in stories_by_key:
stories_by_key[key] = []
stories_by_key[key].append(s)
all_begins.append(s['obs1'])
all_endings.append(s['obs2'])
num_correct_middles_per_story = []
num_incorrect_middles_per_story = []
all_correct_middles = []
all_incorrect_middles = []
all_begins = list(set(all_begins))
all_endings = list(set(all_endings))
for k, stories in stories_by_key.items():
num_correct_middles_per_story.append(len(set([correct_middle(r) for r in stories])))
num_incorrect_middles_per_story.append(len(set([incorrect_middle(r) for r in stories])))
all_correct_middles.extend(list(set([correct_middle(r) for r in stories])))
all_incorrect_middles.extend(list(set([incorrect_middle(r) for r in stories])))
print("No. of train stories: {}".format(len(stories_by_key)))
print("Mean of no. of correct middles = {}".format(round(np.mean(num_correct_middles_per_story), 2)))
print("Mean of no. of incorrect middles = {}".format(round(np.mean(num_incorrect_middles_per_story), 2)))
print("Mean of no. of words in correct middles = {}".format(mean_word_lens(all_correct_middles)))
print("Mean of no. of words in incorrect middles = {}".format(mean_word_lens(all_incorrect_middles)))
print("No. correct middles = {}".format(len(all_correct_middles)))
print("No. incorrect middles = {}".format(len(all_incorrect_middles)))
print("Mean of no. of words in Begins = {}".format(mean_word_lens(all_begins)))
print("Mean of no. of words in Endings = {}".format(mean_word_lens(all_endings)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
        description='Script to compute corpus statistics')
# Required Parameters
parser.add_argument('--input_file', type=str, help='Location of data', default=None)
parser.add_argument('--label_file', type=str, help='Location of data', default=None)
args = parser.parse_args()
print('====Input Arguments====')
print(json.dumps(vars(args), indent=2, sort_keys=True))
print("=======================")
main(args) | abductive-commonsense-reasoning-master | anli/corpus_statistics.py |
abductive-commonsense-reasoning-master | anli/__init__.py |
import csv
import json
import logging
import os
from abc import ABC
import torch
from pytorch_transformers import BertTokenizer
from torch.utils.data import TensorDataset, SequentialSampler, DataLoader, RandomSampler
from utils.file_utils import read_lines, read_jsonl_lines
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class MultiFormatDataProcessor(DataProcessor, ABC):
@classmethod
def _read_tsv(cls, input_file, quotechar=None, delimiter="\t"):
"""Reads a tab separated value file."""
with open(input_file, "r") as f:
reader = csv.reader(f, delimiter=delimiter, quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
@classmethod
def _read_jsonl(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
records = []
with open(input_file, "r") as f:
for line in f:
obj = json.loads(line)
records.append(obj)
return records
class McExample(object):
def get_option_segments(self):
raise NotImplementedError
class MultipleChoiceFeatures(object):
def __init__(self,
example_id,
option_features,
label=None):
self.example_id = example_id
self.option_features = self.choices_features = [
{
'input_ids': input_ids,
'input_mask': input_mask,
'segment_ids': segment_ids
}
for _, input_ids, input_mask, segment_ids in option_features
]
if label is not None:
self.label = int(label) - 1
else:
self.label = None
class AnliExample(object):
def __init__(self,
example_id,
beginning: str,
middle_options: list,
ending: str,
label=None):
self.example_id = example_id
self.beginning = beginning
self.ending = ending
self.middle_options = middle_options
self.label = label
def __str__(self):
return self.__repr__()
def __repr__(self):
lines = [
"example_id:\t{}".format(self.example_id),
"beginning:\t{}".format(self.beginning)
]
for idx, option in enumerate(self.middle_options):
lines.append("option{}:\t{}".format(idx, option))
lines.append("ending:\t{}".format(self.ending))
if self.label is not None:
lines.append("label:\t{}".format(self.label))
return ", ".join(lines)
def to_json(self):
return {
"story_id": self.example_id,
"obs1": self.beginning,
"obs2": self.ending,
"hyp1": self.middle_options[0],
"hyp2": self.middle_options[1],
"label": self.label
}
def to_middles_only_format(self):
return [
{
"segment1": self.middle_options[0]
},
{
"segment1": self.middle_options[1]
}
]
def to_middles_sequence_format(self):
return [
{
"segment1": self.middle_options[0],
"segment2": self.middle_options[1]
}
]
def to_bm_e_format(self):
return [{
"segment1": ' '.join([self.beginning, option]),
"segment2": self.ending
} for option in self.middle_options]
def to_b_me_format(self):
return [
{
"segment1": self.beginning,
"segment2": ' '.join([self.middle_options[0], self.ending])
},
{
"segment1": self.beginning,
"segment2": ' '.join([self.middle_options[1], self.ending])
}
]
def to_b2m_m2e_format(self):
return [
{
"segment1": self.beginning,
"segment2": self.middle_options[0]
},
{
"segment1": self.middle_options[0],
"segment2": self.ending
},
{
"segment1": self.beginning,
"segment2": self.middle_options[1]
},
{
"segment1": self.middle_options[1],
"segment2": self.ending
}
]
def to_b2m_bm2e_format(self):
return [
{
"segment1": self.beginning,
"segment2": self.middle_options[0]
},
{
"segment1": self.beginning + ' ' + self.middle_options[0],
"segment2": self.ending
},
{
"segment1": self.beginning,
"segment2": self.middle_options[1]
},
{
"segment1": self.beginning + ' ' + self.middle_options[1],
"segment2": self.ending
}
]
def to_b_m_e_format(self):
return [
{
"segment1": self.beginning,
},
{
"segment1": self.middle_options[0]
},
{
"segment1": self.ending
},
{
"segment1": self.beginning,
},
{
"segment1": self.middle_options[1]
},
{
"segment1": self.ending
}
]
def to_b2m_m2e_m1_m2_format(self):
return self.to_b2m_m2e_format() \
+ [
{
"segment1": self.middle_options[0],
},
{
"segment1": self.middle_options[1],
}
]
def to_b_m1_m2_e_format(self):
return [
{
"segment1": self.beginning
},
{
"segment1": self.middle_options[0]
},
{
"segment1": self.middle_options[1]
},
{
"segment1": self.ending
}
]
def to_b2m_format(self):
return [
{
"segment1": self.beginning,
"segment2": self.middle_options[0]
},
{
"segment1": self.beginning,
"segment2": self.middle_options[1]
}
]
def to_m2e_format(self):
return [
{
"segment1": self.middle_options[0],
"segment2": self.ending
},
{
"segment1": self.middle_options[1],
"segment2": self.ending
}
]
def get_option_segments(self):
return self.to_bm_e_format()
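# Example (added for illustration): for beginning "B", middles ["M1", "M2"] and
# ending "E", AnliExample.get_option_segments() yields the "bm_e" pairs
# [{"segment1": "B M1", "segment2": "E"}, {"segment1": "B M2", "segment2": "E"}].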
class AnliProcessor(MultiFormatDataProcessor):
"""Processor for the ANLI data set."""
def get_train_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.jsonl")))
return self.get_examples_from_file(
os.path.join(data_dir, "train.jsonl"),
os.path.join(data_dir, "train-labels.lst"),
"train"
)
def get_dev_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {}".format(os.path.join(data_dir, "dev.jsonl")))
return self.get_examples_from_file(
os.path.join(data_dir, "dev.jsonl"),
os.path.join(data_dir, "dev-labels.lst"),
"train"
)
def get_test_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {}".format(os.path.join(data_dir, "test.jsonl")))
return self.get_examples_from_file(
os.path.join(data_dir, "test.jsonl"),
os.path.join(data_dir, "test-labels.lst"),
"train"
)
def get_examples_from_file(self, input_file, labels_file=None, split="predict"):
if labels_file is not None:
return self._create_examples(
self._read_jsonl(input_file),
read_lines(labels_file),
split
)
else:
return self._create_examples(
self._read_jsonl(input_file)
)
def get_labels(self):
"""See base class."""
return ["1", "2"]
def _create_examples(self, records, labels=None, set_type="predict"):
"""Creates examples for the training and dev sets."""
examples = []
if labels is None:
labels = [None] * len(records)
for (i, (record, label)) in enumerate(zip(records, labels)):
guid = "%s" % (record['story_id'])
beginning = record['obs1']
ending = record['obs2']
option1 = record['hyp1']
option2 = record['hyp2']
examples.append(
AnliExample(example_id=guid,
beginning=beginning,
middle_options=[option1, option2],
ending=ending,
label=label
)
)
return examples
def label_field(self):
return "label"
class AnliMultiDistractorProcessor(MultiFormatDataProcessor):
"""Multiple Distractor during training for the ANLI data set."""
def get_train_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {}".format(os.path.join(data_dir,
"anli-train-multi-distractors.jsonl")))
return self.get_multi_distractor_examples_from_file(
os.path.join(data_dir, "anli-train-multi-distractors.jsonl")
)
def get_dev_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {}".format(os.path.join(data_dir, "dev.jsonl")))
return self.get_examples_from_file(
os.path.join(data_dir, "dev.jsonl"),
os.path.join(data_dir, "dev-labels.lst"),
"train"
)
def get_test_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {}".format(os.path.join(data_dir, "test.jsonl")))
return self.get_examples_from_file(
os.path.join(data_dir, "test.jsonl"),
os.path.join(data_dir, "test-labels.lst"),
"test"
)
def get_multi_distractor_examples_from_file(self, input_file):
records = read_jsonl_lines(input_file)
labels = [r['label'] for r in records]
return self._create_examples(records, labels)
def get_examples_from_file(self, input_file, labels_file=None, split="predict"):
if labels_file is not None:
return self._create_examples(
self._read_jsonl(input_file),
read_lines(labels_file),
split
)
else:
return self._create_examples(
self._read_jsonl(input_file)
)
def get_labels(self):
"""See base class."""
return [str(item + 1) for item in range(10)]
def _create_examples(self, records, labels=None, set_type="predict"):
"""Creates examples for the training and dev sets."""
examples = []
if labels is None:
labels = [None] * len(records)
for (i, (record, label)) in enumerate(zip(records, labels)):
guid = "%s" % (record['story_id'])
beginning = record['obs1']
ending = record['obs2']
if 'choices' in record:
options = record['choices']
label = int(label) + 1
else:
options = [record['hyp1'], record['hyp2']]
examples.append(
AnliExample(example_id=guid,
beginning=beginning,
middle_options=options,
ending=ending,
label=label
)
)
return examples
@staticmethod
def label_field():
return "label"
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
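# Example (added for illustration): _truncate_seq_pair(["the", "cat", "sat"],
# ["on", "mat"], 4) pops one token from the longer list in place, leaving
# ["the", "cat"] and ["on", "mat"].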
def convert_multiple_choice_examples_to_features(examples: list,
tokenizer: BertTokenizer,
max_seq_length: int,
is_training: bool,
verbose: bool = False):
features = []
for idx, example in enumerate(examples):
option_features = []
for option in example.get_option_segments():
context_tokens = tokenizer.tokenize(option['segment1'])
if "segment2" in option:
option_tokens = tokenizer.tokenize(option["segment2"])
_truncate_seq_pair(context_tokens, option_tokens, max_seq_length - 3)
tokens = ["[CLS]"] + context_tokens + ["[SEP]"] + option_tokens + ["[SEP]"]
segment_ids = [0] * (len(context_tokens) + 2) + [1] * (len(option_tokens) + 1)
else:
context_tokens = context_tokens[0:(max_seq_length - 2)]
tokens = ["[CLS]"] + context_tokens + ["[SEP]"]
segment_ids = [0] * len(tokens)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
option_features.append((tokens, input_ids, input_mask, segment_ids))
label = example.label
if idx < 5 and verbose:
logger.info("*** Example ***")
logger.info(f"example_id: {example.example_id}")
for choice_idx, (tokens, input_ids, input_mask, segment_ids) in enumerate(
option_features):
logger.info(f"choice: {choice_idx}")
logger.info(f"tokens: {' '.join(tokens)}")
logger.info(f"input_ids: {' '.join(map(str, input_ids))}")
logger.info(f"input_mask: {' '.join(map(str, input_mask))}")
logger.info(f"segment_ids: {' '.join(map(str, segment_ids))}")
if is_training:
logger.info(f"label: {label}")
features.append(
MultipleChoiceFeatures(
example_id=example.example_id,
option_features=option_features,
label=label
)
)
return features
def select_field(features, field):
return [
[
choice[field]
for choice in feature.choices_features
]
for feature in features
]
def mc_examples_to_data_loader(examples: list,
tokenizer,
max_seq_length,
is_train,
batch_size,
is_predict=False,
verbose: bool = False):
features = convert_multiple_choice_examples_to_features(
examples, tokenizer, max_seq_length, is_train, verbose
)
if verbose:
logger.info(" Num examples = %d", len(examples))
all_input_ids = torch.tensor(select_field(features, 'input_ids'), dtype=torch.long)
all_input_mask = torch.tensor(select_field(features, 'input_mask'), dtype=torch.long)
all_segment_ids = torch.tensor(select_field(features, 'segment_ids'), dtype=torch.long)
if not is_predict:
all_label = torch.tensor([f.label for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label)
else:
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids)
if is_train:
sampler = RandomSampler(dataset)
else:
sampler = SequentialSampler(dataset)
return DataLoader(dataset, sampler=sampler, batch_size=batch_size)
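if __name__ == '__main__':
    # Minimal end-to-end sketch (added for illustration; "data/anli" is a
    # hypothetical path expected to contain dev.jsonl and dev-labels.lst).
    processor = AnliProcessor()
    examples = processor.get_dev_examples("data/anli")
    bert_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", do_lower_case=True)
    loader = mc_examples_to_data_loader(examples, bert_tokenizer, max_seq_length=68,
                                        is_train=False, batch_size=8)
    print("Number of batches: {}".format(len(loader)))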
| abductive-commonsense-reasoning-master | anli/data_processors.py |
import argparse
import json
import logging
import math
import os
import random
import numpy as np
import torch
from pytorch_transformers import BertTokenizer, PYTORCH_PRETRAINED_BERT_CACHE, \
BertForMultipleChoice, BertConfig, BertModel
from tensorboardX import SummaryWriter
from torch.nn import CrossEntropyLoss
from tqdm import tqdm, trange
from pytorch_transformers import AdamW, WarmupLinearSchedule
from anli.data_processors import AnliProcessor, AnliMultiDistractorProcessor, mc_examples_to_data_loader
from utils.file_utils import write_items
from torch import nn
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
model_choice = BertForMultipleChoice
model_choice_map = {
'BertForMultipleChoice': BertForMultipleChoice,
}
def get_data_processor(task_name):
if task_name == "anli":
return AnliProcessor()
elif task_name == "anli_md":
return AnliMultiDistractorProcessor()
else:
raise Exception("Invalid task")
def _model_name(dir_name):
return os.path.join(dir_name, "pytorch_model.bin")
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
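# Example (added for illustration): _compute_softmax([0.0, 0.0]) returns
# [0.5, 0.5]; subtracting the max score before exponentiating avoids overflow
# for large logits.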
def num_correct(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
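# Example (added for illustration): num_correct(np.array([[0.1, 0.9], [0.8, 0.2]]),
# np.array([1, 0])) evaluates to 2, since the row-wise argmax matches both labels.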
def train(data_dir, output_dir, data_processor, model_name_or_path, lr, batch_size, epochs,
finetuning_model,
max_seq_length, warmup_proportion, debug=False, tune_bert=True, gpu_id=0, tb_dir=None,
debug_samples=20, training_data_fraction=1.0, config_name=None):
if os.path.exists(output_dir) and os.listdir(output_dir):
raise ValueError(
"Output directory ({}) already exists and is not empty.".format(output_dir))
os.makedirs(output_dir)
writer = None
if tb_dir is not None:
writer = SummaryWriter(tb_dir)
tokenizer = BertTokenizer.from_pretrained(model_name_or_path, do_lower_case=True)
train_examples = data_processor.get_train_examples(data_dir)
if training_data_fraction < 1.0:
num_train_examples = int(len(train_examples) * training_data_fraction)
train_examples = random.sample(train_examples, num_train_examples)
if debug:
logging.info("*****[DEBUG MODE]*****")
train_examples = train_examples[:debug_samples]
num_train_steps = int(
len(train_examples) / batch_size * epochs
)
# Pretrained Model
config = BertConfig.from_pretrained(
config_name if config_name else model_name_or_path,
num_labels=len(data_processor.get_labels()),
finetuning_task="anli"
)
model = model_choice_map[finetuning_model].from_pretrained(model_name_or_path,
                                                                from_tf=bool('.ckpt' in model_name_or_path),
config=config
)
device = torch.device("cuda:{}".format(gpu_id) if torch.cuda.is_available() else "cpu")
model.to(device)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
# hack to remove pooler, which is not used
# thus it produce None grad that break apex
param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
'weight_decay': 0.0}
]
if writer:
params_to_log_on_tb = [(k, v) for k, v in model.named_parameters() if
not k.startswith("bert")]
t_total = num_train_steps
train_dataloader = mc_examples_to_data_loader(train_examples, tokenizer, max_seq_length, True,
batch_size, verbose=True)
optimizer = AdamW(optimizer_grouped_parameters, lr=lr, eps=1e-8)
scheduler = WarmupLinearSchedule(optimizer,
warmup_steps=math.floor(warmup_proportion * t_total),
t_total=t_total)
global_step = 0
logging.info("\n\n\n\n****** TRAINABLE PARAMETERS = {} ******** \n\n\n\n"
.format(sum(p.numel() for p in model.parameters() if p.requires_grad)))
for epoch_num in trange(int(epochs), desc="Epoch"):
model.train()
assert model.training
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
batch_tqdm = tqdm(train_dataloader)
current_correct = 0
for step, batch in enumerate(batch_tqdm):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
model_output = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask, labels=label_ids)
loss = model_output[0]
logits = model_output[1]
current_correct += num_correct(logits.detach().cpu().numpy(),
label_ids.to('cpu').numpy())
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
loss.backward()
            if (step + 1) % 1 == 0:  # vestigial gradient-accumulation check; always true with an accumulation of 1 step
# modify learning rate with special warm up BERT uses
optimizer.step()
scheduler.step()
model.zero_grad()
global_step += 1
if writer:
writer.add_scalar("loss", tr_loss / nb_tr_steps, global_step)
lrs = scheduler.get_lr()
writer.add_scalar("lr_pg_1", lrs[0], global_step)
writer.add_scalar("lr_pg_2", lrs[1], global_step)
for n, p in params_to_log_on_tb:
writer.add_histogram(n, p.clone().cpu().data.numpy(), global_step)
writer.add_histogram("model_logits", logits.clone().cpu().data.numpy(),
global_step)
batch_tqdm.set_description(
"Loss: {}; Iteration".format(round(tr_loss / nb_tr_steps, 3)))
tr_acc = current_correct / nb_tr_examples
# Call evaluate at the end of each epoch
result = evaluate(data_dir=data_dir,
output_dir=output_dir,
data_processor=data_processor,
model_name_or_path=model_name_or_path,
finetuning_model=finetuning_model,
max_seq_length=max_seq_length,
batch_size=batch_size,
debug=debug,
gpu_id=gpu_id,
model=model,
tokenizer=tokenizer,
verbose=False,
debug_samples=debug_samples
)
logging.info("****** EPOCH {} ******\n\n\n".format(epoch_num))
logging.info("Training Loss: {}".format(round(tr_loss / nb_tr_steps, 3)))
logging.info("Training Accuracy: {}".format(round(tr_acc, 3)))
logging.info("Validation Loss : {}".format(round(result['dev_eval_loss'], 3)))
logging.info("Validation Accuracy : {}".format(round(result['dev_eval_accuracy'], 3)))
logging.info("******")
if writer:
writer.add_scalar("dev_val_loss", result['dev_eval_loss'], global_step)
writer.add_scalar("dev_val_accuracy", result['dev_eval_accuracy'], global_step)
writer.add_scalar("dev_accuracy", tr_acc, global_step)
model_to_save = model.module if hasattr(model,
'module') else model # Only save the model it-self
output_model_file = _model_name(output_dir)
torch.save(model_to_save.state_dict(), output_model_file)
logging.info("Training Done. Saved model to: {}".format(output_model_file))
return output_model_file
def evaluate(data_dir, output_dir, data_processor, model_name_or_path, finetuning_model, max_seq_length,
batch_size,
debug=False, gpu_id=0, model=None, tokenizer=None, verbose=False, debug_samples=20,
eval_split="dev", config_name=None, metrics_out_file="metrics.json"):
if debug:
logging.info("*****[DEBUG MODE]*****")
eval_examples = data_processor.get_train_examples(data_dir)[:debug_samples]
else:
if eval_split == "dev":
eval_examples = data_processor.get_dev_examples(data_dir)
elif eval_split == "test":
eval_examples = data_processor.get_test_examples(data_dir)
if tokenizer is None:
tokenizer = BertTokenizer.from_pretrained(model_name_or_path, do_lower_case=True)
eval_dataloader = mc_examples_to_data_loader(examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=max_seq_length,
is_train=False,
batch_size=batch_size,
verbose=verbose
)
device = torch.device(gpu_id if torch.cuda.is_available() else "cpu")
# Load a trained model that you have fine-tuned
if model is None:
config = BertConfig.from_pretrained(
config_name if config_name else model_name_or_path,
num_labels=len(data_processor.get_labels()),
finetuning_task="anli"
)
model = model_choice_map[finetuning_model].from_pretrained(output_dir,
                                                                from_tf=bool('.ckpt' in model_name_or_path),
config=config
)
model.to(device)
model.eval()
assert not model.training
eval_loss, eval_correct = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
eval_predictions = []
eval_logits = []
eval_pred_probs = []
for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
model_output = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask, labels=label_ids)
tmp_eval_loss = model_output[0]
logits = model_output[1]
logits = logits.detach().cpu().numpy()
label_ids = label_ids.to('cpu').numpy()
tmp_eval_correct = num_correct(logits, label_ids)
eval_predictions.extend(np.argmax(logits, axis=1).tolist())
eval_logits.extend(logits.tolist())
eval_pred_probs.extend([_compute_softmax(list(l)) for l in logits])
eval_loss += tmp_eval_loss.item() # No need to compute mean again. CrossEntropyLoss does that by default.
nb_eval_steps += 1
eval_correct += tmp_eval_correct
nb_eval_examples += input_ids.size(0)
eval_loss = eval_loss / nb_eval_steps
eval_accuracy = eval_correct / nb_eval_examples
result = {}
if os.path.exists(metrics_out_file):
        with open(metrics_out_file) as f:
            existing_results = json.loads(f.read())
        result.update(existing_results)
result.update(
{
eval_split + '_eval_loss': eval_loss,
eval_split + '_eval_accuracy': eval_accuracy,
}
)
with open(metrics_out_file, "w") as writer:
writer.write(json.dumps(result))
if verbose:
logger.info("***** Eval results *****")
logging.info(json.dumps(result))
output_file = os.path.join(os.path.dirname(output_dir),
eval_split + "_output_predictions.jsonl")
predictions = []
for record, pred, logits, probs in zip(eval_examples, eval_predictions, eval_logits,
eval_pred_probs):
r_json = record.to_json()
r_json['prediction'] = data_processor.get_labels()[pred]
r_json['logits'] = logits
r_json['probs'] = probs
predictions.append(r_json)
write_items([json.dumps(r) for r in predictions], output_file)
return result
def predict(pred_input_file,
pred_output_file,
model_dir,
data_processor,
model_name_or_path,
max_seq_length,
batch_size,
gpu_id,
verbose,
finetuning_model,
config_name=None):
tokenizer = BertTokenizer.from_pretrained(model_name_or_path, do_lower_case=True)
pred_examples = data_processor.get_examples_from_file(pred_input_file)
pred_dataloader = mc_examples_to_data_loader(examples=pred_examples,
tokenizer=tokenizer,
max_seq_length=max_seq_length,
is_train=False,
is_predict=True,
batch_size=batch_size,
verbose=verbose
)
device = torch.device(gpu_id if torch.cuda.is_available() else "cpu")
    # Load the fine-tuned model. from_pretrained below reads the saved weights
    # from model_dir directly, so no separate state_dict load is needed here.
# Pretrained Model
config = BertConfig.from_pretrained(
config_name if config_name else model_name_or_path,
num_labels=len(data_processor.get_labels()),
finetuning_task="anli"
)
model = model_choice_map[finetuning_model].from_pretrained(
model_dir,
from_tf=bool('.ckpt' in model_name_or_path),
config=config
)
model.to(device)
model.eval()
assert not model.training
predictions = []
for input_ids, input_mask, segment_ids in tqdm(pred_dataloader):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
with torch.no_grad():
model_output = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
logits = model_output[0]
logits = logits.detach().cpu().numpy()
predictions.extend(np.argmax(logits, axis=1).tolist())
write_items([idx + 1 for idx in predictions], pred_output_file)
def main(args):
output_dir = args.output_dir
seed = args.seed
model_name_or_path = args.model_name_or_path
data_dir = args.data_dir
task_name = args.task_name
lr = args.lr
batch_size = args.batch_size
epochs = args.epochs
max_seq_length = args.max_seq_length
warmup_proportion = args.warmup_proportion
mode = args.mode
finetuning_model = args.finetuning_model
debug = args.debug
tune_bert = not args.no_tune_bert
gpu_id = args.gpu_id
tb_dir = args.tb_dir
debug_samples = args.debug_samples
run_on_test = args.run_on_test
training_data_fraction = args.training_data_fraction
run_on_dev = True
metrics_out_file = args.metrics_out_file
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if mode is None or mode == "train":
train(data_dir=data_dir,
output_dir=output_dir,
data_processor=get_data_processor(task_name),
model_name_or_path=model_name_or_path,
lr=lr,
batch_size=batch_size,
epochs=epochs,
finetuning_model=finetuning_model,
max_seq_length=max_seq_length,
warmup_proportion=warmup_proportion,
debug=debug,
tune_bert=tune_bert,
gpu_id=gpu_id,
tb_dir=tb_dir,
debug_samples=debug_samples,
training_data_fraction=training_data_fraction
)
if mode is None or mode == "eval":
if run_on_dev:
evaluate(
data_dir=data_dir,
output_dir=output_dir,
data_processor=get_data_processor(task_name),
model_name_or_path=model_name_or_path,
finetuning_model=finetuning_model,
max_seq_length=max_seq_length,
batch_size=batch_size,
debug=debug,
gpu_id=gpu_id,
verbose=True,
debug_samples=debug_samples,
eval_split="dev",
metrics_out_file=metrics_out_file
)
if run_on_test:
logger.info("*******")
logger.info("!!!!!!! ----- RUNNING ON TEST ----- !!!!!")
logger.info("*******")
evaluate(
data_dir=data_dir,
output_dir=output_dir,
data_processor=get_data_processor(task_name),
model_name_or_path=model_name_or_path,
finetuning_model=finetuning_model,
max_seq_length=max_seq_length,
batch_size=batch_size,
debug=debug,
gpu_id=gpu_id,
verbose=True,
debug_samples=debug_samples,
eval_split="test",
metrics_out_file=metrics_out_file
)
if mode == "predict":
assert args.predict_input_file is not None and args.predict_output_file is not None
predict(
pred_input_file=args.predict_input_file,
pred_output_file=args.predict_output_file,
model_dir=output_dir,
data_processor=get_data_processor(task_name),
model_name_or_path=model_name_or_path,
max_seq_length=max_seq_length,
batch_size=batch_size,
gpu_id=gpu_id,
verbose=False,
finetuning_model=finetuning_model
)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Finetune BERT model and save')
# Required Parameters
parser.add_argument('--data_dir', type=str, help='Location of data', default=None)
    parser.add_argument('--task_name', type=str, help='Task name. Currently supported: anli / '
                                                      'anli_md', default=None)
parser.add_argument('--model_name_or_path',
type=str,
help="Bert pre-trained model selected for finetuned",
default=None)
parser.add_argument('--output_dir',
type=str,
help="Output directory to save model",
default=None)
parser.add_argument('--mode', type=str, default=None)
parser.add_argument('--finetuning_model', type=str, default='BertForMultipleChoice')
parser.add_argument('--eval_split', type=str, default="dev")
parser.add_argument('--run_on_test', action='store_true')
    parser.add_argument('--input_file', type=str, default=None)
parser.add_argument('--predict_input_file', default=None)
parser.add_argument('--predict_output_file', default=None)
parser.add_argument('--metrics_out_file', default="metrics.json")
# Hyperparams
parser.add_argument('--lr', type=float, help="Learning rate", default=1e-5)
parser.add_argument('--batch_size', type=int, help="Batch size", default=4)
parser.add_argument('--epochs', type=int, help="Num epochs", default=3)
parser.add_argument('--training_data_fraction', type=float, default=1.0)
# Other parameters
parser.add_argument("--max_seq_length",
default=64,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--warmup_proportion',
type=float,
default=0.2,
help="Portion of training to perform warmup")
parser.add_argument('--debug', action='store_true')
parser.add_argument('--debug_samples', default=20, type=int)
parser.add_argument('--no_tune_bert', action='store_true')
parser.add_argument('--gpu_id', type=int, default=0)
parser.add_argument('--tb_dir', type=str, default=None)
args = parser.parse_args()
print('====Input Arguments====')
print(json.dumps(vars(args), indent=2, sort_keys=True))
print("=======================")
main(args)
| abductive-commonsense-reasoning-master | anli/run_anli.py |
import argparse
import json
import torch
from pytorch_transformers import BertTokenizer, BertConfig, BertForMultipleChoice
from anli.data_processors import AnliExample, mc_examples_to_data_loader
from anli.run_anli import get_data_processor, model_choice_map
import numpy as np
def load_anli_model(model_name, saved_model_dir, device):
data_processor = get_data_processor("anli")
tokenizer = BertTokenizer.from_pretrained(model_name, do_lower_case=True)
# Pretrained Model
config = BertConfig.from_pretrained(
model_name,
num_labels=len(data_processor.get_labels()),
finetuning_task="anli"
)
model = BertForMultipleChoice.from_pretrained(
saved_model_dir,
from_tf=bool('.ckpt' in model_name),
config=config
)
model.to(device)
model.eval()
return data_processor, tokenizer, model
def _predict(tokenizer, model, obs1, obs2, hyp1, hyp2, device):
instance = AnliExample(example_id="demo-1",
beginning=obs1,
middle_options=[hyp1, hyp2],
ending=obs2,
label=None
)
data_loader = mc_examples_to_data_loader([instance], tokenizer, 68, False, 1, is_predict=True, verbose=False)
for input_ids, input_mask, segment_ids in data_loader:
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
model_output = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
logits = model_output[0]
logits = logits.detach().cpu().numpy()
        # A single example is scored, so unwrap the batch dimension to return a scalar.
        answer = int(np.argmax(logits, axis=1)[0])
return answer
def main(args):
device = torch.device(args.gpu_id if torch.cuda.is_available() else "cpu")
_, tokenizer, model = load_anli_model(args.model_name, args.saved_model_dir, device)
if args.interactive:
while True:
obs1 = input("Observation 1 >>> ")
obs2 = input("Observation 2 >>> ")
hyp1 = input("Hypothesis 1 >>> ")
hyp2 = input("Hypothesis 2 >>> ")
prediction = _predict(tokenizer, model, obs1, obs2, hyp1, hyp2, device)
if prediction == 0:
print("[Answer] Hyptothesis 1: {}".format(hyp1))
else:
print("[Answer] Hyptothesis 2: {}".format(hyp2))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Demo for a finetuned ANLI model.')
# Required Parameters
parser.add_argument('--model_name',
type=str,
help="Bert pre-trained model selected for finetuning.",
default="bert-large-uncased")
parser.add_argument('--saved_model_dir',
type=str,
help="Saved finetuned model dir.",
default=None,
required=True)
parser.add_argument('--gpu_id', type=int, default=0)
parser.add_argument('--interactive', action='store_true')
args = parser.parse_args()
print('====Input Arguments====')
print(json.dumps(vars(args), indent=2, sort_keys=True))
print("=======================")
main(args)
| abductive-commonsense-reasoning-master | anli/demo.py |
abductive-commonsense-reasoning-master | anli/human_eval/__init__.py |
import argparse
import json
from utils.file_utils import read_jsonl_lines, write_items
from collections import Counter
import hashlib
def _hash(w):
return hashlib.sha1(w.encode()).hexdigest()
def main(args):
input_file = args.input_file
output_file = args.output_file
records = read_jsonl_lines(input_file)
per_story_votes = {}
per_story_workers = {}
per_story_per_vote_worktime = {}
story_id_field = 'Input.story_id'
for r in records:
if r[story_id_field] not in per_story_votes:
per_story_votes[r[story_id_field]] = []
per_story_workers[r[story_id_field]] = []
per_story_per_vote_worktime[r[story_id_field]] = []
per_story_votes[r[story_id_field]].append(r['Answer.Answer_radios'])
per_story_workers[r[story_id_field]].append(_hash(r['WorkerId']))
per_story_per_vote_worktime[r[story_id_field]].append(r['WorkTimeInSeconds'])
stories = []
correct = 0
done = set()
for r in records:
if r[story_id_field] in done:
continue
done.add(r[story_id_field])
assert len(per_story_votes[r[story_id_field]]) == 3
majority_vote = Counter(per_story_votes[r[story_id_field]]).most_common(1)[0][0]
stories.append({
'story_id': r[story_id_field],
'obs1': r['Input.obs1'],
'obs2': r['Input.obs2'],
'hyp1': r['Input.hyp1'],
'hyp2': r['Input.hyp2'],
'label': r['Input.label'],
'votes': per_story_votes[r[story_id_field]],
'majority_vote': majority_vote,
'workers': per_story_workers[r[story_id_field]],
'worktime': per_story_per_vote_worktime[r[story_id_field]]
})
if majority_vote == r['Input.label']:
correct += 1
print("Human performance = {}".format(correct / len(stories)))
print("No. of storeies = {}".format(len(stories)))
write_items([json.dumps(r) for r in stories], output_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
        description='Script to compute human performance from MTurk annotation results')
# Required Parameters
parser.add_argument('--input_file', type=str, help='Location of human evaluation data from MTurk.', default=None)
parser.add_argument('--output_file', type=str, help='Location of file to save results.', default=None)
args = parser.parse_args()
print('====Input Arguments====')
print(json.dumps(vars(args), indent=2, sort_keys=True))
print("=======================")
main(args) | abductive-commonsense-reasoning-master | anli/human_eval/compute_human_performance.py |
import argparse
import json
from utils.file_utils import read_jsonl_lines, read_lines, write_items
def main(args):
dev_file = args.dev_file
dev_labels_file = args.dev_labels_file
output_file = args.output_file
records = read_jsonl_lines(dev_file)
labels = read_lines(dev_labels_file)
for r, l in zip(records, labels):
r['label'] = l
write_items([json.dumps(r) for r in records], output_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
        description='Script to attach gold labels to the dev data for human evaluation')
# Required Parameters
parser.add_argument('--dev_file', type=str, help='Location of dev data', default=None)
parser.add_argument('--dev_labels_file', type=str, help='Location of dev labels ', default=None)
parser.add_argument('--output_file', type=str, help='Location of output file ', default=None)
args = parser.parse_args()
print('====Input Arguments====')
print(json.dumps(vars(args), indent=2, sort_keys=True))
print("=======================")
main(args) | abductive-commonsense-reasoning-master | anli/human_eval/prep_human_eval.py |