code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def from_env_step(cls, env_step, last_observation, agent_info,
episode_info):
"""Create a TimeStep from a EnvStep.
Args:
env_step (EnvStep): the env step returned by the environment.
last_observation (numpy.ndarray): A numpy array of shape
:math:`(O^*)` containing the observation for this time
step in the environment. These must conform to
:attr:`EnvStep.observation_space`.
The observation before applying the action.
agent_info (dict): A dict of arbitrary agent state information.
episode_info (dict): A dict of arbitrary information associated
with the whole episode.
Returns:
TimeStep: The TimeStep with all information of EnvStep plus the
agent info.
"""
return cls(env_spec=env_step.env_spec,
episode_info=episode_info,
observation=last_observation,
action=env_step.action,
reward=env_step.reward,
next_observation=env_step.observation,
env_info=env_step.env_info,
agent_info=agent_info,
step_type=env_step.step_type)
|
Create a TimeStep from an EnvStep.
Args:
env_step (EnvStep): the env step returned by the environment.
last_observation (numpy.ndarray): A numpy array of shape
:math:`(O^*)` containing the observation for this time
step in the environment. These must conform to
:attr:`EnvStep.observation_space`.
The observation before applying the action.
agent_info (dict): A dict of arbitrary agent state information.
episode_info (dict): A dict of arbitrary information associated
with the whole episode.
Returns:
TimeStep: The TimeStep with all information of EnvStep plus the
agent info.
|
from_env_step
|
python
|
rlworkgroup/garage
|
src/garage/_dtypes.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_dtypes.py
|
MIT
|
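A brief usage sketch for `from_env_step` (illustrative only; `env`, `policy`, and the reset/step calls are assumptions that follow the `Environment` and policy APIs shown elsewhere in this file, not part of the dataset row):

```python
# Hypothetical sketch: build a TimeStep from one environment interaction.
last_obs, episode_info = env.reset()               # assumed Environment API
action, agent_info = policy.get_action(last_obs)   # assumed policy API
env_step = env.step(action)
time_step = TimeStep.from_env_step(env_step=env_step,
                                   last_observation=last_obs,
                                   agent_info=agent_info,
                                   episode_info=episode_info)
```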
def concatenate(cls, *batches):
"""Concatenate two or more :class:`TimeStepBatch`s.
Args:
batches (list[TimeStepBatch]): Batches to concatenate.
Returns:
TimeStepBatch: The concatenation of the batches.
Raises:
ValueError: If no TimeStepBatches are provided.
"""
if len(batches) < 1:
raise ValueError('Please provide at least one TimeStepBatch to '
'concatenate')
episode_infos = {
k: np.concatenate([b.episode_infos[k] for b in batches])
for k in batches[0].episode_infos.keys()
}
env_infos = {
k: np.concatenate([b.env_infos[k] for b in batches])
for k in batches[0].env_infos.keys()
}
agent_infos = {
k: np.concatenate([b.agent_infos[k] for b in batches])
for k in batches[0].agent_infos.keys()
}
return cls(
env_spec=batches[0].env_spec,
episode_infos=episode_infos,
observations=np.concatenate(
[batch.observations for batch in batches]),
actions=np.concatenate([batch.actions for batch in batches]),
rewards=np.concatenate([batch.rewards for batch in batches]),
next_observations=np.concatenate(
[batch.next_observations for batch in batches]),
env_infos=env_infos,
agent_infos=agent_infos,
step_types=np.concatenate([batch.step_types for batch in batches]))
|
Concatenate two or more :class:`TimeStepBatch`s.
Args:
batches (list[TimeStepBatch]): Batches to concatenate.
Returns:
TimeStepBatch: The concatenation of the batches.
Raises:
ValueError: If no TimeStepBatches are provided.
|
concatenate
|
python
|
rlworkgroup/garage
|
src/garage/_dtypes.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_dtypes.py
|
MIT
|
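A minimal usage sketch for `TimeStepBatch.concatenate` (the batches `batch_a` and `batch_b` are hypothetical and assumed to share the same `env_spec` and info keys):

```python
# Hypothetical sketch: merge two sampled batches into one.
merged = TimeStepBatch.concatenate(batch_a, batch_b)
assert len(merged.rewards) == len(batch_a.rewards) + len(batch_b.rewards)
```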
def split(self) -> List['TimeStepBatch']:
"""Split a :class:`~TimeStepBatch` into a list of :class:`~TimeStepBatch`s.
The opposite of concatenate.
Returns:
list[TimeStepBatch]: A list of :class:`TimeStepBatch`s, with one
:class:`~TimeStep` per :class:`~TimeStepBatch`.
"""
time_steps = []
for i in range(len(self.rewards)):
time_step = TimeStepBatch(
episode_infos={
k: np.asarray([v[i]])
for (k, v) in self.episode_infos.items()
},
env_spec=self.env_spec,
observations=np.asarray([self.observations[i]]),
actions=np.asarray([self.actions[i]]),
rewards=np.asarray([self.rewards[i]]),
next_observations=np.asarray([self.next_observations[i]]),
env_infos={
k: np.asarray([v[i]])
for (k, v) in self.env_infos.items()
},
agent_infos={
k: np.asarray([v[i]])
for (k, v) in self.agent_infos.items()
},
step_types=np.asarray([self.step_types[i]], dtype=StepType))
time_steps.append(time_step)
return time_steps
|
Split a :class:`~TimeStepBatch` into a list of :class:`~TimeStepBatch`s.
The opposite of concatenate.
Returns:
list[TimeStepBatch]: A list of :class:`TimeStepBatch`s, with one
:class:`~TimeStep` per :class:`~TimeStepBatch`.
|
split
|
python
|
rlworkgroup/garage
|
src/garage/_dtypes.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_dtypes.py
|
MIT
|
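A minimal sketch of the split/concatenate round trip (assuming `batch` is an existing `TimeStepBatch`):

```python
# Hypothetical sketch: split() yields one single-step batch per transition.
singles = batch.split()
assert all(len(ts.rewards) == 1 for ts in singles)
rebuilt = TimeStepBatch.concatenate(*singles)
assert len(rebuilt.rewards) == len(batch.rewards)
```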
def to_time_step_list(self) -> List[Dict[str, np.ndarray]]:
"""Convert the batch into a list of dictionaries.
Breaks the :class:`~TimeStepBatch` into a list of single time step
sample dictionaries. len(rewards) (i.e., the number of discrete time steps)
dictionaries are returned.
Returns:
list[dict[str, np.ndarray or dict[str, np.ndarray]]]: Keys:
episode_infos (dict[str, np.ndarray]): A dict of numpy arrays
containing the episode-level information of each episode.
Each value of this dict must be a numpy array of shape
:math:`(S^*,)`. For example, in goal-conditioned
reinforcement learning this could contain the goal state
for each episode.
observations (numpy.ndarray): Non-flattened array of
observations.
Typically has shape (batch_size, S^*) (the unflattened
state space
of the current environment).
actions (numpy.ndarray): Non-flattened array of actions. Must
have shape (batch_size, S^*) (the unflattened action
space of the
current environment).
rewards (numpy.ndarray): Array of rewards of shape (
batch_size,) (1D array of length batch_size).
next_observations (numpy.ndarray): Non-flattened array of next
observations. Has shape (batch_size, S^*).
next_observations[i] was
observed by the agent after taking actions[i].
env_infos (dict): A dict of arbitrary environment state
information.
agent_infos (dict): A dict of arbitrary agent state
information. For example, this may contain the
hidden states from an RNN policy.
step_types (numpy.ndarray): A numpy array of `StepType` with
shape (batch_size,) containing the time step types for
all transitions in this batch.
"""
samples = []
for i in range(len(self.rewards)):
samples.append({
'episode_infos': {
k: np.asarray([v[i]])
for (k, v) in self.episode_infos.items()
},
'observations':
np.asarray([self.observations[i]]),
'actions':
np.asarray([self.actions[i]]),
'rewards':
np.asarray([self.rewards[i]]),
'next_observations':
np.asarray([self.next_observations[i]]),
'env_infos':
{k: np.asarray([v[i]])
for (k, v) in self.env_infos.items()},
'agent_infos':
{k: np.asarray([v[i]])
for (k, v) in self.agent_infos.items()},
'step_types':
np.asarray([self.step_types[i]])
})
return samples
|
Convert the batch into a list of dictionaries.
Breaks the :class:`~TimeStepBatch` into a list of single time step
sample dictionaries. len(rewards) (i.e., the number of discrete time steps)
dictionaries are returned.
Returns:
list[dict[str, np.ndarray or dict[str, np.ndarray]]]: Keys:
episode_infos (dict[str, np.ndarray]): A dict of numpy arrays
containing the episode-level information of each episode.
Each value of this dict must be a numpy array of shape
:math:`(S^*,)`. For example, in goal-conditioned
reinforcement learning this could contain the goal state
for each episode.
observations (numpy.ndarray): Non-flattened array of
observations.
Typically has shape (batch_size, S^*) (the unflattened
state space
of the current environment).
actions (numpy.ndarray): Non-flattened array of actions. Must
have shape (batch_size, S^*) (the unflattened action
space of the
current environment).
rewards (numpy.ndarray): Array of rewards of shape (
batch_size,) (1D array of length batch_size).
next_observations (numpy.ndarray): Non-flattened array of next
observations. Has shape (batch_size, S^*).
next_observations[i] was
observed by the agent after taking actions[i].
env_infos (dict): A dict of arbitrary environment state
information.
agent_infos (dict): A dict of arbitrary agent state
information. For example, this may contain the
hidden states from an RNN policy.
step_types (numpy.ndarray): A numpy array of `StepType` with
shape (batch_size,) containing the time step types for
all transitions in this batch.
|
to_time_step_list
|
python
|
rlworkgroup/garage
|
src/garage/_dtypes.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_dtypes.py
|
MIT
|
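A minimal sketch of iterating the dictionaries produced by `to_time_step_list` (assuming `batch` is an existing `TimeStepBatch`); each value in a sample is a length-1 array:

```python
# Hypothetical sketch: inspect each transition as a plain dictionary.
for sample in batch.to_time_step_list():
    reward = sample['rewards'][0]
    step_type = sample['step_types'][0]
```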
def from_time_step_list(cls, env_spec, ts_samples):
"""Create a :class:`~TimeStepBatch` from a list of time step dictionaries.
Args:
env_spec (EnvSpec): Specification for the environment from which
this data was sampled.
ts_samples (list[dict[str, np.ndarray or dict[str, np.ndarray]]]):
keys:
* episode_infos (dict[str, np.ndarray]): A dict of numpy arrays
containing the episode-level information of each episode.
Each value of this dict must be a numpy array of shape
:math:`(N, S^*)`. For example, in goal-conditioned
reinforcement learning this could contain the goal state
for each episode.
* observations (numpy.ndarray): Non-flattened array of
observations.
Typically has shape (batch_size, S^*) (the unflattened
state space of the current environment).
* actions (numpy.ndarray): Non-flattened array of actions.
Must have shape (batch_size, S^*) (the unflattened action
space of the current environment).
* rewards (numpy.ndarray): Array of rewards of shape (
batch_size,) (1D array of length batch_size).
* next_observations (numpy.ndarray): Non-flattened array of next
observations. Has shape (batch_size, S^*).
next_observations[i] was observed by the agent after
taking actions[i].
* env_infos (dict): A dict of arbitrary environment state
information.
* agent_infos (dict): A dict of arbitrary agent
state information. For example, this may contain the
hidden states from an RNN policy.
* step_types (numpy.ndarray): A numpy array of `StepType` with
shape (batch_size,) containing the time step types for all
transitions in this batch.
Returns:
TimeStepBatch: The concatenation of samples.
Raises:
ValueError: If no dicts are provided.
"""
if len(ts_samples) < 1:
raise ValueError('Please provide at least one dict')
ts_batches = [
TimeStepBatch(episode_infos=sample['episode_infos'],
env_spec=env_spec,
observations=sample['observations'],
actions=sample['actions'],
rewards=sample['rewards'],
next_observations=sample['next_observations'],
env_infos=sample['env_infos'],
agent_infos=sample['agent_infos'],
step_types=sample['step_types'])
for sample in ts_samples
]
return TimeStepBatch.concatenate(*ts_batches)
|
Create a :class:`~TimeStepBatch` from a list of time step dictionaries.
Args:
env_spec (EnvSpec): Specification for the environment from which
this data was sampled.
ts_samples (list[dict[str, np.ndarray or dict[str, np.ndarray]]]):
keys:
* episode_infos (dict[str, np.ndarray]): A dict of numpy arrays
containing the episode-level information of each episode.
Each value of this dict must be a numpy array of shape
:math:`(N, S^*)`. For example, in goal-conditioned
reinforcement learning this could contain the goal state
for each episode.
* observations (numpy.ndarray): Non-flattened array of
observations.
Typically has shape (batch_size, S^*) (the unflattened
state space of the current environment).
* actions (numpy.ndarray): Non-flattened array of actions.
Must have shape (batch_size, S^*) (the unflattened action
space of the current environment).
* rewards (numpy.ndarray): Array of rewards of shape (
batch_size,) (1D array of length batch_size).
* next_observations (numpy.ndarray): Non-flattened array of next
observations. Has shape (batch_size, S^*).
next_observations[i] was observed by the agent after
taking actions[i].
* env_infos (dict): A dict of arbitrary environment state
information.
* agent_infos (dict): A dict of arbitrary agent
state information. For example, this may contain the
hidden states from an RNN policy.
* step_types (numpy.ndarray): A numpy array of `StepType` with
shape (batch_size,) containing the time step types for all
transitions in this batch.
Returns:
TimeStepBatch: The concatenation of samples.
Raises:
ValueError: If no dicts are provided.
|
from_time_step_list
|
python
|
rlworkgroup/garage
|
src/garage/_dtypes.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_dtypes.py
|
MIT
|
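A minimal round-trip sketch pairing `from_time_step_list` with `to_time_step_list` (again assuming an existing `batch`):

```python
# Hypothetical sketch: rebuild a batch from its dictionary form.
samples = batch.to_time_step_list()
rebuilt = TimeStepBatch.from_time_step_list(batch.env_spec, samples)
assert len(rebuilt.rewards) == len(batch.rewards)
```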
def concatenate(cls, *batches):
"""Create a EpisodeBatch by concatenating EpisodeBatches.
Args:
batches (list[EpisodeBatch]): Batches to concatenate.
Returns:
EpisodeBatch: The concatenation of the batches.
"""
if __debug__:
for b in batches:
assert (set(b.env_infos.keys()) == set(
batches[0].env_infos.keys()))
assert (set(b.agent_infos.keys()) == set(
batches[0].agent_infos.keys()))
env_infos = {
k: np.concatenate([b.env_infos[k] for b in batches])
for k in batches[0].env_infos.keys()
}
agent_infos = {
k: np.concatenate([b.agent_infos[k] for b in batches])
for k in batches[0].agent_infos.keys()
}
episode_infos = {
k: np.concatenate([b.episode_infos_by_episode[k] for b in batches])
for k in batches[0].episode_infos_by_episode.keys()
}
return cls(
episode_infos=episode_infos,
env_spec=batches[0].env_spec,
observations=np.concatenate(
[batch.observations for batch in batches]),
last_observations=np.concatenate(
[batch.last_observations for batch in batches]),
actions=np.concatenate([batch.actions for batch in batches]),
rewards=np.concatenate([batch.rewards for batch in batches]),
env_infos=env_infos,
agent_infos=agent_infos,
step_types=np.concatenate([batch.step_types for batch in batches]),
lengths=np.concatenate([batch.lengths for batch in batches]))
|
Create an EpisodeBatch by concatenating EpisodeBatches.
Args:
batches (list[EpisodeBatch]): Batches to concatenate.
Returns:
EpisodeBatch: The concatenation of the batches.
|
concatenate
|
python
|
rlworkgroup/garage
|
src/garage/_dtypes.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_dtypes.py
|
MIT
|
def _episode_ranges(self):
"""Iterate through start and stop indices for each episode.
Yields:
tuple[int, int]: Start index (inclusive) and stop index
(exclusive).
"""
start = 0
for length in self.lengths:
stop = start + length
yield (start, stop)
start = stop
|
Iterate through start and stop indices for each episode.
Yields:
tuple[int, int]: Start index (inclusive) and stop index
(exclusive).
|
_episode_ranges
|
python
|
rlworkgroup/garage
|
src/garage/_dtypes.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_dtypes.py
|
MIT
|
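A small sketch of how the private `_episode_ranges` helper is typically used (assuming `batch` is an `EpisodeBatch`; with `lengths == [3, 2]` the yielded ranges would be `(0, 3)` and `(3, 5)`):

```python
# Hypothetical sketch: slice flat, time-major arrays per episode.
for start, stop in batch._episode_ranges():
    episode_rewards = batch.rewards[start:stop]
```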
def split(self):
"""Split an EpisodeBatch into a list of EpisodeBatches.
The opposite of concatenate.
Returns:
list[EpisodeBatch]: A list of EpisodeBatches, with one
episode per batch.
"""
episodes = []
for i, (start, stop) in enumerate(self._episode_ranges()):
eps = EpisodeBatch(
env_spec=self.env_spec,
episode_infos=slice_nested_dict(self.episode_infos_by_episode,
i, i + 1),
observations=self.observations[start:stop],
last_observations=np.asarray([self.last_observations[i]]),
actions=self.actions[start:stop],
rewards=self.rewards[start:stop],
env_infos=slice_nested_dict(self.env_infos, start, stop),
agent_infos=slice_nested_dict(self.agent_infos, start, stop),
step_types=self.step_types[start:stop],
lengths=np.asarray([self.lengths[i]]))
episodes.append(eps)
return episodes
|
Split an EpisodeBatch into a list of EpisodeBatches.
The opposite of concatenate.
Returns:
list[EpisodeBatch]: A list of EpisodeBatches, with one
episode per batch.
|
split
|
python
|
rlworkgroup/garage
|
src/garage/_dtypes.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_dtypes.py
|
MIT
|
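A minimal sketch using `EpisodeBatch.split` to compute per-episode statistics (assuming `batch` is an existing `EpisodeBatch`):

```python
# Hypothetical sketch: undiscounted return of each episode in the batch.
undiscounted_returns = [eps.rewards.sum() for eps in batch.split()]
```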
def to_list(self):
"""Convert the batch into a list of dictionaries.
Returns:
list[dict[str, np.ndarray or dict[str, np.ndarray]]]: Keys:
* observations (np.ndarray): Non-flattened array of
observations. Has shape (T, S^*) (the unflattened state
space of the current environment). observations[i] was
used by the agent to choose actions[i].
* next_observations (np.ndarray): Non-flattened array of
observations. Has shape (T, S^*). next_observations[i] was
observed by the agent after taking actions[i].
* actions (np.ndarray): Non-flattened array of actions. Must
have shape (T, S^*) (the unflattened action space of the
current environment).
* rewards (np.ndarray): Array of rewards of shape (T,) (1D
array of length timesteps).
* agent_infos (dict[str, np.ndarray]): Dictionary of stacked,
non-flattened `agent_info` arrays.
* env_infos (dict[str, np.ndarray]): Dictionary of stacked,
non-flattened `env_info` arrays.
* step_types (numpy.ndarray): A numpy array of `StepType` with
shape (T,) containing the time step types for all
transitions in this batch.
* episode_infos (dict[str, np.ndarray]): Dictionary of stacked,
non-flattened `episode_info` arrays.
"""
episodes = []
for i, (start, stop) in enumerate(self._episode_ranges()):
episodes.append({
'episode_infos':
{k: v[i:i + 1]
for (k, v) in self.episode_infos.items()},
'observations':
self.observations[start:stop],
'next_observations':
np.concatenate((self.observations[1 + start:stop],
[self.last_observations[i]])),
'actions':
self.actions[start:stop],
'rewards':
self.rewards[start:stop],
'env_infos':
{k: v[start:stop]
for (k, v) in self.env_infos.items()},
'agent_infos':
{k: v[start:stop]
for (k, v) in self.agent_infos.items()},
'step_types':
self.step_types[start:stop]
})
return episodes
|
Convert the batch into a list of dictionaries.
Returns:
list[dict[str, np.ndarray or dict[str, np.ndarray]]]: Keys:
* observations (np.ndarray): Non-flattened array of
observations. Has shape (T, S^*) (the unflattened state
space of the current environment). observations[i] was
used by the agent to choose actions[i].
* next_observations (np.ndarray): Non-flattened array of
observations. Has shape (T, S^*). next_observations[i] was
observed by the agent after taking actions[i].
* actions (np.ndarray): Non-flattened array of actions. Must
have shape (T, S^*) (the unflattened action space of the
current environment).
* rewards (np.ndarray): Array of rewards of shape (T,) (1D
array of length timesteps).
* agent_infos (dict[str, np.ndarray]): Dictionary of stacked,
non-flattened `agent_info` arrays.
* env_infos (dict[str, np.ndarray]): Dictionary of stacked,
non-flattened `env_info` arrays.
* step_types (numpy.ndarray): A numpy array of `StepType` with
shape (T,) containing the time step types for all
transitions in this batch.
* episode_infos (dict[str, np.ndarray]): Dictionary of stacked,
non-flattened `episode_info` arrays.
|
to_list
|
python
|
rlworkgroup/garage
|
src/garage/_dtypes.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_dtypes.py
|
MIT
|
def from_list(cls, env_spec, paths):
"""Create a EpisodeBatch from a list of episodes.
Args:
env_spec (EnvSpec): Specification for the environment from which
this data was sampled.
paths (list[dict[str, np.ndarray or dict[str, np.ndarray]]]): Keys:
* episode_infos (dict[str, np.ndarray]): Dictionary of stacked,
non-flattened `episode_info` arrays, each of shape (S^*).
* observations (np.ndarray): Non-flattened array of
observations. Typically has shape (T, S^*) (the unflattened
state space of the current environment). observations[i]
was used by the agent to choose actions[i]. observations
may instead have shape (T + 1, S^*).
* next_observations (np.ndarray): Non-flattened array of
observations. Has shape (T, S^*). next_observations[i] was
observed by the agent after taking actions[i]. Optional.
Note that to ensure all information from the environment
was preserved, observations must have shape (T + 1,
S^*), or this key must be set. However, this method is
lenient and will "duplicate" the last observation if the
original last observation has been lost.
* actions (np.ndarray): Non-flattened array of actions. Must
have shape (T, S^*) (the unflattened action space of the
current environment).
* rewards (np.ndarray): Array of rewards of shape (T,) (1D
array of length timesteps).
* agent_infos (dict[str, np.ndarray]): Dictionary of stacked,
non-flattened `agent_info` arrays.
* env_infos (dict[str, np.ndarray]): Dictionary of stacked,
non-flattened `env_info` arrays.
* step_types (numpy.ndarray): A numpy array of `StepType` with
shape (T,) containing the time step types for all
transitions in this batch.
"""
lengths = np.asarray([len(p['rewards']) for p in paths])
if all(
len(path['observations']) == length + 1
for (path, length) in zip(paths, lengths)):
last_observations = np.asarray(
[p['observations'][-1] for p in paths])
observations = np.concatenate(
[p['observations'][:-1] for p in paths])
else:
# The number of observations and timesteps must match.
observations = np.concatenate([p['observations'] for p in paths])
if paths[0].get('next_observations') is not None:
last_observations = np.asarray(
[p['next_observations'][-1] for p in paths])
else:
last_observations = np.asarray(
[p['observations'][-1] for p in paths])
stacked_paths = concat_tensor_dict_list(paths)
episode_infos = stack_tensor_dict_list(
[path['episode_infos'] for path in paths])
# Temporary solution. This logic is not needed if algorithms process
# step_types instead of dones directly.
if 'dones' in stacked_paths and 'step_types' not in stacked_paths:
step_types = np.array([
StepType.TERMINAL if done else StepType.MID
for done in stacked_paths['dones']
],
dtype=StepType)
stacked_paths['step_types'] = step_types
del stacked_paths['dones']
return cls(env_spec=env_spec,
episode_infos=episode_infos,
observations=observations,
last_observations=last_observations,
actions=stacked_paths['actions'],
rewards=stacked_paths['rewards'],
env_infos=stacked_paths['env_infos'],
agent_infos=stacked_paths['agent_infos'],
step_types=stacked_paths['step_types'],
lengths=lengths)
|
Create an EpisodeBatch from a list of episodes.
Args:
env_spec (EnvSpec): Specification for the environment from which
this data was sampled.
paths (list[dict[str, np.ndarray or dict[str, np.ndarray]]]): Keys:
* episode_infos (dict[str, np.ndarray]): Dictionary of stacked,
non-flattened `episode_info` arrays, each of shape (S^*).
* observations (np.ndarray): Non-flattened array of
observations. Typically has shape (T, S^*) (the unflattened
state space of the current environment). observations[i]
was used by the agent to choose actions[i]. observations
may instead have shape (T + 1, S^*).
* next_observations (np.ndarray): Non-flattened array of
observations. Has shape (T, S^*). next_observations[i] was
observed by the agent after taking actions[i]. Optional.
Note that to ensure all information from the environment
was preserved, observations must have shape (T + 1,
S^*), or this key must be set. However, this method is
lenient and will "duplicate" the last observation if the
original last observation has been lost.
* actions (np.ndarray): Non-flattened array of actions. Must
have shape (T, S^*) (the unflattened action space of the
current environment).
* rewards (np.ndarray): Array of rewards of shape (T,) (1D
array of length timesteps).
* agent_infos (dict[str, np.ndarray]): Dictionary of stacked,
non-flattened `agent_info` arrays.
* env_infos (dict[str, np.ndarray]): Dictionary of stacked,
non-flattened `env_info` arrays.
* step_types (numpy.ndarray): A numpy array of `StepType` with
shape (T,) containing the time step types for all
transitions in this batch.
|
from_list
|
python
|
rlworkgroup/garage
|
src/garage/_dtypes.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_dtypes.py
|
MIT
|
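A minimal sketch packing sampled paths into an `EpisodeBatch` with `from_list` (`env`, `policy`, and the episode count are assumptions; the `rollout` helper shown later in this file returns dictionaries with the keys expected here):

```python
# Hypothetical sketch: collect a few episodes and batch them together.
paths = [rollout(env, policy, max_episode_length=200) for _ in range(4)]
episodes = EpisodeBatch.from_list(env.spec, paths)
```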
def next_observations(self):
r"""Get the observations seen after actions are performed.
In an :class:`~EpisodeBatch`, next_observations don't need to be stored
explicitly, since the next observation is already stored in
the batch.
Returns:
np.ndarray: The "next_observations" with shape
:math:`(N \bullet [T], O^*)`
"""
return np.concatenate(
tuple([
np.concatenate((eps.observations[1:], eps.last_observations))
for eps in self.split()
]))
|
Get the observations seen after actions are performed.
In an :class:`~EpisodeBatch`, next_observations don't need to be stored
explicitly, since the next observation is already stored in
the batch.
Returns:
np.ndarray: The "next_observations" with shape
:math:`(N \bullet [T], O^*)`
|
next_observations
|
python
|
rlworkgroup/garage
|
src/garage/_dtypes.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_dtypes.py
|
MIT
|
def episode_infos(self):
r"""Get the episode_infos.
In an :class:`~EpisodeBatch`, episode_infos only need to be stored once
per episode. However, the episode_infos field of
:class:`~TimeStepBatch` has shape :math:`(N \bullet [T])`. This method
expands episode_infos_by_episode (which have shape :math:`(N)`) to
:math:`(N \bullet [T])`.
Returns:
dict[str, np.ndarray]: The episode_infos each of length :math:`(N
\bullet [T])`.
"""
return {
key: np.concatenate([
np.repeat([v], length, axis=0)
for (v, length) in zip(val, self.lengths)
])
for (key, val) in self.episode_infos_by_episode.items()
}
|
Get the episode_infos.
In an :class:`~EpisodeBatch`, episode_infos only need to be stored once
per episode. However, the episode_infos field of
:class:`~TimeStepBatch` has shape :math:`(N \bullet [T])`. This method
expands episode_infos_by_episode (which have shape :math:`(N)`) to
:math:`(N \bullet [T])`.
Returns:
dict[str, np.ndarray]: The episode_infos each of length :math:`(N
\bullet [T])`.
|
episode_infos
|
python
|
rlworkgroup/garage
|
src/garage/_dtypes.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_dtypes.py
|
MIT
|
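A small sketch of the expansion performed by the `episode_infos` property (the `'goal'` key is hypothetical; any per-episode value is repeated once per time step of that episode):

```python
# Hypothetical sketch: per-episode infos broadcast to per-step infos.
per_step_goals = batch.episode_infos['goal']   # hypothetical 'goal' key
assert len(per_step_goals) == sum(batch.lengths)
```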
def observations_list(self):
"""Split observations into a list.
Returns:
list[np.ndarray]: Split list.
"""
obs_list = []
for start, stop in self._episode_ranges():
obs_list.append(self.observations[start:stop])
return obs_list
|
Split observations into a list.
Returns:
list[np.ndarray]: Split list.
|
observations_list
|
python
|
rlworkgroup/garage
|
src/garage/_dtypes.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_dtypes.py
|
MIT
|
def actions_list(self):
"""Split actions into a list.
Returns:
list[np.ndarray]: Split list.
"""
acts_list = []
for start, stop in self._episode_ranges():
acts_list.append(self.actions[start:stop])
return acts_list
|
Split actions into a list.
Returns:
list[np.ndarray]: Split list.
|
actions_list
|
python
|
rlworkgroup/garage
|
src/garage/_dtypes.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_dtypes.py
|
MIT
|
def padded_agent_infos(self):
"""Padded agent infos.
Returns:
dict[str, np.ndarray]: Padded agent infos. Each value must have
shape :math:`(N, max_episode_length)` or
:math:`(N, max_episode_length, S^*)`.
"""
return {
k: pad_batch_array(arr, self.lengths,
self.env_spec.max_episode_length)
for (k, arr) in self.agent_infos.items()
}
|
Padded agent infos.
Returns:
dict[str, np.ndarray]: Padded agent infos. Each value must have
shape :math:`(N, max_episode_length)` or
:math:`(N, max_episode_length, S^*)`.
|
padded_agent_infos
|
python
|
rlworkgroup/garage
|
src/garage/_dtypes.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_dtypes.py
|
MIT
|
def padded_env_infos(self):
"""Padded env infos.
Returns:
dict[str, np.ndarray]: Padded env infos. Each value must have
shape :math:`(N, max_episode_length)` or
:math:`(N, max_episode_length, S^*)`.
"""
return {
k: pad_batch_array(arr, self.lengths,
self.env_spec.max_episode_length)
for (k, arr) in self.env_infos.items()
}
|
Padded env infos.
Returns:
dict[str, np.ndarray]: Padded env infos. Each value must have
shape :math:`(N, max_episode_length)` or
:math:`(N, max_episode_length, S^*)`.
|
padded_env_infos
|
python
|
rlworkgroup/garage
|
src/garage/_dtypes.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_dtypes.py
|
MIT
|
def _space_soft_contains(space, element):
"""Check that a space has the same dimensionality as an element.
If the space's dimensionality is not available, check that the space
contains the element.
Args:
space (akro.Space or gym.Space): Space to check
element (object): Element to check in space.
Returns:
bool: True iff the element was "matched" by the space.
"""
if space.contains(element):
return True
elif hasattr(space, 'flat_dim'):
return space.flat_dim == np.prod(element.shape)
else:
return False
|
Check that a space has the same dimensionality as an element.
If the space's dimensionality is not available, check that the space
contains the element.
Args:
space (akro.Space or gym.Space): Space to check
element (object): Element to check in space.
Returns:
bool: True iff the element was "matched" by the space.
|
_space_soft_contains
|
python
|
rlworkgroup/garage
|
src/garage/_dtypes.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_dtypes.py
|
MIT
|
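An illustrative sketch of `_space_soft_contains` (assuming `akro` and `numpy` are imported as in this module; the Box space and element are made up for the example):

```python
# Hypothetical sketch: a flattened element fails contains() but matches
# the space's flat dimension, so the "soft" check still succeeds.
space = akro.Box(low=-1.0, high=1.0, shape=(2, 3))
flat_element = np.zeros(6)
assert _space_soft_contains(space, flat_element)   # 6 == flat_dim of (2, 3)
```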
def check_timestep_batch(batch, array_type, ignored_fields=()):
"""Check a TimeStepBatch of any array type that has .shape.
Args:
batch (TimeStepBatch): Batch of timesteps.
array_type (type): Array type.
ignored_fields (set[str]): Set of fields to ignore checking on.
Raises:
ValueError: If an invariant of TimeStepBatch is broken.
"""
# pylint:disable=too-many-branches
fields = {
field: getattr(batch, field)
for field in [
'env_spec', 'rewards', 'observations', 'actions',
'next_observations', 'step_types', 'agent_infos', 'episode_infos',
'env_infos'
] if field not in ignored_fields
}
env_spec = fields.get('env_spec', None)
inferred_batch_size = None
inferred_batch_size_field = None
for field, value in fields.items():
if field in [
'observations', 'actions', 'rewards', 'next_observations',
'step_types'
]:
if not isinstance(value, array_type):
raise ValueError(f'{field} is not of type {array_type!r}')
if hasattr(value, 'shape'):
if inferred_batch_size is None:
inferred_batch_size = value.shape[0]
inferred_batch_size_field = field
elif value.shape[0] != inferred_batch_size:
raise ValueError(
f'{field} has batch size {value.shape[0]}, but '
f'must have batch size {inferred_batch_size} '
f'to match {inferred_batch_size_field}')
if env_spec and field in ['observations', 'next_observations']:
if not _space_soft_contains(env_spec.observation_space,
value[0]):
raise ValueError(
f'Each {field[:-1]} has shape {value[0].shape} '
f'but must match the observation_space '
f'{env_spec.observation_space}')
if (isinstance(value[0], np.ndarray)
and not env_spec.observation_space.contains(value[0])):
warnings.warn(
f'Observation {value[0]!r} is outside '
f'observation_space {env_spec.observation_space}')
if env_spec and field == 'actions':
if not _space_soft_contains(env_spec.action_space, value[0]):
raise ValueError(
f'Each {field[:-1]} has shape {value[0].shape} '
f'but must match the action_space '
f'{env_spec.action_space}')
if field in ['rewards', 'step_types']:
if value.shape != (inferred_batch_size, ):
raise ValueError(f'{field} has shape {value.shape} '
f'but must have batch size '
f'{inferred_batch_size} to match '
f'{inferred_batch_size_field}')
if field in ['agent_infos', 'env_infos', 'episode_infos']:
for key, val in value.items():
if not isinstance(val, (array_type, dict)):
raise ValueError(
f'Entry {key!r} in {field} is of type {type(val)}'
f'but must be {array_type!r} or dict')
if hasattr(val, 'shape'):
if val.shape[0] != inferred_batch_size:
raise ValueError(
f'Entry {key!r} in {field} has batch size '
f'{val.shape[0]} but must have batch size '
f'{inferred_batch_size} to match '
f'{inferred_batch_size_field}')
if (field == 'step_types' and isinstance(value, np.ndarray)
and # Only numpy arrays support custom dtypes.
value.dtype != StepType):
raise ValueError(
f'step_types has dtype {value.dtype} but must have '
f'dtype StepType')
|
Check a TimeStepBatch of any array type that has .shape.
Args:
batch (TimeStepBatch): Batch of timesteps.
array_type (type): Array type.
ignored_fields (set[str]): Set of fields to ignore checking on.
Raises:
ValueError: If an invariant of TimeStepBatch is broken.
|
check_timestep_batch
|
python
|
rlworkgroup/garage
|
src/garage/_dtypes.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_dtypes.py
|
MIT
|
def render_modes(self):
"""list: A list of string representing the supported render modes.
See render() for a list of modes.
"""
|
list: A list of strings representing the supported render modes.
See render() for a list of modes.
|
render_modes
|
python
|
rlworkgroup/garage
|
src/garage/_environment.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_environment.py
|
MIT
|
def step(self, action):
"""Steps the environment with the action and returns a `EnvStep`.
If the environment returned the last `EnvStep` of a sequence (either
of type TERMINAL or TIMEOUT) at the previous step, this call to
`step()` will start a new sequence and `action` will be ignored.
If `spec.max_episode_length` is reached after applying the action
and the environment has not terminated the episode, `step()` should
return an `EnvStep` with `step_type==StepType.TIMEOUT`.
If possible, update the visualization display as well.
Args:
action (object): A NumPy array, or a nested dict, list or tuple
of arrays conforming to `action_space`.
Returns:
EnvStep: The environment step resulting from the action.
Raises:
RuntimeError: if `step()` is called after the environment has been
constructed and `reset()` has not been called.
"""
|
Steps the environment with the action and returns an `EnvStep`.
If the environment returned the last `EnvStep` of a sequence (either
of type TERMINAL or TIMEOUT) at the previous step, this call to
`step()` will start a new sequence and `action` will be ignored.
If `spec.max_episode_length` is reached after applying the action
and the environment has not terminated the episode, `step()` should
return an `EnvStep` with `step_type==StepType.TIMEOUT`.
If possible, update the visualization display as well.
Args:
action (object): A NumPy array, or a nested dict, list or tuple
of arrays conforming to `action_space`.
Returns:
EnvStep: The environment step resulting from the action.
Raises:
RuntimeError: if `step()` is called after the environment has been
constructed and `reset()` has not been called.
|
step
|
python
|
rlworkgroup/garage
|
src/garage/_environment.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_environment.py
|
MIT
|
def render(self, mode):
"""Renders the environment.
The set of supported modes varies per environment. By convention,
if mode is:
* rgb_array: Return a `numpy.ndarray` with shape (x, y, 3) and type
uint8, representing RGB values for an x-by-y pixel image, suitable
for turning into a video.
* ansi: Return a string (str) or `StringIO.StringIO` containing a
terminal-style text representation. The text can include newlines
and ANSI escape sequences (e.g. for colors).
Make sure that your class's `render_modes` includes the list of
supported modes.
For example:
.. code-block:: python
class MyEnv(Environment):
def render_modes(self):
return ['rgb_array', 'ansi']
def render(self, mode):
if mode == 'rgb_array':
return np.array(...) # return RGB frame for video
elif mode == 'ansi':
... # return text output
else:
raise ValueError('Supported render modes are {}, but '
'got render mode {} instead.'.format(
self.render_modes, mode))
Args:
mode (str): the mode to render with. The string must be present in
`self.render_modes`.
"""
|
Renders the environment.
The set of supported modes varies per environment. By convention,
if mode is:
* rgb_array: Return a `numpy.ndarray` with shape (x, y, 3) and type
uint8, representing RGB values for an x-by-y pixel image, suitable
for turning into a video.
* ansi: Return a string (str) or `StringIO.StringIO` containing a
terminal-style text representation. The text can include newlines
and ANSI escape sequences (e.g. for colors).
Make sure that your class's `render_modes` includes the list of
supported modes.
For example:
.. code-block:: python
class MyEnv(Environment):
def render_modes(self):
return ['rgb_array', 'ansi']
def render(self, mode):
if mode == 'rgb_array':
return np.array(...) # return RGB frame for video
elif mode == 'ansi':
... # return text output
else:
raise ValueError('Supported render modes are {}, but '
'got render mode {} instead.'.format(
self.render_modes, mode))
Args:
mode (str): the mode to render with. The string must be present in
`self.render_modes`.
|
render
|
python
|
rlworkgroup/garage
|
src/garage/_environment.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_environment.py
|
MIT
|
def visualize(self):
"""Creates a visualization of the environment.
This function should be called **only once** after `reset()` to set up
the visualization display. The visualization should be updated
when the environment is changed (i.e. when `step()` is called.)
Calling `close()` will deallocate any resources and close any
windows created by `visualize()`. If `close()` is not explicitly
called, the visualization will be closed when the environment is
destructed (i.e. garbage collected).
"""
|
Creates a visualization of the environment.
This function should be called **only once** after `reset()` to set up
the visualization display. The visualization should be updated
when the environment is changed (i.e. when `step()` is called.)
Calling `close()` will deallocate any resources and close any
windows created by `visualize()`. If `close()` is not explicitly
called, the visualization will be closed when the environment is
destructed (i.e. garbage collected).
|
visualize
|
python
|
rlworkgroup/garage
|
src/garage/_environment.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_environment.py
|
MIT
|
def close(self):
"""Closes the environment.
This method should close all windows invoked by `visualize()`.
Override this function in your subclass to perform any necessary
cleanup.
Environments will automatically `close()` themselves when they are
garbage collected or when the program exits.
"""
|
Closes the environment.
This method should close all windows invoked by `visualize()`.
Override this function in your subclass to perform any necessary
cleanup.
Environments will automatically `close()` themselves when they are
garbage collected or when the program exits.
|
close
|
python
|
rlworkgroup/garage
|
src/garage/_environment.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_environment.py
|
MIT
|
def __getattr__(self, name):
"""Forward getattr request to wrapped environment.
Args:
name (str): Attribute name.
Returns:
object: the wrapped attribute.
Raises:
AttributeError: if the requested attribute is a private attribute,
or if the requested attribute is not found in the
wrapped environment.
"""
if name.startswith('_'):
raise AttributeError(
"attempted to get missing private attribute '{}'".format(name))
if not hasattr(self._env, name):
raise AttributeError('Attribute {} is not found'.format(name))
return getattr(self._env, name)
|
Forward getattr request to wrapped environment.
Args:
name (str): Attribute name.
Returns:
object: the wrapped attribute.
Raises:
AttributeError: if the requested attribute is a private attribute,
or if the requested attribute is not found in the
wrapped environment.
|
__getattr__
|
python
|
rlworkgroup/garage
|
src/garage/_environment.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_environment.py
|
MIT
|
def make_optimizer(optimizer_type, module=None, **kwargs):
"""Create an optimizer for pyTorch & tensorflow algos.
Args:
optimizer_type (Union[type, tuple[type, dict]]): Type of optimizer.
This can be an optimizer type such as 'torch.optim.Adam' or a
tuple of type and dictionary, where dictionary contains arguments
to initialize the optimizer e.g. (torch.optim.Adam, {'lr' : 1e-3})
module (optional): If the optimizer type is a torch optimizer
(`torch.optim.Optimizer`), the `torch.nn.Module` whose parameters
need to be optimized must be specified.
kwargs (dict): Other keyword arguments to initialize the optimizer.
These are not used when `optimizer_type` is a tuple.
Returns:
torch.optim.Optimizer: Constructed optimizer.
Raises:
ValueError: Raised when `optimizer_type` is a tuple and a
non-default argument is passed in `kwargs`.
"""
if isinstance(optimizer_type, tuple):
opt_type, opt_args = optimizer_type
for name, arg in kwargs.items():
if not isinstance(arg, _Default):
raise ValueError('Should not specify {} and explicit \
optimizer args at the same time'.format(name))
if module is not None:
return opt_type(module.parameters(), **opt_args)
else:
return opt_type(**opt_args)
opt_args = {
k: v.val if isinstance(v, _Default) else v
for k, v in kwargs.items()
}
if module is not None:
return optimizer_type(module.parameters(), **opt_args)
else:
return optimizer_type(**opt_args)
|
Create an optimizer for PyTorch & TensorFlow algos.
Args:
optimizer_type (Union[type, tuple[type, dict]]): Type of optimizer.
This can be an optimizer type such as 'torch.optim.Adam' or a
tuple of type and dictionary, where dictionary contains arguments
to initialize the optimizer e.g. (torch.optim.Adam, {'lr' : 1e-3})
module (optional): If the optimizer type is a torch optimizer
(`torch.optim.Optimizer`), the `torch.nn.Module` whose parameters
need to be optimized must be specified.
kwargs (dict): Other keyword arguments to initialize the optimizer.
These are not used when `optimizer_type` is a tuple.
Returns:
torch.optim.Optimizer: Constructed optimizer.
Raises:
ValueError: Raised when `optimizer_type` is a tuple and a
non-default argument is passed in `kwargs`.
|
make_optimizer
|
python
|
rlworkgroup/garage
|
src/garage/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_functions.py
|
MIT
|
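A brief sketch of both call styles accepted by `make_optimizer` (assuming `torch` is available and `model` is an existing `torch.nn.Module`; both names are assumptions for the example):

```python
import torch

# Hypothetical sketch: plain type plus keyword arguments...
opt = make_optimizer(torch.optim.Adam, module=model, lr=1e-3)
# ...or a (type, args-dict) tuple, which carries its own arguments.
opt = make_optimizer((torch.optim.Adam, {'lr': 1e-3}), module=model)
```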
def rollout(env,
agent,
*,
max_episode_length=np.inf,
animated=False,
pause_per_frame=None,
deterministic=False):
"""Sample a single episode of the agent in the environment.
Args:
agent (Policy): Policy used to select actions.
env (Environment): Environment to perform actions in.
max_episode_length (int): If the episode reaches this many timesteps,
it is truncated.
animated (bool): If true, render the environment after each step.
pause_per_frame (float): Time to sleep between steps. Only relevant if
animated == true.
deterministic (bool): If true, use the mean action returned by the
stochastic policy instead of sampling from the returned action
distribution.
Returns:
dict[str, np.ndarray or dict]: Dictionary, with keys:
* observations(np.array): Flattened array of observations.
There should be one more of these than actions. Note that
observations[i] (for i < len(observations) - 1) was used by the
agent to choose actions[i]. Should have shape
:math:`(T + 1, S^*)`, i.e. the unflattened observation space of
the current environment.
* actions(np.array): Non-flattened array of actions. Should have
shape :math:`(T, S^*)`, i.e. the unflattened action space of
the current environment.
* rewards(np.array): Array of rewards of shape :math:`(T,)`, i.e. a
1D array of length timesteps.
* agent_infos(Dict[str, np.array]): Dictionary of stacked,
non-flattened `agent_info` arrays.
* env_infos(Dict[str, np.array]): Dictionary of stacked,
non-flattened `env_info` arrays.
* dones(np.array): Array of termination signals.
"""
env_steps = []
agent_infos = []
observations = []
last_obs, episode_infos = env.reset()
agent.reset()
episode_length = 0
if animated:
env.visualize()
while episode_length < (max_episode_length or np.inf):
if pause_per_frame is not None:
time.sleep(pause_per_frame)
a, agent_info = agent.get_action(last_obs)
if deterministic and 'mean' in agent_info:
a = agent_info['mean']
es = env.step(a)
env_steps.append(es)
observations.append(last_obs)
agent_infos.append(agent_info)
episode_length += 1
if es.last:
break
last_obs = es.observation
return dict(
episode_infos=episode_infos,
observations=np.array(observations),
actions=np.array([es.action for es in env_steps]),
rewards=np.array([es.reward for es in env_steps]),
agent_infos=stack_tensor_dict_list(agent_infos),
env_infos=stack_tensor_dict_list([es.env_info for es in env_steps]),
dones=np.array([es.terminal for es in env_steps]),
)
|
Sample a single episode of the agent in the environment.
Args:
agent (Policy): Policy used to select actions.
env (Environment): Environment to perform actions in.
max_episode_length (int): If the episode reaches this many timesteps,
it is truncated.
animated (bool): If true, render the environment after each step.
pause_per_frame (float): Time to sleep between steps. Only relevant if
animated == true.
deterministic (bool): If true, use the mean action returned by the
stochastic policy instead of sampling from the returned action
distribution.
Returns:
dict[str, np.ndarray or dict]: Dictionary, with keys:
* observations(np.array): Flattened array of observations.
There should be one more of these than actions. Note that
observations[i] (for i < len(observations) - 1) was used by the
agent to choose actions[i]. Should have shape
:math:`(T + 1, S^*)`, i.e. the unflattened observation space of
the current environment.
* actions(np.array): Non-flattened array of actions. Should have
shape :math:`(T, S^*)`, i.e. the unflattened action space of
the current environment.
* rewards(np.array): Array of rewards of shape :math:`(T,)`, i.e. a
1D array of length timesteps.
* agent_infos(Dict[str, np.array]): Dictionary of stacked,
non-flattened `agent_info` arrays.
* env_infos(Dict[str, np.array]): Dictionary of stacked,
non-flattened `env_info` arrays.
* dones(np.array): Array of termination signals.
|
rollout
|
python
|
rlworkgroup/garage
|
src/garage/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_functions.py
|
MIT
|
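A minimal sketch of calling `rollout` for a single evaluation episode (`env` and `policy` are assumed objects following the APIs above; the length limit is an arbitrary example value):

```python
# Hypothetical sketch: one deterministic episode and its total reward.
path = rollout(env, policy, max_episode_length=200, deterministic=True)
total_reward = path['rewards'].sum()
episode_length = len(path['rewards'])
```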
def obtain_evaluation_episodes(policy,
env,
max_episode_length=1000,
num_eps=100,
deterministic=True):
"""Sample the policy for num_eps episodes and return average values.
Args:
policy (Policy): Policy to use as the actor when gathering samples.
env (Environment): The environment used to obtain episodes.
max_episode_length (int): Maximum episode length. The episode will be
truncated when its length reaches max_episode_length.
num_eps (int): Number of episodes.
deterministic (bool): Whether a deterministic approach is used in the
rollout.
Returns:
EpisodeBatch: Evaluation episodes, representing the best current
performance of the algorithm.
"""
episodes = []
# Use a finite length rollout for evaluation.
with click.progressbar(range(num_eps), label='Evaluating') as pbar:
for _ in pbar:
eps = rollout(env,
policy,
max_episode_length=max_episode_length,
deterministic=deterministic)
episodes.append(eps)
return EpisodeBatch.from_list(env.spec, episodes)
|
Sample the policy for num_eps episodes and return average values.
Args:
policy (Policy): Policy to use as the actor when gathering samples.
env (Environment): The environment used to obtain episodes.
max_episode_length (int): Maximum episode length. The episode will be
truncated when its length reaches max_episode_length.
num_eps (int): Number of episodes.
deterministic (bool): Whether a deterministic approach is used in the
rollout.
Returns:
EpisodeBatch: Evaluation episodes, representing the best current
performance of the algorithm.
|
obtain_evaluation_episodes
|
python
|
rlworkgroup/garage
|
src/garage/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_functions.py
|
MIT
|
def log_multitask_performance(itr, batch, discount, name_map=None):
r"""Log performance of episodes from multiple tasks.
Args:
itr (int): Iteration number to be logged.
batch (EpisodeBatch): Batch of episodes. The episodes should have
either the "task_name" or "task_id" `env_infos`. If the "task_name"
is not present, then `name_map` is required, and should map from
task id's to task names.
discount (float): Discount used in computing returns.
name_map (dict[int, str] or None): Mapping from task id's to task
names. Optional if the "task_name" environment info is present.
Note that if provided, all tasks listed in this map will be logged,
even if there are no episodes present for them.
Returns:
numpy.ndarray: Undiscounted returns averaged across all tasks. Has
shape :math:`(N \bullet [T])`.
"""
eps_by_name = defaultdict(list)
for eps in batch.split():
task_name = '__unnamed_task__'
if 'task_name' in eps.env_infos:
task_name = eps.env_infos['task_name'][0]
elif 'task_id' in eps.env_infos:
name_map = {} if name_map is None else name_map
task_id = eps.env_infos['task_id'][0]
task_name = name_map.get(task_id, 'Task #{}'.format(task_id))
eps_by_name[task_name].append(eps)
if name_map is None:
task_names = eps_by_name.keys()
else:
task_names = name_map.values()
for task_name in task_names:
if task_name in eps_by_name:
episodes = eps_by_name[task_name]
log_performance(itr,
EpisodeBatch.concatenate(*episodes),
discount,
prefix=task_name)
else:
with tabular.prefix(task_name + '/'):
tabular.record('Iteration', itr)
tabular.record('NumEpisodes', 0)
tabular.record('AverageDiscountedReturn', np.nan)
tabular.record('AverageReturn', np.nan)
tabular.record('StdReturn', np.nan)
tabular.record('MaxReturn', np.nan)
tabular.record('MinReturn', np.nan)
tabular.record('TerminationRate', np.nan)
tabular.record('SuccessRate', np.nan)
return log_performance(itr, batch, discount=discount, prefix='Average')
|
Log performance of episodes from multiple tasks.
Args:
itr (int): Iteration number to be logged.
batch (EpisodeBatch): Batch of episodes. The episodes should have
either the "task_name" or "task_id" `env_infos`. If the "task_name"
is not present, then `name_map` is required, and should map from
task id's to task names.
discount (float): Discount used in computing returns.
name_map (dict[int, str] or None): Mapping from task id's to task
names. Optional if the "task_name" environment info is present.
Note that if provided, all tasks listed in this map will be logged,
even if there are no episodes present for them.
Returns:
numpy.ndarray: Undiscounted returns averaged across all tasks. Has
shape :math:`(N \bullet [T])`.
|
log_multitask_performance
|
python
|
rlworkgroup/garage
|
src/garage/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_functions.py
|
MIT
|
def log_performance(itr, batch, discount, prefix='Evaluation'):
"""Evaluate the performance of an algorithm on a batch of episodes.
Args:
itr (int): Iteration number.
batch (EpisodeBatch): The episodes to evaluate with.
discount (float): Discount value, from algorithm's property.
prefix (str): Prefix to add to all logged keys.
Returns:
numpy.ndarray: Undiscounted returns.
"""
returns = []
undiscounted_returns = []
termination = []
success = []
for eps in batch.split():
returns.append(discount_cumsum(eps.rewards, discount))
undiscounted_returns.append(sum(eps.rewards))
termination.append(
float(
any(step_type == StepType.TERMINAL
for step_type in eps.step_types)))
if 'success' in eps.env_infos:
success.append(float(eps.env_infos['success'].any()))
average_discounted_return = np.mean([rtn[0] for rtn in returns])
with tabular.prefix(prefix + '/'):
tabular.record('Iteration', itr)
tabular.record('NumEpisodes', len(returns))
tabular.record('AverageDiscountedReturn', average_discounted_return)
tabular.record('AverageReturn', np.mean(undiscounted_returns))
tabular.record('StdReturn', np.std(undiscounted_returns))
tabular.record('MaxReturn', np.max(undiscounted_returns))
tabular.record('MinReturn', np.min(undiscounted_returns))
tabular.record('TerminationRate', np.mean(termination))
if success:
tabular.record('SuccessRate', np.mean(success))
return undiscounted_returns
|
Evaluate the performance of an algorithm on a batch of episodes.
Args:
itr (int): Iteration number.
batch (EpisodeBatch): The episodes to evaluate with.
discount (float): Discount value, from algorithm's property.
prefix (str): Prefix to add to all logged keys.
Returns:
numpy.ndarray: Undiscounted returns.
|
log_performance
|
python
|
rlworkgroup/garage
|
src/garage/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/_functions.py
|
MIT
|
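A minimal sketch combining `obtain_evaluation_episodes` and `log_performance` (the discount value and episode count are arbitrary example settings):

```python
# Hypothetical sketch: evaluate a policy and log the statistics for
# iteration 0 under the default 'Evaluation' prefix.
eval_eps = obtain_evaluation_episodes(policy, env, num_eps=10)
undiscounted = log_performance(itr=0, batch=eval_eps, discount=0.99)
```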
def __init__(self, desc='4x4', max_episode_length=None):
"""Initialize the environment.
Args:
desc (str): grid configuration key.
max_episode_length (int): The maximum steps allowed for an episode.
"""
if isinstance(desc, str):
desc = MAPS[desc]
desc = np.array(list(map(list, desc)))
desc[desc == '.'] = 'F'
desc[desc == 'o'] = 'H'
desc[desc == 'x'] = 'W'
self._desc = desc
self._n_row, self._n_col = desc.shape
(start_x, ), (start_y, ) = np.nonzero(desc == 'S')
self._start_state = start_x * self._n_col + start_y
self._state = None
self._domain_fig = None
self._step_cnt = None
self._max_episode_length = max_episode_length
self._action_space = akro.Discrete(4)
self._observation_space = akro.Discrete(self._n_row * self._n_col)
self._spec = EnvSpec(action_space=self.action_space,
observation_space=self.observation_space,
max_episode_length=max_episode_length)
|
Initialize the environment.
Args:
desc (str): grid configuration key.
max_episode_length (int): The maximum steps allowed for an episode.
|
__init__
|
python
|
rlworkgroup/garage
|
src/garage/envs/grid_world_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/grid_world_env.py
|
MIT
|
def step(self, action):
"""Steps the environment.
action map:
0: left
1: down
2: right
3: up
Args:
action (int): an int encoding the action
Returns:
EnvStep: The environment step resulting from the action.
Raises:
RuntimeError: if `step()` is called after the environment has been
constructed and `reset()` has not been called.
NotImplementedError: if a next step in self._desc does not match
known state type.
"""
if self._step_cnt is None:
raise RuntimeError('reset() must be called before step()!')
possible_next_states = self._get_possible_next_states(
self._state, action)
probs = [x[1] for x in possible_next_states]
next_state_idx = np.random.choice(len(probs), p=probs)
next_state = possible_next_states[next_state_idx][0]
next_x = next_state // self._n_col
next_y = next_state % self._n_col
next_state_type = self._desc[next_x, next_y]
if next_state_type == 'H':
done = True
reward = 0.0
elif next_state_type in ['F', 'S']:
done = False
reward = 0.0
elif next_state_type == 'G':
done = True
reward = 1.0
else:
raise NotImplementedError
self._state = next_state
self._step_cnt += 1
step_type = StepType.get_step_type(
step_cnt=self._step_cnt,
max_episode_length=self._max_episode_length,
done=done)
if step_type in (StepType.TERMINAL, StepType.TIMEOUT):
self._step_cnt = None
return EnvStep(env_spec=self.spec,
action=action,
reward=reward,
observation=next_state,
env_info={},
step_type=step_type)
|
Steps the environment.
action map:
0: left
1: down
2: right
3: up
Args:
action (int): an int encoding the action
Returns:
EnvStep: The environment step resulting from the action.
Raises:
RuntimeError: if `step()` is called after the environment has been
constructed and `reset()` has not been called.
NotImplementedError: if a next step in self._desc does not match
known state type.
|
step
|
python
|
rlworkgroup/garage
|
src/garage/envs/grid_world_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/grid_world_env.py
|
MIT
|
def render(self, mode):
"""Renders the environment.
Args:
mode (str): the mode to render with. The string must be present in
`Environment.render_modes`.
"""
|
Renders the environment.
Args:
mode (str): the mode to render with. The string must be present in
`Environment.render_modes`.
|
render
|
python
|
rlworkgroup/garage
|
src/garage/envs/grid_world_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/grid_world_env.py
|
MIT
|
def _get_possible_next_states(self, state, action):
"""Return possible next states and their probabilities.
Only next states with nonzero probabilities will be returned.
Args:
state (list): start state
action (int): action
Returns:
list: a list of pairs (s', p(s'|s,a))
"""
x = state // self._n_col
y = state % self._n_col
coords = np.array([x, y])
increments = np.array([[0, -1], [1, 0], [0, 1], [-1, 0]])
next_coords = np.clip(coords + increments[action], [0, 0],
[self._n_row - 1, self._n_col - 1])
next_state = next_coords[0] * self._n_col + next_coords[1]
state_type = self._desc[x, y]
next_state_type = self._desc[next_coords[0], next_coords[1]]
if next_state_type == 'W' or state_type == 'H' or state_type == 'G':
return [(state, 1.)]
else:
return [(next_state, 1.)]
|
Return possible next states and their probabilities.
Only next states with nonzero probabilities will be returned.
Args:
state (list): start state
action (int): action
Returns:
list: a list of pairs (s', p(s'|s,a))
|
_get_possible_next_states
|
python
|
rlworkgroup/garage
|
src/garage/envs/grid_world_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/grid_world_env.py
|
MIT
|
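An illustrative sketch of the deterministic transition returned by `_get_possible_next_states` (assuming the enclosing class is `GridWorldEnv`, as in garage, and the default '4x4' map where state 0 is the start state):

```python
# Hypothetical sketch: moving right (action 2) from state 0 leads to
# state 1 with probability 1 on the default 4x4 map.
env = GridWorldEnv(desc='4x4')
next_states = env._get_possible_next_states(0, 2)   # expected: [(1, 1.0)]
```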
def __new__(cls, *args, **kwargs):
"""Returns environment specific wrapper based on input environment type.
Args:
*args: Positional arguments
**kwargs: Keyword arguments
Returns:
garage.envs.bullet.BulletEnv: if the environment is a bullet-based
environment. Else returns a garage.envs.GymEnv
"""
# pylint: disable=import-outside-toplevel
# Determine if the input env is a bullet-based gym environment
env = None
if 'env' in kwargs: # env passed as a keyword arg
env = kwargs['env']
elif len(args) >= 1:
# env passed as a positional arg
env = args[0]
if isinstance(env, gym.Env):
if env.spec and hasattr(env.spec,
'id') and env.spec.id.find('Bullet') >= 0:
from garage.envs.bullet import BulletEnv
return BulletEnv(*args, **kwargs)
elif isinstance(env, str):
if 'Bullet' in env:
from garage.envs.bullet import BulletEnv
return BulletEnv(*args, **kwargs)
return super(GymEnv, cls).__new__(cls)
|
Returns environment specific wrapper based on input environment type.
Args:
*args: Positional arguments
**kwargs: Keyword arguments
Returns:
garage.envs.bullet.BulletEnv: if the environment is a bullet-based
environment. Else returns a garage.envs.GymEnv
|
__new__
|
python
|
rlworkgroup/garage
|
src/garage/envs/gym_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/gym_env.py
|
MIT
|
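Because __new__ inspects the env (or env id) before the object is created, the same call site can yield different wrapper types. A hedged illustration; it requires gym and, for the first line, pybullet to be installed:

from garage.envs import GymEnv

# Ids containing 'Bullet' are routed to garage.envs.bullet.BulletEnv;
# everything else falls through to GymEnv itself.
bullet_env = GymEnv('KukaBulletEnv-v0')   # needs pybullet installed
plain_env = GymEnv('CartPole-v1')
print(type(bullet_env).__name__)          # BulletEnv
print(type(plain_env).__name__)           # GymEnv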
def step(self, action):
"""Call step on wrapped env.
Args:
action (np.ndarray): An action provided by the agent.
Returns:
EnvStep: The environment step resulting from the action.
Raises:
RuntimeError: if `step()` is called after the environment has been
constructed and `reset()` has not been called.
RuntimeError: if underlying environment outputs inconsistent
env_info keys.
"""
if self._step_cnt is None:
raise RuntimeError('reset() must be called before step()!')
observation, reward, done, info = self._env.step(action)
if self._visualize:
self._env.render(mode='human')
reward = float(reward) if not isinstance(reward, float) else reward
self._step_cnt += 1
step_type = StepType.get_step_type(
step_cnt=self._step_cnt,
max_episode_length=self._max_episode_length,
done=done)
# gym envs that are wrapped in TimeLimit wrapper modify
# the done/termination signal to be true whenever a time
# limit expiration occurs. The following statement sets
# the done signal to be True only if caused by an
# environment termination, and not a time limit
# termination. The time limit termination signal
# will be saved inside env_infos as
# 'GymEnv.TimeLimitTerminated'
if 'TimeLimit.truncated' in info or step_type == StepType.TIMEOUT:
info['GymEnv.TimeLimitTerminated'] = True
info['TimeLimit.truncated'] = info.get('TimeLimit.truncated', True)
step_type = StepType.TIMEOUT
else:
info['TimeLimit.truncated'] = False
info['GymEnv.TimeLimitTerminated'] = False
if step_type in (StepType.TERMINAL, StepType.TIMEOUT):
self._step_cnt = None
# check that env_infos are consistent
if not self._env_info:
self._env_info = {k: type(info[k]) for k in info}
elif self._env_info.keys() != info.keys():
raise RuntimeError('GymEnv outputs inconsistent env_info keys.')
if not self.spec.observation_space.contains(observation):
# Discrete actions can be either in the space normally, or one-hot
# encoded.
if self.spec.observation_space.flat_dim != np.prod(
observation.shape):
raise RuntimeError('GymEnv observation shape does not '
'conform to its observation_space')
return EnvStep(env_spec=self.spec,
action=action,
reward=reward,
observation=observation,
env_info=info,
step_type=step_type)
|
Call step on wrapped env.
Args:
action (np.ndarray): An action provided by the agent.
Returns:
EnvStep: The environment step resulting from the action.
Raises:
RuntimeError: if `step()` is called after the environment has been
constructed and `reset()` has not been called.
RuntimeError: if underlying environment outputs inconsistent
env_info keys.
|
step
|
python
|
rlworkgroup/garage
|
src/garage/envs/gym_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/gym_env.py
|
MIT
|
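From the caller's side, the time-limit bookkeeping above surfaces as two env_info flags plus the step type. A short sketch; the tiny max_episode_length is only there to force a timeout quickly:

from garage.envs import GymEnv

env = GymEnv('CartPole-v1', max_episode_length=5)
env.reset()
es = env.step(env.action_space.sample())
while not es.last:
    es = env.step(env.action_space.sample())
# If the limit expired first, step_type is StepType.TIMEOUT and both flags
# below are True; on a natural termination they are False.
print(es.step_type)
print(es.env_info['TimeLimit.truncated'],
      es.env_info['GymEnv.TimeLimitTerminated'])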
def _close_viewer_window(self):
"""Close viewer window.
Unfortunately, some gym environments don't close the viewer windows
properly, which leads to "out of memory" issues when several of
these environments are tested one after the other.
This method searches for the viewer object of type MjViewer, Viewer
or SimpleImageViewer, based on environment, and if the environment
is wrapped in other environment classes, it performs depth search
in those as well.
This method can be removed once OpenAI solves the issue.
"""
# We need to do some strange things here to fix-up flaws in gym
# pylint: disable=import-outside-toplevel
if hasattr(self._env, 'spec') and self._env.spec:
if any(package in getattr(self._env.spec, 'entry_point', '')
for package in KNOWN_GYM_NOT_CLOSE_MJ_VIEWER):
# This import is not in the header to avoid a MuJoCo dependency
# with non-MuJoCo environments that use this base class.
try:
from mujoco_py.mjviewer import MjViewer
import glfw
except ImportError:
# If we can't import mujoco_py, we must not have an
# instance of a class that we know how to close here.
return
if (hasattr(self._env, 'viewer')
and isinstance(self._env.viewer, MjViewer)):
glfw.destroy_window(self._env.viewer.window)
elif any(package in getattr(self._env.spec, 'entry_point', '')
for package in KNOWN_GYM_NOT_CLOSE_VIEWER):
if hasattr(self._env, 'viewer'):
from gym.envs.classic_control.rendering import (
Viewer, SimpleImageViewer)
if (isinstance(self._env.viewer,
(SimpleImageViewer, Viewer))):
self._env.viewer.close()
|
Close viewer window.
Unfortunately, some gym environments don't close the viewer windows
properly, which leads to "out of memory" issues when several of
these environments are tested one after the other.
This method searches for the viewer object of type MjViewer, Viewer
or SimpleImageViewer, based on environment, and if the environment
is wrapped in other environment classes, it performs depth search
in those as well.
This method can be removed once OpenAI solves the issue.
|
_close_viewer_window
|
python
|
rlworkgroup/garage
|
src/garage/envs/gym_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/gym_env.py
|
MIT
|
def __getattr__(self, name):
"""Handle function calls wrapped environment.
Args:
name (str): attribute name
Returns:
object: the wrapped attribute.
Raises:
AttributeError: if the requested attribute is a private
attribute, or if the requested attribute is not found in the
wrapped environment.
"""
if name.startswith('_'):
raise AttributeError(
'attempted to get missing private attribute {}'.format(name))
if not hasattr(self._env, name):
raise AttributeError('Attribute {} is not found'.format(name))
return getattr(self._env, name)
|
Handle function calls wrapped environment.
Args:
name (str): attribute name
Returns:
object: the wrapped attribute.
Raises:
AttributeError: if the requested attribute is a private
attribute, or if the requested attribute is not found in the
wrapped environment.
|
__getattr__
|
python
|
rlworkgroup/garage
|
src/garage/envs/gym_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/gym_env.py
|
MIT
|
def sample_tasks(self, n_tasks):
"""Samples n_tasks tasks.
Part of the set_task environment protocol. To call this method, a
benchmark must have been passed in at environment construction.
Args:
n_tasks (int): Number of tasks to sample.
Returns:
dict[str,object]: Task object to pass back to `set_task`.
"""
assert self._constructed_from_benchmark
tasks = []
while len(tasks) < n_tasks:
if self._next_env == len(self._env_list):
self._next_env = 0
self._next_task_index += 1
env_name = self._env_list[self._next_env]
self._next_env += 1
env_tasks = self._tasks_by_env[env_name]
if self._next_task_index >= len(env_tasks):
random.shuffle(env_tasks)
self._next_task_index = 0
tasks.append(env_tasks[self._next_task_index])
return tasks
|
Samples n_tasks tasks.
Part of the set_task environment protocol. To call this method, a
benchmark must have been passed in at environment construction.
Args:
n_tasks (int): Number of tasks to sample.
Returns:
dict[str,object]: Task object to pass back to `set_task`.
|
sample_tasks
|
python
|
rlworkgroup/garage
|
src/garage/envs/metaworld_set_task_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/metaworld_set_task_env.py
|
MIT
|
def set_task(self, task):
"""Set the task.
Part of the set_task environment protocol.
Args:
task (dict[str,object]): Task object from `sample_tasks`.
"""
# Mixing train and test is probably a mistake
assert self._kind is None or self._kind == task['kind']
self._benchmark = task['benchmark']
self._kind = task['kind']
self._add_env_onehot = task['add_env_onehot']
if not self._inner_tasks:
self._fill_tasks()
self._current_task = task['inner']
self._construct_env_if_needed()
self._current_env.set_task(task['inner'])
self._current_env.reset()
|
Set the task.
Part of the set_task environment protocol.
Args:
task (dict[str,object]): Task object from `sample_tasks`.
|
set_task
|
python
|
rlworkgroup/garage
|
src/garage/envs/metaworld_set_task_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/metaworld_set_task_env.py
|
MIT
|
def _fill_tasks(self):
"""Fill out _tasks after the benchmark is set.
Raises:
ValueError: If kind is not set to "train" or "test"
"""
if self._add_env_onehot:
if (self._kind == 'test'
or 'metaworld.ML' in repr(type(self._benchmark))):
raise ValueError('add_env_onehot should only be used with '
'multi-task benchmarks, not ' +
repr(self._benchmark))
self._tasks = []
if self._kind is None:
return
if self._kind == 'test':
self._inner_tasks = self._benchmark.test_tasks
self._classes = self._benchmark.test_classes
elif self._kind == 'train':
self._inner_tasks = self._benchmark.train_tasks
self._classes = self._benchmark.train_classes
else:
raise ValueError('kind should be either "train" or "test", not ' +
repr(self._kind))
self._env_list = list(self._classes.keys())
if self._add_env_onehot:
self._task_indices = {
env_name: index
for (index, env_name) in enumerate(self._classes.keys())
}
self._tasks_by_env = {}
for inner in self._inner_tasks:
task = {
'kind': self._kind,
'benchmark': self._benchmark,
'add_env_onehot': self._add_env_onehot,
'inner': inner,
}
self._tasks_by_env.setdefault(inner.env_name, []).append(task)
|
Fill out _tasks after the benchmark is set.
Raises:
ValueError: If kind is not set to "train" or "test"
|
_fill_tasks
|
python
|
rlworkgroup/garage
|
src/garage/envs/metaworld_set_task_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/metaworld_set_task_env.py
|
MIT
|
def round_robin_strategy(num_tasks, last_task=None):
"""A function for sampling tasks in round robin fashion.
Args:
num_tasks (int): Total number of tasks.
last_task (int): Previously sampled task.
Returns:
int: task id.
"""
if last_task is None:
return 0
return (last_task + 1) % num_tasks
|
A function for sampling tasks in round robin fashion.
Args:
num_tasks (int): Total number of tasks.
last_task (int): Previously sampled task.
Returns:
int: task id.
|
round_robin_strategy
|
python
|
rlworkgroup/garage
|
src/garage/envs/multi_env_wrapper.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/multi_env_wrapper.py
|
MIT
|
def observation_space(self):
"""Observation space.
Returns:
akro.Box: Observation space.
"""
if self._mode == 'vanilla':
return self._env.observation_space
elif self._mode == 'add-onehot':
task_lb, task_ub = self.task_space.bounds
env_lb, env_ub = self._env.observation_space.bounds
return akro.Box(np.concatenate([env_lb, task_lb]),
np.concatenate([env_ub, task_ub]))
else: # self._mode == 'del-onehot'
env_lb, env_ub = self._env.observation_space.bounds
num_tasks = self._num_tasks
return akro.Box(env_lb[:-num_tasks], env_ub[:-num_tasks])
|
Observation space.
Returns:
akro.Box: Observation space.
|
observation_space
|
python
|
rlworkgroup/garage
|
src/garage/envs/multi_env_wrapper.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/multi_env_wrapper.py
|
MIT
|
def task_space(self):
"""Task Space.
Returns:
akro.Box: Task space.
"""
one_hot_ub = np.ones(self.num_tasks)
one_hot_lb = np.zeros(self.num_tasks)
return akro.Box(one_hot_lb, one_hot_ub)
|
Task Space.
Returns:
akro.Box: Task space.
|
task_space
|
python
|
rlworkgroup/garage
|
src/garage/envs/multi_env_wrapper.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/multi_env_wrapper.py
|
MIT
|
def active_task_index(self):
"""Index of active task env.
Returns:
int: Index of active task.
"""
if hasattr(self._env, 'active_task_index'):
return self._env.active_task_index
else:
return self._active_task_index
|
Index of active task env.
Returns:
int: Index of active task.
|
active_task_index
|
python
|
rlworkgroup/garage
|
src/garage/envs/multi_env_wrapper.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/multi_env_wrapper.py
|
MIT
|
def step(self, action):
"""Step the active task env.
Args:
            action (object): object to be passed to Environment.step(action)
Returns:
EnvStep: The environment step resulting from the action.
"""
es = self._env.step(action)
if self._mode == 'add-onehot':
obs = np.concatenate([es.observation, self._active_task_one_hot()])
elif self._mode == 'del-onehot':
obs = es.observation[:-self._num_tasks]
else: # self._mode == 'vanilla'
obs = es.observation
env_info = es.env_info
if 'task_id' not in es.env_info:
env_info['task_id'] = self._active_task_index
if self._env_names is not None:
env_info['task_name'] = self._env_names[self._active_task_index]
return EnvStep(env_spec=self.spec,
action=action,
reward=es.reward,
observation=obs,
env_info=env_info,
step_type=es.step_type)
|
Step the active task env.
Args:
        action (object): object to be passed to Environment.step(action)
Returns:
EnvStep: The environment step resulting from the action.
|
step
|
python
|
rlworkgroup/garage
|
src/garage/envs/multi_env_wrapper.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/multi_env_wrapper.py
|
MIT
|
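A hedged construction sketch for the wrapper whose step() is shown above. The keyword names (sample_strategy, mode, env_names) and the PointEnv import reflect my reading of multi_env_wrapper.py and point_env.py and should be treated as assumptions:

from garage.envs.multi_env_wrapper import (MultiEnvWrapper,
                                           round_robin_strategy)
from garage.envs.point_env import PointEnv

# Two task envs behind one interface; 'add-onehot' appends the task one-hot
# to every observation, and env_names lets step() report a task_name.
wrapped = MultiEnvWrapper([PointEnv(), PointEnv()],
                          sample_strategy=round_robin_strategy,
                          mode='add-onehot',
                          env_names=['point-a', 'point-b'])
wrapped.reset()
es = wrapped.step(wrapped.action_space.sample())
print(es.env_info['task_id'], es.env_info['task_name'])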
def _active_task_one_hot(self):
"""One-hot representation of active task.
Returns:
numpy.ndarray: one-hot representation of active task
"""
one_hot = np.zeros(self.task_space.shape)
index = self.active_task_index or 0
one_hot[index] = self.task_space.high[index]
return one_hot
|
One-hot representation of active task.
Returns:
numpy.ndarray: one-hot representation of active task
|
_active_task_one_hot
|
python
|
rlworkgroup/garage
|
src/garage/envs/multi_env_wrapper.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/multi_env_wrapper.py
|
MIT
|
def step(self, action):
"""Call step on wrapped env.
Args:
action (np.ndarray): An action provided by the agent.
Returns:
EnvStep: The environment step resulting from the action.
Raises:
RuntimeError: if `step()` is called after the environment has been
constructed and `reset()` has not been called.
"""
if isinstance(self.action_space, akro.Box):
# rescale the action when the bounds are not inf
lb, ub = self.action_space.low, self.action_space.high
            if np.all(lb != -np.inf) and np.all(ub != np.inf):
scaled_action = lb + (action + self._expected_action_scale) * (
0.5 * (ub - lb) / self._expected_action_scale)
scaled_action = np.clip(scaled_action, lb, ub)
else:
scaled_action = action
else:
scaled_action = action
es = self._env.step(scaled_action)
next_obs = es.observation
reward = es.reward
if self._normalize_obs:
next_obs = self._apply_normalize_obs(next_obs)
if self._normalize_reward:
reward = self._apply_normalize_reward(reward)
return EnvStep(env_spec=es.env_spec,
action=action,
reward=reward * self._scale_reward,
observation=next_obs,
env_info=es.env_info,
step_type=es.step_type)
|
Call step on wrapped env.
Args:
action (np.ndarray): An action provided by the agent.
Returns:
EnvStep: The environment step resulting from the action.
Raises:
RuntimeError: if `step()` is called after the environment has been
constructed and `reset()` has not been called.
|
step
|
python
|
rlworkgroup/garage
|
src/garage/envs/normalized_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/normalized_env.py
|
MIT
|
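A hedged sketch of wrapping an environment with this normalizer; I believe it is exposed as garage.envs.normalize with the keyword names below, but treat the import and the parameters as assumptions:

from garage.envs import GymEnv, normalize

# Rescales actions into the env bounds, z-scores observations online, and
# divides rewards by a running std before multiplying by scale_reward.
env = normalize(GymEnv('InvertedDoublePendulum-v2'),
                normalize_obs=True,
                normalize_reward=True,
                scale_reward=0.1)
env.reset()
es = env.step(env.action_space.sample())
print(es.observation[:3], es.reward)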
def _apply_normalize_obs(self, obs):
"""Compute normalized observation.
Args:
obs (np.ndarray): Observation.
Returns:
np.ndarray: Normalized observation.
"""
self._update_obs_estimate(obs)
flat_obs = self._env.observation_space.flatten(obs)
normalized_obs = (flat_obs -
self._obs_mean) / (np.sqrt(self._obs_var) + 1e-8)
if not self._flatten_obs:
normalized_obs = self._env.observation_space.unflatten(
self._env.observation_space, normalized_obs)
return normalized_obs
|
Compute normalized observation.
Args:
obs (np.ndarray): Observation.
Returns:
np.ndarray: Normalized observation.
|
_apply_normalize_obs
|
python
|
rlworkgroup/garage
|
src/garage/envs/normalized_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/normalized_env.py
|
MIT
|
def _apply_normalize_reward(self, reward):
"""Compute normalized reward.
Args:
reward (float): Reward.
Returns:
float: Normalized reward.
"""
self._update_reward_estimate(reward)
return reward / (np.sqrt(self._reward_var) + 1e-8)
|
Compute normalized reward.
Args:
reward (float): Reward.
Returns:
float: Normalized reward.
|
_apply_normalize_reward
|
python
|
rlworkgroup/garage
|
src/garage/envs/normalized_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/normalized_env.py
|
MIT
|
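The running statistics used by _apply_normalize_reward are maintained by an update method that is not part of this extract. The snippet below is a stand-in sketch using an exponential moving average; the attribute names and the smoothing constant are assumptions, not the file's actual implementation:

import numpy as np

class RunningRewardStats:
    """Stand-in for the reward mean/variance tracking referenced above."""

    def __init__(self, alpha=0.001):
        self._alpha = alpha          # smoothing factor (assumption)
        self._reward_mean = 0.0
        self._reward_var = 1.0

    def update(self, reward):
        # Exponential moving estimates of mean and variance.
        delta = reward - self._reward_mean
        self._reward_mean += self._alpha * delta
        self._reward_var = (1 - self._alpha) * self._reward_var + \
            self._alpha * delta ** 2

    def normalize(self, reward):
        return reward / (np.sqrt(self._reward_var) + 1e-8)

stats = RunningRewardStats()
for r in [1.0, 0.5, 2.0]:
    stats.update(r)
print(stats.normalize(2.0))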
def step(self, action):
"""Step the environment.
Args:
action (np.ndarray): An action provided by the agent.
Returns:
EnvStep: The environment step resulting from the action.
Raises:
            RuntimeError: if `step()` is called after the environment has
                been constructed and `reset()` has not been called.
"""
if self._step_cnt is None:
raise RuntimeError('reset() must be called before step()!')
# enforce action space
a = action.copy() # NOTE: we MUST copy the action before modifying it
a = np.clip(a, self.action_space.low, self.action_space.high)
self._point = np.clip(self._point + a, -self._arena_size,
self._arena_size)
if self._visualize:
print(self.render('ascii'))
dist = np.linalg.norm(self._point - self._goal)
succ = dist < np.linalg.norm(self.action_space.low)
# dense reward
reward = -dist
# done bonus
if succ:
reward += self._done_bonus
# Type conversion
if not isinstance(reward, float):
reward = float(reward)
# sometimes we don't want to terminate
done = succ and not self._never_done
obs = np.concatenate([self._point, (dist, )])
self._step_cnt += 1
step_type = StepType.get_step_type(
step_cnt=self._step_cnt,
max_episode_length=self._max_episode_length,
done=done)
if step_type in (StepType.TERMINAL, StepType.TIMEOUT):
self._step_cnt = None
return EnvStep(env_spec=self.spec,
action=action,
reward=reward,
observation=obs,
env_info={
'task': self._task,
'success': succ
},
step_type=step_type)
|
Step the environment.
Args:
action (np.ndarray): An action provided by the agent.
Returns:
EnvStep: The environment step resulting from the action.
Raises:
        RuntimeError: if `step()` is called after the environment has
            been constructed and `reset()` has not been called.
|
step
|
python
|
rlworkgroup/garage
|
src/garage/envs/point_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/point_env.py
|
MIT
|
def sample_tasks(self, num_tasks):
"""Sample a list of `num_tasks` tasks.
Args:
num_tasks (int): Number of tasks to sample.
Returns:
list[dict[str, np.ndarray]]: A list of "tasks", where each task is
a dictionary containing a single key, "goal", mapping to a
point in 2D space.
"""
goals = np.random.uniform(-2, 2, size=(num_tasks, 2))
tasks = [{'goal': goal} for goal in goals]
return tasks
|
Sample a list of `num_tasks` tasks.
Args:
num_tasks (int): Number of tasks to sample.
Returns:
list[dict[str, np.ndarray]]: A list of "tasks", where each task is
a dictionary containing a single key, "goal", mapping to a
point in 2D space.
|
sample_tasks
|
python
|
rlworkgroup/garage
|
src/garage/envs/point_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/point_env.py
|
MIT
|
def set_task(self, task):
"""Reset with a task.
Args:
task (dict[str, np.ndarray]): A task (a dictionary containing a
single key, "goal", which should be a point in 2D space).
"""
self._task = task
self._goal = task['goal']
|
Reset with a task.
Args:
task (dict[str, np.ndarray]): A task (a dictionary containing a
single key, "goal", which should be a point in 2D space).
|
set_task
|
python
|
rlworkgroup/garage
|
src/garage/envs/point_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/point_env.py
|
MIT
|
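Together, sample_tasks and set_task form the task protocol that meta-RL samplers rely on. A minimal end-to-end sketch using the PointEnv methods shown above; the import path follows the file path of this record:

from garage.envs.point_env import PointEnv

env = PointEnv()
tasks = env.sample_tasks(3)          # [{'goal': array([x, y])}, ...]
for task in tasks:
    env.set_task(task)               # goal used by subsequent episodes
    env.reset()
    es = env.step(env.action_space.sample())
    print(task['goal'], es.env_info['success'])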
def step(self, action):
"""gym.Env step for the active task env.
Args:
action (np.ndarray): Action performed by the agent in the
environment.
Returns:
tuple:
np.ndarray: Agent's observation of the current environment.
float: Amount of reward yielded by previous action.
bool: True iff the episode has ended.
dict[str, np.ndarray]: Contains auxiliary diagnostic
information about this time-step.
"""
es = super().step(action)
if self._task_name is not None:
es.env_info['task_name'] = self._task_name
if self._task_id is not None:
es.env_info['task_id'] = self._task_id
return es
|
gym.Env step for the active task env.
Args:
action (np.ndarray): Action performed by the agent in the
environment.
Returns:
tuple:
np.ndarray: Agent's observation of the current environment.
float: Amount of reward yielded by previous action.
bool: True iff the episode has ended.
dict[str, np.ndarray]: Contains auxiliary diagnostic
information about this time-step.
|
step
|
python
|
rlworkgroup/garage
|
src/garage/envs/task_name_wrapper.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/task_name_wrapper.py
|
MIT
|
def step(self, action):
"""Environment step for the active task env.
Args:
action (np.ndarray): Action performed by the agent in the
environment.
Returns:
EnvStep: The environment step resulting from the action.
"""
es = self._env.step(action)
obs = es.observation
oh_obs = self._obs_with_one_hot(obs)
env_info = es.env_info
env_info['task_id'] = self._task_index
return EnvStep(env_spec=self.spec,
action=action,
reward=es.reward,
observation=oh_obs,
env_info=env_info,
step_type=es.step_type)
|
Environment step for the active task env.
Args:
action (np.ndarray): Action performed by the agent in the
environment.
Returns:
EnvStep: The environment step resulting from the action.
|
step
|
python
|
rlworkgroup/garage
|
src/garage/envs/task_onehot_wrapper.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/task_onehot_wrapper.py
|
MIT
|
def _obs_with_one_hot(self, obs):
"""Concatenate observation and task one-hot.
Args:
obs (numpy.ndarray): observation
Returns:
numpy.ndarray: observation + task one-hot.
"""
one_hot = np.zeros(self._n_total_tasks)
one_hot[self._task_index] = 1.0
return np.concatenate([obs, one_hot])
|
Concatenate observation and task one-hot.
Args:
obs (numpy.ndarray): observation
Returns:
numpy.ndarray: observation + task one-hot.
|
_obs_with_one_hot
|
python
|
rlworkgroup/garage
|
src/garage/envs/task_onehot_wrapper.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/task_onehot_wrapper.py
|
MIT
|
def wrap_env_list(cls, envs):
"""Wrap a list of environments, giving each environment a one-hot.
This is the primary way of constructing instances of this class.
It's mostly useful when training multi-task algorithms using a
multi-task aware sampler.
For example:
'''
.. code-block:: python
envs = get_mt10_envs()
wrapped = TaskOnehotWrapper.wrap_env_list(envs)
sampler = trainer.make_sampler(LocalSampler, env=wrapped)
'''
Args:
envs (list[Environment]): List of environments to wrap. Note
that the
order these environments are passed in determines the value of
their one-hot encoding. It is essential that this list is
always in the same order, or the resulting encodings will be
inconsistent.
Returns:
list[TaskOnehotWrapper]: The wrapped environments.
"""
n_total_tasks = len(envs)
wrapped = []
for i, env in enumerate(envs):
wrapped.append(cls(env, task_index=i, n_total_tasks=n_total_tasks))
return wrapped
|
Wrap a list of environments, giving each environment a one-hot.
This is the primary way of constructing instances of this class.
It's mostly useful when training multi-task algorithms using a
multi-task aware sampler.
For example:
'''
.. code-block:: python
envs = get_mt10_envs()
wrapped = TaskOnehotWrapper.wrap_env_list(envs)
sampler = trainer.make_sampler(LocalSampler, env=wrapped)
'''
Args:
envs (list[Environment]): List of environments to wrap. Note
that the
order these environments are passed in determines the value of
their one-hot encoding. It is essential that this list is
always in the same order, or the resulting encodings will be
inconsistent.
Returns:
list[TaskOnehotWrapper]: The wrapped environments.
|
wrap_env_list
|
python
|
rlworkgroup/garage
|
src/garage/envs/task_onehot_wrapper.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/task_onehot_wrapper.py
|
MIT
|
def wrap_env_cons_list(cls, env_cons):
"""Wrap a list of environment constructors, giving each a one-hot.
This function is useful if you want to avoid constructing any
environments in the main experiment process, and are using a multi-task
aware remote sampler (i.e. `~RaySampler`).
For example:
'''
.. code-block:: python
env_constructors = get_mt10_env_cons()
wrapped = TaskOnehotWrapper.wrap_env_cons_list(env_constructors)
env_updates = [NewEnvUpdate(wrapped_con)
for wrapped_con in wrapped]
sampler = trainer.make_sampler(RaySampler, env=env_updates)
'''
Args:
env_cons (list[Callable[Environment]]): List of environment
constructor
to wrap. Note that the order these constructors are passed in
determines the value of their one-hot encoding. It is essential
that this list is always in the same order, or the resulting
encodings will be inconsistent.
Returns:
list[Callable[TaskOnehotWrapper]]: The wrapped environments.
"""
n_total_tasks = len(env_cons)
wrapped = []
for i, con in enumerate(env_cons):
# Manually capture this value of i by introducing a new scope.
wrapped.append(lambda i=i, con=con: cls(
con(), task_index=i, n_total_tasks=n_total_tasks))
return wrapped
|
Wrap a list of environment constructors, giving each a one-hot.
This function is useful if you want to avoid constructing any
environments in the main experiment process, and are using a multi-task
aware remote sampler (i.e. `~RaySampler`).
For example:
'''
.. code-block:: python
env_constructors = get_mt10_env_cons()
wrapped = TaskOnehotWrapper.wrap_env_cons_list(env_constructors)
env_updates = [NewEnvUpdate(wrapped_con)
for wrapped_con in wrapped]
sampler = trainer.make_sampler(RaySampler, env=env_updates)
'''
Args:
env_cons (list[Callable[Environment]]): List of environment
constructor
to wrap. Note that the order these constructors are passed in
determines the value of their one-hot encoding. It is essential
that this list is always in the same order, or the resulting
encodings will be inconsistent.
Returns:
list[Callable[TaskOnehotWrapper]]: The wrapped environments.
|
wrap_env_cons_list
|
python
|
rlworkgroup/garage
|
src/garage/envs/task_onehot_wrapper.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/task_onehot_wrapper.py
|
MIT
|
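The "manually capture this value of i" comment above guards against Python's late-binding closures. A small self-contained demonstration of the pitfall and of the default-argument fix that wrap_env_cons_list uses:

# Late binding: every lambda sees the final value of i.
broken = [lambda: i for i in range(3)]
print([f() for f in broken])    # [2, 2, 2]

# Default arguments are evaluated at definition time, so each lambda keeps
# its own copy -- the same trick wrap_env_cons_list uses.
fixed = [lambda i=i: i for i in range(3)]
print([f() for f in fixed])     # [0, 1, 2]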
def __init__(self, env, name=None):
"""Create a DMControlEnv.
Args:
env (dm_control.suite.Task): The wrapped dm_control environment.
name (str): Name of the environment.
"""
self._env = env
self._name = name or type(env.task).__name__
self._viewer = None
self._step_cnt = None
self._max_episode_length = 1e12
if self._env._step_limit != np.Inf:
self._max_episode_length = int(self._env._step_limit)
# action space
action_spec = self._env.action_spec()
if (len(action_spec.shape) == 1) and (-np.inf in action_spec.minimum or
np.inf in action_spec.maximum):
self._action_space = akro.Discrete(np.prod(action_spec.shape))
else:
self._action_space = akro.Box(low=action_spec.minimum,
high=action_spec.maximum,
dtype=np.float32)
# observation_space
flat_dim = _flat_shape(self._env.observation_spec())
self._observation_space = akro.Box(low=-np.inf,
high=np.inf,
shape=[flat_dim],
dtype=np.float32)
# spec
self._spec = EnvSpec(action_space=self.action_space,
observation_space=self.observation_space,
max_episode_length=self._max_episode_length)
|
Create a DMControlEnv.
Args:
env (dm_control.suite.Task): The wrapped dm_control environment.
name (str): Name of the environment.
|
__init__
|
python
|
rlworkgroup/garage
|
src/garage/envs/dm_control/dm_control_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/dm_control/dm_control_env.py
|
MIT
|
def step(self, action):
"""Steps the environment with the action and returns a `EnvStep`.
Args:
action (object): input action
Returns:
EnvStep: The environment step resulting from the action.
Raises:
RuntimeError: if `step()` is called after the environment has been
constructed and `reset()` has not been called.
"""
if self._step_cnt is None:
raise RuntimeError('reset() must be called before step()!')
dm_time_step = self._env.step(action)
if self._viewer:
self._viewer.render()
observation = flatten_observation(
dm_time_step.observation)['observations']
self._step_cnt += 1
# Determine step type
step_type = None
if dm_time_step.step_type == dm_StepType.MID:
if self._step_cnt >= self._max_episode_length:
step_type = StepType.TIMEOUT
else:
step_type = StepType.MID
elif dm_time_step.step_type == dm_StepType.LAST:
step_type = StepType.TERMINAL
if step_type in (StepType.TERMINAL, StepType.TIMEOUT):
self._step_cnt = None
return EnvStep(env_spec=self.spec,
action=action,
reward=dm_time_step.reward,
observation=observation,
env_info=dm_time_step.observation,
step_type=step_type)
|
Steps the environment with the action and returns a `EnvStep`.
Args:
action (object): input action
Returns:
EnvStep: The environment step resulting from the action.
Raises:
RuntimeError: if `step()` is called after the environment has been
constructed and `reset()` has not been called.
|
step
|
python
|
rlworkgroup/garage
|
src/garage/envs/dm_control/dm_control_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/dm_control/dm_control_env.py
|
MIT
|
def render(self, mode):
"""Render the environment.
Args:
mode (str): render mode.
Returns:
np.ndarray: if mode is 'rgb_array', else return None.
Raises:
ValueError: if mode is not supported.
"""
self._validate_render_mode(mode)
if mode == 'rgb_array':
return self._env.physics.render()
return None
|
Render the environment.
Args:
mode (str): render mode.
Returns:
np.ndarray: if mode is 'rgb_array', else return None.
Raises:
ValueError: if mode is not supported.
|
render
|
python
|
rlworkgroup/garage
|
src/garage/envs/dm_control/dm_control_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/dm_control/dm_control_env.py
|
MIT
|
def __getstate__(self):
"""See `Object.__getstate__`.
Returns:
dict: dict of the class.
"""
d = self.__dict__.copy()
d['_viewer'] = None
return d
|
See `Object.__getstate__`.
Returns:
dict: dict of the class.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/envs/dm_control/dm_control_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/dm_control/dm_control_env.py
|
MIT
|
def step(self, action):
"""Take one step in the environment.
Equivalent to step in HalfCheetahEnv, but with different rewards.
Args:
action (np.ndarray): The action to take in the environment.
Raises:
ValueError: If the current direction is not 1.0 or -1.0.
Returns:
tuple:
* observation (np.ndarray): The observation of the environment.
* reward (float): The reward acquired at this time step.
* done (boolean): Whether the environment was completed at this
time step. Always False for this environment.
* infos (dict):
* reward_forward (float): Reward for moving, ignoring the
control cost.
* reward_ctrl (float): The reward for acting i.e. the
control cost (always negative).
* task_dir (float): Target direction. 1.0 for forwards,
-1.0 for backwards.
"""
xposbefore = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
xposafter = self.sim.data.qpos[0]
forward_vel = (xposafter - xposbefore) / self.dt
forward_reward = self._task['direction'] * forward_vel
ctrl_cost = 0.5 * 1e-1 * np.sum(np.square(action))
observation = self._get_obs()
reward = forward_reward - ctrl_cost
done = False
if self._task['direction'] == 1.:
            task_name = 'forward'
elif self._task['direction'] == -1.:
task_name = 'backward'
else:
raise ValueError('task direction should be 1. or -1.')
infos = dict(reward_forward=np.asarray([forward_reward]),
reward_ctrl=np.asarray([-ctrl_cost]),
task_dir=np.asarray([self._task['direction']]),
task_name=task_name)
return observation, reward, done, infos
|
Take one step in the environment.
Equivalent to step in HalfCheetahEnv, but with different rewards.
Args:
action (np.ndarray): The action to take in the environment.
Raises:
ValueError: If the current direction is not 1.0 or -1.0.
Returns:
tuple:
* observation (np.ndarray): The observation of the environment.
* reward (float): The reward acquired at this time step.
* done (boolean): Whether the environment was completed at this
time step. Always False for this environment.
* infos (dict):
* reward_forward (float): Reward for moving, ignoring the
control cost.
* reward_ctrl (float): The reward for acting i.e. the
control cost (always negative).
* task_dir (float): Target direction. 1.0 for forwards,
-1.0 for backwards.
|
step
|
python
|
rlworkgroup/garage
|
src/garage/envs/mujoco/half_cheetah_dir_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/mujoco/half_cheetah_dir_env.py
|
MIT
|
def sample_tasks(self, num_tasks):
"""Sample a list of `num_tasks` tasks.
Args:
num_tasks (int): Number of tasks to sample.
Returns:
list[dict[str, float]]: A list of "tasks," where each task is a
dictionary containing a single key, "direction", mapping to -1
or 1.
"""
directions = (
2 * self.np_random.binomial(1, p=0.5, size=(num_tasks, )) - 1)
tasks = [{'direction': direction} for direction in directions]
return tasks
|
Sample a list of `num_tasks` tasks.
Args:
num_tasks (int): Number of tasks to sample.
Returns:
list[dict[str, float]]: A list of "tasks," where each task is a
dictionary containing a single key, "direction", mapping to -1
or 1.
|
sample_tasks
|
python
|
rlworkgroup/garage
|
src/garage/envs/mujoco/half_cheetah_dir_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/mujoco/half_cheetah_dir_env.py
|
MIT
|
def _get_obs(self):
"""Get a low-dimensional observation of the state.
Returns:
np.ndarray: Contains the flattened angle quaternion, angular
velocity quaternion, and cartesian position.
"""
return np.concatenate([
self.sim.data.qpos.flat[1:],
self.sim.data.qvel.flat,
self.get_body_com('torso').flat,
]).astype(np.float32).flatten()
|
Get a low-dimensional observation of the state.
Returns:
np.ndarray: Contains the flattened angle quaternion, angular
velocity quaternion, and cartesian position.
|
_get_obs
|
python
|
rlworkgroup/garage
|
src/garage/envs/mujoco/half_cheetah_env_meta_base.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/mujoco/half_cheetah_env_meta_base.py
|
MIT
|
def step(self, action):
"""Take one step in the environment.
Equivalent to step in HalfCheetahEnv, but with different rewards.
Args:
action (np.ndarray): The action to take in the environment.
Returns:
tuple:
* observation (np.ndarray): The observation of the environment.
* reward (float): The reward acquired at this time step.
* done (boolean): Whether the environment was completed at this
time step. Always False for this environment.
* infos (dict):
* reward_forward (float): Reward for moving, ignoring the
control cost.
* reward_ctrl (float): The reward for acting i.e. the
control cost (always negative).
* task_vel (float): Target velocity.
Usually between 0 and 2.
"""
xposbefore = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
xposafter = self.sim.data.qpos[0]
forward_vel = (xposafter - xposbefore) / self.dt
forward_reward = -1.0 * abs(forward_vel - self._task['velocity'])
ctrl_cost = 0.5 * 1e-1 * np.sum(np.square(action))
observation = self._get_obs()
reward = forward_reward - ctrl_cost
done = False
infos = dict(reward_forward=np.asarray([forward_reward]),
reward_ctrl=np.asarray([-ctrl_cost]),
task_vel=np.asarray([self._task['velocity']]))
return observation, reward, done, infos
|
Take one step in the environment.
Equivalent to step in HalfCheetahEnv, but with different rewards.
Args:
action (np.ndarray): The action to take in the environment.
Returns:
tuple:
* observation (np.ndarray): The observation of the environment.
* reward (float): The reward acquired at this time step.
* done (boolean): Whether the environment was completed at this
time step. Always False for this environment.
* infos (dict):
* reward_forward (float): Reward for moving, ignoring the
control cost.
* reward_ctrl (float): The reward for acting i.e. the
control cost (always negative).
* task_vel (float): Target velocity.
Usually between 0 and 2.
|
step
|
python
|
rlworkgroup/garage
|
src/garage/envs/mujoco/half_cheetah_vel_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/mujoco/half_cheetah_vel_env.py
|
MIT
|
def sample_tasks(self, num_tasks):
"""Sample a list of `num_tasks` tasks.
Args:
num_tasks (int): Number of tasks to sample.
Returns:
list[dict[str, float]]: A list of "tasks," where each task is a
dictionary containing a single key, "velocity", mapping to a
value between 0 and 2.
"""
velocities = self.np_random.uniform(0.0, 2.0, size=(num_tasks, ))
tasks = [{'velocity': velocity} for velocity in velocities]
return tasks
|
Sample a list of `num_tasks` tasks.
Args:
num_tasks (int): Number of tasks to sample.
Returns:
list[dict[str, float]]: A list of "tasks," where each task is a
dictionary containing a single key, "velocity", mapping to a
value between 0 and 2.
|
sample_tasks
|
python
|
rlworkgroup/garage
|
src/garage/envs/mujoco/half_cheetah_vel_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/mujoco/half_cheetah_vel_env.py
|
MIT
|
def reset(self, **kwargs):
"""
gym.Env reset function.
        Perform a real reset only when the underlying episode has actually
        ended; otherwise take a no-op step and keep the remaining lives.
"""
if self._was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step
obs, _, _, _ = self.env.step(0)
self._lives = self.env.unwrapped.ale.lives()
return obs
|
gym.Env reset function.
    Perform a real reset only when the underlying episode has actually
    ended; otherwise take a no-op step and keep the remaining lives.
|
reset
|
python
|
rlworkgroup/garage
|
src/garage/envs/wrappers/episodic_life.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/wrappers/episodic_life.py
|
MIT
|
def reset(self, **kwargs):
"""gym.Env reset function.
Args:
kwargs (dict): extra arguments passed to gym.Env.reset()
Returns:
np.ndarray: next observation.
"""
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
|
gym.Env reset function.
Args:
kwargs (dict): extra arguments passed to gym.Env.reset()
Returns:
np.ndarray: next observation.
|
reset
|
python
|
rlworkgroup/garage
|
src/garage/envs/wrappers/fire_reset.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/wrappers/fire_reset.py
|
MIT
|
def step(self, action):
"""See gym.Env.
Args:
action (np.ndarray): Action conforming to action_space
Returns:
np.ndarray: Observation conforming to observation_space
float: Reward for this step
bool: Termination signal
dict: Extra information from the environment.
"""
obs, reward, done, info = self.env.step(action)
return _color_to_grayscale(obs), reward, done, info
|
See gym.Env.
Args:
action (np.ndarray): Action conforming to action_space
Returns:
np.ndarray: Observation conforming to observation_space
float: Reward for this step
bool: Termination signal
dict: Extra information from the environment.
|
step
|
python
|
rlworkgroup/garage
|
src/garage/envs/wrappers/grayscale.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/wrappers/grayscale.py
|
MIT
|
def _color_to_grayscale(obs):
"""Convert a 3-channel color observation image to grayscale and uint8.
Args:
obs (np.ndarray): Observation array, conforming to observation_space
Returns:
np.ndarray: 1-channel grayscale version of obs, represented as uint8
"""
with warnings.catch_warnings():
# Suppressing warning for possible precision loss when converting
# from float64 to uint8
warnings.simplefilter('ignore')
        return img_as_ubyte(color.rgb2gray(obs))
|
Convert a 3-channel color observation image to grayscale and uint8.
Args:
obs (np.ndarray): Observation array, conforming to observation_space
Returns:
np.ndarray: 1-channel grayscale version of obs, represented as uint8
|
_color_to_grayscale
|
python
|
rlworkgroup/garage
|
src/garage/envs/wrappers/grayscale.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/wrappers/grayscale.py
|
MIT
|
def step(self, action):
"""Repeat action, sum reward, and max over last two observations.
Args:
action (int): action to take in the atari environment.
Returns:
            np.ndarray: observation of shape :math:`(O*,)` representing
                the max values over the last two observations.
float: Reward for this step
bool: Termination signal
dict: Extra information from the environment.
"""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2:
self._obs_buffer[0] = obs
elif i == self._skip - 1:
self._obs_buffer[1] = obs
total_reward += reward
if done:
break
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
|
Repeat action, sum reward, and max over last two observations.
Args:
action (int): action to take in the atari environment.
Returns:
        np.ndarray: observation of shape :math:`(O*,)` representing
            the max values over the last two observations.
float: Reward for this step
bool: Termination signal
dict: Extra information from the environment.
|
step
|
python
|
rlworkgroup/garage
|
src/garage/envs/wrappers/max_and_skip.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/wrappers/max_and_skip.py
|
MIT
|
def step(self, action):
"""gym.Env step function.
        Performs one action step in the environment.
Args:
action (np.ndarray): Action of shape :math:`(A*, )`
to pass to the environment.
Returns:
np.ndarray: Pixel observation of shape :math:`(O*, )`
from the wrapped environment.
float : Amount of reward returned after previous action.
bool : Whether the episode has ended, in which case further step()
calls will return undefined results.
dict: Contains auxiliary diagnostic information (helpful for
debugging, and sometimes learning).
"""
obs, reward, done, info = self.env.step(action)
return obs['pixels'], reward, done, info
|
gym.Env step function.
    Performs one action step in the environment.
Args:
action (np.ndarray): Action of shape :math:`(A*, )`
to pass to the environment.
Returns:
np.ndarray: Pixel observation of shape :math:`(O*, )`
from the wrapped environment.
float : Amount of reward returned after previous action.
bool : Whether the episode has ended, in which case further step()
calls will return undefined results.
dict: Contains auxiliary diagnostic information (helpful for
debugging, and sometimes learning).
|
step
|
python
|
rlworkgroup/garage
|
src/garage/envs/wrappers/pixel_observation.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/wrappers/pixel_observation.py
|
MIT
|
def reset(self):
"""gym.Env reset function.
Returns:
            np.ndarray: The initial frame-stacked observation, conforming
                to observation_space.
"""
observation = self.env.reset()
self._frames.clear()
for _ in range(self._n_frames):
self._frames.append(observation)
return self._stack_frames()
|
gym.Env reset function.
Returns:
        np.ndarray: The initial frame-stacked observation, conforming
            to observation_space.
|
reset
|
python
|
rlworkgroup/garage
|
src/garage/envs/wrappers/stack_frames.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/wrappers/stack_frames.py
|
MIT
|
def step(self, action):
"""gym.Env step function.
Args:
action (int): index of the action to take.
Returns:
np.ndarray: Observation conforming to observation_space
float: Reward for this step
bool: Termination signal
dict: Extra information from the environment.
"""
new_observation, reward, done, info = self.env.step(action)
self._frames.append(new_observation)
return self._stack_frames(), reward, done, info
|
gym.Env step function.
Args:
action (int): index of the action to take.
Returns:
np.ndarray: Observation conforming to observation_space
float: Reward for this step
bool: Termination signal
dict: Extra information from the environment.
|
step
|
python
|
rlworkgroup/garage
|
src/garage/envs/wrappers/stack_frames.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/envs/wrappers/stack_frames.py
|
MIT
|
def query_yes_no(question, default='yes'):
"""Ask a yes/no question via raw_input() and return their answer.
Args:
question (str): Printed to user.
default (str or None): Default if user just hits enter.
Raises:
ValueError: If the provided default is invalid.
Returns:
        bool: True for "yes" answers, False for "no".
"""
valid = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False}
if default is None:
prompt = ' [y/n] '
elif default == 'yes':
prompt = ' [Y/n] '
elif default == 'no':
prompt = ' [y/N] '
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
|
    Ask a yes/no question via input() and return the user's answer.
Args:
question (str): Printed to user.
default (str or None): Default if user just hits enter.
Raises:
ValueError: If the provided default is invalid.
Returns:
        bool: True for "yes" answers, False for "no".
|
query_yes_no
|
python
|
rlworkgroup/garage
|
src/garage/examples/sim_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/sim_policy.py
|
MIT
|
def step_bullet_kuka_env(n_steps=1000):
"""Load, step, and visualize a Bullet Kuka environment.
Args:
n_steps (int): number of steps to run.
"""
# Construct the environment
env = GymEnv(gym.make('KukaBulletEnv-v0', renders=True, isDiscrete=True))
# Reset the environment and launch the viewer
env.reset()
env.visualize()
step_count = 0
es = env.step(env.action_space.sample())
while not es.last and step_count < n_steps:
es = env.step(env.action_space.sample())
step_count += 1
|
Load, step, and visualize a Bullet Kuka environment.
Args:
n_steps (int): number of steps to run.
|
step_bullet_kuka_env
|
python
|
rlworkgroup/garage
|
src/garage/examples/step_bullet_kuka_env.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/step_bullet_kuka_env.py
|
MIT
|
def cem_cartpole(ctxt=None, seed=1):
"""Train CEM with Cartpole-v1 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with TFTrainer(snapshot_config=ctxt) as trainer:
env = GymEnv('CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
n_samples = 20
sampler = LocalSampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True)
algo = CEM(env_spec=env.spec,
policy=policy,
sampler=sampler,
best_frac=0.05,
n_samples=n_samples)
trainer.setup(algo, env)
trainer.train(n_epochs=100, batch_size=1000)
|
Train CEM with Cartpole-v1 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
cem_cartpole
|
python
|
rlworkgroup/garage
|
src/garage/examples/np/cem_cartpole.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/np/cem_cartpole.py
|
MIT
|
def cma_es_cartpole(ctxt=None, seed=1):
"""Train CMA_ES with Cartpole-v1 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with TFTrainer(ctxt) as trainer:
env = GymEnv('CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
n_samples = 20
sampler = LocalSampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True)
algo = CMAES(env_spec=env.spec,
policy=policy,
sampler=sampler,
n_samples=n_samples)
trainer.setup(algo, env)
trainer.train(n_epochs=100, batch_size=1000)
|
Train CMA_ES with Cartpole-v1 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
cma_es_cartpole
|
python
|
rlworkgroup/garage
|
src/garage/examples/np/cma_es_cartpole.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/np/cma_es_cartpole.py
|
MIT
|
def train(self, trainer):
"""Get samples and train the policy.
Args:
trainer (Trainer): Trainer.
"""
for epoch in trainer.step_epochs():
samples = trainer.obtain_samples(epoch)
log_performance(epoch,
EpisodeBatch.from_list(self.env_spec, samples),
self._discount)
self._train_once(epoch, samples)
|
Get samples and train the policy.
Args:
trainer (Trainer): Trainer.
|
train
|
python
|
rlworkgroup/garage
|
src/garage/examples/np/tutorial_cem.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/np/tutorial_cem.py
|
MIT
|
def _train_once(self, epoch, paths):
"""Perform one step of policy optimization given one batch of samples.
Args:
epoch (int): Iteration number.
paths (list[dict]): A list of collected paths.
Returns:
float: The average return of epoch cycle.
"""
returns = []
for path in paths:
returns.append(discount_cumsum(path['rewards'], self._discount))
avg_return = np.mean(np.concatenate(returns))
self._all_avg_returns.append(avg_return)
if (epoch + 1) % self._n_samples == 0:
avg_rtns = np.array(self._all_avg_returns)
best_inds = np.argsort(-avg_rtns)[:self._n_best]
best_params = np.array(self._all_params)[best_inds]
self._cur_mean = best_params.mean(axis=0)
self._cur_std = best_params.std(axis=0)
self.policy.set_param_values(self._cur_mean)
avg_return = max(self._all_avg_returns)
self._all_avg_returns.clear()
self._all_params.clear()
self._cur_params = self._sample_params(epoch)
self._all_params.append(self._cur_params.copy())
self.policy.set_param_values(self._cur_params)
return avg_return
|
Perform one step of policy optimization given one batch of samples.
Args:
epoch (int): Iteration number.
paths (list[dict]): A list of collected paths.
Returns:
float: The average return of epoch cycle.
|
_train_once
|
python
|
rlworkgroup/garage
|
src/garage/examples/np/tutorial_cem.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/np/tutorial_cem.py
|
MIT
|
def _sample_params(self, epoch):
"""Return sample parameters.
Args:
epoch (int): Epoch number.
Returns:
np.ndarray: A numpy array of parameter values.
"""
extra_var_mult = max(1.0 - epoch / self._extra_decay_time, 0)
sample_std = np.sqrt(
np.square(self._cur_std) +
np.square(self._extra_std) * extra_var_mult)
return np.random.standard_normal(len(
self._cur_mean)) * sample_std + self._cur_mean
|
Return sample parameters.
Args:
epoch (int): Epoch number.
Returns:
np.ndarray: A numpy array of parameter values.
|
_sample_params
|
python
|
rlworkgroup/garage
|
src/garage/examples/np/tutorial_cem.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/np/tutorial_cem.py
|
MIT
|
def tutorial_cem(ctxt=None):
"""Train CEM with Cartpole-v1 environment.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
"""
set_seed(100)
with TFTrainer(ctxt) as trainer:
env = GymEnv('CartPole-v1')
policy = CategoricalMLPPolicy(env.spec)
sampler = LocalSampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True)
algo = SimpleCEM(env.spec, policy, sampler)
trainer.setup(algo, env)
trainer.train(n_epochs=100, batch_size=1000)
|
Train CEM with Cartpole-v1 environment.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
|
tutorial_cem
|
python
|
rlworkgroup/garage
|
src/garage/examples/np/tutorial_cem.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/np/tutorial_cem.py
|
MIT
|
def ddpg_pendulum(ctxt=None, seed=1):
"""Train DDPG with InvertedDoublePendulum-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with TFTrainer(snapshot_config=ctxt) as trainer:
env = GymEnv('InvertedDoublePendulum-v2')
policy = ContinuousMLPPolicy(env_spec=env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=tf.nn.tanh)
exploration_policy = AddOrnsteinUhlenbeckNoise(env.spec,
policy,
sigma=0.2)
qf = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=tf.nn.relu)
replay_buffer = PathBuffer(capacity_in_transitions=int(1e6))
sampler = LocalSampler(agents=exploration_policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True,
worker_class=FragmentWorker)
ddpg = DDPG(env_spec=env.spec,
policy=policy,
policy_lr=1e-4,
qf_lr=1e-3,
qf=qf,
replay_buffer=replay_buffer,
sampler=sampler,
steps_per_epoch=20,
target_update_tau=1e-2,
n_train_steps=50,
discount=0.9,
min_buffer_size=int(1e4),
exploration_policy=exploration_policy,
policy_optimizer=tf.compat.v1.train.AdamOptimizer,
qf_optimizer=tf.compat.v1.train.AdamOptimizer)
trainer.setup(algo=ddpg, env=env)
trainer.train(n_epochs=500, batch_size=100)
|
Train DDPG with InvertedDoublePendulum-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
ddpg_pendulum
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/ddpg_pendulum.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/ddpg_pendulum.py
|
MIT
|
def dqn_cartpole(ctxt=None, seed=1):
"""Train TRPO with CubeCrash-v0 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with TFTrainer(ctxt) as trainer:
n_epochs = 10
steps_per_epoch = 10
sampler_batch_size = 500
num_timesteps = n_epochs * steps_per_epoch * sampler_batch_size
env = GymEnv('CartPole-v0')
replay_buffer = PathBuffer(capacity_in_transitions=int(1e4))
qf = DiscreteMLPQFunction(env_spec=env.spec, hidden_sizes=(64, 64))
policy = DiscreteQFArgmaxPolicy(env_spec=env.spec, qf=qf)
exploration_policy = EpsilonGreedyPolicy(env_spec=env.spec,
policy=policy,
total_timesteps=num_timesteps,
max_epsilon=1.0,
min_epsilon=0.02,
decay_ratio=0.1)
sampler = LocalSampler(agents=exploration_policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True,
worker_class=FragmentWorker)
algo = DQN(env_spec=env.spec,
policy=policy,
qf=qf,
exploration_policy=exploration_policy,
replay_buffer=replay_buffer,
sampler=sampler,
steps_per_epoch=steps_per_epoch,
qf_lr=1e-4,
discount=1.0,
min_buffer_size=int(1e3),
double_q=True,
n_train_steps=500,
target_network_update_freq=1,
buffer_batch_size=32)
trainer.setup(algo, env)
trainer.train(n_epochs=n_epochs, batch_size=sampler_batch_size)
|
    Train DQN with CartPole-v0 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
dqn_cartpole
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/dqn_cartpole.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/dqn_cartpole.py
|
MIT
|
def dqn_pong(ctxt=None, seed=1, buffer_size=int(5e4), max_episode_length=500):
"""Train DQN on PongNoFrameskip-v4 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
buffer_size (int): Number of timesteps to store in replay buffer.
max_episode_length (int): Maximum length of an episode, after which an
episode is considered complete. This is used during testing to
minimize the memory required to store a single episode.
"""
set_seed(seed)
with TFTrainer(ctxt) as trainer:
n_epochs = 100
steps_per_epoch = 20
sampler_batch_size = 500
num_timesteps = n_epochs * steps_per_epoch * sampler_batch_size
env = gym.make('PongNoFrameskip-v4')
env = env.unwrapped
env = Noop(env, noop_max=30)
env = MaxAndSkip(env, skip=4)
env = EpisodicLife(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireReset(env)
env = Grayscale(env)
env = Resize(env, 84, 84)
env = ClipReward(env)
env = StackFrames(env, 4)
env = GymEnv(env, is_image=True, max_episode_length=max_episode_length)
replay_buffer = PathBuffer(capacity_in_transitions=buffer_size)
qf = DiscreteCNNQFunction(env_spec=env.spec,
filters=(
(32, (8, 8)),
(64, (4, 4)),
(64, (3, 3)),
),
strides=(4, 2, 1),
dueling=False) # yapf: disable
policy = DiscreteQFArgmaxPolicy(env_spec=env.spec, qf=qf)
exploration_policy = EpsilonGreedyPolicy(env_spec=env.spec,
policy=policy,
total_timesteps=num_timesteps,
max_epsilon=1.0,
min_epsilon=0.02,
decay_ratio=0.1)
sampler = LocalSampler(agents=exploration_policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True,
worker_class=FragmentWorker)
algo = DQN(env_spec=env.spec,
policy=policy,
qf=qf,
exploration_policy=exploration_policy,
replay_buffer=replay_buffer,
sampler=sampler,
qf_lr=1e-4,
discount=0.99,
min_buffer_size=int(1e4),
double_q=False,
n_train_steps=500,
steps_per_epoch=steps_per_epoch,
target_network_update_freq=2,
buffer_batch_size=32)
trainer.setup(algo, env)
trainer.train(n_epochs=n_epochs, batch_size=sampler_batch_size)
|
Train DQN on PongNoFrameskip-v4 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
buffer_size (int): Number of timesteps to store in replay buffer.
max_episode_length (int): Maximum length of an episode, after which an
episode is considered complete. This is used during testing to
minimize the memory required to store a single episode.
|
dqn_pong
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/dqn_pong.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/dqn_pong.py
|
MIT
|
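Unlike the CartPole launcher, dqn_pong exposes buffer_size and max_episode_length as keyword arguments, so memory use can be reduced without editing the file. A hedged sketch of calling it with smaller settings follows; the wrap_experiment wrapper and all numeric values are illustrative assumptions.
from garage import wrap_experiment

# Assumes the dqn_pong launcher above is defined in the same module.
run_dqn_pong = wrap_experiment(dqn_pong)

if __name__ == '__main__':
    # Shrink the replay buffer and cap episode length to fit a small
    # machine; these numbers are illustrative, not the example's defaults.
    run_dqn_pong(seed=24, buffer_size=int(1e4), max_episode_length=200)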
def erwr_cartpole(ctxt=None, seed=1):
"""Train with ERWR on CartPole-v1 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with TFTrainer(snapshot_config=ctxt) as trainer:
env = GymEnv('CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True)
algo = ERWR(env_spec=env.spec,
policy=policy,
baseline=baseline,
sampler=sampler,
discount=0.99)
trainer.setup(algo=algo, env=env)
trainer.train(n_epochs=100, batch_size=10000, plot=False)
|
Train with ERWR on CartPole-v1 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
erwr_cartpole
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/erwr_cartpole.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/erwr_cartpole.py
|
MIT
|
def her_ddpg_fetchreach(ctxt=None, seed=1):
"""Train DDPG + HER on the goal-conditioned FetchReach env.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with TFTrainer(snapshot_config=ctxt) as trainer:
env = GymEnv('FetchReach-v1')
policy = ContinuousMLPPolicy(
env_spec=env.spec,
name='Policy',
hidden_sizes=[256, 256, 256],
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=tf.nn.tanh,
)
exploration_policy = AddOrnsteinUhlenbeckNoise(env.spec,
policy,
sigma=0.2)
qf = ContinuousMLPQFunction(
env_spec=env.spec,
name='QFunction',
hidden_sizes=[256, 256, 256],
hidden_nonlinearity=tf.nn.relu,
)
# pylint: disable=no-member
replay_buffer = HERReplayBuffer(capacity_in_transitions=int(1e6),
replay_k=4,
reward_fn=env.compute_reward,
env_spec=env.spec)
sampler = LocalSampler(agents=exploration_policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True,
worker_class=FragmentWorker)
ddpg = DDPG(
env_spec=env.spec,
policy=policy,
policy_lr=1e-3,
qf_lr=1e-3,
qf=qf,
replay_buffer=replay_buffer,
sampler=sampler,
target_update_tau=0.01,
steps_per_epoch=50,
n_train_steps=40,
discount=0.95,
exploration_policy=exploration_policy,
policy_optimizer=tf.compat.v1.train.AdamOptimizer,
qf_optimizer=tf.compat.v1.train.AdamOptimizer,
buffer_batch_size=256,
)
trainer.setup(algo=ddpg, env=env)
trainer.train(n_epochs=50, batch_size=256)
|
Train DDPG + HER on the goal-conditioned FetchReach env.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
her_ddpg_fetchreach
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/her_ddpg_fetchreach.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/her_ddpg_fetchreach.py
|
MIT
|
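In the launcher above, HERReplayBuffer's replay_k sets how many hindsight-relabeled transitions accompany each real one, which in turn determines the share of relabeled goals in a sampled minibatch. The small worked sketch below assumes the "future" relabeling convention from the HER paper; the buffer's exact internal sampling scheme is not shown in the record.
# replay_k hindsight transitions are added per stored transition, so the
# expected fraction of relabeled goals in a minibatch is k / (k + 1).
replay_k = 4
her_fraction = replay_k / (replay_k + 1)
assert abs(her_fraction - 0.8) < 1e-9  # ~80% relabeled at replay_k=4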
def multi_env_ppo(ctxt=None, seed=1):
"""Train PPO on two Atari environments simultaneously.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with TFTrainer(ctxt) as trainer:
env1 = normalize(GymEnv('Adventure-ram-v4'))
env2 = normalize(GymEnv('Alien-ram-v4'))
env = MultiEnvWrapper([env1, env2])
policy = CategoricalMLPPolicy(
env_spec=env.spec,
hidden_nonlinearity=tf.nn.tanh,
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True)
algo = PPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
sampler=sampler,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
policy_ent_coeff=0.0,
optimizer_args=dict(
batch_size=32,
max_optimization_epochs=10,
learning_rate=1e-3,
))
trainer.setup(algo, env)
trainer.train(n_epochs=120, batch_size=2048, plot=False)
|
Train PPO on two Atari environments simultaneously.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
multi_env_ppo
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/multi_env_ppo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/multi_env_ppo.py
|
MIT
|
def multi_env_trpo(ctxt=None, seed=1):
"""Train TRPO on two different PointEnv instances.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with TFTrainer(ctxt) as trainer:
env1 = normalize(PointEnv(goal=(-1., 0.), max_episode_length=100))
env2 = normalize(PointEnv(goal=(1., 0.), max_episode_length=100))
env = MultiEnvWrapper([env1, env2])
policy = GaussianMLPPolicy(env_spec=env.spec)
baseline = LinearFeatureBaseline(env_spec=env.spec)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True)
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
sampler=sampler,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
policy_ent_coeff=0.0)
trainer.setup(algo, env)
trainer.train(n_epochs=40, batch_size=2048, plot=False)
|
Train TRPO on two different PointEnv instances.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
multi_env_trpo
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/multi_env_trpo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/multi_env_trpo.py
|
MIT
|
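Both multi-env launchers above rely on MultiEnvWrapper to present several environments as a single one; they leave the task-switching strategy at its default, while the te_ppo_mt10 record further down passes round_robin_strategy explicitly. Below is a hedged sketch of spelling that option out for the two Atari RAM tasks from multi_env_ppo; the import paths follow the te_ppo launcher's usage and are otherwise assumptions.
from garage.envs import GymEnv, normalize
from garage.envs.multi_env_wrapper import (MultiEnvWrapper,
                                           round_robin_strategy)

# Same two Atari RAM tasks as multi_env_ppo, but with the env-switching
# strategy made explicit instead of relying on the wrapper's default.
env = MultiEnvWrapper(
    [normalize(GymEnv('Adventure-ram-v4')),
     normalize(GymEnv('Alien-ram-v4'))],
    sample_strategy=round_robin_strategy)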
def ppo_memorize_digits(ctxt=None,
seed=1,
batch_size=4000,
max_episode_length=100):
"""Train PPO on MemorizeDigits-v0 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
batch_size (int): Number of timesteps to use in each training step.
max_episode_length (int): Max number of timesteps in an episode.
"""
set_seed(seed)
with TFTrainer(ctxt) as trainer:
env = normalize(
GymEnv('MemorizeDigits-v0',
is_image=True,
max_episode_length=max_episode_length))
policy = CategoricalCNNPolicy(env_spec=env.spec,
filters=(
(32, (5, 5)),
(64, (3, 3)),
(64, (2, 2)),
),
strides=(4, 2, 1),
padding='VALID',
hidden_sizes=(256, )) # yapf: disable
baseline = GaussianCNNBaseline(
env_spec=env.spec,
filters=(
(32, (5, 5)),
(64, (3, 3)),
(64, (2, 2)),
),
strides=(4, 2, 1),
padding='VALID',
hidden_sizes=(256, ),
use_trust_region=True) # yapf: disable
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True)
algo = PPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
sampler=sampler,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
policy_ent_coeff=0.0,
optimizer_args=dict(
batch_size=32,
max_optimization_epochs=10,
learning_rate=1e-3,
))
trainer.setup(algo, env)
trainer.train(n_epochs=1000, batch_size=batch_size)
|
Train PPO on MemorizeDigits-v0 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
batch_size (int): Number of timesteps to use in each training step.
max_episode_length (int): Max number of timesteps in an episode.
|
ppo_memorize_digits
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/ppo_memorize_digits.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/ppo_memorize_digits.py
|
MIT
|
def ppo_pendulum(ctxt=None, seed=1):
"""Train PPO with InvertedDoublePendulum-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with TFTrainer(snapshot_config=ctxt) as trainer:
env = normalize(GymEnv('InvertedDoublePendulum-v2'))
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=tf.nn.tanh,
output_nonlinearity=None,
)
baseline = GaussianMLPBaseline(
env_spec=env.spec,
hidden_sizes=(32, 32),
use_trust_region=True,
)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True)
# NOTE: make sure when setting entropy_method to 'max', set
# center_adv to False and turn off policy gradient. See
# tf.algos.NPO for detailed documentation.
algo = PPO(
env_spec=env.spec,
policy=policy,
baseline=baseline,
sampler=sampler,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
optimizer_args=dict(
batch_size=32,
max_optimization_epochs=10,
),
stop_entropy_gradient=True,
entropy_method='max',
policy_ent_coeff=0.02,
center_adv=False,
)
trainer.setup(algo, env)
trainer.train(n_epochs=120, batch_size=2048, plot=False)
|
Train PPO with InvertedDoublePendulum-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
ppo_pendulum
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/ppo_pendulum.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/ppo_pendulum.py
|
MIT
|
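The NOTE inside ppo_pendulum points at a coupling in the TF NPO/PPO entropy settings: with entropy_method='max' the bonus is folded into the returns, so advantages must not be re-centered and the entropy gradient is stopped. The sketch below contrasts that configuration with plain entropy regularization; the 'regularized' variant and its settings are an assumption based on tf.algos.NPO's documented options, and PPO, env, policy, baseline, and sampler are taken from the launcher body above.
# Maximum-entropy configuration, exactly as in the example above.
max_ent_args = dict(entropy_method='max',
                    stop_entropy_gradient=True,  # bonus enters the returns,
                    center_adv=False,            # so do not re-center advantages
                    policy_ent_coeff=0.02)

# Assumed alternative: the entropy term enters the surrogate loss instead,
# which leaves advantage centering on.
regularized_args = dict(entropy_method='regularized',
                        stop_entropy_gradient=False,
                        center_adv=True,
                        policy_ent_coeff=0.01)

algo = PPO(env_spec=env.spec, policy=policy, baseline=baseline,
           sampler=sampler, discount=0.99, gae_lambda=0.95,
           lr_clip_range=0.2, **max_ent_args)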
def reps_gym_cartpole(ctxt=None, seed=1):
"""Train REPS with CartPole-v0 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with TFTrainer(snapshot_config=ctxt) as trainer:
env = GymEnv('CartPole-v0')
policy = CategoricalMLPPolicy(env_spec=env.spec, hidden_sizes=[32, 32])
baseline = LinearFeatureBaseline(env_spec=env.spec)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True)
algo = REPS(env_spec=env.spec,
policy=policy,
baseline=baseline,
sampler=sampler,
discount=0.99)
trainer.setup(algo, env)
trainer.train(n_epochs=100, batch_size=4000, plot=False)
|
Train REPS with CartPole-v0 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
reps_gym_cartpole
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/reps_gym_cartpole.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/reps_gym_cartpole.py
|
MIT
|
def rl2_ppo_halfcheetah(ctxt, seed, max_episode_length, meta_batch_size,
n_epochs, episode_per_task):
"""Train PPO with HalfCheetah environment.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
max_episode_length (int): Maximum length of a single episode.
meta_batch_size (int): Meta batch size.
n_epochs (int): Total number of epochs for training.
        episode_per_task (int): Number of training episodes per task.
"""
set_seed(seed)
with TFTrainer(snapshot_config=ctxt) as trainer:
tasks = task_sampler.SetTaskSampler(
HalfCheetahVelEnv,
wrapper=lambda env, _: RL2Env(
GymEnv(env, max_episode_length=max_episode_length)))
env_spec = RL2Env(
GymEnv(HalfCheetahVelEnv(),
max_episode_length=max_episode_length)).spec
policy = GaussianGRUPolicy(name='policy',
hidden_dim=64,
env_spec=env_spec,
state_include_action=False)
baseline = LinearFeatureBaseline(env_spec=env_spec)
envs = tasks.sample(meta_batch_size)
sampler = LocalSampler(
agents=policy,
envs=envs,
max_episode_length=env_spec.max_episode_length,
is_tf_worker=True,
n_workers=meta_batch_size,
worker_class=RL2Worker,
worker_args=dict(n_episodes_per_trial=episode_per_task))
algo = RL2PPO(meta_batch_size=meta_batch_size,
task_sampler=tasks,
env_spec=env_spec,
policy=policy,
baseline=baseline,
sampler=sampler,
episodes_per_trial=episode_per_task,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
optimizer_args=dict(
batch_size=32,
max_optimization_epochs=10,
),
stop_entropy_gradient=True,
entropy_method='max',
policy_ent_coeff=0.02,
center_adv=False)
trainer.setup(algo, envs)
trainer.train(n_epochs=n_epochs,
batch_size=episode_per_task * max_episode_length *
meta_batch_size)
|
Train PPO with HalfCheetah environment.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
max_episode_length (int): Maximum length of a single episode.
meta_batch_size (int): Meta batch size.
n_epochs (int): Total number of epochs for training.
        episode_per_task (int): Number of training episodes per task.
|
rl2_ppo_halfcheetah
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/rl2_ppo_halfcheetah.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/rl2_ppo_halfcheetah.py
|
MIT
|
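The batch_size handed to trainer.train in the RL2 launchers is not a free hyperparameter: it is exactly the number of transitions one meta-iteration gathers, episode_per_task * max_episode_length * meta_batch_size. A short worked example with illustrative values follows; the script's CLI defaults are not part of the record above.
# Illustrative values only; the launcher's click defaults are not shown.
meta_batch_size = 10      # tasks sampled per meta-iteration
episode_per_task = 4      # episodes per RL2 trial on each task
max_episode_length = 150  # steps per episode

# One call to trainer.train(..., batch_size=...) therefore consumes:
batch_size = episode_per_task * max_episode_length * meta_batch_size
assert batch_size == 6000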
def rl2_ppo_halfcheetah_meta_test(ctxt, seed, meta_batch_size, n_epochs,
episode_per_task):
"""Perform meta-testing on RL2PPO with HalfCheetah environment.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
seed (int): Used to seed the random number generator to produce
determinism.
meta_batch_size (int): Meta batch size.
n_epochs (int): Total number of epochs for training.
        episode_per_task (int): Number of training episodes per task.
"""
set_seed(seed)
with TFTrainer(snapshot_config=ctxt) as trainer:
max_episode_length = 150
tasks = task_sampler.SetTaskSampler(
HalfCheetahVelEnv,
wrapper=lambda env, _: RL2Env(
GymEnv(env, max_episode_length=max_episode_length)))
env_spec = RL2Env(
GymEnv(HalfCheetahVelEnv(),
max_episode_length=max_episode_length)).spec
policy = GaussianGRUPolicy(name='policy',
hidden_dim=64,
env_spec=env_spec,
state_include_action=False)
baseline = LinearFeatureBaseline(env_spec=env_spec)
meta_evaluator = MetaEvaluator(test_task_sampler=tasks,
n_exploration_eps=10,
n_test_episodes=10,
n_test_tasks=5)
envs = tasks.sample(meta_batch_size)
sampler = LocalSampler(
agents=policy,
envs=envs,
max_episode_length=env_spec.max_episode_length,
is_tf_worker=True,
n_workers=meta_batch_size,
worker_class=RL2Worker,
worker_args=dict(n_episodes_per_trial=episode_per_task))
algo = RL2PPO(meta_batch_size=meta_batch_size,
task_sampler=tasks,
env_spec=env_spec,
policy=policy,
baseline=baseline,
sampler=sampler,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
optimizer_args=dict(
batch_size=32,
max_optimization_epochs=10,
),
stop_entropy_gradient=True,
entropy_method='max',
policy_ent_coeff=0.02,
center_adv=False,
episodes_per_trial=episode_per_task,
meta_evaluator=meta_evaluator,
n_epochs_per_eval=10)
trainer.setup(algo, envs)
trainer.train(n_epochs=n_epochs,
batch_size=episode_per_task * max_episode_length *
meta_batch_size)
|
Perform meta-testing on RL2PPO with HalfCheetah environment.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
seed (int): Used to seed the random number generator to produce
determinism.
meta_batch_size (int): Meta batch size.
n_epochs (int): Total number of epochs for training.
        episode_per_task (int): Number of training episodes per task.
|
rl2_ppo_halfcheetah_meta_test
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/rl2_ppo_halfcheetah_meta_test.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/rl2_ppo_halfcheetah_meta_test.py
|
MIT
|
def rl2_ppo_metaworld_ml10(ctxt, seed, meta_batch_size, n_epochs,
episode_per_task):
"""Train RL2 PPO with ML10 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
meta_batch_size (int): Meta batch size.
n_epochs (int): Total number of epochs for training.
        episode_per_task (int): Number of training episodes per task.
"""
set_seed(seed)
with TFTrainer(snapshot_config=ctxt) as trainer:
ml10 = metaworld.ML10()
tasks = MetaWorldTaskSampler(ml10, 'train', lambda env, _: RL2Env(env))
test_task_sampler = SetTaskSampler(MetaWorldSetTaskEnv,
env=MetaWorldSetTaskEnv(
ml10, 'test'),
wrapper=lambda env, _: RL2Env(env))
meta_evaluator = MetaEvaluator(test_task_sampler=test_task_sampler)
env_updates = tasks.sample(10)
env = env_updates[0]()
env_spec = env.spec
policy = GaussianGRUPolicy(name='policy',
hidden_dim=64,
env_spec=env_spec,
state_include_action=False)
baseline = LinearFeatureBaseline(env_spec=env_spec)
envs = tasks.sample(meta_batch_size)
sampler = LocalSampler(
agents=policy,
envs=envs,
max_episode_length=env_spec.max_episode_length,
is_tf_worker=True,
n_workers=meta_batch_size,
worker_class=RL2Worker,
worker_args=dict(n_episodes_per_trial=episode_per_task))
algo = RL2PPO(meta_batch_size=meta_batch_size,
task_sampler=tasks,
env_spec=env_spec,
policy=policy,
baseline=baseline,
sampler=sampler,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
optimizer_args=dict(batch_size=32,
max_optimization_epochs=10),
stop_entropy_gradient=True,
entropy_method='max',
policy_ent_coeff=0.02,
center_adv=False,
meta_evaluator=meta_evaluator,
episodes_per_trial=episode_per_task)
trainer.setup(algo, envs)
trainer.train(n_epochs=n_epochs,
batch_size=episode_per_task *
env_spec.max_episode_length * meta_batch_size)
|
Train RL2 PPO with ML10 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
meta_batch_size (int): Meta batch size.
n_epochs (int): Total number of epochs for training.
        episode_per_task (int): Number of training episodes per task.
|
rl2_ppo_metaworld_ml10
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/rl2_ppo_metaworld_ml10.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/rl2_ppo_metaworld_ml10.py
|
MIT
|
def rl2_ppo_metaworld_ml1_push(ctxt, seed, meta_batch_size, n_epochs,
episode_per_task):
"""Train RL2 PPO with ML1 environment.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
seed (int): Used to seed the random number generator to produce
determinism.
meta_batch_size (int): Meta batch size.
n_epochs (int): Total number of epochs for training.
        episode_per_task (int): Number of training episodes per task.
"""
set_seed(seed)
ml1 = metaworld.ML1('push-v1')
task_sampler = MetaWorldTaskSampler(ml1, 'train',
lambda env, _: RL2Env(env))
env = task_sampler.sample(1)[0]()
test_task_sampler = SetTaskSampler(MetaWorldSetTaskEnv,
env=MetaWorldSetTaskEnv(ml1, 'test'),
wrapper=lambda env, _: RL2Env(env))
env_spec = env.spec
with TFTrainer(snapshot_config=ctxt) as trainer:
policy = GaussianGRUPolicy(name='policy',
hidden_dim=64,
env_spec=env_spec,
state_include_action=False)
meta_evaluator = MetaEvaluator(test_task_sampler=test_task_sampler)
baseline = LinearFeatureBaseline(env_spec=env_spec)
envs = task_sampler.sample(meta_batch_size)
sampler = LocalSampler(
agents=policy,
envs=envs,
max_episode_length=env_spec.max_episode_length,
is_tf_worker=True,
n_workers=meta_batch_size,
worker_class=RL2Worker,
worker_args=dict(n_episodes_per_trial=episode_per_task))
algo = RL2PPO(meta_batch_size=meta_batch_size,
task_sampler=task_sampler,
env_spec=env_spec,
policy=policy,
baseline=baseline,
sampler=sampler,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
optimizer_args=dict(batch_size=32,
max_optimization_epochs=10),
stop_entropy_gradient=True,
entropy_method='max',
policy_ent_coeff=0.02,
center_adv=False,
meta_evaluator=meta_evaluator,
episodes_per_trial=episode_per_task)
trainer.setup(algo, envs)
trainer.train(n_epochs=n_epochs,
batch_size=episode_per_task *
env_spec.max_episode_length * meta_batch_size)
|
Train RL2 PPO with ML1 environment.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
seed (int): Used to seed the random number generator to produce
determinism.
meta_batch_size (int): Meta batch size.
n_epochs (int): Total number of epochs for training.
        episode_per_task (int): Number of training episodes per task.
|
rl2_ppo_metaworld_ml1_push
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/rl2_ppo_metaworld_ml1_push.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/rl2_ppo_metaworld_ml1_push.py
|
MIT
|
def rl2_ppo_metaworld_ml45(ctxt, seed, meta_batch_size, n_epochs,
episode_per_task):
"""Train PPO with ML45 environment.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
seed (int): Used to seed the random number generator to produce
determinism.
meta_batch_size (int): Meta batch size.
n_epochs (int): Total number of epochs for training.
        episode_per_task (int): Number of training episodes per task.
"""
set_seed(seed)
ml45 = metaworld.ML45()
tasks = MetaWorldTaskSampler(ml45, 'train', lambda env, _: RL2Env(env))
test_task_sampler = SetTaskSampler(MetaWorldSetTaskEnv,
env=MetaWorldSetTaskEnv(ml45, 'test'),
wrapper=lambda env, _: RL2Env(env))
with TFTrainer(snapshot_config=ctxt) as trainer:
env = tasks.sample(45)[0]()
env_spec = env.spec
policy = GaussianGRUPolicy(name='policy',
hidden_dim=64,
env_spec=env_spec,
state_include_action=False)
baseline = LinearFeatureBaseline(env_spec=env_spec)
meta_evaluator = MetaEvaluator(test_task_sampler=test_task_sampler,
n_exploration_eps=10,
n_test_episodes=10,
n_test_tasks=5)
envs = tasks.sample(meta_batch_size)
sampler = LocalSampler(
agents=policy,
envs=envs,
max_episode_length=env_spec.max_episode_length,
is_tf_worker=True,
n_workers=meta_batch_size,
worker_class=RL2Worker,
worker_args=dict(n_episodes_per_trial=episode_per_task))
algo = RL2PPO(meta_batch_size=meta_batch_size,
task_sampler=tasks,
env_spec=env_spec,
policy=policy,
baseline=baseline,
sampler=sampler,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
optimizer_args=dict(batch_size=32, ),
stop_entropy_gradient=True,
entropy_method='max',
policy_ent_coeff=0.02,
center_adv=False,
meta_evaluator=meta_evaluator,
episodes_per_trial=10)
trainer.setup(algo, envs)
trainer.train(n_epochs=n_epochs,
batch_size=episode_per_task *
env_spec.max_episode_length * meta_batch_size)
|
Train PPO with ML45 environment.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
seed (int): Used to seed the random number generator to produce
determinism.
meta_batch_size (int): Meta batch size.
n_epochs (int): Total number of epochs for training.
        episode_per_task (int): Number of training episodes per task.
|
rl2_ppo_metaworld_ml45
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/rl2_ppo_metaworld_ml45.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/rl2_ppo_metaworld_ml45.py
|
MIT
|
def rl2_trpo_halfcheetah(ctxt, seed, max_episode_length, meta_batch_size,
n_epochs, episode_per_task):
"""Train TRPO with HalfCheetah environment.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
seed (int): Used to seed the random number generator to produce
determinism.
max_episode_length (int): Maximum length of a single episode.
meta_batch_size (int): Meta batch size.
n_epochs (int): Total number of epochs for training.
        episode_per_task (int): Number of training episodes per task.
"""
set_seed(seed)
with TFTrainer(snapshot_config=ctxt) as trainer:
tasks = task_sampler.SetTaskSampler(
HalfCheetahVelEnv,
wrapper=lambda env, _: RL2Env(
GymEnv(env, max_episode_length=max_episode_length)))
env_spec = RL2Env(
GymEnv(HalfCheetahVelEnv(),
max_episode_length=max_episode_length)).spec
policy = GaussianGRUPolicy(name='policy',
hidden_dim=64,
env_spec=env_spec,
state_include_action=False)
baseline = LinearFeatureBaseline(env_spec=env_spec)
envs = tasks.sample(meta_batch_size)
sampler = LocalSampler(
agents=policy,
envs=envs,
max_episode_length=env_spec.max_episode_length,
is_tf_worker=True,
n_workers=meta_batch_size,
worker_class=RL2Worker,
worker_args=dict(n_episodes_per_trial=episode_per_task))
algo = RL2TRPO(meta_batch_size=meta_batch_size,
task_sampler=tasks,
env_spec=env_spec,
policy=policy,
baseline=baseline,
sampler=sampler,
episodes_per_trial=episode_per_task,
discount=0.99,
max_kl_step=0.01,
optimizer=ConjugateGradientOptimizer,
optimizer_args=dict(hvp_approach=FiniteDifferenceHVP(
base_eps=1e-5)))
trainer.setup(algo, envs)
trainer.train(n_epochs=n_epochs,
batch_size=episode_per_task * max_episode_length *
meta_batch_size)
|
Train TRPO with HalfCheetah environment.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
seed (int): Used to seed the random number generator to produce
determinism.
max_episode_length (int): Maximum length of a single episode.
meta_batch_size (int): Meta batch size.
n_epochs (int): Total number of epochs for training.
        episode_per_task (int): Number of training episodes per task.
|
rl2_trpo_halfcheetah
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/rl2_trpo_halfcheetah.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/rl2_trpo_halfcheetah.py
|
MIT
|
def td3_pendulum(ctxt=None, seed=1):
"""Wrap TD3 training task in the run_task function.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with TFTrainer(ctxt) as trainer:
n_epochs = 500
steps_per_epoch = 20
sampler_batch_size = 250
num_timesteps = n_epochs * steps_per_epoch * sampler_batch_size
env = GymEnv('InvertedDoublePendulum-v2')
policy = ContinuousMLPPolicy(env_spec=env.spec,
hidden_sizes=[400, 300],
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=tf.nn.tanh)
exploration_policy = AddGaussianNoise(env.spec,
policy,
total_timesteps=num_timesteps,
max_sigma=0.1,
min_sigma=0.1)
qf = ContinuousMLPQFunction(name='ContinuousMLPQFunction',
env_spec=env.spec,
hidden_sizes=[400, 300],
action_merge_layer=0,
hidden_nonlinearity=tf.nn.relu)
qf2 = ContinuousMLPQFunction(name='ContinuousMLPQFunction2',
env_spec=env.spec,
hidden_sizes=[400, 300],
action_merge_layer=0,
hidden_nonlinearity=tf.nn.relu)
replay_buffer = PathBuffer(capacity_in_transitions=int(1e6))
sampler = LocalSampler(agents=exploration_policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True,
worker_class=FragmentWorker)
td3 = TD3(env_spec=env.spec,
policy=policy,
policy_lr=1e-4,
qf_lr=1e-3,
qf=qf,
qf2=qf2,
replay_buffer=replay_buffer,
sampler=sampler,
target_update_tau=1e-2,
steps_per_epoch=steps_per_epoch,
n_train_steps=1,
discount=0.99,
buffer_batch_size=100,
min_buffer_size=1e4,
exploration_policy=exploration_policy,
policy_optimizer=tf.compat.v1.train.AdamOptimizer,
qf_optimizer=tf.compat.v1.train.AdamOptimizer)
trainer.setup(td3, env)
trainer.train(n_epochs=n_epochs, batch_size=sampler_batch_size)
|
Train TD3 with InvertedDoublePendulum-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
td3_pendulum
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/td3_pendulum.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/td3_pendulum.py
|
MIT
|
def te_ppo_mt10(ctxt, seed, n_epochs, batch_size_per_task, n_tasks):
"""Train Task Embedding PPO with PointEnv.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
seed (int): Used to seed the random number generator to produce
determinism.
n_epochs (int): Total number of epochs for training.
batch_size_per_task (int): Batch size of samples for each task.
n_tasks (int): Number of tasks to use. Should be a multiple of 10.
"""
set_seed(seed)
mt10 = metaworld.MT10()
train_task_sampler = MetaWorldTaskSampler(mt10,
'train',
lambda env, _: normalize(env),
add_env_onehot=False)
assert n_tasks % 10 == 0
assert n_tasks <= 500
envs = [env_up() for env_up in train_task_sampler.sample(n_tasks)]
env = MultiEnvWrapper(envs,
sample_strategy=round_robin_strategy,
mode='vanilla')
latent_length = 4
inference_window = 6
batch_size = batch_size_per_task * len(envs)
policy_ent_coeff = 2e-2
encoder_ent_coeff = 2e-4
inference_ce_coeff = 5e-2
embedding_init_std = 0.1
embedding_max_std = 0.2
embedding_min_std = 1e-6
policy_init_std = 1.0
policy_max_std = None
policy_min_std = None
with TFTrainer(snapshot_config=ctxt) as trainer:
task_embed_spec = TEPPO.get_encoder_spec(env.task_space,
latent_dim=latent_length)
task_encoder = GaussianMLPEncoder(
name='embedding',
embedding_spec=task_embed_spec,
hidden_sizes=(20, 20),
std_share_network=True,
init_std=embedding_init_std,
max_std=embedding_max_std,
output_nonlinearity=tf.nn.tanh,
min_std=embedding_min_std,
)
traj_embed_spec = TEPPO.get_infer_spec(
env.spec,
latent_dim=latent_length,
inference_window_size=inference_window)
inference = GaussianMLPEncoder(
name='inference',
embedding_spec=traj_embed_spec,
hidden_sizes=(20, 10),
std_share_network=True,
init_std=2.0,
output_nonlinearity=tf.nn.tanh,
min_std=embedding_min_std,
)
policy = GaussianMLPTaskEmbeddingPolicy(
name='policy',
env_spec=env.spec,
encoder=task_encoder,
hidden_sizes=(32, 16),
std_share_network=True,
max_std=policy_max_std,
init_std=policy_init_std,
min_std=policy_min_std,
)
baseline = LinearMultiFeatureBaseline(
env_spec=env.spec, features=['observations', 'tasks', 'latents'])
sampler = LocalSampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True,
worker_class=TaskEmbeddingWorker)
algo = TEPPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
sampler=sampler,
inference=inference,
discount=0.99,
lr_clip_range=0.2,
policy_ent_coeff=policy_ent_coeff,
encoder_ent_coeff=encoder_ent_coeff,
inference_ce_coeff=inference_ce_coeff,
use_softplus_entropy=True,
optimizer_args=dict(
batch_size=32,
max_optimization_epochs=10,
learning_rate=1e-3,
),
inference_optimizer_args=dict(
batch_size=32,
max_optimization_epochs=10,
),
center_adv=True,
stop_ce_gradient=True)
trainer.setup(algo, env)
trainer.train(n_epochs=n_epochs, batch_size=batch_size, plot=False)
|
Train Task Embedding PPO with the MetaWorld MT10 environments.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
seed (int): Used to seed the random number generator to produce
determinism.
n_epochs (int): Total number of epochs for training.
batch_size_per_task (int): Batch size of samples for each task.
n_tasks (int): Number of tasks to use. Should be a multiple of 10.
|
te_ppo_mt10
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/te_ppo_metaworld_mt10.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/te_ppo_metaworld_mt10.py
|
MIT
|
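te_ppo_mt10 asserts that n_tasks is a multiple of 10 and at most 500, mirroring MT10's structure of 10 benchmark environments, each with a set of parametric variations. A hedged sketch of invoking it within those bounds follows; the wrap_experiment wrapper and all numeric values are illustrative assumptions.
from garage import wrap_experiment

# Assumes the te_ppo_mt10 launcher above is defined in the same module.
run_te_ppo_mt10 = wrap_experiment(te_ppo_mt10)

if __name__ == '__main__':
    # n_tasks must satisfy n_tasks % 10 == 0 and n_tasks <= 500, as
    # asserted in the launcher; 10 keeps one variation per environment.
    run_te_ppo_mt10(seed=1, n_epochs=600, batch_size_per_task=1024,
                    n_tasks=10)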