python_code | repo_name | file_path
---|---|---|
# Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""RL interface code."""
| dm_control-main | dm_control/rl/__init__.py |
# Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Control Environment tests."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_control.rl import control
from dm_env import specs
import mock
import numpy as np
_CONSTANT_REWARD_VALUE = 1.0
_CONSTANT_OBSERVATION = {'observations': np.asarray(_CONSTANT_REWARD_VALUE)}
_ACTION_SPEC = specs.BoundedArray(
shape=(1,), dtype=float, minimum=0.0, maximum=1.0)
_OBSERVATION_SPEC = {'observations': specs.Array(shape=(), dtype=float)}
class EnvironmentTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self._task = mock.Mock(spec=control.Task)
self._task.initialize_episode = mock.Mock()
self._task.get_observation = mock.Mock(return_value=_CONSTANT_OBSERVATION)
self._task.get_reward = mock.Mock(return_value=_CONSTANT_REWARD_VALUE)
self._task.get_termination = mock.Mock(return_value=None)
self._task.action_spec = mock.Mock(return_value=_ACTION_SPEC)
self._task.observation_spec.side_effect = NotImplementedError()
self._physics = mock.Mock(spec=control.Physics)
self._physics.time = mock.Mock(return_value=0.0)
self._physics.reset_context = mock.MagicMock()
self._env = control.Environment(physics=self._physics, task=self._task)
def test_environment_calls(self):
self._env.action_spec()
self._task.action_spec.assert_called_with(self._physics)
self._env.reset()
self._task.initialize_episode.assert_called_with(self._physics)
self._task.get_observation.assert_called_with(self._physics)
action = [1]
time_step = self._env.step(action)
self._task.before_step.assert_called()
self._task.after_step.assert_called_with(self._physics)
self._task.get_termination.assert_called_with(self._physics)
self.assertEqual(_CONSTANT_REWARD_VALUE, time_step.reward)
@parameterized.parameters(
{'physics_timestep': .01, 'control_timestep': None,
'expected_steps': 1000},
{'physics_timestep': .01, 'control_timestep': .05,
'expected_steps': 5000})
def test_timeout(self, expected_steps, physics_timestep, control_timestep):
self._physics.timestep.return_value = physics_timestep
time_limit = expected_steps * (control_timestep or physics_timestep)
env = control.Environment(
physics=self._physics, task=self._task, time_limit=time_limit,
control_timestep=control_timestep)
time_step = env.reset()
steps = 0
while not time_step.last():
time_step = env.step([1])
steps += 1
self.assertEqual(steps, expected_steps)
self.assertTrue(time_step.last())
time_step = env.step([1])
self.assertTrue(time_step.first())
def test_observation_spec(self):
observation_spec = self._env.observation_spec()
self.assertEqual(_OBSERVATION_SPEC, observation_spec)
def test_redundant_args_error(self):
with self.assertRaises(ValueError):
control.Environment(physics=self._physics, task=self._task,
n_sub_steps=2, control_timestep=0.1)
def test_control_timestep(self):
self._physics.timestep.return_value = .002
env = control.Environment(
physics=self._physics, task=self._task, n_sub_steps=5)
self.assertEqual(.01, env.control_timestep())
def test_flatten_observations(self):
multimodal_obs = dict(_CONSTANT_OBSERVATION)
multimodal_obs['sensor'] = np.zeros(7, dtype=bool)
self._task.get_observation = mock.Mock(return_value=multimodal_obs)
env = control.Environment(
physics=self._physics, task=self._task, flat_observation=True)
timestep = env.reset()
self.assertLen(timestep.observation, 1)
self.assertEqual(timestep.observation[control.FLAT_OBSERVATION_KEY].size,
1 + 7)
class ComputeNStepsTest(parameterized.TestCase):
@parameterized.parameters((0.2, 0.1, 2), (.111, .111, 1), (100, 5, 20),
(0.03, 0.005, 6))
def testComputeNSteps(self, control_timestep, physics_timestep, expected):
steps = control.compute_n_steps(control_timestep, physics_timestep)
self.assertEqual(expected, steps)
@parameterized.parameters((3, 2), (.003, .00101))
def testComputeNStepsFailures(self, control_timestep, physics_timestep):
with self.assertRaises(ValueError):
control.compute_n_steps(control_timestep, physics_timestep)
if __name__ == '__main__':
absltest.main()
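# A hedged sketch (not part of the original file) of the arithmetic that
# `control.compute_n_steps` performs, mirroring the parameterized cases above:
#
#   control.compute_n_steps(0.03, 0.005)  # -> 6 physics sub-steps per
#                                         #    control step (0.03 / 0.005).
#   control.compute_n_steps(3, 2)         # -> ValueError, since 2 does not
#                                         #    evenly divide 3.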
| dm_control-main | dm_control/rl/control_test.py |
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
| dm_control-main | dm_control/entities/__init__.py |
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Composer entities corresponding to robots."""
| dm_control-main | dm_control/entities/manipulators/__init__.py |
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Abstract base classes for robot arms and hands."""
import abc
from dm_control import composer
from dm_control import mjcf
from dm_control.composer import define
from dm_control.composer.observation import observable
from dm_control.mujoco.wrapper import mjbindings
from dm_control.utils import inverse_kinematics
import numpy as np
DOWN_QUATERNION = np.array([0., 0.70710678118, 0.70710678118, 0.])
_INVALID_JOINTS_ERROR = (
'All non-hinge joints must have limits. Model contains the following '
'non-hinge joints which are unbounded:\n{invalid_str}')
class RobotArm(composer.Robot, metaclass=abc.ABCMeta):
"""The abstract base class for robotic arms."""
def _build_observables(self):
return JointsObservables(self)
@property
def attachment_site(self):
return self.wrist_site
def _get_joint_pos_sampling_bounds(self, physics):
"""Returns lower and upper bounds for sampling arm joint positions.
Args:
physics: An `mjcf.Physics` instance.
Returns:
A (2, num_joints) numpy array containing (lower, upper) position bounds.
For hinge joints without limits the bounds are defined as [0, 2pi].
Raises:
RuntimeError: If the model contains unlimited joints that are not hinges.
"""
bound_joints = physics.bind(self.joints)
limits = np.array(bound_joints.range, copy=True)
is_hinge = bound_joints.type == mjbindings.enums.mjtJoint.mjJNT_HINGE
is_limited = bound_joints.limited.astype(bool)
invalid = ~is_hinge & ~is_limited # All non-hinge joints must have limits.
if any(invalid):
invalid_str = '\n'.join(str(self.joints[i]) for i in np.where(invalid)[0])
raise RuntimeError(_INVALID_JOINTS_ERROR.format(invalid_str=invalid_str))
# For unlimited hinges we sample positions between 0 and 2pi.
limits[is_hinge & ~is_limited] = 0., 2*np.pi
return limits.T
def randomize_arm_joints(self, physics, random_state):
"""Randomizes the qpos of all arm joints.
    The ranges of qpos values are determined from the MJCF model.
Args:
physics: A `mujoco.Physics` instance.
random_state: An `np.random.RandomState` instance.
"""
lower, upper = self._get_joint_pos_sampling_bounds(physics)
physics.bind(self.joints).qpos = random_state.uniform(lower, upper)
def set_site_to_xpos(self, physics, random_state, site, target_pos,
target_quat=None, max_ik_attempts=10):
"""Moves the arm so that a site occurs at the specified location.
    This function runs the inverse kinematics solver to find a configuration
    of the arm joints for which the given site occurs at the specified
    location in Cartesian coordinates.
Args:
physics: A `mujoco.Physics` instance.
random_state: An `np.random.RandomState` instance.
site: Either a `mjcf.Element` or a string specifying the full name
of the site whose position is being set.
target_pos: The desired Cartesian location of the site.
target_quat: (optional) The desired orientation of the site, expressed
as a quaternion. If `None`, the default orientation is to point
vertically downwards.
max_ik_attempts: (optional) Maximum number of attempts to make at finding
a solution satisfying `target_pos` and `target_quat`. The joint
positions will be randomized after each unsuccessful attempt.
Returns:
A boolean indicating whether the desired configuration is obtained.
Raises:
ValueError: If site is neither a string nor an `mjcf.Element`.
"""
if isinstance(site, mjcf.Element):
site_name = site.full_identifier
elif isinstance(site, str):
site_name = site
else:
raise ValueError('site should either be a string or mjcf.Element: got {}'
.format(site))
if target_quat is None:
target_quat = DOWN_QUATERNION
lower, upper = self._get_joint_pos_sampling_bounds(physics)
arm_joint_names = [joint.full_identifier for joint in self.joints]
for _ in range(max_ik_attempts):
result = inverse_kinematics.qpos_from_site_pose(
physics=physics,
site_name=site_name,
target_pos=target_pos,
target_quat=target_quat,
joint_names=arm_joint_names,
rot_weight=2,
inplace=True)
success = result.success
      # Canonicalise each joint angle into its sampling bounds (e.g.
      # [0, 2*pi] for unlimited hinges).
if success:
for arm_joint, low, high in zip(self.joints, lower, upper):
arm_joint_mj = physics.bind(arm_joint)
while arm_joint_mj.qpos >= high:
arm_joint_mj.qpos -= 2*np.pi
while arm_joint_mj.qpos < low:
arm_joint_mj.qpos += 2*np.pi
if arm_joint_mj.qpos > high:
success = False
break
# If succeeded or only one attempt, break and do not randomize joints.
if success or max_ik_attempts <= 1:
break
else:
self.randomize_arm_joints(physics, random_state)
return success
@property
@abc.abstractmethod
def joints(self):
"""Returns the joint elements of the arm."""
raise NotImplementedError
@property
@abc.abstractmethod
def wrist_site(self):
"""Returns the wrist site element of the arm."""
raise NotImplementedError
class JointsObservables(composer.Observables):
"""Observables common to all robot arms."""
@define.observable
def joints_pos(self):
return observable.MJCFFeature('qpos', self._entity.joints)
@define.observable
def joints_vel(self):
return observable.MJCFFeature('qvel', self._entity.joints)
class RobotHand(composer.Robot, metaclass=abc.ABCMeta):
"""The abstract base class for robotic hands."""
@abc.abstractmethod
def set_grasp(self, physics, close_factors):
"""Sets the finger position to the desired positions.
Args:
physics: An instance of `mjcf.Physics`.
close_factors: A number or list of numbers defining the desired grasp
position of each finger. A value of 0 corresponds to fully opening a
finger, while a value of 1 corresponds to fully closing it. If a single
number is specified, the same position is applied to all fingers.
"""
@property
@abc.abstractmethod
def tool_center_point(self):
"""Returns the tool center point element of the hand."""
| dm_control-main | dm_control/entities/manipulators/base.py |
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for the Jaco arm class."""
import itertools
import unittest
from absl.testing import absltest
from absl.testing import parameterized
from dm_control import composer
from dm_control import mjcf
from dm_control.entities.manipulators import kinova
from dm_control.entities.manipulators.kinova import jaco_arm
from dm_control.mujoco.wrapper import mjbindings
import numpy as np
mjlib = mjbindings.mjlib
class JacoArmTest(parameterized.TestCase):
def test_can_compile_and_step_model(self):
arm = kinova.JacoArm()
physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model)
physics.step()
def test_can_attach_hand(self):
arm = kinova.JacoArm()
hand = kinova.JacoHand()
arm.attach(hand)
physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model)
physics.step()
# TODO(b/159974149): Investigate why the mass does not match the datasheet.
@unittest.expectedFailure
def test_mass(self):
arm = kinova.JacoArm()
physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model)
mass = physics.bind(arm.mjcf_model.worldbody).subtreemass
expected_mass = 4.4
self.assertAlmostEqual(mass, expected_mass)
@parameterized.parameters([
dict(actuator_index=0,
control_input=0,
expected_velocity=0.),
dict(actuator_index=0,
control_input=jaco_arm._LARGE_JOINT_MAX_VELOCITY,
expected_velocity=jaco_arm._LARGE_JOINT_MAX_VELOCITY),
dict(actuator_index=4,
control_input=jaco_arm._SMALL_JOINT_MAX_VELOCITY,
expected_velocity=jaco_arm._SMALL_JOINT_MAX_VELOCITY),
dict(actuator_index=0,
control_input=-jaco_arm._LARGE_JOINT_MAX_VELOCITY,
expected_velocity=-jaco_arm._LARGE_JOINT_MAX_VELOCITY),
dict(actuator_index=0,
control_input=2*jaco_arm._LARGE_JOINT_MAX_VELOCITY, # Test clipping
expected_velocity=jaco_arm._LARGE_JOINT_MAX_VELOCITY),
])
def test_velocity_actuation(
self, actuator_index, control_input, expected_velocity):
arm = kinova.JacoArm()
physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model)
actuator = arm.actuators[actuator_index]
bound_actuator = physics.bind(actuator)
bound_joint = physics.bind(actuator.joint)
acceleration_threshold = 1e-6
with physics.model.disable('contact', 'gravity'):
bound_actuator.ctrl = control_input
# Step until the joint has stopped accelerating.
while abs(bound_joint.qacc) > acceleration_threshold:
physics.step()
self.assertAlmostEqual(bound_joint.qvel[0], expected_velocity, delta=0.01)
@parameterized.parameters([
dict(joint_index=0, min_expected_torque=1.7, max_expected_torque=5.2),
dict(joint_index=5, min_expected_torque=0.8, max_expected_torque=7.0)])
def test_backdriving_torque(
self, joint_index, min_expected_torque, max_expected_torque):
arm = kinova.JacoArm()
physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model)
bound_joint = physics.bind(arm.joints[joint_index])
torque = min_expected_torque * 0.8
velocity_threshold = 0.1*2*np.pi/60. # 0.1 RPM
torque_increment = 0.01
seconds_per_torque_increment = 1.
max_torque = max_expected_torque * 1.1
while torque < max_torque:
# Ensure that no other forces are acting on the arm.
with physics.model.disable('gravity', 'contact', 'actuation'):
# Reset the simulation so that the initial velocity is zero.
physics.reset()
bound_joint.qfrc_applied = torque
while physics.time() < seconds_per_torque_increment:
physics.step()
if bound_joint.qvel[0] >= velocity_threshold:
self.assertBetween(torque, min_expected_torque, max_expected_torque)
return
# If we failed to accelerate the joint to the target velocity within the
# time limit we'll reset the simulation and increase the torque.
torque += torque_increment
self.fail('Torque of {} Nm insufficient to backdrive joint.'.format(torque))
@parameterized.parameters([
dict(joint_pos=0., expected_obs=[0., 1.]),
dict(joint_pos=-0.5*np.pi, expected_obs=[-1., 0.]),
dict(joint_pos=np.pi, expected_obs=[0., -1.]),
dict(joint_pos=10*np.pi, expected_obs=[0., 1.])])
def test_joints_pos_observables(self, joint_pos, expected_obs):
joint_index = 0
arm = kinova.JacoArm()
physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model)
physics.bind(arm.joints).qpos[joint_index] = joint_pos
actual_obs = arm.observables.joints_pos(physics)[joint_index]
np.testing.assert_array_almost_equal(expected_obs, actual_obs)
@parameterized.parameters(
dict(joint_index=idx, applied_torque=t)
for idx, t in itertools.product([0, 2, 4], [0., -6.8, 30.5]))
def test_joints_torque_observables(self, joint_index, applied_torque):
arm = kinova.JacoArm()
joint = arm.joints[joint_index]
physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model)
with physics.model.disable('gravity', 'limit', 'contact', 'actuation'):
      # Apply a Cartesian torque to the body containing the joint. We use
      # `xfrc_applied` rather than `qfrc_applied` because forces in
      # `qfrc_applied` are not measured by the torque sensor.
physics.bind(joint.parent).xfrc_applied[3:] = (
applied_torque * physics.bind(joint).xaxis)
observed_torque = arm.observables.joints_torque(physics)[joint_index]
# Note the change in sign, since the sensor measures torques in the
# child->parent direction.
self.assertAlmostEqual(observed_torque, -applied_torque, delta=0.1)
class JacoHandTest(parameterized.TestCase):
def test_can_compile_and_step_model(self):
hand = kinova.JacoHand()
physics = mjcf.Physics.from_mjcf_model(hand.mjcf_model)
physics.step()
# TODO(b/159974149): Investigate why the mass does not match the datasheet.
@unittest.expectedFailure
def test_hand_mass(self):
hand = kinova.JacoHand()
physics = mjcf.Physics.from_mjcf_model(hand.mjcf_model)
mass = physics.bind(hand.mjcf_model.worldbody).subtreemass
expected_mass = 0.727
self.assertAlmostEqual(mass, expected_mass)
def test_grip_force(self):
arena = composer.Arena()
hand = kinova.JacoHand()
arena.attach(hand)
# A sphere with a touch sensor for measuring grip force.
prop_model = mjcf.RootElement(model='grip_target')
prop_model.worldbody.add('geom', type='sphere', size=[0.02])
touch_site = prop_model.worldbody.add('site', type='sphere', size=[0.025])
touch_sensor = prop_model.sensor.add('touch', site=touch_site)
prop = composer.ModelWrapperEntity(prop_model)
# Add some slide joints to allow movement of the target in the XY plane.
# This helps the contact solver to converge more reliably.
prop_frame = arena.attach(prop)
prop_frame.add('joint', name='slide_x', type='slide', axis=(1, 0, 0))
prop_frame.add('joint', name='slide_y', type='slide', axis=(0, 1, 0))
physics = mjcf.Physics.from_mjcf_model(arena.mjcf_model)
bound_pinch_site = physics.bind(hand.pinch_site)
bound_actuators = physics.bind(hand.actuators)
bound_joints = physics.bind(hand.joints)
bound_touch = physics.bind(touch_sensor)
# Position the grip target at the pinch site.
prop.set_pose(physics, position=bound_pinch_site.xpos)
# Close the fingers with as much force as the actuators will allow.
bound_actuators.ctrl = bound_actuators.ctrlrange[:, 1]
# Run the simulation forward until the joints stop moving.
physics.step()
qvel_thresh = 1e-3 # radians / s
while max(abs(bound_joints.qvel)) > qvel_thresh:
physics.step()
expected_min_grip_force = 20.
expected_max_grip_force = 30.
grip_force = bound_touch.sensordata
self.assertBetween(
grip_force, expected_min_grip_force, expected_max_grip_force,
msg='Expected grip force to be between {} and {} N, got {} N.'.format(
expected_min_grip_force, expected_max_grip_force, grip_force))
@parameterized.parameters([dict(opening=True), dict(opening=False)])
def test_finger_travel_time(self, opening):
hand = kinova.JacoHand()
physics = mjcf.Physics.from_mjcf_model(hand.mjcf_model)
bound_actuators = physics.bind(hand.actuators)
bound_joints = physics.bind(hand.joints)
min_ctrl, max_ctrl = bound_actuators.ctrlrange.T
min_qpos, max_qpos = bound_joints.range.T
# Measure the time taken for the finger joints to traverse 99.9% of their
# total range.
qpos_tol = 1e-3 * (max_qpos - min_qpos)
if opening:
hand.set_grasp(physics=physics, close_factors=1.) # Fully closed.
np.testing.assert_array_almost_equal(bound_joints.qpos, max_qpos)
target_pos = min_qpos # Fully open.
ctrl = min_ctrl # Open the fingers as fast as the actuators will allow.
else:
hand.set_grasp(physics=physics, close_factors=0.) # Fully open.
np.testing.assert_array_almost_equal(bound_joints.qpos, min_qpos)
target_pos = max_qpos # Fully closed.
ctrl = max_ctrl # Close the fingers as fast as the actuators will allow.
# Run the simulation until all joints have reached their target positions.
bound_actuators.ctrl = ctrl
while np.any(abs(bound_joints.qpos - target_pos) > qpos_tol):
with physics.model.disable('gravity'):
physics.step()
expected_travel_time = 1.2 # Seconds.
self.assertAlmostEqual(physics.time(), expected_travel_time, delta=0.1)
@parameterized.parameters([
dict(pos=np.r_[0., 0., 0.3], quat=np.r_[0., 1., 0., 1.]),
dict(pos=np.r_[0., -0.1, 0.5], quat=np.r_[1., 1., 0., 0.]),
])
def test_pinch_site_observables(self, pos, quat):
arm = kinova.JacoArm()
hand = kinova.JacoHand()
arena = composer.Arena()
arm.attach(hand)
arena.attach(arm)
physics = mjcf.Physics.from_mjcf_model(arena.mjcf_model)
# Normalize the quaternion.
quat /= np.linalg.norm(quat)
# Drive the arm so that the pinch site is at the desired position and
# orientation.
success = arm.set_site_to_xpos(
physics=physics,
random_state=np.random.RandomState(0),
site=hand.pinch_site,
target_pos=pos,
target_quat=quat)
self.assertTrue(success)
# Check that the observations are as expected.
observed_pos = hand.observables.pinch_site_pos(physics)
np.testing.assert_allclose(observed_pos, pos, atol=1e-3)
observed_rmat = hand.observables.pinch_site_rmat(physics).reshape(3, 3)
expected_rmat = np.empty((3, 3), np.double)
mjlib.mju_quat2Mat(expected_rmat.ravel(), quat)
difference_rmat = observed_rmat.dot(expected_rmat.T)
# `difference_rmat` might not be perfectly orthonormal, which could lead to
# an invalid value being passed to arccos.
u, _, vt = np.linalg.svd(difference_rmat, full_matrices=False)
ortho_difference_rmat = u.dot(vt)
angular_difference = np.arccos((np.trace(ortho_difference_rmat) - 1) / 2)
self.assertLess(angular_difference, 1e-3)
if __name__ == '__main__':
absltest.main()
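# A standalone numpy sketch (not part of the original test file) of the
# angular-difference computation in `test_pinch_site_observables` above: the
# difference rotation is re-orthonormalised via SVD before its angle is
# recovered from the trace, so that `arccos` never sees an out-of-range value.
def _rotation_angle_between(rmat_a, rmat_b):
  """Returns the angle (radians) of the rotation taking `rmat_b` to `rmat_a`."""
  difference = rmat_a.dot(rmat_b.T)
  u, _, vt = np.linalg.svd(difference, full_matrices=False)
  ortho = u.dot(vt)  # Nearest orthonormal matrix to `difference`.
  return np.arccos(np.clip((np.trace(ortho) - 1) / 2, -1., 1.))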
| dm_control-main | dm_control/entities/manipulators/kinova/kinova_test.py |
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module containing the standard Jaco hand."""
import collections
import os
from dm_control import composer
from dm_control import mjcf
from dm_control.composer.observation import observable
from dm_control.entities.manipulators import base
from dm_control.entities.manipulators.kinova import assets_path
_JACO_HAND_XML_PATH = os.path.join(assets_path.KINOVA_ROOT, 'jaco_hand.xml')
_HAND_BODY = 'hand'
_PINCH_SITE = 'pinchsite'
_GRIP_SITE = 'gripsite'
class JacoHand(base.RobotHand):
"""A composer entity representing a Jaco hand."""
def _build(self,
name=None,
use_pinch_site_as_tcp=False):
"""Initializes the JacoHand.
Args:
      name: String, the name of this robot. Used as a prefix in the MJCF
        name attributes.
use_pinch_site_as_tcp: (optional) A boolean, if `True` the pinch site
will be used as the tool center point. If `False` the grip site is used.
"""
self._mjcf_root = mjcf.from_path(_JACO_HAND_XML_PATH)
if name:
self._mjcf_root.model = name
# Find MJCF elements that will be exposed as attributes.
self._bodies = self.mjcf_model.find_all('body')
self._tool_center_point = self._mjcf_root.find(
'site', _PINCH_SITE if use_pinch_site_as_tcp else _GRIP_SITE)
self._joints = self._mjcf_root.find_all('joint')
self._hand_geoms = list(self._mjcf_root.find('body', _HAND_BODY).geom)
self._finger_geoms = [geom for geom in self._mjcf_root.find_all('geom')
if geom.name and geom.name.startswith('finger')]
self._grip_site = self._mjcf_root.find('site', _GRIP_SITE)
self._pinch_site = self._mjcf_root.find('site', _PINCH_SITE)
    # Add actuators.
    self._finger_actuators = [
        _add_velocity_actuator(joint) for joint in self._joints]
    # Add pinch-site sensors. The extraction appears to have dropped these
    # definitions, which the `pinch_site_pos_sensor` and
    # `pinch_site_quat_sensor` properties below reference; they are
    # reconstructed here following the `framepos`/`framequat` pattern used
    # elsewhere in this package.
    self._pinch_site_pos_sensor = self._mjcf_root.sensor.add(
        'framepos', name='pinch_site_pos', objtype='site',
        objname=self._pinch_site)
    self._pinch_site_quat_sensor = self._mjcf_root.sensor.add(
        'framequat', name='pinch_site_quat', objtype='site',
        objname=self._pinch_site)
def _build_observables(self):
return JacoHandObservables(self)
@property
def tool_center_point(self):
"""Tool center point for the Jaco hand."""
return self._tool_center_point
@property
def joints(self):
"""List of joint elements."""
return self._joints
@property
def actuators(self):
"""List of finger actuators."""
return self._finger_actuators
@property
def hand_geom(self):
"""List of geoms belonging to the hand."""
return self._hand_geoms
@property
def finger_geoms(self):
"""List of geoms belonging to the fingers."""
return self._finger_geoms
@property
def grip_site(self):
"""Grip site."""
return self._grip_site
@property
def pinch_site(self):
"""Pinch site."""
return self._pinch_site
@property
def pinch_site_pos_sensor(self):
"""Sensor that returns the cartesian position of the pinch site."""
return self._pinch_site_pos_sensor
@property
def pinch_site_quat_sensor(self):
"""Sensor that returns the orientation of the pinch site as a quaternion."""
return self._pinch_site_quat_sensor
@property
def mjcf_model(self):
"""Returns the `mjcf.RootElement` object corresponding to this robot."""
return self._mjcf_root
def set_grasp(self, physics, close_factors):
"""Sets the finger position to the desired positions.
Args:
physics: An instance of `mjcf.Physics`.
close_factors: A number or list of numbers defining the desired grasp
position of each finger. A value of 0 corresponds to fully opening a
finger, while a value of 1 corresponds to fully closing it. If a single
number is specified, the same position is applied to all fingers.
"""
if not isinstance(close_factors, collections.abc.Iterable):
close_factors = (close_factors,) * len(self.joints)
for joint, finger_factor in zip(self.joints, close_factors):
joint_mj = physics.bind(joint)
min_value, max_value = joint_mj.range
joint_mj.qpos = min_value + (max_value - min_value) * finger_factor
physics.after_reset()
# Set target joint velocities to zero.
physics.bind(self.actuators).ctrl = 0
def _add_velocity_actuator(joint):
"""Adds a velocity actuator to a joint, returns the new MJCF element."""
# These parameters were adjusted to achieve a grip force of ~25 N and a finger
# closing time of ~1.2 s, as specified in the datasheet for the hand.
gain = 10.
forcerange = (-1., 1.)
ctrlrange = (-5., 5.) # Based on Kinova's URDF.
return joint.root.actuator.add(
'velocity',
joint=joint,
name=joint.name,
kv=gain,
ctrllimited=True,
ctrlrange=ctrlrange,
forcelimited=True,
forcerange=forcerange)
class JacoHandObservables(base.JointsObservables):
"""Observables for the Jaco hand."""
@composer.observable
def pinch_site_pos(self):
"""The position of the pinch site, in global coordinates."""
return observable.MJCFFeature('xpos', self._entity.pinch_site)
@composer.observable
def pinch_site_rmat(self):
"""The rotation matrix of the pinch site in global coordinates."""
return observable.MJCFFeature('xmat', self._entity.pinch_site)
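# A hedged usage sketch (not part of the original module): `set_grasp`
# linearly interpolates each finger joint between the limits of its MJCF
# `range`, so a close factor of 0 is fully open and 1 is fully closed:
#
#   hand = JacoHand()
#   physics = mjcf.Physics.from_mjcf_model(hand.mjcf_model)
#   hand.set_grasp(physics, close_factors=0.5)  # All fingers half closed.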
| dm_control-main | dm_control/entities/manipulators/kinova/jaco_hand.py |
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Composer models of Kinova robots."""
from dm_control.entities.manipulators.kinova.jaco_arm import JacoArm
from dm_control.entities.manipulators.kinova.jaco_hand import JacoHand
| dm_control-main | dm_control/entities/manipulators/kinova/__init__.py |
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Helper module that specifies the path to Kinova assets."""
import importlib.util
import os
_DM_CONTROL_ROOT = os.path.dirname(
importlib.util.find_spec('dm_control').origin)
KINOVA_ROOT = os.path.join(_DM_CONTROL_ROOT, 'third_party/kinova')
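# Usage sketch (not part of the original module): modules in this package
# resolve their XML assets relative to the installed `dm_control` package,
# e.g.
#
#   os.path.join(KINOVA_ROOT, 'jaco_arm.xml')
#
# which is exactly how `jaco_arm.py` and `jaco_hand.py` build their paths.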
| dm_control-main | dm_control/entities/manipulators/kinova/assets_path.py |
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module containing the Jaco robot class."""
import os
from dm_control import composer
from dm_control import mjcf
from dm_control.composer import define
from dm_control.composer.observation import observable
from dm_control.entities.manipulators import base
from dm_control.entities.manipulators.kinova import assets_path
import numpy as np
_JACO_ARM_XML_PATH = os.path.join(assets_path.KINOVA_ROOT, 'jaco_arm.xml')
_LARGE_JOINTS = ('joint_1', 'joint_2', 'joint_3')
_SMALL_JOINTS = ('joint_4', 'joint_5', 'joint_6')
_ALL_JOINTS = _LARGE_JOINTS + _SMALL_JOINTS
_WRIST_SITE = 'wristsite'
# These are the peak torque limits taken from Kinova's datasheet:
# https://www.kinovarobotics.com/sites/default/files/AS-ACT-KA58-KA75-SP-INT-EN%20201804-1.2%20%28KINOVA%E2%84%A2%20Actuator%20series%20KA75%2B%20KA-58%20Specifications%29.pdf
_LARGE_JOINT_MAX_TORQUE = 30.5
_SMALL_JOINT_MAX_TORQUE = 6.8
# On the real robot these limits are imposed by the actuator firmware. It's
# technically possible to exceed them via the low-level API, but this can reduce
# the lifetime of the actuators.
_LARGE_JOINT_MAX_VELOCITY = np.deg2rad(36.)
_SMALL_JOINT_MAX_VELOCITY = np.deg2rad(48.)
# The velocity actuator gain is a very rough estimate, and should be considered
# a placeholder for proper system identification.
_VELOCITY_GAIN = 500.
class JacoArm(base.RobotArm):
"""A composer entity representing a Jaco arm."""
def _build(self, name=None):
"""Initializes the JacoArm.
Args:
      name: String, the name of this robot. Used as a prefix in the MJCF
        name attributes.
"""
self._mjcf_root = mjcf.from_path(_JACO_ARM_XML_PATH)
if name:
self._mjcf_root.model = name
# Find MJCF elements that will be exposed as attributes.
self._joints = [self._mjcf_root.find('joint', name) for name in _ALL_JOINTS]
self._wrist_site = self._mjcf_root.find('site', _WRIST_SITE)
self._bodies = self.mjcf_model.find_all('body')
# Add actuators.
self._actuators = [_add_velocity_actuator(joint) for joint in self._joints]
# Add torque sensors.
self._joint_torque_sensors = [
_add_torque_sensor(joint) for joint in self._joints]
def _build_observables(self):
return JacoArmObservables(self)
@property
def joints(self):
"""List of joint elements belonging to the arm."""
return self._joints
@property
def actuators(self):
"""List of actuator elements belonging to the arm."""
return self._actuators
@property
def joint_torque_sensors(self):
"""List of torque sensors for each joint belonging to the arm."""
return self._joint_torque_sensors
@property
def wrist_site(self):
"""Wrist site of the arm (attachment point for the hand)."""
return self._wrist_site
@property
def mjcf_model(self):
"""Returns the `mjcf.RootElement` object corresponding to this robot."""
return self._mjcf_root
def _add_velocity_actuator(joint):
"""Adds a velocity actuator to a joint, returns the new MJCF element."""
if joint.name in _LARGE_JOINTS:
max_torque = _LARGE_JOINT_MAX_TORQUE
max_velocity = _LARGE_JOINT_MAX_VELOCITY
elif joint.name in _SMALL_JOINTS:
max_torque = _SMALL_JOINT_MAX_TORQUE
max_velocity = _SMALL_JOINT_MAX_VELOCITY
else:
raise ValueError('`joint.name` must be one of {}, got {!r}.'
.format(_ALL_JOINTS, joint.name))
return joint.root.actuator.add(
'velocity',
joint=joint,
name=joint.name,
kv=_VELOCITY_GAIN,
ctrllimited=True,
ctrlrange=(-max_velocity, max_velocity),
forcelimited=True,
forcerange=(-max_torque, max_torque))
def _add_torque_sensor(joint):
"""Adds a torque sensor to a joint, returns the new MJCF element."""
site = joint.parent.add(
'site', size=[1e-3], group=composer.SENSOR_SITES_GROUP,
name=joint.name+'_site')
return joint.root.sensor.add('torque', site=site, name=joint.name+'_torque')
class JacoArmObservables(base.JointsObservables):
"""Jaco arm obserables."""
@define.observable
def joints_pos(self):
# Because most of the Jaco arm joints are unlimited, we return the joint
# angles as sine/cosine pairs so that the observations are bounded.
def get_sin_cos_joint_angles(physics):
joint_pos = physics.bind(self._entity.joints).qpos
return np.vstack([np.sin(joint_pos), np.cos(joint_pos)]).T
return observable.Generic(get_sin_cos_joint_angles)
@define.observable
def joints_torque(self):
# MuJoCo's torque sensors are 3-axis, but we are only interested in torques
# acting about the axis of rotation of the joint. We therefore project the
# torques onto the joint axis.
def get_torques(physics):
torques = physics.bind(self._entity.joint_torque_sensors).sensordata
joint_axes = physics.bind(self._entity.joints).axis
return np.einsum('ij,ij->i', torques.reshape(-1, 3), joint_axes)
return observable.Generic(get_torques)
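# A standalone sketch (not part of the original module) of the projection in
# `joints_torque` above: the flat sensor readout is reshaped to one 3-vector
# per joint and projected onto each joint's rotation axis with a row-wise
# dot product.
def _project_torques_example(torques_flat, joint_axes):
  """Row-wise dot of (N, 3) torque readings with (N, 3) joint axes."""
  return np.einsum('ij,ij->i', torques_flat.reshape(-1, 3), joint_axes)
# E.g. a torque of (0, 0, 2) about a joint whose axis is (0, 0, 1):
#   _project_torques_example(np.array([0., 0., 2.]), np.array([[0., 0., 1.]]))
#   # -> array([2.])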
| dm_control-main | dm_control/entities/manipulators/kinova/jaco_arm.py |
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Composer entities corresponding to props.
A "prop" is typically a non-actuated entity representing an object in the world.
"""
from dm_control.entities.props.duplo import Duplo
from dm_control.entities.props.position_detector import PositionDetector
from dm_control.entities.props.primitive import Primitive
| dm_control-main | dm_control/entities/props/__init__.py |
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Detects the presence of registered entities within a cuboidal region."""
from dm_control import composer
from dm_control import mjcf
import numpy as np
_RENDERED_HEIGHT_IN_2D_MODE = 0.01
def _ensure_3d(pos):
# Pad the array with a zero if its length is 2.
if len(pos) == 2:
return np.hstack([pos, 0.])
return pos
class _Detection:
__slots__ = ('entity', 'detected')
def __init__(self, entity, detected=False):
self.entity = entity
self.detected = detected
class PositionDetector(composer.Entity):
"""Detects the presence of registered entities within an axis-aligned box.
The volume of this detector is defined by a "lower" corner and an "upper"
corner, which suffice to define an axis-aligned box.
  An entity is considered "detected" if the `xpos` value of any one of its
  geoms lies within the active region defined by this detector. Note that this
  is NOT
a contact-based detector. Generally speaking, a geom will not be detected
until it is already "half inside" the region.
This detector supports both 2D and 3D modes. In 2D mode, the active region
  has an effectively infinite height along the z-direction.
This detector also provides an "inverted" detection mode, where an entity is
detected when it is not inside the detector's region.
"""
def _build(self,
pos,
size,
inverted=False,
visible=False,
rgba=(1, 1, 1, 1),
material=None,
detected_rgba=(0, 1, 0, 0.25),
retain_substep_detections=False,
name='position_detector'):
"""Builds the detector.
Args:
pos: The position at the center of this detector's active region. Should
be an array-like object of length 3 in 3D mode, or length 2 in 2D mode.
size: The half-lengths of this detector's active region. Should
be an array-like object of length 3 in 3D mode, or length 2 in 2D mode.
inverted: (optional) A boolean, whether to operate in inverted detection
mode. If `True`, an entity is detected when it is not in the active
region.
visible: (optional) A boolean, whether this detector is visible by
default in rendered images. If `False`, this detector's active zone
is placed in MuJoCo rendering group 4, which is not rendered by default,
but can be toggled on (e.g. in `dm_control.viewer`) for debugging
purposes.
rgba: (optional) The color to render when nothing is detected.
material: (optional) The material of the position detector.
detected_rgba: (optional) The color to render when an entity is detected.
retain_substep_detections: (optional) If `True`, the detector will remain
activated at the end of a control step if it became activated at any
substep. If `False`, the detector reports its instantaneous state.
name: (optional) XML element name of this position detector.
Raises:
ValueError: If the `pos` and `size` arrays do not have the same length.
"""
if len(pos) != len(size):
raise ValueError('`pos` and `size` should have the same length: '
'got {!r} and {!r}'.format(pos, size))
self._inverted = inverted
self._detected = False
self._retain_substep_detections = retain_substep_detections
self._lower = np.array(pos) - np.array(size)
self._upper = np.array(pos) + np.array(size)
self._lower_3d = _ensure_3d(self._lower)
self._upper_3d = _ensure_3d(self._upper)
self._mid_3d = (self._lower_3d + self._upper_3d) / 2.
self._entities = []
self._entity_geoms = {}
self._rgba = np.asarray(rgba)
self._detected_rgba = np.asarray(detected_rgba)
render_pos = np.zeros(3)
render_pos[:len(pos)] = pos
render_size = np.full(3, _RENDERED_HEIGHT_IN_2D_MODE)
render_size[:len(size)] = size
self._mjcf_root = mjcf.RootElement(model=name)
self._site = self._mjcf_root.worldbody.add(
'site', name='detection_zone', type='box',
pos=render_pos, size=render_size, rgba=self._rgba, material=material)
self._lower_site = self._mjcf_root.worldbody.add(
'site', name='lower', pos=self._lower_3d, size=[0.05],
rgba=self._rgba)
self._mid_site = self._mjcf_root.worldbody.add(
'site', name='mid', pos=self._mid_3d, size=[0.05],
rgba=self._rgba)
self._upper_site = self._mjcf_root.worldbody.add(
'site', name='upper', pos=self._upper_3d, size=[0.05],
rgba=self._rgba)
self._lower_sensor = self._mjcf_root.sensor.add(
'framepos', objtype='site', objname=self._lower_site,
name='{}_lower'.format(name))
self._mid_sensor = self._mjcf_root.sensor.add(
'framepos', objtype='site', objname=self._mid_site,
name='{}_mid'.format(name))
self._upper_sensor = self._mjcf_root.sensor.add(
'framepos', objtype='site', objname=self._upper_site,
name='{}_upper'.format(name))
if not visible:
self._site.group = composer.SENSOR_SITES_GROUP
self._lower_site.group = composer.SENSOR_SITES_GROUP
self._mid_site.group = composer.SENSOR_SITES_GROUP
self._upper_site.group = composer.SENSOR_SITES_GROUP
def resize(self, pos, size):
if len(pos) != len(size):
raise ValueError('`pos` and `size` should have the same length: '
'got {!r} and {!r}'.format(pos, size))
self._lower = np.array(pos) - np.array(size)
self._upper = np.array(pos) + np.array(size)
self._lower_3d = _ensure_3d(self._lower)
self._upper_3d = _ensure_3d(self._upper)
self._mid_3d = (self._lower_3d + self._upper_3d) / 2.
render_pos = np.zeros(3)
render_pos[:len(pos)] = pos
render_size = np.full(3, _RENDERED_HEIGHT_IN_2D_MODE)
render_size[:len(size)] = size
self._site.pos = render_pos
self._site.size = render_size
self._lower_site.pos = self._lower_3d
self._mid_site.pos = self._mid_3d
self._upper_site.pos = self._upper_3d
def set_colors(self, rgba, detected_rgba):
self.set_color(rgba)
self.set_detected_color(detected_rgba)
def set_color(self, rgba):
self._rgba[:3] = rgba
self._site.rgba = self._rgba
def set_detected_color(self, detected_rgba):
self._detected_rgba[:3] = detected_rgba
def set_position(self, physics, pos):
physics.bind(self._site).pos = pos
size = physics.bind(self._site).size[:3]
self._lower = np.array(pos) - np.array(size)
self._upper = np.array(pos) + np.array(size)
self._lower_3d = _ensure_3d(self._lower)
self._upper_3d = _ensure_3d(self._upper)
self._mid_3d = (self._lower_3d + self._upper_3d) / 2.
physics.bind(self._lower_site).pos = self._lower_3d
physics.bind(self._mid_site).pos = self._mid_3d
physics.bind(self._upper_site).pos = self._upper_3d
@property
def mjcf_model(self):
return self._mjcf_root
def register_entities(self, *entities):
for entity in entities:
self._entities.append(_Detection(entity))
self._entity_geoms[entity] = entity.mjcf_model.find_all('geom')
def deregister_entities(self):
self._entities = []
@property
def detected_entities(self):
"""A list of detected entities."""
return [
detection.entity for detection in self._entities if detection.detected]
def initialize_episode_mjcf(self, unused_random_state):
self._entity_geoms = {}
for detection in self._entities:
entity = detection.entity
self._entity_geoms[entity] = entity.mjcf_model.find_all('geom')
def initialize_episode(self, physics, unused_random_state):
self._update_detection(physics)
def before_step(self, physics, unused_random_state):
for detection in self._entities:
detection.detected = False
def after_substep(self, physics, unused_random_state):
self._update_detection(physics)
def _is_in_zone(self, xpos):
return (np.all(self._lower < xpos[:len(self._lower)])
and np.all(self._upper > xpos[:len(self._upper)]))
def _update_detection(self, physics):
self._previously_detected = self._detected
self._detected = False
for detection in self._entities:
if not self._retain_substep_detections:
detection.detected = False
for geom in self._entity_geoms[detection.entity]:
if self._is_in_zone(physics.bind(geom).xpos) != self._inverted:
detection.detected = True
self._detected = True
break
if self._detected and not self._previously_detected:
physics.bind(self._site).rgba = self._detected_rgba
elif self._previously_detected and not self._detected:
physics.bind(self._site).rgba = self._rgba
def site_pos(self, physics):
return physics.bind(self._site).pos
@property
def activated(self):
return self._detected
@property
def upper(self):
return self._upper
@property
def lower(self):
return self._lower
@property
def mid(self):
return (self._lower + self._upper) / 2.
@property
def lower_site(self):
return self._lower_site
@property
def mid_site(self):
return self._mid_site
@property
def upper_site(self):
return self._upper_site
@property
def lower_sensor(self):
return self._lower_sensor
@property
def mid_sensor(self):
return self._mid_sensor
@property
def upper_sensor(self):
return self._upper_sensor
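# A standalone sketch (not part of the original module) of the containment
# test in `_is_in_zone` above: a point is detected iff it lies strictly
# between the lower and upper corners on every configured axis. In 2D mode
# `lower` and `upper` have length 2, so the z-component is never compared,
# which is what gives the detector its effectively infinite height.
def _point_in_box_example(xpos, lower, upper):
  """Strict axis-aligned containment over the first `len(lower)` axes."""
  return (np.all(lower < xpos[:len(lower)])
          and np.all(upper > xpos[:len(upper)]))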
| dm_control-main | dm_control/entities/props/position_detector.py |
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for dm_control.composer.props.primitive."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_control import composer
from dm_control import mjcf
from dm_control.entities.props import primitive
import numpy as np
class PrimitiveTest(parameterized.TestCase):
def _make_free_prop(self, geom_type='sphere', size=(0.1,), **kwargs):
prop = primitive.Primitive(geom_type=geom_type, size=size, **kwargs)
arena = composer.Arena()
arena.add_free_entity(prop)
physics = mjcf.Physics.from_mjcf_model(arena.mjcf_model)
return prop, physics
@parameterized.parameters([
dict(geom_type='sphere', size=[0.1]),
dict(geom_type='capsule', size=[0.1, 0.2]),
dict(geom_type='cylinder', size=[0.1, 0.2]),
dict(geom_type='box', size=[0.1, 0.2, 0.3]),
dict(geom_type='ellipsoid', size=[0.1, 0.2, 0.3]),
])
def test_instantiation(self, geom_type, size):
name = 'foo'
rgba = [1., 0., 1., 0.5]
prop, physics = self._make_free_prop(
geom_type=geom_type, size=size, name=name, rgba=rgba)
# Check that the name and other kwargs are set correctly.
self.assertEqual(prop.mjcf_model.model, name)
np.testing.assert_array_equal(physics.bind(prop.geom).rgba, rgba)
# Check that we can step without anything breaking.
physics.step()
@parameterized.parameters([
dict(position=[0., 0., 0.]),
dict(position=[0.1, -0.2, 0.3]),
])
def test_position_observable(self, position):
prop, physics = self._make_free_prop()
prop.set_pose(physics, position=position)
observation = prop.observables.position(physics)
np.testing.assert_array_equal(position, observation)
@parameterized.parameters([
dict(quat=[1., 0., 0., 0.]),
dict(quat=[0., -1., 1., 0.]),
])
def test_orientation_observable(self, quat):
prop, physics = self._make_free_prop()
normalized_quat = np.array(quat) / np.linalg.norm(quat)
prop.set_pose(physics, quaternion=normalized_quat)
observation = prop.observables.orientation(physics)
np.testing.assert_array_almost_equal(normalized_quat, observation)
@parameterized.parameters([
dict(velocity=[0., 0., 0.]),
dict(velocity=[0.1, -0.2, 0.3]),
])
def test_linear_velocity_observable(self, velocity):
prop, physics = self._make_free_prop()
prop.set_velocity(physics, velocity=velocity)
observation = prop.observables.linear_velocity(physics)
np.testing.assert_array_almost_equal(velocity, observation)
@parameterized.parameters([
dict(angular_velocity=[0., 0., 0.]),
dict(angular_velocity=[0.1, -0.2, 0.3]),
])
def test_angular_velocity_observable(self, angular_velocity):
prop, physics = self._make_free_prop()
prop.set_velocity(physics, angular_velocity=angular_velocity)
observation = prop.observables.angular_velocity(physics)
np.testing.assert_array_almost_equal(angular_velocity, observation)
if __name__ == '__main__':
absltest.main()
| dm_control-main | dm_control/entities/props/primitive_test.py |
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Prop consisting of a single geom with position and velocity sensors."""
from dm_control import composer
from dm_control import mjcf
from dm_control.composer import define
from dm_control.composer.observation import observable
class Primitive(composer.Entity):
"""A prop consisting of a single geom with position and velocity sensors."""
def _build(self, geom_type, size, name=None, **kwargs):
"""Initializes the prop.
Args:
geom_type: String specifying the geom type.
size: List or numpy array of up to 3 numbers, depending on `geom_type`:
geom_type='box', size=[x_half_length, y_half_length, z_half_length]
geom_type='capsule', size=[radius, half_length]
geom_type='cylinder', size=[radius, half_length]
geom_type='ellipsoid', size=[x_radius, y_radius, z_radius]
geom_type='sphere', size=[radius]
name: (optional) A string, the name of this prop.
**kwargs: Additional geom parameters. Please see the MuJoCo documentation
for further details: http://www.mujoco.org/book/XMLreference.html#geom.
"""
self._mjcf_root = mjcf.element.RootElement(model=name)
self._geom = self._mjcf_root.worldbody.add(
'geom', name='geom', type=geom_type, size=size, **kwargs)
self._position = self._mjcf_root.sensor.add(
'framepos', name='position', objtype='geom', objname=self.geom)
self._orientation = self._mjcf_root.sensor.add(
'framequat', name='orientation', objtype='geom', objname=self.geom)
self._linear_velocity = self._mjcf_root.sensor.add(
'framelinvel', name='linear_velocity', objtype='geom',
objname=self.geom)
self._angular_velocity = self._mjcf_root.sensor.add(
'frameangvel', name='angular_velocity', objtype='geom',
objname=self.geom)
def _build_observables(self):
return PrimitiveObservables(self)
@property
def geom(self):
"""The geom belonging to this prop."""
return self._geom
@property
def position(self):
"""Sensor that returns the prop position."""
return self._position
@property
def orientation(self):
"""Sensor that returns the prop orientation (as a quaternion)."""
# TODO(b/120829807): Consider returning a rotation matrix instead.
return self._orientation
@property
def linear_velocity(self):
"""Sensor that returns the linear velocity of the prop."""
return self._linear_velocity
@property
def angular_velocity(self):
"""Sensor that returns the angular velocity of the prop."""
return self._angular_velocity
@property
def mjcf_model(self):
return self._mjcf_root
class PrimitiveObservables(composer.Observables,
composer.FreePropObservableMixin):
"""Primitive entity's observables."""
@define.observable
def position(self):
return observable.MJCFFeature('sensordata', self._entity.position)
@define.observable
def orientation(self):
return observable.MJCFFeature('sensordata', self._entity.orientation)
@define.observable
def linear_velocity(self):
return observable.MJCFFeature('sensordata', self._entity.linear_velocity)
@define.observable
def angular_velocity(self):
return observable.MJCFFeature('sensordata', self._entity.angular_velocity)
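# A hedged usage sketch (not part of the original module), mirroring
# `primitive_test.py`: a free prop is added to an arena and its sensor-backed
# observables are read straight from physics.
#
#   prop = Primitive(geom_type='sphere', size=[0.1])
#   arena = composer.Arena()
#   arena.add_free_entity(prop)
#   physics = mjcf.Physics.from_mjcf_model(arena.mjcf_model)
#   prop.set_pose(physics, position=[0.1, -0.2, 0.3])
#   prop.observables.position(physics)  # -> array([ 0.1, -0.2,  0.3])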
| dm_control-main | dm_control/entities/props/primitive.py |
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for dm_control.composer.props.position_detector."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_control import composer
from dm_control.entities.props import position_detector
from dm_control.entities.props import primitive
import numpy as np
class PositionDetectorTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.arena = composer.Arena()
self.props = [
primitive.Primitive(geom_type='sphere', size=(0.1,)),
primitive.Primitive(geom_type='sphere', size=(0.1,))
]
for prop in self.props:
self.arena.add_free_entity(prop)
self.task = composer.NullTask(self.arena)
def assertDetected(self, entity, detector):
if not self.inverted:
self.assertIn(entity, detector.detected_entities)
else:
self.assertNotIn(entity, detector.detected_entities)
def assertNotDetected(self, entity, detector):
if not self.inverted:
self.assertNotIn(entity, detector.detected_entities)
else:
self.assertIn(entity, detector.detected_entities)
@parameterized.parameters(False, True)
def test3DDetection(self, inverted):
self.inverted = inverted
detector_pos = np.array([0.3, 0.2, 0.1])
detector_size = np.array([0.1, 0.2, 0.3])
detector = position_detector.PositionDetector(
pos=detector_pos, size=detector_size, inverted=inverted)
detector.register_entities(*self.props)
self.arena.attach(detector)
env = composer.Environment(self.task)
env.reset()
self.assertNotDetected(self.props[0], detector)
self.assertNotDetected(self.props[1], detector)
def initialize_episode(physics, unused_random_state):
for prop in self.props:
prop.set_pose(physics, detector_pos)
self.task.initialize_episode = initialize_episode
env.reset()
self.assertDetected(self.props[0], detector)
self.assertDetected(self.props[1], detector)
self.props[0].set_pose(env.physics, detector_pos - detector_size)
env.step([])
self.assertNotDetected(self.props[0], detector)
self.assertDetected(self.props[1], detector)
self.props[0].set_pose(env.physics, detector_pos - detector_size / 2)
self.props[1].set_pose(env.physics, detector_pos + detector_size * 1.01)
env.step([])
self.assertDetected(self.props[0], detector)
self.assertNotDetected(self.props[1], detector)
@parameterized.parameters(False, True)
def test2DDetection(self, inverted):
self.inverted = inverted
detector_pos = np.array([0.3, 0.2])
detector_size = np.array([0.1, 0.2])
detector = position_detector.PositionDetector(
pos=detector_pos, size=detector_size, inverted=inverted)
detector.register_entities(*self.props)
self.arena.attach(detector)
env = composer.Environment(self.task)
env.reset()
self.assertNotDetected(self.props[0], detector)
self.assertNotDetected(self.props[1], detector)
def initialize_episode(physics, unused_random_state):
# In 2D mode, detection should occur no matter how large |z| is.
self.props[0].set_pose(physics, [detector_pos[0], detector_pos[1], 1e+6])
self.props[1].set_pose(physics, [detector_pos[0], detector_pos[1], -1e+6])
self.task.initialize_episode = initialize_episode
env.reset()
self.assertDetected(self.props[0], detector)
self.assertDetected(self.props[1], detector)
self.props[0].set_pose(
env.physics, [detector_pos[0] - detector_size[0], detector_pos[1], 0])
env.step([])
self.assertNotDetected(self.props[0], detector)
self.assertDetected(self.props[1], detector)
self.props[0].set_pose(
env.physics, [detector_pos[0] - detector_size[0] / 2,
detector_pos[1] + detector_size[1] / 2, 0])
self.props[1].set_pose(
env.physics, [detector_pos[0], detector_pos[1] + detector_size[1], 0])
env.step([])
self.assertDetected(self.props[0], detector)
self.assertNotDetected(self.props[1], detector)
if __name__ == '__main__':
absltest.main()
| dm_control-main | dm_control/entities/props/position_detector_test.py |
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for the Duplo prop."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_control import mjcf
from dm_control.entities.props import duplo
from dm_control.entities.props.duplo import utils
from dm_control.mujoco.wrapper import mjbindings
import numpy as np
mjlib = mjbindings.mjlib
# Expected separation force when `variation == 0`
EXPECTED_FIXED_FORCE = 10.0
EXPECTED_FIXED_FORCE_TOL = 0.5
# Bounds and median are based on empirical distribution of separation forces
# for real Duplo blocks.
EXPECTED_MIN_FORCE = 6.
EXPECTED_MAX_FORCE = 18.
EXPECTED_MEDIAN_FORCE = 12.
EXPECTED_MEDIAN_FORCE_TOL = 2.
class DuploTest(parameterized.TestCase):
"""Tests for the Duplo prop."""
def make_bricks(self, seed, *args, **kwargs):
top_brick = duplo.Duplo(*args, **kwargs)
bottom_brick = duplo.Duplo(*args, **kwargs)
# This sets the radius of the studs. NB: we do this for both bricks because
# the stud radius has a (tiny!) effect on the mass of the top brick.
top_brick.initialize_episode_mjcf(np.random.RandomState(seed))
bottom_brick.initialize_episode_mjcf(np.random.RandomState(seed))
return top_brick, bottom_brick
def measure_separation_force(self, seed, *args, **kwargs):
top_brick, bottom_brick = self.make_bricks(seed=seed, *args, **kwargs)
return utils.measure_separation_force(top_brick, bottom_brick)
@parameterized.parameters([p._asdict() for p in duplo._STUD_SIZE_PARAMS])
def test_separation_force_fixed(self, easy_align, flanges):
forces = []
for seed in range(3):
forces.append(self.measure_separation_force(
seed=seed, easy_align=easy_align, flanges=flanges, variation=0.0))
# Separation forces should all be identical since variation == 0.0.
np.testing.assert_array_equal(forces[0], forces[1:])
# Separation forces should be close to the reference value.
self.assertAlmostEqual(forces[0], EXPECTED_FIXED_FORCE,
delta=EXPECTED_FIXED_FORCE_TOL)
@parameterized.parameters([p._asdict() for p in duplo._STUD_SIZE_PARAMS])
def test_separation_force_distribution(self, easy_align, flanges):
forces = []
for seed in range(10):
forces.append(self.measure_separation_force(
seed=seed, easy_align=easy_align, flanges=flanges, variation=1.0))
self.assertGreater(min(forces), EXPECTED_MIN_FORCE)
self.assertLess(max(forces), EXPECTED_MAX_FORCE)
median_force = np.median(forces)
median_force_delta = median_force - EXPECTED_MEDIAN_FORCE
self.assertLess(
abs(median_force_delta), EXPECTED_MEDIAN_FORCE_TOL,
msg=('Expected median separation force to be {}+/-{} N, got {} N.'
.format(EXPECTED_MEDIAN_FORCE, EXPECTED_MEDIAN_FORCE_TOL,
median_force)))
@parameterized.parameters([p._asdict() for p in duplo._STUD_SIZE_PARAMS])
def test_separation_force_identical_with_same_seed(self, easy_align, flanges):
def measure(seed):
return self.measure_separation_force(
seed=seed, easy_align=easy_align, flanges=flanges, variation=1.0)
first = measure(seed=0)
second = measure(seed=0)
third = measure(seed=1)
self.assertEqual(first, second)
self.assertNotEqual(first, third)
def test_exception_if_color_out_of_range(self):
invalid_color = (1., 0., 2.)
expected_message = duplo._COLOR_NOT_BETWEEN_0_AND_1.format(invalid_color)
with self.assertRaisesWithLiteralMatch(ValueError, expected_message):
_ = duplo.Duplo(color=invalid_color)
@parameterized.parameters([p._asdict() for p in duplo._STUD_SIZE_PARAMS])
def test_stud_and_hole_sites_align_when_stacked(self, easy_align, flanges):
top_brick, bottom_brick = self.make_bricks(
easy_align=easy_align, flanges=flanges, seed=0)
arena, _ = utils.stack_bricks(top_brick, bottom_brick)
physics = mjcf.Physics.from_mjcf_model(arena.mjcf_model)
# Step the physics a few times to allow it to settle.
for _ in range(10):
physics.step()
# When two bricks are stacked, the studs on the bottom brick should align
# precisely with the holes on the top brick.
bottom_stud_pos = physics.bind(bottom_brick.studs.ravel()).xpos
top_hole_pos = physics.bind(top_brick.holes.ravel()).xpos
np.testing.assert_allclose(bottom_stud_pos, top_hole_pos, atol=1e-6)
# TODO(b/120829077): Extend this test to other brick configurations.
def test_correct_stud_contacts(self):
top_brick, bottom_brick = self.make_bricks(seed=0)
arena, _ = utils.stack_bricks(top_brick, bottom_brick)
physics = mjcf.Physics.from_mjcf_model(arena.mjcf_model)
# Step the physics a few times to allow it to settle.
for _ in range(10):
physics.step()
# Each stud should make 3 contacts - two with flanges, one with a tube.
expected_contacts_per_stud = 3
for stud_site in bottom_brick.studs.flat:
stud_geom = bottom_brick.mjcf_model.find('geom', stud_site.name)
geom_id = physics.bind(stud_geom).element_id
# Check that this stud participates in the expected number of contacts.
stud_contacts = ((physics.data.contact.geom1 == geom_id) ^
(physics.data.contact.geom2 == geom_id))
self.assertEqual(stud_contacts.sum(), expected_contacts_per_stud)
# The normal forces should be roughly equal across contacts.
normal_forces = []
for contact_id in np.where(stud_contacts)[0]:
all_forces = np.empty(6)
mjlib.mj_contactForce(physics.model.ptr, physics.data.ptr,
contact_id, all_forces)
# all_forces is [normal, tangent, tangent, torsion, rolling, rolling]
normal_forces.append(all_forces[0])
np.testing.assert_allclose(
normal_forces[0], normal_forces[1:], rtol=0.05)
if __name__ == '__main__':
absltest.main()
| dm_control-main | dm_control/entities/props/duplo/duplo_test.py |
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A 2x4 Duplo brick."""
import collections
import os
from dm_control import composer
from dm_control import mjcf
from dm_control.composer import define
from dm_control.composer.observation import observable
import numpy as np
_DUPLO_XML_PATH = os.path.join(os.path.dirname(__file__), 'duplo2x4.xml')
# Stud radii are drawn from a uniform distribution. The `variation` argument
# scales the minimum and maximum whilst keeping the lower quartile constant.
_StudSize = collections.namedtuple(
'_StudSize', ['minimum', 'lower_quartile', 'maximum'])
_StudParams = collections.namedtuple('_StudParams', ['easy_align', 'flanges'])
_STUD_SIZE_PARAMS = {
_StudParams(easy_align=False, flanges=False):
_StudSize(minimum=0.004685, lower_quartile=0.004781, maximum=0.004898),
_StudParams(easy_align=False, flanges=True):
_StudSize(minimum=0.004609, lower_quartile=0.004647, maximum=0.004716),
_StudParams(easy_align=True, flanges=False):
_StudSize(minimum=0.004754, lower_quartile=0.004844, maximum=0.004953),
_StudParams(easy_align=True, flanges=True):
_StudSize(minimum=0.004695, lower_quartile=0.004717, maximum=0.004765)
}
_COLOR_NOT_BETWEEN_0_AND_1 = (
'All values in `color` must be between 0 and 1, got {!r}.')
class Duplo(composer.Entity):
"""A 2x4 Duplo brick."""
def _build(self, easy_align=False, flanges=True, variation=0.0,
color=(1., 0., 0.)):
"""Initializes a new `Duplo` instance.
Args:
easy_align: If True, the studs on the top of the brick will be capsules
rather than cylinders. This makes alignment easier.
flanges: Whether to use flanges on the bottom of the brick. These make the
dynamics more expensive, but allow the bricks to be clicked together in
partially overlapping configurations.
variation: A float that controls the amount of variation in stud size (and
therefore separation force). A value of 1.0 results in a distribution of
separation forces that approximately matches the empirical distribution
measured for real Duplo bricks. A value of 0.0 yields a deterministic
separation force approximately equal to the mode of the empirical
distribution.
color: An optional tuple of (R, G, B) values specifying the color of the
Duplo brick. These should be floats between 0 and 1. The default is red.
Raises:
ValueError: If `color` contains any value that is not between 0 and 1.
"""
self._mjcf_root = mjcf.from_path(_DUPLO_XML_PATH)
stud = self._mjcf_root.default.find('default', 'stud')
if easy_align:
# Make cylindrical studs invisible and disable contacts.
stud.geom.group = 3
stud.geom.contype = 9
stud.geom.conaffinity = 8
# Make capsule studs visible and enable contacts.
stud_cap = self._mjcf_root.default.find('default', 'stud-capsule')
stud_cap.geom.group = 0
stud_cap.geom.contype = 0
stud_cap.geom.conaffinity = 4
self._active_stud_dclass = stud_cap
else:
self._active_stud_dclass = stud
if flanges:
flange_dclass = self._mjcf_root.default.find('default', 'flange')
flange_dclass.geom.contype = 4 # Enable contact with flanges.
stud_size = _STUD_SIZE_PARAMS[(easy_align, flanges)]
offset = (1 - variation) * stud_size.lower_quartile
self._lower = offset + variation * stud_size.minimum
self._upper = offset + variation * stud_size.maximum
self._studs = np.ndarray((2, 4), dtype=object)
self._holes = np.ndarray((2, 4), dtype=object)
for row in range(2):
for column in range(4):
self._studs[row, column] = self._mjcf_root.find(
'site', 'stud_{}{}'.format(row, column))
self._holes[row, column] = self._mjcf_root.find(
'site', 'hole_{}{}'.format(row, column))
if not all(0 <= value <= 1 for value in color):
raise ValueError(_COLOR_NOT_BETWEEN_0_AND_1.format(color))
self._mjcf_root.default.geom.rgba[:3] = color
def initialize_episode_mjcf(self, random_state):
"""Randomizes the stud radius (and therefore the separation force)."""
radius = random_state.uniform(self._lower, self._upper)
self._active_stud_dclass.geom.size[0] = radius
def _build_observables(self):
return DuploObservables(self)
@property
def studs(self):
"""A (2, 4) numpy array of `mjcf.Elements` corresponding to stud sites."""
return self._studs
@property
def holes(self):
"""A (2, 4) numpy array of `mjcf.Elements` corresponding to hole sites."""
return self._holes
@property
def mjcf_model(self):
return self._mjcf_root
class DuploObservables(composer.Observables, composer.FreePropObservableMixin):
"""Observables for the `Duplo` prop."""
@define.observable
def position(self):
return observable.MJCFFeature(
'sensordata',
self._entity.mjcf_model.find('sensor', 'position'))
@define.observable
def orientation(self):
return observable.MJCFFeature(
'sensordata',
self._entity.mjcf_model.find('sensor', 'orientation'))
@define.observable
def linear_velocity(self):
return observable.MJCFFeature(
'sensordata',
self._entity.mjcf_model.find('sensor', 'linear_velocity'))
@define.observable
def angular_velocity(self):
return observable.MJCFFeature(
'sensordata',
self._entity.mjcf_model.find('sensor', 'angular_velocity'))
@define.observable
def force(self):
return observable.MJCFFeature(
'sensordata',
self._entity.mjcf_model.find('sensor', 'force'))
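# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of building a brick, compiling physics for it, and
# reading a stud site position. It assumes the brick model compiles
# standalone; the constructor arguments are arbitrary demo values.
if __name__ == '__main__':
  _brick = Duplo(easy_align=True, flanges=True, variation=0.5,
                 color=(0., 0., 1.))
  _brick.initialize_episode_mjcf(np.random.RandomState(0))
  _physics = mjcf.Physics.from_mjcf_model(_brick.mjcf_model)
  _physics.step()
  print(_physics.bind(_brick.studs[0, 0]).xpos)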
| dm_control-main | dm_control/entities/props/duplo/__init__.py |
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities used in tests, and for tuning the Duplo model."""
from dm_control import composer
from dm_control import mjcf
from scipy import optimize
def stack_bricks(top_brick, bottom_brick):
"""Stacks two Duplo bricks, returns the attachment frame of the top brick."""
arena = composer.Arena()
# Bottom brick is fixed in place, top brick has a freejoint.
arena.attach(bottom_brick)
attachment_frame = arena.add_free_entity(top_brick)
# Attachment frame is positioned such that the top brick is on top of the
# bottom brick.
attachment_frame.pos = (0, 0, 0.0192)
return arena, attachment_frame
def measure_separation_force(top_brick,
bottom_brick,
min_force=0.,
max_force=20.,
tolerance=0.01,
time_limit=0.5,
height_threshold=1e-3):
"""Utility for measuring the separation force for a pair of Duplo bricks.
Args:
top_brick: An instance of `Duplo` representing the top brick.
bottom_brick: An instance of `Duplo` representing the bottom brick.
    min_force: A force that should be insufficient to separate the bricks (N).
    max_force: A force that should be sufficient to separate the bricks (N).
tolerance: The desired precision of the solution (N).
time_limit: The maximum simulation time (s) over which to apply force on
each iteration. Increasing this value will result in smaller estimates
of the separation force, since given sufficient time the bricks may slip
apart gradually under a smaller force. This is due to MuJoCo's soft
contact model (see http://mujoco.org/book/index.html#Soft).
height_threshold: The distance (m) that the upper brick must move in the
z-axis for the bricks to count as separated.
Returns:
A float, the measured separation force (N).
"""
arena, attachment_frame = stack_bricks(top_brick, bottom_brick)
physics = mjcf.Physics.from_mjcf_model(arena.mjcf_model)
bound_attachment_frame = physics.bind(attachment_frame)
def func(force):
"""Returns +1 if the bricks separate under this force, and -1 otherwise."""
with physics.model.disable('gravity'):
# Reset the simulation.
physics.reset()
# Get the initial height.
initial_height = bound_attachment_frame.xpos[2]
# Apply an upward force to the attachment frame.
bound_attachment_frame.xfrc_applied[2] = force
# Advance the simulation until either the height threshold or time limit
# is reached.
while physics.time() < time_limit:
physics.step()
distance_lifted = bound_attachment_frame.xpos[2] - initial_height
if distance_lifted > height_threshold:
return 1.0
return -1.0
# Ensure that the min and max forces bracket the true separation force.
while func(min_force) > 0:
min_force *= 0.5
while func(max_force) < 0:
max_force *= 2
return optimize.bisect(func, a=min_force, b=max_force, xtol=tolerance,
disp=True)
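# --- Illustrative usage sketch (not part of the original module) ---
# Measures the separation force for a default pair of bricks; the import
# is local because it is only needed for this demo.
if __name__ == '__main__':
  from dm_control.entities.props import duplo
  _top = duplo.Duplo()
  _bottom = duplo.Duplo()
  print('Separation force: %.2f N' % measure_separation_force(_top, _bottom))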
| dm_control-main | dm_control/entities/props/duplo/utils.py |
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Script for tuning Duplo stud sizes to give desired separation forces."""
import collections
import pprint
from absl import app
from absl import logging
from dm_control.entities.props import duplo
from dm_control.entities.props.duplo import utils
from scipy import optimize
# pylint: disable=protected-access,invalid-name
_StudSize = duplo._StudSize
ORIGINAL_STUD_SIZE_PARAMS = duplo._STUD_SIZE_PARAMS
# pylint: enable=protected-access,invalid-name
DESIRED_FORCES = _StudSize(minimum=6., lower_quartile=10., maximum=18.)
# The safety margin here is because the separation force isn't quite monotonic
# w.r.t. the stud radius. If we set the min and max radii according to the
# exact desired bounds on the separation force then we may occasionally sample
# stud radii that yield out-of-bounds forces.
SAFETY_MARGIN = 0.2
def get_separation_force_for_radius(radius, **duplo_kwargs):
"""Measures Duplo separation force as a function of stud radius."""
top_brick = duplo.Duplo(**duplo_kwargs)
bottom_brick = duplo.Duplo(**duplo_kwargs)
# Set the radius of the studs on the bottom brick (this would normally be done
# in `initialize_episode_mjcf`). Note: we also set the radius of the studs on
# the top brick, since this has a (tiny!) effect on its mass.
# pylint: disable=protected-access
top_brick._active_stud_dclass.geom.size[0] = radius
bottom_brick._active_stud_dclass.geom.size[0] = radius
# pylint: enable=protected-access
separation_force = utils.measure_separation_force(top_brick, bottom_brick)
logging.debug('Stud radius: %f\tseparation force: %f N',
radius, separation_force)
return separation_force
class _KeepBracketingSolutions:
"""Wraps objective func, keeps closest solutions bracketing the target."""
_solution = collections.namedtuple('_solution', ['x', 'residual'])
def __init__(self, func):
self._func = func
self.below = self._solution(x=None, residual=-float('inf'))
self.above = self._solution(x=None, residual=float('inf'))
def __call__(self, x):
residual = self._func(x)
if self.below.residual < residual <= 0:
self.below = self._solution(x=x, residual=residual)
elif 0 < residual < self.above.residual:
self.above = self._solution(x=x, residual=residual)
return residual
@property
def closest(self):
if abs(self.below.residual) < self.above.residual:
return self.below
else:
return self.above
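def _example_bracketing_wrapper():
  """Illustrative sketch (not in the original file) of the wrapper above.
  Evaluates a toy objective f(x) = x - 2 at a few points and returns the
  closest solutions found below and above the zero-residual target.
  """
  @_KeepBracketingSolutions
  def f(x):
    return x - 2.
  for x in (0., 1., 3., 5.):
    f(x)
  return f.below.x, f.above.x  # (1.0, 3.0)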
def tune_stud_radius(desired_force,
min_radius=0.0045,
max_radius=0.005,
desired_places=6,
side='closest',
**duplo_kwargs):
"""Find a stud size that gives the desired separation force."""
@_KeepBracketingSolutions
def func(radius):
radius = round(radius, desired_places) # Round radius for aesthetics (!)
return (get_separation_force_for_radius(radius=radius, **duplo_kwargs)
- desired_force)
# Ensure that the min and max radii bracket the solution.
while func(min_radius) > 0:
min_radius = max(1e-3, min_radius - (max_radius - min_radius))
while func(max_radius) < 0:
max_radius += (max_radius - min_radius)
tolerance = 10**-(desired_places)
# Use bisection to refine the bounds on the optimal radius. Note: this assumes
# that separation force is monotonic w.r.t. stud radius, but this isn't
# exactly true in all cases.
optimize.bisect(func, a=min_radius, b=max_radius, xtol=tolerance, disp=True)
if side == 'below':
solution = func.below
elif side == 'above':
solution = func.above
else:
solution = func.closest
radius = round(solution.x, desired_places)
force = get_separation_force_for_radius(radius, **duplo_kwargs)
return radius, force
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
tuned_stud_radii = {}
tuned_separation_forces = {}
for stud_params in sorted(ORIGINAL_STUD_SIZE_PARAMS):
duplo_kwargs = stud_params._asdict()
min_result = tune_stud_radius(
desired_force=DESIRED_FORCES.minimum + SAFETY_MARGIN,
variation=0.0, side='above', **duplo_kwargs)
lq_result = tune_stud_radius(
desired_force=DESIRED_FORCES.lower_quartile,
variation=0.0, side='closest', **duplo_kwargs)
max_result = tune_stud_radius(
desired_force=DESIRED_FORCES.maximum - SAFETY_MARGIN,
variation=0.0, side='below', **duplo_kwargs)
radii, forces = zip(*(min_result, lq_result, max_result))
logging.info('\nDuplo configuration: %s\nTuned radii: %s, forces: %s',
stud_params, radii, forces)
tuned_stud_radii[stud_params] = _StudSize(*radii)
tuned_separation_forces[stud_params] = _StudSize(*forces)
logging.info('%s\nNew Duplo parameters:\n%s\nSeparation forces:\n%s',
'-'*60,
pprint.pformat(tuned_stud_radii),
pprint.pformat(tuned_separation_forces))
if __name__ == '__main__':
app.run(main)
| dm_control-main | dm_control/entities/props/duplo/autotune.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Kernel modules required by multivariate-normal multi-armed bandit models."""
from typing import Any, Optional, Dict, Text
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfk = tfp.math.psd_kernels
class ActionDistanceKernel(tfk.PositiveSemidefiniteKernel):
"""RBF kernel based on pre-recorded distance matrix.
Covariance matrix based on distance between actions of different policies
on the same set of states (with a learnable variance and lengthscale
parameters).
"""
def __init__(self,
distances: np.ndarray,
variance: float = 2.,
lengthscale: float = 1.,
bias_variance: Optional[float] = None,
trainable: bool = True,
variance_prior: Any = None,
lengthscale_prior: Any = None,
bias_variance_prior: Any = None,
dtype: np.dtype = np.float32,
name: str = 'ActionDistanceKernel'):
"""Kernel from distances in action vectors on subset of states.
Args:
distances: numpy array of euclidean distances between pairs of policies
variance: variance (possibly trainable) parameter of the kernel
lengthscale: lengthscale (possibly trainable) parameter
bias_variance: optional value to initialise variance of the bias in kernel
trainable: indicates if variance and lengthscale are trainable parameters
variance_prior: config for variance prior
lengthscale_prior: config for length scale prior
bias_variance_prior: config for bias variance prior
dtype: types of variance and lengthscale parameters
name: name of the kernel
"""
super(ActionDistanceKernel, self).__init__(
feature_ndims=1, dtype=dtype, name=name)
self._distances = distances
self._log_var = tf.Variable(
np.log(variance),
trainable=trainable,
dtype=self.dtype,
name='kernel_log_var')
self._var = tfp.util.DeferredTensor(self._log_var, tf.math.exp)
self._variance_prior = variance_prior
self._log_lengthscale = tf.Variable(
np.log(lengthscale),
trainable=trainable,
dtype=self.dtype,
name='kernel_log_lengthscale')
self._lengthscale = tfp.util.DeferredTensor(self._log_lengthscale,
tf.math.exp)
self._lengthscale_prior = lengthscale_prior
# if bias_variance parameter is passed, make a constant offset in the kernel
if bias_variance is None:
self._log_bias_variance = None
self._bias_variance = tf.Variable(
0., trainable=False, dtype=self.dtype, name='kernel_bias_variance')
else:
self._log_bias_variance = tf.Variable(
np.log(bias_variance),
trainable=True,
dtype=self.dtype,
name='kernel_bias_variance')
self._bias_variance = tfp.util.DeferredTensor(self._log_bias_variance,
tf.math.exp)
self._bias_variance_prior = bias_variance_prior
def regularization_loss(self):
"""Regularization loss for trainable variables."""
# Loss for variance: inverse Gamma distribution.
prior = self._variance_prior
if self._log_var.trainable and prior is not None and prior['use_prior']:
inv_gamma = tfp.distributions.InverseGamma(prior['alpha'], prior['beta'])
loss_var = -inv_gamma.log_prob(self._var)
else:
loss_var = 0.
# Loss for lengthscale: inverse Gamma distribution.
prior = self._lengthscale_prior
if (self._log_lengthscale.trainable and
prior is not None and prior['use_prior']):
inv_gamma = tfp.distributions.InverseGamma(prior['alpha'], prior['beta'])
loss_lengthscale = -inv_gamma.log_prob(self._lengthscale)
else:
loss_lengthscale = 0.
# Loss for bias_variance: inverse Gamma distribution.
prior = self._bias_variance_prior
if (self._log_bias_variance is not None and
prior is not None and prior['use_prior']):
inv_gamma = tfp.distributions.InverseGamma(prior['alpha'], prior['beta'])
loss_bias_var = -inv_gamma.log_prob(self._bias_variance)
else:
loss_bias_var = 0.
return loss_var + loss_lengthscale + loss_bias_var
def _compute_distances(self):
    # The base class simply returns the raw pre-recorded distance matrix.
return tf.convert_to_tensor(self._distances, dtype=tf.float32)
def _apply(self, x1, x2, example_ndims=1):
# transformation for a particular type of kernel
distances = self._compute_distances()
# add a constant offset kernel with trainable variance
distances += self._bias_variance * tf.ones_like(distances, dtype=tf.float32)
# get the relevant part of the matrix
x1 = tf.cast(x1, tf.int32)
x2 = tf.cast(x2, tf.int32)
n_policies = tf.shape(distances)[0]
distances = tf.reshape(distances, [-1])
return tf.squeeze(tf.gather(distances, x1*n_policies+x2), -1)
def get_lengthscale(self):
return self._lengthscale
def get_var(self):
return self._var
def get_bias_variance(self):
return self._bias_variance
def _batch_shape(self):
"""Parameter batch shape is ignored."""
return tf.TensorShape([])
def _batch_shape_tensor(self):
"""Parameter batch shape is ignored."""
return tf.convert_to_tensor(tf.TensorShape([]))
class ActionDistanceMatern12(ActionDistanceKernel):
"""Matern kernel with v=1/2."""
def __init__(self,
distances: np.ndarray,
variance: float = 2.,
lengthscale: float = 1.,
bias_variance: Optional[float] = None,
trainable: bool = True,
variance_prior: Any = None,
lengthscale_prior: Any = None,
bias_variance_prior: Any = None,
dtype: np.dtype = np.float32,
name: str = 'ActionDistanceMatern12'):
super(ActionDistanceMatern12, self).__init__(
distances=distances,
variance=variance,
lengthscale=lengthscale,
bias_variance=bias_variance,
trainable=trainable,
variance_prior=variance_prior,
lengthscale_prior=lengthscale_prior,
bias_variance_prior=bias_variance_prior,
dtype=dtype,
name=name)
def _compute_distances(self):
    # Transformation for the Matern 1/2 kernel:
    # k(d) = variance * exp(-d / lengthscale).
r = tf.divide(
tf.convert_to_tensor(self._distances, dtype=tf.float32),
self._lengthscale)
return tf.exp(-r) * self._var
# Helper functions for kernels
def select_experiment_distances(selected_policies: Dict[Text, float],
policy_keys_in_distances: Dict[Text, int],
distances: np.ndarray) -> np.ndarray:
"""Get a submatrix of distances for the selected policies."""
action_to_policy_keys = sorted(selected_policies)
indexes_in_distance = []
for action_to_policy_key in action_to_policy_keys:
indexes_in_distance.append(policy_keys_in_distances[action_to_policy_key])
return distances[indexes_in_distance, :][:, indexes_in_distance]
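# --- Illustrative usage sketch (not part of the original module) ---
# The policy names and distance matrix below are made up for the demo.
# With variance=1 and lengthscale=1, the Matern-1/2 kernel matrix is
# simply exp(-distances), plus a zero bias since bias_variance is None.
if __name__ == '__main__':
  _distances = np.array([[0., 1., 2.],
                         [1., 0., 1.],
                         [2., 1., 0.]])
  _kernel = ActionDistanceMatern12(
      distances=_distances, variance=1., lengthscale=1., trainable=False)
  _xs = np.arange(3, dtype=np.float32)[:, None]
  print(_kernel.matrix(_xs, _xs).numpy())
  _sub = select_experiment_distances(
      selected_policies={'pi_b': 0.5, 'pi_c': 0.7},
      policy_keys_in_distances={'pi_a': 0, 'pi_b': 1, 'pi_c': 2},
      distances=_distances)
  print(_sub)  # The 2x2 block for policies 'pi_b' and 'pi_c'.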
| active_ops-main | kernel.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gaussian process model at discrete indices."""
from typing import Sequence, Union
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
tfk = tfp.math.psd_kernels
class DistributionWrapper(object):
"""Helper class for MVNormal model with mean and stddev methods."""
def __init__(self, mean, stddev):
self._mean = mean
self._stddev = stddev
def mean(self):
return self._mean
def stddev(self):
return self._stddev
class GaussianProcess(object):
"""Gaussian process model at discrete indices."""
def __init__(self,
num_indices: int,
kernel: tfk.PositiveSemidefiniteKernel,
offset: Union[float, tf.Tensor, tf.Variable],
variance: Union[float, tf.Tensor, tf.Variable]):
"""Creates a model for a stochastic process.
Args:
num_indices: integer, the number of discrete indices.
kernel: An instance of
`tfp.positive_semidefinite_kernels.PositiveSemidefiniteKernels`. The
type of the kernel will be used to cast the inputs and outputs of the
model.
offset: Scalar, offset the observations by this amount.
variance: variance of the Gaussian observation noise.
"""
self._n_xs = num_indices
self._kernel = kernel
self._offset = offset
self._dtype = kernel.dtype
self._variance = variance
    # self._xs is not supposed to change and is treated as a constant.
self._xs = tf.range(self.n_xs, dtype=self._dtype)[:, None]
# These values will be updated and are treated as variables.
self._ys_num = tf.Variable(tf.zeros(self.n_xs, dtype=self._dtype),
trainable=False)
self._ys_mean = tf.Variable(tf.zeros(self.n_xs, dtype=self._dtype),
trainable=False)
self._ys_sq_mean = tf.Variable(tf.zeros(self.n_xs, dtype=self._dtype),
trainable=False)
def add(self, xs, ys):
"""Adds a batch of observations to the model.
Args:
xs: An array (or equivalent) of shape `[B, input_dim]`, where `B` is an
arbitrary batch dimension, and `input_dim` must be compatible with
the trailing dimension of the already fed in observations (if any).
ys: An array (or equivalent) of shape `[B]` or `[B, 1]`,
where `B` is an arbitrary batch dimension.
"""
xs = np.asarray(xs, self._dtype)
ys = np.asarray(ys, self._dtype)
if ys.ndim > 2 or (ys.ndim == 2 and ys.shape[1] > 1):
raise ValueError('ys must have a shape of [B] or [B, 1]')
ys = ys.ravel()
ys_num = self._ys_num.numpy()
ys_mean = self._ys_mean.numpy()
ys_sq_mean = self._ys_sq_mean.numpy()
for x, y in zip(xs, ys):
i = int(x[0])
ys_num[i] += 1.
ys_mean[i] += (y - ys_mean[i]) / ys_num[i]
ys_sq_mean[i] += (y ** 2 - ys_sq_mean[i]) / ys_num[i]
self._ys_num.assign(ys_num)
self._ys_mean.assign(ys_mean)
self._ys_sq_mean.assign(ys_sq_mean)
def index(self, index_points, latent_function: bool = False):
"""Compute the marginal posterior distribution at the given `index_points`.
Args:
index_points: A Tensor (or equivalent) of shape `[B, input_dim]`, where
`B` is an arbitrary batch dimension, and `input_dim` must be compatible
with the trailing dimension of the already fed in observations (if any).
latent_function: If True, return the distribution of the latent
function value at index points without observation noise. Otherwise,
return the distribution of noisy observations.
Returns:
An object with mean and stddev methods.
"""
_, post_mean, post_var = self._marginal_and_posterior()
index_points = tf.squeeze(tf.cast(index_points, tf.int32), axis=1)
post_mean = tf.gather(post_mean, index_points)
post_var = tf.gather(post_var, index_points)
if not latent_function:
post_var += self._variance
return DistributionWrapper(post_mean, tf.sqrt(post_var))
def loss(self):
"""The negative log probability of the observations under the GP."""
log_marg, _, _ = self._marginal_and_posterior(margin_only=True)
return -log_marg
@property
def n_xs(self):
"""Returns the number of unique indices."""
return self._n_xs
@property
def n_observations(self):
"""Returns the number of observations used by the model."""
return tf.reduce_sum(self._ys_num)
def _merge_observations(self):
"""Merge observations at the same index into a single observation."""
# Observations.
ys_mean = self._ys_mean - self._offset
ys_var = self._variance # Scalar.
ys_s = self._ys_sq_mean - tf.square(self._ys_mean) # Empirical variance.
# Filter indices without observations.
index_mask = tf.greater(self._ys_num, 0)
xs = tf.boolean_mask(self._xs, index_mask)
n_xs = tf.cast(tf.shape(xs)[0], self._dtype)
ys_mean = tf.boolean_mask(ys_mean, index_mask)
ys_s = tf.boolean_mask(ys_s, index_mask)
ys_num = tf.boolean_mask(self._ys_num, index_mask)
o_mean = ys_mean
o_var = ys_var / ys_num
# Additional likelihood term inside exp(-1/2(.)).
extra_term = -0.5 * tf.reduce_sum(ys_num / ys_var * ys_s)
# Additional likelihood term of 1/\sqrt(2\pi * var)
extra_term += -0.5 * (
tf.math.log(2.0 * np.pi) * (self.n_observations - n_xs)
+ tf.math.log(ys_var) * self.n_observations
- tf.reduce_sum(tf.math.log(o_var)))
return index_mask, xs, o_mean, o_var, extra_term
@tf.function
def _marginal_and_posterior(self, margin_only=False):
"""Compute marginal log-likelihood and posterior mean and variance."""
index_mask, xs, o_mean, o_var, extra_term = self._merge_observations()
n_xs = tf.cast(tf.shape(xs)[0], self._dtype)
log_marg = extra_term - 0.5 * tf.math.log(2.0 * np.pi) * n_xs
# K + sigma2*I or K + Sigma (with Sigma diagonal) matrix
# where X are training or inducing inputs
k_x_all = self._kernel.matrix(xs, self._xs)
k_xx = tf.boolean_mask(k_x_all, index_mask, axis=1)
k = k_xx + tf.linalg.diag(o_var)
chol = tf.linalg.cholesky(k)
# L^{-1} \mu
a = tf.linalg.triangular_solve(chol, tf.expand_dims(o_mean, 1), lower=True)
log_marg += (
-tf.reduce_sum(tf.math.log(tf.linalg.diag_part(chol)))
- 0.5 * tf.reduce_sum(tf.square(a)))
log_marg = tf.reshape(log_marg, [-1])
if margin_only:
return (log_marg,
tf.zeros((), dtype=self._dtype),
tf.zeros((), dtype=self._dtype))
# predict at the training inputs X
a2 = tf.linalg.triangular_solve(chol, k_x_all, lower=True)
# posterior variance
k_all_diag = self._kernel.apply(self._xs, self._xs)
post_var = k_all_diag - tf.reduce_sum(tf.square(a2), 0)
# posterior mean
post_mean = tf.squeeze(tf.matmul(a2, a, transpose_a=True), axis=1)
post_mean = post_mean + self._offset
return log_marg, post_mean, post_var
def sample(self):
"""Compute marginal log-likelihood and posterior mean and variance."""
index_mask, _, o_mean, o_var, _ = self._merge_observations()
# K + sigma2*I or K + Sigma (with Sigma diagonal) matrix
# where X are training or inducing inputs
k_all_all = self._kernel.matrix(self._xs, self._xs)
k_x_all = tf.boolean_mask(k_all_all, index_mask)
k_xx = tf.boolean_mask(k_x_all, index_mask, axis=1)
k = k_xx + tf.linalg.diag(o_var)
chol = tf.linalg.cholesky(k)
# L^{-1} \mu
a = tf.linalg.triangular_solve(chol, tf.expand_dims(o_mean, 1), lower=True)
# predict at the training inputs X
a2 = tf.linalg.triangular_solve(chol, k_x_all, lower=True)
# posterior mean
post_mean = tf.squeeze(tf.matmul(a2, a, transpose_a=True), axis=1)
post_mean = post_mean + self._offset
# full posterior covariance matrix.
post_var = k_all_all - tf.matmul(a2, a2, transpose_a=True)
mvn = tfd.MultivariateNormalTriL(
loc=post_mean, scale_tril=tf.linalg.cholesky(post_var))
return mvn.sample()
class GaussianProcessWithSideObs(GaussianProcess):
"""Gaussian process model at discrete indices and side observations."""
def __init__(self,
num_indices: int,
kernel: tfk.PositiveSemidefiniteKernel,
offset: Union[float, tf.Tensor, tf.Variable],
variance: Union[float, tf.Tensor, tf.Variable],
side_observations: Sequence[Sequence[float]],
side_observations_variance: Union[float, Sequence[float],
Sequence[Sequence[float]],
tf.Tensor, tf.Variable]):
"""Creates a model for a stochastic process.
Args:
num_indices: integer, the number of discrete indices.
kernel: An instance of
`tfp.positive_semidefinite_kernels.PositiveSemidefiniteKernels`. The
type of the kernel will be used to cast the inputs and outputs of the
model.
offset: Scalar, offset the observations by this amount.
variance: variance of the Gaussian observation noise.
side_observations: [num_side_observation_per_index, num_indices] array of
side observations.
side_observations_variance: side observation variances of the same shape
as side_observations or can be broadcast to the same shape.
"""
super().__init__(num_indices=num_indices,
kernel=kernel,
offset=offset,
variance=variance)
self._zs_var = side_observations_variance
    # self._zs is not supposed to change and is treated as a constant.
self._zs = tf.constant(side_observations, dtype=self._dtype)
if self._zs.ndim != 2:
raise ValueError('Side observation dimension must be 2.')
if self._zs.shape[1] != num_indices:
raise ValueError('Side observation dimension does not match num_indices.')
def _merge_observations(self):
"""Merge observations and side observations at the same index."""
# Observations.
ys_mean = self._ys_mean - self._offset
ys_var = self._variance # Scalar.
ys_s = self._ys_sq_mean - tf.square(self._ys_mean) # Empirical variance.
# Side observations.
zs = self._zs - self._offset
# Broadcast zs_var to have the same shape as zs.
zs_var = self._zs_var + tf.zeros_like(zs)
o_var = 1. / (tf.reduce_sum(1. / zs_var, axis=0) + self._ys_num / ys_var)
o_mean = (tf.reduce_sum(zs / zs_var, axis=0)
+ self._ys_num / ys_var * ys_mean) * o_var
# Additional likelihood term inside exp(-1/2(.)).
extra_term = -0.5 * tf.reduce_sum(
tf.reduce_sum(tf.square(zs) / zs_var, axis=0)
+ self._ys_num / ys_var * tf.square(ys_mean)
- tf.square(o_mean) / o_var
+ self._ys_num / ys_var * ys_s)
# Additional likelihood term of 1/\sqrt(2\pi * var)
extra_term += -0.5 * (
tf.math.log(2.0 * np.pi) * (
self.n_observations + (zs.shape[0] - 1) * zs.shape[1])
+ tf.reduce_sum(tf.math.log(zs_var))
+ tf.math.log(ys_var) * self.n_observations
- tf.reduce_sum(tf.math.log(o_var)))
# All the indices are returned due to the side observation.
index_mask = tf.ones(self._xs.shape[0], dtype=tf.bool)
xs = self._xs
return index_mask, xs, o_mean, o_var, extra_term
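# --- Illustrative usage sketch (not part of the original module) ---
# A GP over five discrete arms with a TFP squared-exponential kernel and
# fixed hyper-parameters; all numbers below are arbitrary demo values.
if __name__ == '__main__':
  _kernel = tfk.ExponentiatedQuadratic(
      amplitude=np.float32(1.), length_scale=np.float32(2.))
  _gp = GaussianProcess(num_indices=5, kernel=_kernel, offset=0.,
                        variance=0.1)
  _gp.add(np.array([[0], [0], [3]]), np.array([1.0, 1.2, -0.5]))
  _posterior = _gp.index(np.arange(5, dtype=np.float32)[:, None])
  print('Posterior mean:', _posterior.mean().numpy())
  print('Posterior stddev:', _posterior.stddev().numpy())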
| active_ops-main | gp_models.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Agents classes that implement various sampling strategies.
Implements uniform sampling of all arms (UniformAgent), and UCB algorithms.
"""
import abc
import numpy as np
import multiarm_model
class Agent(abc.ABC):
"""Agent that executes a policy that pulls the arms."""
def __init__(self, model: multiarm_model.MultiArmModel):
self._model = model
def update(self, arm: int, reward: float) -> None:
"""Update the algorithm given a pulled arm and the reward observation."""
self._model.update(arm, reward)
@abc.abstractmethod
def select_action(self):
"""Choose an arm index to pull."""
@property
def best_arm(self) -> int:
return np.argmax(self._model.mean)
class UniformAgent(Agent):
"""Agent that samples all arms uniformly."""
def select_action(self) -> int:
steps = self._model.steps
return np.random.randint(len(steps))
class UCBAgent(Agent):
"""UCB algorithm.
  Selects an arm by maximizing the UCB criterion: mean + exploration_coef *
  stddev. Works with any arm model that provides mean and variance estimates.
"""
def __init__(self,
model: multiarm_model.MultiArmModel,
minimum_pulls: int = 0,
initial_rand_samples: int = 0,
exploration_coef: float = 0.0):
super().__init__(model)
self._minimum_pulls = minimum_pulls
self._initial_rand_samples = initial_rand_samples
self._exploration_coef = exploration_coef
def select_action(self) -> int:
steps = self._model.steps
# Pull a random arm if number of initial random samples is not yet reached.
if steps.sum() < self._initial_rand_samples:
return np.random.randint(len(steps))
    # Pull a random arm among those below the minimum number of pulls.
init_steps = np.nonzero(steps < self._minimum_pulls)[0]
if init_steps.size > 0:
return init_steps[np.random.randint(init_steps.size)]
# Compute UCB criterion.
scores = self._model.mean + self._exploration_coef*self._model.stddev
return np.random.choice(np.flatnonzero(scores == scores.max()))
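# --- Illustrative usage sketch (not part of the original module) ---
# A small bandit loop with independent arms. `arm_model` is the sibling
# module defining SingleArmModel; the true reward means are arbitrary.
if __name__ == '__main__':
  import arm_model
  _model = multiarm_model.IndependentMultiArmModel(
      num_arms=3, arm_class=arm_model.SingleArmModel)
  _agent = UCBAgent(_model, minimum_pulls=2, exploration_coef=2.0)
  _true_means = [0.1, 0.5, 0.9]
  for _ in range(200):
    _arm = _agent.select_action()
    _reward = _true_means[_arm] + 0.1 * np.random.randn()
    _agent.update(_arm, _reward)
  print('Best arm:', _agent.best_arm)  # Most likely 2.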
| active_ops-main | agents.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class of bandit prolems for active policy selection."""
from typing import Dict, Sequence, Text
import numpy as np
class MAB(object):
"""Class of multi-armed bandit."""
def __init__(self,
selected_policies_fqe: Dict[Text, float]):
self._policy_name_list = sorted(selected_policies_fqe)
self.opes = np.asarray(
[selected_policies_fqe[policy] for policy in self._policy_name_list])
self.num_arms = len(selected_policies_fqe)
self._policies = None
self._rewards = None
@property
def rewards(self) -> Sequence[Sequence[float]]:
return self._rewards
def load_reward_samples(
self, reward_samples_dict: Dict[Text, np.ndarray]
) -> Dict[Text, np.ndarray]:
"""Load pre-sampled arm rewards from a dict for relevant policies.
Args:
reward_samples_dict: a dictionary that maps all policy names to rewards
Returns:
      The input dictionary of reward samples; samples for the selected
      policies are also stored internally on the bandit.
"""
    self._rewards = [
        reward_samples_dict[pi] for pi in self._policy_name_list
    ]
return reward_samples_dict
def pull(self, arm_index: int) -> float:
"""Pull an arm and return the reward.
Draw a sample from pre-sampled rewards.
Args:
arm_index: index of the arm to pull.
Returns:
Sampled reward of the selected arm.
"""
if arm_index < 0 or arm_index >= self.num_arms:
raise ValueError(f'arm_index ({arm_index}) is out of the range of '
f'[0, {self.num_arms-1}]')
return self._sample_arm_reward_from_samples(arm_index)
def _sample_arm_reward_from_samples(self, arm_index: int) -> float:
"""Draw a random sample from the pre-sampled rewards."""
rewards = self._rewards[arm_index]
return np.random.choice(rewards)
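# --- Illustrative usage sketch (not part of the original module) ---
# The OPE estimates and reward samples below are made-up demo values.
if __name__ == '__main__':
  _mab = MAB({'policy_a': 0.2, 'policy_b': 0.8})
  _mab.load_reward_samples({
      'policy_a': np.array([0.1, 0.2, 0.3]),
      'policy_b': np.array([0.7, 0.8, 0.9]),
  })
  print('Sampled reward from arm 1:', _mab.pull(1))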
| active_ops-main | bandit.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class that models a single arm of a bandit.
An arm object keeps track of stats obtained when an arm was pulled. This file
contains a simple arm (SingleArmModel) and Bayesian arm (SingleBayesArm) that
models independent arms.
"""
import abc
from typing import Sequence, Tuple
import numpy as np
import scipy as sp
import scipy.special  # Makes `sp.special` available on older SciPy versions.
def inv_gamma_prior_ml(var_samples: Sequence[float],
tol: float = 1e-6,
max_iters: int = 100) -> Tuple[float, float]:
"""Estimate inverse gamma prior parameters of variances with MLE.
ML2 algorithm in https://arxiv.org/abs/1605.01019.
Args:
var_samples: sequence of variance samples.
tol: tolerance of the alpha estimate.
max_iters: max number of iterations to run.
Returns:
Pair of shape parameter alpha and scale parameter beta.
"""
vs = np.asarray(var_samples)
mu = vs.mean()
v = vs.var()
inv_mean = (1. / vs).mean()
n = len(vs)
a = mu ** 2 / v + 2
c = -np.log(inv_mean * n) - np.log(vs).mean()
for _ in range(max_iters):
curr_a = a
num = c - sp.special.digamma(a) + np.log(n * a)
den = a ** 2 * (1 / a - sp.special.polygamma(1, a))
inv_a = 1. / a + num / den
a = 1. / inv_a
if np.abs(a - curr_a) < tol:
break
  else:
    print('MLE of inverse gamma prior parameters terminated without '
          'convergence after %d iterations.' % max_iters)
b = a / inv_mean
return a, b
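def _example_inv_gamma_prior_ml():
  """Illustrative sketch (not in the original file) of the MLE above.
  Draws variances from an inverse gamma with known parameters and checks
  that the estimate roughly recovers them. If X ~ Gamma(a, rate=b), then
  1/X ~ InvGamma(a, b); note that numpy's `scale` argument is 1/rate.
  """
  rng = np.random.RandomState(0)
  true_alpha, true_beta = 3., 2.
  var_samples = 1. / rng.gamma(shape=true_alpha, scale=1. / true_beta,
                               size=5000)
  return inv_gamma_prior_ml(var_samples)  # Approximately (3., 2.).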
class SingleArmModel(abc.ABC):
"""Class modelling a single arm of multi-armed bandit.
Posterior distribution of the reward mean.
Assume an uninformative prior.
"""
def __init__(self):
self._t = 0
self._mean_x = 0.
self._mean_x2 = 0.
def update(self, reward: float):
self._t += 1
self._mean_x += 1 / self._t * (reward - self._mean_x)
self._mean_x2 += 1 / self._t * (reward ** 2 - self._mean_x2)
@property
def step(self) -> int:
return self._t
@property
def mean(self) -> float:
"""Return the estimate of the mean reward."""
return self._mean_x
@property
def mean2(self) -> float:
"""Return the estimate of the mean squared reward."""
return self._mean_x2
@property
def mean_without_prior(self) -> float:
"""Return the estimate of the mean reward ignoring the prior if exists."""
return self._mean_x
@property
def sum2(self) -> float:
return self._mean_x2 * self._t
@property
def stddev(self) -> float:
"""Return the estimation stddev of the mean reward."""
if self._t <= 0:
raise ValueError('Cannot compute the stddev if the number of pulls '
f'({self._t}) <= 0')
var_x = self._mean_x2 - self._mean_x**2
return np.sqrt(var_x / self._t)
def sample(self) -> float:
"""Return a sample of the mean reward from the posterior."""
return self.mean + self.stddev * np.random.randn()
class SingleBayesArm(SingleArmModel):
"""Estimate the posterior of the reward mean and variance parameters.
The hierarchical Bayesian model for the reward is as follows:
r ~ Norm(mu, sigma^2)
mu ~ Norm(m, s^2)
sigma^2 ~ IG(alpha, beta)
where mu and sigma^2 are the mean and variance of the Gaussian distribution
for reward r. m and s^2 are the mean and variance of the prior Gaussian
distribution of mu. alpha and beta are the alpha and beta parameters of the
prior inverse Gamma distribution of sigma^2.
When new observations are added, we update the estimate of reward mean mu and
variance sigma^2. We either sample sigma^2 with Gibbs sampling for a few steps
  or compute the MAP of the joint posterior with coordinate ascent. Given
  samples or a point estimate of sigma^2, we estimate the conditional mean and
variance of mu with
mu ~ ensemble of P(mu|sigma_sample) or mu ~ P(mu|sigma^2 MAP)
"""
def __init__(self, prior_mean: float, prior_std: float, alpha: float,
beta: float, sample: bool, steps: int = 10, burnin: int = 0):
"""Initialize the Bayesian model for a single arm.
Args:
prior_mean: mean parameter for the prior of variable mu.
prior_std: prior std parameter for the prior of variable mu.
alpha: alpha parameter for the prior of variable mu.
beta: beta parameter for the prior of variable mu.
sample: sample sigma^2 or estimate a point estimate from the joint MAP.
steps: if `sample` is True, it is the number of samples to keep from Gibbs
        sampling after the burn-in period. Otherwise, it is the number of
        coordinate ascent steps after burn-in.
burnin: burn-in period in sampling or optimization of sigma^2.
"""
super().__init__()
self._m = prior_mean
self._s = prior_std
self._a = alpha
self._b = beta
self._sample = sample # Sample sigma2 from posterior or compute the MAP.
self._steps = steps
self._burnin = burnin
# Initialize mu and sigma**2 from the prior mode for sampling or
# optimization.
self._mu = self._m
self._sigma2 = self._b / (self._a + 1)
    # Maintain the posterior mean and std of mu at step t and a list of
# samples of sigma2.
self._m_t = self._m
self._s_t = self._s
if self._sample:
self._sigma2_samples = 1. / np.random.gamma(
shape=self._a, scale=1 / self._b, size=self._steps)
else:
# Single sample at the prior mode.
self._sigma2_samples = np.array([self._b / (self._a + 1)])
    self._to_update_pos = False  # Whether the posterior needs an update.
def update(self, reward: float):
super().update(reward)
self._to_update_pos = True
def _mu_cond_on_sigma2(self, sigma2):
"""Return conditional mean and variance of mu given sigma2."""
m = self._m
s = self._s
t = self._t
mx = self._mean_x
ratio = sigma2 / s ** 2
m_t = (t * mx + ratio * m) / (t + ratio)
s2_t = sigma2 / (t + ratio)
return m_t, s2_t
def _sigma2_cond_on_mu(self, mu):
"""Return conditional mean and variance of sigma2 given mu."""
a = self._a
b = self._b
t = self._t
mx = self._mean_x
mx2 = self._mean_x2
varx = mx2 - mx ** 2 # Sample variance.
a_t = a + 0.5 * t
b_t = b + 0.5 * ((mx - mu) ** 2 + varx)
return a_t, b_t
def _sample_sigma2(self):
"""Obtain samples or MAP of sigma2."""
mu = self._mu
sigma2 = self._sigma2
for i in range(self._steps + self._burnin):
# Sample or optimize sigma2 given mu.
a_t, b_t = self._sigma2_cond_on_mu(mu)
if self._sample:
sigma2 = 1. / np.random.gamma(shape=a_t, scale=1 / b_t)
else:
sigma2 = b_t / (a_t + 1)
# Sample or optimize mu given sigma2.
m_t, s2_t = self._mu_cond_on_sigma2(sigma2)
if self._sample:
mu = m_t + np.sqrt(s2_t) * np.random.randn()
else:
mu = m_t
if self._sample and i >= self._burnin:
self._sigma2_samples[i - self._burnin] = sigma2
if not self._sample:
self._sigma2_samples[0] = sigma2
self._mu = mu
self._sigma2 = sigma2
return self._sigma2_samples
def _update_posterior(self):
"""Sample or optimize sigma2 and update the posterior of mu."""
if not self._to_update_pos:
return
sigma2_samples = self._sample_sigma2()
ms = np.zeros(len(sigma2_samples))
s2s = np.zeros(len(sigma2_samples))
for i, sigma2 in enumerate(sigma2_samples):
ms[i], s2s[i] = self._mu_cond_on_sigma2(sigma2)
pos_mean = ms.mean()
pos_mean_sq = (ms**2 + s2s).mean()
pos_std = np.sqrt(pos_mean_sq - pos_mean**2)
self._m_t = pos_mean
self._s_t = pos_std
self._to_update_pos = False
@property
def mean(self) -> float:
"""Return the estimate of the mean reward."""
self._update_posterior()
return self._m_t
@property
def stddev(self) -> float:
"""Return the estimation stddev of the mean reward."""
self._update_posterior()
return self._s_t
def sample(self) -> float:
"""Return a sample of the mean reward from the posterior."""
# Sample a sigma2.
sigma2 = self._sigma2_samples[np.random.randint(len(self._sigma2_samples))]
# Sample mu conditioned on sigma2.
m_t, s2_t = self._mu_cond_on_sigma2(sigma2)
return m_t + np.sqrt(s2_t) * np.random.randn()
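# --- Illustrative usage sketch (not part of the original module) ---
# Feed noisy rewards to a Bayesian arm and inspect its posterior; the
# hyper-parameters below are arbitrary demo values.
if __name__ == '__main__':
  _arm = SingleBayesArm(prior_mean=0., prior_std=1., alpha=3., beta=2.,
                        sample=True, steps=20, burnin=10)
  for _r in np.random.RandomState(0).randn(50) * 0.5 + 1.0:
    _arm.update(float(_r))
  print('Posterior mean: %.3f, stddev: %.3f' % (_arm.mean, _arm.stddev))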
| active_ops-main | arm_model.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class that represents a multiarm model.
Classes inherit from MultiArmModel which maintains lists of stats of single
arms. IndependentMultiArmModel treats all arms independently.
"""
import abc
from typing import Any, Dict, Optional, Sequence, Text, Type, Union
import numpy as np
import sonnet as snt
import tensorflow as tf
import tensorflow_probability as tfp
import arm_model
import gp_models
tfk = tfp.math.psd_kernels
class MultiArmModel(abc.ABC):
"""Posterior distribution of the reward mean of multiple arms."""
def __init__(self, num_arms: int):
self._num_arms = num_arms
@property
def num_arms(self) -> int:
return self._num_arms
@property
@abc.abstractmethod
def steps(self):
"""Return an array of the steps of each arm."""
@property
@abc.abstractmethod
def mean(self):
"""Return an array of the estimate of the mean rewards."""
@property
@abc.abstractmethod
def stddev(self):
"""Return an array of the estimation stddev of the mean rewards."""
@abc.abstractmethod
def update(self, arm: int, reward: float):
"""Update the model given a pulled arm and the reward observation."""
def sample(self):
"""Return an array of samples of the mean rewards from the posterior."""
return self.mean + self.stddev * np.random.randn(self._num_arms)
class IndependentMultiArmModel(MultiArmModel):
"""Posterior distribution of the reward mean of multiple arms."""
def __init__(self,
num_arms: int,
arm_class: Type[arm_model.SingleArmModel],
arm_args: Optional[Sequence[Any]] = None,
arm_kwargs: Optional[Sequence[Dict[Text, Any]]] = None):
super().__init__(num_arms)
self._arms = []
if arm_args is None:
arm_args = [[] for _ in range(num_arms)]
if arm_kwargs is None:
arm_kwargs = [{} for _ in range(num_arms)]
for i in range(num_arms):
self._arms.append(arm_class(*arm_args[i], **arm_kwargs[i]))
@property
def steps(self) -> np.ndarray:
return np.array([arm.step for arm in self._arms])
def update(self, arm: int, reward: float):
"""Update the model given a pulled arm and the reward observation."""
self._arms[arm].update(reward)
@property
def mean(self) -> np.ndarray:
"""Return an array of the estimate of the mean rewards."""
return np.array([arm.mean for arm in self._arms])
@property
def mean_without_prior(self) -> np.ndarray:
"""Return an array of the estimate of the mean rewards."""
return np.array([arm.mean_without_prior for arm in self._arms])
@property
def mean2(self) -> np.ndarray:
"""Return an array of the estimate of the mean squared rewards."""
return np.array([arm.mean2 for arm in self._arms])
@property
def sum2(self):
"""Return an array of the estimate of the sum of squared rewards."""
return np.array([arm.sum2 for arm in self._arms])
@property
def stddev(self) -> np.ndarray:
"""Return an array of the estimation stddev of the mean rewards."""
return np.array([arm.stddev for arm in self._arms])
def sample(self) -> np.ndarray:
"""Return an array of samples of the mean rewards from the posterior."""
return np.array([arm.sample() for arm in self._arms])
class MVNormalBase(MultiArmModel):
"""Base multivariate normal model for a fixed set of arms.
Implemented by a Gaussian process indexed at 1-D integer locations.
  Note that the GP model and its hyper-parameters are updated lazily: every
  time a new observation is added with `update`, recomputation of the
  posterior mean and stddev, as well as hyper-parameter optimization, is
  deferred until the next time a public method is called. This avoids
  repeated computation of the posterior when adding multiple observations,
  but it also means that hyper-parameter optimization is run only once in
  this case.
"""
def __init__(self,
num_arms: int,
kernel: tfk.PositiveSemidefiniteKernel,
gp: gp_models.GaussianProcess,
optimizer_config: Optional[Dict[Text, Any]] = None,
dtype: np.dtype = np.float32):
super().__init__(num_arms)
self._kernel = kernel
self._gp = gp
self._dtype = dtype
self._arm_xs = tf.range(num_arms, dtype=dtype)[:, None]
self._make_optimizer(optimizer_config)
self._steps = np.zeros(num_arms)
# Initialize self._mean and self._stddev.
self._get_mean_stddev()
self._to_update = False
def _make_optimizer(self, optimizer_config):
"""Make the optimizer for GP hyper-parameters."""
self._optimizer_config = optimizer_config
if optimizer_config is not None:
required_configs = [
'optimizer_name', 'learning_rate', 'steps_per_update']
for key in required_configs:
if key not in optimizer_config:
raise ValueError(f'{key} is required in optimizer_config')
self._optimizer = getattr(
snt.optimizers, optimizer_config['optimizer_name'])(
learning_rate=optimizer_config['learning_rate'],
**optimizer_config.get('kwargs', {}))
else:
self._optimizer = None
@property
@abc.abstractmethod
def trainable_variables(self):
"""Trainable variables."""
@property
def steps(self) -> np.ndarray:
return self._steps
@property
def mean(self):
"""Return an array of the estimate of the mean rewards."""
self._update_gp()
return self._mean # pytype: disable=attribute-error # bind-properties
@property
def stddev(self):
"""Return an array of the estimation stddev of the mean rewards."""
self._update_gp()
return self._stddev # pytype: disable=attribute-error # bind-properties
def update(self, arm: int, reward: float):
"""Update the model given a pulled arm and the reward observation."""
self._steps[arm] += 1
x = tf.constant(np.array([[arm]], dtype=self._dtype))
y = tf.constant(np.array([reward], dtype=self._dtype))
self._gp.add(x, y)
self._to_update = True # Mark the model to be updated later.
def sample(self, joint=False):
"""Sample a function from the posterior of GP."""
self._update_gp()
if joint:
# Sample from the joint distributions over all arms.
one_sample = self._gp.sample().numpy()
else:
# Sample from the marginal distributions, independent among arms.
distr = self._gp.index(self._arm_xs, latent_function=True)
one_sample = (distr.mean().numpy() +
distr.stddev().numpy() * np.random.randn(self._num_arms))
return one_sample
def _update_gp(self):
loss_dict = {}
if self._to_update:
if self._optimizer is not None:
loss_dict = self._optimize(self._optimizer_config['steps_per_update'])
self._get_mean_stddev()
self._to_update = False
return loss_dict
@tf.function
def _optimize(self, steps):
"""Optimizer hyper-parameters."""
model_loss = tf.constant([0.])
regularization_loss = tf.constant([0.])
for _ in tf.range(steps):
with tf.GradientTape() as tape:
model_loss = self._gp.loss()
loss = model_loss
regularization_loss = tf.reshape(self._regularization_loss(), [-1])
loss += regularization_loss
gradients = tape.gradient(loss, self.trainable_variables)
self._optimizer.apply(gradients, self.trainable_variables)
return {
'model_loss': model_loss,
'regularization_loss': regularization_loss
}
@abc.abstractmethod
def _regularization_loss(self):
"""Regularization loss for trainable variables."""
def _get_mean_stddev(self):
"""Compute and update posterior mean and stddev to self._mean/_stddev."""
distr = self._gp.index(self._arm_xs, latent_function=True)
self._mean = distr.mean().numpy()
self._stddev = distr.stddev().numpy()
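# Illustration of the lazy-update contract documented above (a sketch; `model`
# stands for any concrete MVNormalBase subclass and `observations` is
# hypothetical):
#
#   for arm, reward in observations:
#     model.update(arm, reward)   # Cheap: only marks the posterior as stale.
#   posterior = model.mean        # One `_update_gp` call happens here, i.e. a
#                                 # single round of hyper-parameter optimization.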
class MVNormal(MVNormalBase):
"""Multivariate normal model for a fixed set of arms.
Offset can be a vector of values indexed by the arm.
"""
def __init__(self,
num_arms: int,
kernel: tfk.PositiveSemidefiniteKernel,
observation_noise_variance: float = 1.0,
observation_noise_variance_prior: Any = None,
optimizer_config: Optional[Dict[Text, Any]] = None,
offset: Optional[float] = None,
dtype: np.dtype = np.float32):
"""Initialize the multi-variate normal model.
Args:
num_arms: number of arms / discrete indices.
kernel: GP kernel.
observation_noise_variance: initial guess of the variance of the
observation noise.
observation_noise_variance_prior: configuration dict to specify the
inverse Gamma prior for the variance if provided. It should include
three keys: 'use_prior' (boolean, whether to use prior), 'alpha' and
'beta' (prior parameters in floats).
optimizer_config: configuration dict to specify the sonnet optimizer for
        GP hyper-parameter optimization. It must include the following keys:
'optimizer_name', 'learning_rate', 'steps_per_update', and optionally
other parameters to initialize the optimizer.
offset: initial guess of the trainable scalar mean hyper-parameter if
provided. Otherwise, it is assumed to be fixed at zero.
dtype: float type of the model.
"""
# Define the GP.
if offset is None:
self._offset = tf.Variable(0., dtype=dtype, trainable=False,
name='offset')
else:
self._offset = tf.Variable(offset, dtype=dtype, trainable=True,
name='offset')
self._log_obs_var = tf.Variable(
np.log(observation_noise_variance), dtype=dtype, trainable=True,
name='log_obs_var')
    # Add a constant to avoid obs_var becoming too small, which would make the
    # kernel non-invertible once the observations start repeating:
    # self._obs_var = tfp.util.DeferredTensor(
    #     self._log_obs_var, lambda x: tf.math.exp(x) + 1.0)
    # Or use the line below if there is no need to restrict obs_var.
self._obs_var = tfp.util.DeferredTensor(self._log_obs_var, tf.math.exp)
self._observation_noise_variance_prior = observation_noise_variance_prior
gp = gp_models.GaussianProcess(
num_indices=num_arms,
kernel=kernel,
offset=self._offset,
variance=self._obs_var)
super().__init__(num_arms=num_arms,
kernel=kernel,
gp=gp,
optimizer_config=optimizer_config,
dtype=dtype)
@property
def trainable_variables(self):
possible_trainable_variables = (
self._offset, self._log_obs_var) + self._kernel.trainable_variables
return tuple(x for x in possible_trainable_variables
if x is not None and x.trainable)
def _regularization_loss(self):
# Loss for offset.
loss_offset = 0.
# Loss for observation noise variance: inverse Gamma distribution.
prior = self._observation_noise_variance_prior
if self._log_obs_var.trainable and prior is not None and prior['use_prior']:
inv_gamma = tfp.distributions.InverseGamma(prior['alpha'], prior['beta'])
loss_obs_var = -inv_gamma.log_prob(self._obs_var)
else:
loss_obs_var = 0.
# Loss for kernel variables.
if hasattr(self._kernel, 'regularization_loss'):
loss_kernel = self._kernel.regularization_loss()
else:
loss_kernel = 0.
return loss_offset + loss_obs_var + loss_kernel
def get_observation_noise_variance(self):
return self._obs_var.numpy()
def get_offset(self):
return self._offset.numpy()
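# Example construction (a sketch; the kernel choice and optimizer settings are
# assumptions, but the optimizer_config keys are the ones `_make_optimizer`
# requires):
#
#   kernel = tfk.ExponentiatedQuadratic()
#   model = MVNormal(
#       num_arms=10,
#       kernel=kernel,
#       observation_noise_variance=1.0,
#       observation_noise_variance_prior={
#           'use_prior': True, 'alpha': 2.0, 'beta': 1.0},
#       optimizer_config={
#           'optimizer_name': 'Adam',     # Resolved via snt.optimizers.Adam.
#           'learning_rate': 0.05,
#           'steps_per_update': 100,
#       })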
class MVNormalWithSideObs(MVNormalBase):
"""Multivariate normal model for a fixed set of arms and side observations."""
def __init__(
self,
num_arms: int,
kernel: tfk.PositiveSemidefiniteKernel,
side_observations: Union[Sequence[float], Sequence[Sequence[float]]],
observation_noise_variance: float = 1.0,
observation_noise_variance_prior: Any = None,
tie_side_observations_variance_with_main_observations: bool = False,
tie_side_observations_variance_along_sources: bool = False,
side_observations_variance: Union[float, Sequence[float]] = 1.0,
side_observations_variance_prior: Any = None,
side_observations_variance_trainable: bool = False,
optimizer_config: Optional[Dict[Text, Any]] = None,
offset: Optional[float] = None,
dtype: np.dtype = np.float32):
"""Initialize the multi-variate normal model with side observations.
  The side observations may come from one or multiple sources, e.g. different
  OPE estimates.
Args:
num_arms: number of arms / discrete indices.
kernel: GP kernel.
side_observations: array of `num_arms` side observations, or a 2D array
of side observations from multiple sources in shape
(`num_sources`, `num_arms`).
observation_noise_variance: initial guess of the variance of the
observation noise.
observation_noise_variance_prior: configuration dict to specify the
inverse Gamma prior for the variance if provided. It should include
        three keys: 'use_prior' (boolean, whether to use prior), 'alpha' and
        'beta' (prior parameters in floats).
tie_side_observations_variance_with_main_observations: whether to share
the variance parameter of all side observations with the variance of
the main observations. If true, the following options related to side
observation variance will be ignored.
tie_side_observations_variance_along_sources: whether to share the
variance parameter of the side observation among all the sources.
side_observations_variance: value (or initial guess if trainable) of the
side observation variance, or one value per source if provided as an
array.
side_observations_variance_prior: configuration dict to specify the
inverse Gamma prior for the variance of side observations if provided.
side_observations_variance_trainable: whether to train the side
observation variance parameters.
optimizer_config: configuration dict to specify the sonnet optimizer for
        GP hyper-parameter optimization. It must include the following keys:
'optimizer_name', 'learning_rate', 'steps_per_update', and optionally
other parameters to initialize the optimizer.
offset: initial guess of the trainable scalar mean hyper-parameter if
provided. Otherwise, it is assumed to be fixed at zero.
dtype: float type of the model.
"""
# Define the GP.
if offset is None:
self._offset = tf.Variable(0., dtype=dtype, trainable=False,
name='offset')
else:
self._offset = tf.Variable(offset, dtype=dtype, trainable=True,
name='offset')
self._log_obs_var = tf.Variable(
np.log(observation_noise_variance), dtype=dtype, trainable=True,
name='log_obs_var')
self._obs_var = tfp.util.DeferredTensor(self._log_obs_var, tf.math.exp)
self._observation_noise_variance_prior = observation_noise_variance_prior
# Make sure side_obs has 2 dimensions.
side_obs = np.asarray(side_observations)
if side_obs.ndim == 1:
side_obs = side_obs[None, :]
if tie_side_observations_variance_with_main_observations:
# Tie the side observation variance with the main observation variance.
      # Other side observation variance related arguments are ignored.
self._log_side_obs_var = None
self._side_obs_var = self._obs_var
else:
side_obs_var = np.asarray(side_observations_variance)
if (side_obs_var.ndim > 1 or
(side_obs_var.ndim == 1 and side_obs_var.size != side_obs.shape[0])):
raise ValueError('side_observations_variance should be either a scalar '
'or a sequence with the same length as the number of '
'sources.')
if side_observations_variance_trainable:
# If side_observations_variance is a scalar and
        # tie_side_observations_variance_along_sources is False, make a vector
# of trainable variance, one per side observation source.
if (side_obs_var.size == 1 and
not tie_side_observations_variance_along_sources):
side_obs_var = side_obs_var.ravel()[0] * np.ones(
(side_obs.shape[0], 1))
self._log_side_obs_var = tf.Variable(
np.log(side_obs_var), dtype=dtype, trainable=True,
name='log_side_obs_var')
self._side_obs_var = tfp.util.DeferredTensor(self._log_side_obs_var,
tf.math.exp)
else:
self._log_side_obs_var = None
self._side_obs_var = side_obs_var
self._side_observations_variance_prior = side_observations_variance_prior
gp = gp_models.GaussianProcessWithSideObs(
num_indices=num_arms,
kernel=kernel,
offset=self._offset,
variance=self._obs_var,
side_observations=side_obs,
side_observations_variance=self._side_obs_var)
super().__init__(num_arms=num_arms,
kernel=kernel,
gp=gp,
optimizer_config=optimizer_config,
dtype=dtype)
    # Mark the model for update because side observations were added.
self._to_update = True
@property
def trainable_variables(self):
possible_trainable_variables = (
self._offset,
self._log_obs_var,
self._log_side_obs_var) + self._kernel.trainable_variables
return tuple(x for x in possible_trainable_variables
if x is not None and x.trainable)
def _regularization_loss(self):
# Loss for offset.
loss_offset = 0.
# Loss for observation noise variance: inverse Gamma distribution.
prior = self._observation_noise_variance_prior
if self._log_obs_var.trainable and prior is not None and prior['use_prior']:
inv_gamma = tfp.distributions.InverseGamma(prior['alpha'], prior['beta'])
loss_obs_var = -inv_gamma.log_prob(self._obs_var)
else:
loss_obs_var = 0.
# Loss for side observation noise variance: inverse Gamma distribution.
prior = self._side_observations_variance_prior
if (self._log_side_obs_var is not None and
prior is not None and prior['use_prior']):
inv_gamma = tfp.distributions.InverseGamma(prior['alpha'], prior['beta'])
loss_side_obs_var = -tf.reduce_sum(inv_gamma.log_prob(self._side_obs_var))
else:
loss_side_obs_var = 0.
# Loss for kernel variables.
if hasattr(self._kernel, 'regularization_loss'):
loss_kernel = self._kernel.regularization_loss()
else:
loss_kernel = 0.
return loss_offset + loss_obs_var + loss_side_obs_var + loss_kernel
def get_observation_noise_variance(self):
return self._obs_var.numpy()
def get_side_observation_noise_variance(self):
return self._side_obs_var.numpy()
def get_offset(self):
return self._offset.numpy()
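# Accepted `side_observations` shapes (a sketch): one source can be passed as a
# flat sequence of length num_arms; multiple sources (e.g. several OPE
# estimates) as a (num_sources, num_arms) array. Both are normalized to 2-D in
# `__init__` above.
#
#   single_source = [0.1, 0.3, 0.2]   # num_arms = 3, one source.
#   two_sources = [[0.1, 0.3, 0.2],   # Source 0.
#                  [0.0, 0.4, 0.1]]   # Source 1.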
| active_ops-main | multiarm_model.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| dm_robotics-main | __init__.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate a requirements.txt file with the artifacts in ../dist/.
This ensures tox/pip will test with these, rather than with some from
pypi. This currently assumes that ../dist only contains one version of
each dm_robotics library.
"""
import glob
import os
import pathlib
_CURRENT_FILE_DIR = pathlib.Path(os.path.dirname(os.path.realpath(__file__)))
if __name__ == '__main__':
with open(_CURRENT_FILE_DIR / 'requirements.txt', 'w') as f:
for artifact in glob.glob('../dist/*'):
f.write(artifact + os.linesep)
| dm_robotics-main | py/integration_test/generate_requirements_txt.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for transformations."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_robotics.transformations import transformations
import numpy as np
_NUM_RANDOM_SAMPLES = 1000
def _vel_transform_kdl(ht, t):
# Sample implementation of velocity transform, from KDL source.
r = ht[0:3, 0:3]
p = ht[0:3, 3]
tlin = t[0:3]
tang = t[3:6]
out_ang = r.dot(tang)
out_lin = r.dot(tlin) + np.cross(p, out_ang)
return np.concatenate([out_lin, out_ang])
def _force_transform_kdl(ht, w):
# Sample implementation of force transform, from KDL source.
r = ht[0:3, 0:3]
p = ht[0:3, 3]
f = w[0:3]
t = w[3:6]
out_lin = r.dot(f)
out_ang = np.cross(p, out_lin) + r.dot(t)
return np.concatenate([out_lin, out_ang])
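# Both KDL-style helpers above use the [linear; angular] ordering for
# 6-vectors. For a homogeneous transform H = [[R, p], [0, 1]] they compute:
#   twist:  v' = R.v + p x (R.w),  w' = R.w
#   wrench: f' = R.f,              t' = p x (R.f) + R.t
# The tests below check these against transformations.velocity_transform and
# transformations.force_transform.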
class TransformationsTest(parameterized.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._random_state = np.random.RandomState()
@parameterized.parameters(
{'cfg': (1, 2, 3, np.radians(45), np.radians(45), np.radians(45))},
{'cfg': (0, 0, 0, np.radians(45), np.radians(45), np.radians(45))}
)
def test_homogeneous_matrix_construction(self, cfg):
x, y, z, rx, ry, rz = cfg
ht = transformations.poseuler_to_hmat(
np.array([x, y, z, rx, ry, rz]), 'ZYZ')
rotx = transformations.rotation_z_axis(rx, True)
roty = transformations.rotation_y_axis(ry, True)
rotz = transformations.rotation_z_axis(rz, True)
ht_target = transformations.pos_to_hmat(
np.array([x, y, z])).dot(rotx).dot(roty).dot(rotz)
np.testing.assert_allclose(ht, ht_target)
@parameterized.parameters(
{'a': [1, 2, 3], 'b': [0, 1, 0]},
{'a': [0, 1, 2], 'b': [-2, 1, 0]}
)
def test_cross_product(self, a, b):
npver = np.cross(a, b)
matver = transformations.cross_mat_from_vec3(a).dot(b)
np.testing.assert_allclose(npver, matver)
@parameterized.parameters(
{
'quat': [-0.41473841, 0.59483601, -0.45089078, 0.52044181],
'truemat':
np.array([[0.05167565, -0.10471773, 0.99315851],
[-0.96810656, -0.24937912, 0.02407785],
[0.24515162, -0.96272751, -0.11426475]])
},
{
'quat': [0.08769298, 0.69897558, 0.02516888, 0.7093022],
'truemat':
np.array([[-0.00748615, -0.08921678, 0.9959841],
[0.15958651, -0.98335294, -0.08688582],
[0.98715556, 0.15829519, 0.02159933]])
},
{
'quat': [0.58847272, 0.44682507, 0.51443343, -0.43520737],
'truemat':
np.array([[0.09190557, 0.97193884, 0.21653695],
[-0.05249182, 0.22188379, -0.97365918],
[-0.99438321, 0.07811829, 0.07141119]])
},
)
def test_quat_to_mat(self, quat, truemat):
"""Tests hard-coded quat-mat pairs generated from mujoco if mj not avail."""
mat = transformations.quat_to_mat(quat)
np.testing.assert_allclose(mat[0:3, 0:3], truemat, atol=1e-7)
@parameterized.parameters(
{
'pos': [0.34243, -0.8763, 0.01273],
'quat': [-0.41473841, 0.59483601, -0.45089078, 0.52044181],
'hmat':
np.array([[0.05167565, -0.10471773, 0.99315851, 0.34243],
[-0.96810656, -0.24937912, 0.02407785, -0.8763],
[0.24515162, -0.96272751, -0.114264750, 0.01273],
[0.0, 0.0, 0.0, 1.0]])
},
{
'pos': [1.693, 0.9734, -2.7178],
'quat': [0.08769298, 0.69897558, 0.02516888, 0.7093022],
'hmat':
np.array([[-0.00748615, -0.08921678, 0.9959841, 1.693],
[0.15958651, -0.98335294, -0.08688582, 0.9734],
[0.98715556, 0.15829519, 0.02159933, -2.7178],
[0.0, 0.0, 0.0, 1.0]])
},
{
'pos': [-0.7298, -0.1995, 0.3666],
'quat': [0.58847272, 0.44682507, 0.51443343, -0.43520737],
'hmat':
np.array([[0.09190557, 0.97193884, 0.21653695, -0.7298],
[-0.05249182, 0.22188379, -0.97365918, -0.1995],
[-0.99438321, 0.07811829, 0.07141119, 0.3666],
[0.0, 0.0, 0.0, 1.0]])
},
)
def test_pos_quat_to_hmat_and_inverse(self, pos, quat, hmat):
"""Tests hard-coded pos-quat-hmat triples if mj not avail."""
mat = transformations.pos_quat_to_hmat(pos, quat)
np.testing.assert_allclose(mat, hmat, atol=1e-7)
np.testing.assert_allclose(mat[3], [0, 0, 0, 1], atol=1e-7)
# Test inverse
new_pos, new_quat = transformations.hmat_to_pos_quat(hmat)
np.testing.assert_allclose(new_pos, pos, atol=1e-7)
self.assertTrue(
np.allclose(new_quat, quat, atol=1e-7) or
np.allclose(-new_quat, quat, atol=1e-7))
@parameterized.parameters(
{
'twist': [1, 0, 0, 0, 0, 0],
'cfg': (0, 0, 0, np.radians(0), np.radians(90), np.radians(0))
}, {
'twist': [1, 2, 3, -3, 2, -1],
'cfg': (-1, 2, 3, np.radians(30), np.radians(60), np.radians(90))
})
def test_velocity_transform_special(self, twist, cfg):
# Test for special values that often cause numerical issues.
x, y, z, rx, ry, rz = cfg
ht = transformations.poseuler_to_hmat(np.array([x, y, z, rx, ry, rz]),
'ZYZ')
tv = transformations.velocity_transform(ht)
tt = tv.dot(twist)
v2kdl = _vel_transform_kdl(ht, twist)
np.testing.assert_allclose(tt, v2kdl)
@parameterized.parameters(
{'wrench': [1, 0, 0, 0, 0, 0],
'cfg': (0, 0, 0, np.radians(0), np.radians(90), np.radians(0))},
{'wrench': [1, 2, 3, -3, 2, -1],
'cfg': (-1, 2, 3, np.radians(30), np.radians(60), np.radians(90))}
)
def test_force_transform_special(self, wrench, cfg):
# Test for special values that often cause numerical issues.
x, y, z, rx, ry, rz = cfg
ht = transformations.poseuler_to_hmat(
np.array([x, y, z, rx, ry, rz]), 'XYZ')
tw = transformations.force_transform(ht)
wt = tw.dot(wrench)
w2kdl = _force_transform_kdl(ht, wrench)
np.testing.assert_allclose(wt, w2kdl)
@parameterized.parameters(
{'state': [0, 0, 0]},
{'state': [1.0, 2.0, np.radians(60)]}
)
def test_homogeneous_conversion_2d_special(self, state):
# Test for special values that often cause numerical issues.
x = np.array(state)
ht = transformations.postheta_to_matrix_2d(x)
x2 = transformations.matrix_to_postheta_2d(ht)
np.testing.assert_allclose(x, x2)
@parameterized.parameters(
{'cfg': (0, 0, 0), 'vel': [0, 0, 0]},
{'cfg': (1, -0.5, np.radians(-30)), 'vel': [-1, 1.5, 0.5]}
)
def test_velocity_transform_2d_special(self, cfg, vel):
# Test for special values that often cause numerical issues.
x, y, theta = cfg
ht = transformations.postheta_to_matrix_2d(np.array([x, y, theta]))
v_conv = transformations.velocity_transform_2d(ht, vel)
v_oper = transformations.velocity_transform_2d(ht).dot(vel)
np.testing.assert_allclose(v_conv, v_oper)
@parameterized.parameters(
{'cfg': (0, 0, 0), 'force': [0, 0, 0]},
{'cfg': (1, -0.5, np.radians(-30)), 'force': [-1, 2, 3.22]}
)
def test_force_transform_2d_special(self, cfg, force):
# Test for special values that often cause numerical issues.
x, y, theta = cfg
ht = transformations.postheta_to_matrix_2d(np.array([x, y, theta]))
ft_conv = transformations.force_transform_2d(ht, force)
ft_oper = transformations.force_transform_2d(ht).dot(force)
np.testing.assert_allclose(ft_conv, ft_oper)
@parameterized.parameters(
{'angles': (0, 0, 0)},
{'angles': (-0.1, 0.4, -1.3)}
)
def test_euler_to_rmat_special(self, angles):
# Test for special values that often cause numerical issues.
r1, r2, r3 = angles
orderings = ('XYZ', 'XYX', 'XZY', 'ZYX', 'YZX', 'ZXY', 'YXZ', 'XZX', 'YXY',
'YZY', 'ZXZ', 'ZYZ')
for ordering in orderings:
r = transformations.euler_to_rmat(np.array([r1, r2, r3]), ordering)
euler_angles = transformations.rmat_to_euler(r, ordering)
np.testing.assert_allclose(euler_angles, [r1, r2, r3])
@parameterized.parameters(
{'rot': (np.pi, 0, 0)},
{'rot': (0, 0, 0)},
{'rot': (np.radians(10), np.radians(-30), np.radians(45))},
{'rot': (np.radians(45), np.radians(45), np.radians(45))},
{'rot': (0, np.pi, 0)},
{'rot': (0, 0, np.pi)},
)
def test_rmat_axis_angle_conversion_special(self, rot):
# Test for special values that often cause numerical issues.
forward = transformations.euler_to_rmat(np.array(rot), ordering='ZYZ')
w = transformations.rmat_to_axisangle(forward)
backward = transformations.axisangle_to_rmat(w)
np.testing.assert_allclose(forward, backward)
@parameterized.parameters(
{'rot6': np.zeros(6)},
{'rot6': np.ones(6)},
{'rot6': np.ones(6) * 1e-8},
{'rot6': np.array([1., 2., 3., 0., 0., 0.])},
{'rot6': np.array([0., 0., 0., 1., 2., 3.])},
{'rot6': np.array([1., 2., 3., 4., 5., 6.])},
{'rot6': np.array([1., 2., 3., 4., 5., 6.]) * -1},
)
def test_rot6_to_rmat(self, rot6):
# Test that rot6 converts to valid rotations for arbitrary inputs.
rmat = transformations.rot6_to_rmat(np.array(rot6))
should_be_identity = rmat.T @ rmat
np.testing.assert_allclose(should_be_identity, np.eye(3), atol=1e-15)
@parameterized.parameters(
{'euler': (np.pi, 0, 0)},
{'euler': (0, 0, 0)},
{'euler': (np.radians(10), np.radians(-30), np.radians(45))},
{'euler': (np.radians(45), np.radians(45), np.radians(45))},
{'euler': (0, np.pi, 0)},
{'euler': (0, 0, np.pi)},
)
def test_rmat_rot6_conversion_special(self, euler):
# Test for special values that often cause numerical issues.
rmat = transformations.euler_to_rmat(np.array(euler), ordering='ZYZ')
rot6 = transformations.rmat_to_rot6(rmat)
recovered_rmat = transformations.rot6_to_rmat(rot6)
np.testing.assert_allclose(rmat, recovered_rmat)
def test_rmat_rot6_conversion_random(self):
# Tests cycle-consistency for a set of random valid orientations.
for _ in range(_NUM_RANDOM_SAMPLES):
quat = self._random_quaternion()
original_rmat = transformations.quat_to_mat(quat)[:3, :3]
gs = transformations.rmat_to_rot6(original_rmat)
recovered_rmat = transformations.rot6_to_rmat(gs)
np.testing.assert_allclose(original_rmat, recovered_rmat)
@parameterized.parameters(
{'pos': (0, 0, 0), 'rot': (0, 0, 0)},
{'pos': (1, 2, 3), 'rot': (0, 0, 0)},
{'pos': (1, 2, 3), 'rot': (np.pi, 0., 0.)},
{'pos': (1, 2, 3),
'rot': (np.radians(30), np.radians(45), np.radians(60))}
)
def test_hmat_twist_conversion(self, pos, rot):
x, y, z = pos
r1, r2, r3 = rot
ordering = 'XYZ'
poseuler = np.array([x, y, z, r1, r2, r3])
ht = transformations.poseuler_to_hmat(
np.array([x, y, z, r1, r2, r3]), ordering)
xi = transformations.hmat_to_twist(ht)
ht2 = transformations.twist_to_hmat(xi)
poseuler2 = transformations.hmat_to_poseuler(ht2, ordering)
np.testing.assert_allclose(ht, ht2)
np.testing.assert_allclose(poseuler, poseuler2)
@parameterized.parameters(
{'pos': (0.1, 0.2, 0.3), 'quat': (1., 0., 0., 0.)},
{'pos': (0.1, 0.2, 0.3), 'quat': (0., 1., 0., 0.)},
{'pos': (0.1, 0.2, 0.3), 'quat': (0., 0., 1., 0.)},
{'pos': (0.1, 0.2, 0.3), 'quat': (0., 0., 0., 1.)},
{'pos': (0.1, 0.2, 0.3), 'quat': (0.5, 0.5, 0.5, 0.5)},
)
def test_hmat_twist_conversion_from_quat(self, pos, quat):
ht = transformations.quat_to_mat(quat)
ht[0:3, 3] = pos
xi = transformations.hmat_to_twist(ht)
ht2 = transformations.twist_to_hmat(xi)
quat2 = transformations.mat_to_quat(ht2)
pos2 = ht2[:3, 3]
    np.testing.assert_allclose(ht, ht2, atol=1e-7)  # atol relaxes precision.
np.testing.assert_allclose(pos, pos2)
self.assertTrue(np.allclose(quat, quat2) or np.allclose(quat, -quat2))
@parameterized.parameters(
{'pos': (0, 0, 0), 'rot': (0, 0, 0)},
{'pos': (1, 2, 3), 'rot': (0, 0, 0)},
{'pos': (-1, -2, -3),
'rot': (np.radians(30), np.radians(-45), np.radians(60))}
)
def test_se3_integration(self, pos, rot):
    # Tests whether successive applications of a homogeneous transform are
    # equivalent to scaling the magnitude of the exponential-coordinate
    # representation of that transform.
    # This is a useful result which illustrates that the twist parameterizes
    # the 6D manifold of the transform H compactly, and can be used to
    # generalize or interpolate the effect of a transform over time.
x, y, z = pos
r1, r2, r3 = rot
n = 3
ordering = 'XYZ'
ht = transformations.poseuler_to_hmat(
np.array([x, y, z, r1, r2, r3]), ordering)
xi = transformations.hmat_to_twist(ht)
    # Verify that applying H n times is equivalent to scaling the twist by n.
    ht2_mult = np.linalg.matrix_power(ht, n)
ht2_exp = transformations.twist_to_hmat(xi * n)
poseuler2_mult = transformations.hmat_to_poseuler(ht2_mult, ordering)
poseuler2_exp = transformations.hmat_to_poseuler(ht2_exp, ordering)
np.testing.assert_allclose(ht2_mult, ht2_exp)
np.testing.assert_allclose(poseuler2_mult, poseuler2_exp)
@parameterized.parameters(
{
'axisangle': np.array([0, 0, 0])
},
{'axisangle': np.array([np.pi / 6, -np.pi / 4, np.pi * 2. / 3])},
{'axisangle': np.array([np.pi / 2, np.pi, np.pi / 2])},
{'axisangle': np.array([np.pi, np.pi, np.pi])},
{'axisangle': np.array([-np.pi, -np.pi, -np.pi])},
)
def test_axis_angle_to_quat_special(self, axisangle):
# Test for special values that often cause numerical issues.
rmat = transformations.axisangle_to_rmat(axisangle)
quat_true = transformations.mat_to_quat(rmat)
quat_test = transformations.axisangle_to_quat(axisangle)
self.assertTrue(
np.allclose(quat_true, quat_test) or np.allclose(quat_true, -quat_test))
def test_axis_angle_to_quat_random(self):
for _ in range(_NUM_RANDOM_SAMPLES):
axisangle = self._random_state.rand(3)
rmat = transformations.axisangle_to_rmat(axisangle)
quat_true = transformations.mat_to_quat(rmat)
quat_test = transformations.axisangle_to_quat(axisangle)
self.assertTrue(
np.allclose(quat_true, quat_test) or
np.allclose(quat_true, -quat_test))
@parameterized.parameters(
{'euler_vec': np.array([0, 0, 0])},
{'euler_vec': np.array([np.pi / 6, -np.pi / 4, np.pi * 2. / 3])},
{'euler_vec': np.array([np.pi / 2, np.pi, np.pi / 2])},
{'euler_vec': np.array([np.pi, np.pi, np.pi])},
{'euler_vec': np.array([-np.pi, -np.pi, -np.pi])},
)
def test_quat_to_axis_angle_special(self, euler_vec):
# Test for special values that often cause numerical issues.
rmat = transformations.euler_to_rmat(euler_vec, ordering='XYZ')
quat = transformations.euler_to_quat(euler_vec, ordering='XYZ')
axisangle_true = transformations.rmat_to_axisangle(rmat)
axisangle_test = transformations.quat_to_axisangle(quat)
np.testing.assert_allclose(axisangle_true, axisangle_test)
@parameterized.parameters(
{'quat': np.array([0., 1., 2., 3.]),
'expected_quat': np.array([0., 1., 2., 3.])},
{'quat': np.array([1., 2., 3., 4.]),
'expected_quat': np.array([1., 2., 3., 4.])},
{'quat': np.array([-1., 2., 3., 4.]),
'expected_quat': np.array([1., -2., -3., -4.])},
{'quat': np.array([-1., -2., -3., -4.]),
'expected_quat': np.array([1., 2., 3., 4.])},
{'quat': np.array([
[0., 1., 2., 3.],
[1., 2., 3., 4.],
[-1., 2., 3., 4.],
[-1., -2., -3., -4.]]),
'expected_quat': np.array([
[0., 1., 2., 3.],
[1., 2., 3., 4.],
[1., -2., -3., -4.],
[1., 2., 3., 4.]])},
)
def test_quat_leading_positive(self, quat, expected_quat):
np.testing.assert_array_equal(
transformations.positive_leading_quat(quat), expected_quat)
def test_quat_to_axis_angle_random(self):
for _ in range(_NUM_RANDOM_SAMPLES):
quat = self._random_quaternion()
mat = transformations.quat_to_mat(quat)
axisangle_true = transformations.rmat_to_axisangle(mat[0:3, 0:3])
axisangle_test = transformations.quat_to_axisangle(quat)
np.testing.assert_allclose(axisangle_true, axisangle_test)
def test_quat_mul_vs_mat_mul_random(self):
for _ in range(_NUM_RANDOM_SAMPLES):
quat1 = self._random_quaternion()
quat2 = self._random_quaternion()
rmat1 = transformations.quat_to_mat(quat1)[0:3, 0:3]
rmat2 = transformations.quat_to_mat(quat2)[0:3, 0:3]
quat_prod = transformations.quat_mul(quat1, quat2)
rmat_prod_q = transformations.quat_to_mat(quat_prod)[0:3, 0:3]
rmat_prod = rmat1.dot(rmat2)
np.testing.assert_allclose(rmat_prod, rmat_prod_q)
def test_quat_mul_vs_mat_mul_random_batched(self):
quat1 = np.stack(
[self._random_quaternion() for _ in range(_NUM_RANDOM_SAMPLES)], axis=0)
quat2 = np.stack(
[self._random_quaternion() for _ in range(_NUM_RANDOM_SAMPLES)], axis=0)
quat_prod = transformations.quat_mul(quat1, quat2)
for k in range(_NUM_RANDOM_SAMPLES):
rmat1 = transformations.quat_to_mat(quat1[k])[0:3, 0:3]
rmat2 = transformations.quat_to_mat(quat2[k])[0:3, 0:3]
rmat_prod_q = transformations.quat_to_mat(quat_prod[k])[0:3, 0:3]
rmat_prod = rmat1.dot(rmat2)
np.testing.assert_allclose(rmat_prod, rmat_prod_q, atol=1e-5)
def test_quat_slerp_random(self):
for _ in range(_NUM_RANDOM_SAMPLES):
quat0 = self._random_quaternion()
quat1 = self._random_quaternion()
# Test poles
np.testing.assert_allclose(
transformations.quat_slerp(quat0, quat1, 0.0), quat0)
np.testing.assert_allclose(
transformations.quat_slerp(quat0, quat1, 1.0), quat1)
# Test slerp gives the same as rotating.
full_angle = self._random_state.uniform(0, 90)
frac = self._random_state.uniform(0, 1)
# Full rotation and partial rotations
full_quat_rot = transformations.euler_to_quat(
[np.radians(full_angle), 0., 0.])
partial_quat_rot = transformations.euler_to_quat(
[np.radians(full_angle) * frac, 0., 0.])
# Rotate the quaternion partially and check it is equivalent to slerp.
full_rotated_quat = transformations.quat_mul(quat0, full_quat_rot)
partial_rotated_quat = transformations.quat_mul(quat0, partial_quat_rot)
slerp_quat = transformations.quat_slerp(quat0, full_rotated_quat, frac)
np.testing.assert_allclose(partial_rotated_quat, slerp_quat, atol=1e-4)
# Test that it takes the shortest path
full_angle = self._random_state.uniform(0, 90)
frac = self._random_state.uniform(0, 1)
# Generate target quat by rotating fractional-angle around X from quat0.
quat_fract_rot = transformations.euler_to_quat(
[np.radians(-full_angle * frac), 0., 0.])
quat_interp_true = transformations.quat_mul(quat0, quat_fract_rot)
# Generate quat at target angle and interpolate using slerp.
quat_rot = transformations.euler_to_quat(
[np.radians(360. - full_angle), 0., 0.])
quat2 = transformations.quat_mul(quat0, quat_rot)
quat_interp_slerp = transformations.quat_slerp(quat0, quat2, frac)
      # Generate alternative interpolated quat by scaling log along relative quat
quat_interp_log = (
transformations.quat_mul(
quat0,
transformations.quat_exp(
transformations.quat_log(
transformations.quat_diff_passive(quat0, quat2)) * frac)))
self.assertTrue(
np.allclose(quat_interp_slerp, quat_interp_true, atol=1e-4) or
np.allclose(quat_interp_slerp, -1 * quat_interp_true, atol=1e-4))
self.assertTrue(
np.allclose(quat_interp_log, quat_interp_true, atol=1e-4) or
np.allclose(quat_interp_log, -1 * quat_interp_true, atol=1e-4))
def test_quat_diff_passive_random(self):
for _ in range(_NUM_RANDOM_SAMPLES):
# Get the source and target quaternions and their passive difference.
source = self._random_quaternion()
target = self._random_quaternion()
diff = transformations.quat_diff_passive(source, target)
# Take a vector expressed in the target frame and express it in the
# source frame using the difference.
vec_t = np.random.random(3)
vec_s = transformations.quat_rotate(diff, vec_t)
# Bring them both in the world frame and check they are the same.
vec_w1 = transformations.quat_rotate(source, vec_s)
vec_w2 = transformations.quat_rotate(target, vec_t)
np.testing.assert_allclose(vec_w1, vec_w2)
def test_quat_diff_passive_random_batched(self):
# Get the source and target quaternions and their passive difference.
source = np.stack(
[self._random_quaternion() for _ in range(_NUM_RANDOM_SAMPLES)], axis=0)
target = np.stack(
[self._random_quaternion() for _ in range(_NUM_RANDOM_SAMPLES)], axis=0)
diff = transformations.quat_diff_passive(source, target)
for k in range(_NUM_RANDOM_SAMPLES):
# Take a vector expressed in the target frame and express it in the
# source frame using the difference.
vec_t = np.random.random(3)
vec_s = transformations.quat_rotate(diff[k], vec_t)
# Bring them both in the world frame and check they are the same.
vec_w1 = transformations.quat_rotate(source[k], vec_s)
vec_w2 = transformations.quat_rotate(target[k], vec_t)
np.testing.assert_allclose(vec_w1, vec_w2)
def test_quat_diff_active_random(self):
for _ in range(_NUM_RANDOM_SAMPLES):
# Get the source and target quaternions and their active difference.
source = self._random_quaternion()
target = self._random_quaternion()
diff = transformations.quat_diff_active(source, target)
# Take a vector that has been rotated by source quaternion and rotate it
# by target quaternion by applying the difference.
vec_rotated_s = np.random.random(3)
vec_rotated_t = transformations.quat_rotate(diff, vec_rotated_s)
# Invert the rotations on both vectors and ensure the final vector is the
# same.
vec_1 = transformations.quat_rotate(
transformations.quat_inv(source), vec_rotated_s)
vec_2 = transformations.quat_rotate(
transformations.quat_inv(target), vec_rotated_t)
np.testing.assert_allclose(vec_1, vec_2)
def test_quat_diff_active_random_batched(self):
# Get the source and target quaternions and their passive difference.
source = np.stack(
[self._random_quaternion() for _ in range(_NUM_RANDOM_SAMPLES)], axis=0)
target = np.stack(
[self._random_quaternion() for _ in range(_NUM_RANDOM_SAMPLES)], axis=0)
diff = transformations.quat_diff_active(source, target)
for k in range(_NUM_RANDOM_SAMPLES):
# Take a vector that has been rotated by source quaternion and rotate it
# by target quaternion by applying the difference.
vec_rotated_s = np.random.random(3)
vec_rotated_t = transformations.quat_rotate(diff[k], vec_rotated_s)
# Invert the rotations on both vectors and ensure the final vector is the
# same.
vec_1 = transformations.quat_rotate(
transformations.quat_inv(source[k]), vec_rotated_s)
vec_2 = transformations.quat_rotate(
transformations.quat_inv(target[k]), vec_rotated_t)
np.testing.assert_allclose(vec_1, vec_2)
def test_quat_dist_random(self):
for _ in range(_NUM_RANDOM_SAMPLES):
# test with normalized quaternions for stability of test
source = self._random_quaternion()
target = self._random_quaternion()
self.assertGreater(transformations.quat_dist(source, target), 0)
np.testing.assert_allclose(
transformations.quat_dist(source, source), 0, atol=1e-9)
def test_quat_dist_random_batched(self):
# Test batched quat dist
source_quats = np.stack(
[self._random_quaternion() for _ in range(_NUM_RANDOM_SAMPLES)], axis=0)
target_quats = np.stack(
[self._random_quaternion() for _ in range(_NUM_RANDOM_SAMPLES)], axis=0)
np.testing.assert_allclose(
transformations.quat_dist(source_quats, source_quats), 0, atol=1e-9)
np.testing.assert_equal(
transformations.quat_dist(source_quats, target_quats) > 0, 1)
@parameterized.parameters(
{'source': (1., 0., 0., 0.), 'target': (0., 1., 0., 0.), 'angle': np.pi},
{'source': (1., 0., 0., 0.),
'target': (0.86602540378, 0.5, 0., 0.),
'angle': np.pi / 3
},
{'source': (1., 0., 0., 0.),
'target': (1./ np.sqrt(2), 1./ np.sqrt(2), 0., 0.),
'angle': np.pi / 2},
{'source': np.array([
[1., 0., 0., 0.],
[1., 0., 0., 0.],
[1., 0., 0., 0.]]),
'target': np.array([
[0., 1., 0., 0.],
[0.86602540378, 0.5, 0., 0.],
[1./ np.sqrt(2), 1./ np.sqrt(2), 0., 0.]]),
'angle': np.array([np.pi, np.pi / 3, np.pi / 2])},
)
def test_quat_dist_deterministic(self, source, target, angle):
predicted_angle = transformations.quat_dist(source, target)
if np.asarray(source).ndim > 1:
self.assertSequenceAlmostEqual(angle, predicted_angle)
else:
self.assertAlmostEqual(angle, predicted_angle)
@parameterized.parameters(
{'rot': (np.pi, 0, 0), 'angle': np.pi},
{'rot': (0, 0, 0), 'angle': 0},
{'rot': (np.radians(10), np.radians(-30), np.radians(45)),
'angle': 0.9128419},
{'rot': (np.radians(45), np.radians(45), np.radians(45)),
'angle': 1.4975074},
{'rot': (0, np.pi, 0), 'angle': np.pi},
{'rot': (0, 0, np.pi), 'angle': np.pi},
{'rot': np.array([
[(0, np.pi, 0)],
[(0, 0, np.pi)]]), 'angle': np.array([np.pi, np.pi])},
)
def test_quat_angle(self, rot, angle):
# Test for special values that often cause numerical issues.
if np.asarray(rot).ndim > 1:
quat = np.stack([
transformations.euler_to_quat(np.array(roti), ordering='XYZ')
for roti in rot], axis=0)
computed_angle = transformations.quat_angle(quat)
self.assertSequenceAlmostEqual(angle, computed_angle)
else:
quat = transformations.euler_to_quat(np.array(rot), ordering='XYZ')
computed_angle = transformations.quat_angle(quat)
self.assertAlmostEqual(angle, computed_angle)
def test_quat_between_vectors_random(self):
# test quat_between_vectors with random vectors
for _ in range(_NUM_RANDOM_SAMPLES):
quat = self._random_quaternion()
source_vec = np.random.random(3)
target_vec = transformations.quat_rotate(quat, source_vec)
computed_quat = transformations.quat_between_vectors(
source_vec, target_vec)
computed_target = transformations.quat_rotate(computed_quat, source_vec)
np.testing.assert_allclose(target_vec, computed_target, atol=0.005)
def test_quat_between_vectors_inverse(self):
# test quat_between_vectors with inverse vectors
for _ in range(_NUM_RANDOM_SAMPLES):
source_vec = np.random.random(3)
target_vec = -source_vec
computed_quat = transformations.quat_between_vectors(source_vec,
target_vec)
computed_target = transformations.quat_rotate(computed_quat, source_vec)
np.testing.assert_allclose(computed_target, target_vec)
def test_quat_between_vectors_parallel(self):
# test quat_between_vectors with parallel vectors
for _ in range(_NUM_RANDOM_SAMPLES):
source_vec = np.random.random(3)
target_vec = source_vec
computed_quat = transformations.quat_between_vectors(source_vec,
target_vec)
computed_target = transformations.quat_rotate(computed_quat, source_vec)
np.testing.assert_allclose(computed_target, target_vec)
def test_quat_log_and_exp_random(self):
for _ in range(_NUM_RANDOM_SAMPLES):
quat = self._random_quaternion()
log_quat = transformations.quat_log(quat)
orig_quat = transformations.quat_exp(log_quat)
np.testing.assert_allclose(quat, orig_quat, atol=1e-07)
def test_quat_log_and_exp_random_batched(self):
# Test batching of quats
quat = np.stack(
        [self._random_quaternion() for _ in range(_NUM_RANDOM_SAMPLES)], axis=0)
log_quat = transformations.quat_log(quat)
orig_quat = transformations.quat_exp(log_quat)
np.testing.assert_allclose(quat, orig_quat, atol=1e-07)
def test_quat_integration(self):
for _ in range(_NUM_RANDOM_SAMPLES):
integrated_quat = self._random_quaternion()
target_quat = self._random_quaternion()
      # Because of the numerical approximations, we do 5 integration steps to
# reach the target and recompute the velocity to be applied at each step.
for _ in range(5):
# Get the quaternion difference between the current and target quat
diff = transformations.quat_diff_active(integrated_quat, target_quat)
# Get the angular velocity required to reach the target in one step
angle = transformations.quat_angle(diff)
axis = transformations.quat_axis(diff)
# Scale the velocity for numerical stability
vel = angle * axis * 0.999
integrated_quat = transformations.integrate_quat(integrated_quat, vel)
integrated_quat /= np.linalg.norm(integrated_quat)
self.assertTrue(
np.allclose(integrated_quat, target_quat, atol=1e-8) or
np.allclose(integrated_quat, -1 * target_quat, atol=1e-8))
def test_pos_rmat_to_hmat_batch(self):
test_pos_nobatch = transformations.pos_to_hmat([1., 2., 3.])
true_pos_nobatch = np.array([[1, 0, 0, 1.],
[0, 1, 0, 2.],
[0, 0, 1, 3.],
[0, 0, 0, 1.]])
np.testing.assert_allclose(test_pos_nobatch, true_pos_nobatch)
test_pos_batch = transformations.pos_to_hmat([
[1., 2., 3.], [4., 5., 6.]])
true_pos_batch = np.array([
[[1, 0, 0, 1.],
[0, 1, 0, 2.],
[0, 0, 1, 3.],
[0, 0, 0, 1.]],
[[1, 0, 0, 4.],
[0, 1, 0, 5.],
[0, 0, 1, 6.],
[0, 0, 0, 1.]],
])
np.testing.assert_allclose(test_pos_batch, true_pos_batch)
test_rmat_nobatch = transformations.rmat_to_hmat(np.eye(3))
true_rmat_nobatch = np.array([[1, 0, 0, 0.],
[0, 1, 0, 0.],
[0, 0, 1, 0.],
[0, 0, 0, 1.]])
np.testing.assert_allclose(test_rmat_nobatch, true_rmat_nobatch)
test_rmat_batch = transformations.rmat_to_hmat(
np.array([np.eye(3), np.eye(3)]))
true_rmat_batch = np.array([
[[1, 0, 0, 0.],
[0, 1, 0, 0.],
[0, 0, 1, 0.],
[0, 0, 0, 1.]],
[[1, 0, 0, 0.],
[0, 1, 0, 0.],
[0, 0, 1, 0.],
[0, 0, 0, 1.]],
])
np.testing.assert_allclose(test_rmat_batch, true_rmat_batch)
def _random_quaternion(self):
"""Returns a normalized quaternion."""
rand = self._random_state.rand(4)
return rand / np.linalg.norm(rand)
def _normalize_and_make_positive_leading(quat):
quat = quat.copy()
quat /= np.linalg.norm(quat)
if quat[0] < 0:
quat = -1 * quat
return quat
if __name__ == '__main__':
absltest.main()
| dm_robotics-main | py/transformations/transformations_test.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Type alias definitions for dm_robotics.transformations."""
from typing import Any, Union
# pylint:disable=g-import-not-at-top
try:
# This is only available for NumPy >= 1.20.
import numpy.typing
ArrayLike = numpy.typing.ArrayLike
except ImportError:
ArrayLike = Any
QuatArray = ArrayLike # [...,4] array of quaternions (w, i, j, k)
# [...,3] arrays of Euler angles
EulerArray = {
'XYZ': ArrayLike,
'XYX': ArrayLike,
'XZY': ArrayLike,
'ZYX': ArrayLike,
'YZX': ArrayLike,
'ZXY': ArrayLike,
'YXZ': ArrayLike,
'XZX': ArrayLike,
'YXY': ArrayLike,
'YZY': ArrayLike,
'ZXZ': ArrayLike,
'ZYZ': ArrayLike,
}
SomeEulerArray = Union[EulerArray['XYZ'], EulerArray['XYX'], EulerArray['XZY'],
EulerArray['ZYX'], EulerArray['YZX'], EulerArray['ZXY'],
EulerArray['YXZ'], EulerArray['XZX'], EulerArray['YXY'],
EulerArray['YZY'], EulerArray['ZXZ'], EulerArray['ZYZ'],]
AxisAngleArray = ArrayLike # [...,3] array of axis-angle rotations
PositionArray = ArrayLike # [...,3] array of 3d position vectors
AngVelArray = ArrayLike # [...,3] array of 3d angular velocity vectors
RotationMatrix = ArrayLike # [3,3] rotation matrix
RotationMatrix2d = ArrayLike # [2,2] rotation matrix
HomogeneousMatrix = ArrayLike # [4,4] homogeneous transformation matrix
HomogeneousMatrix2d = ArrayLike # [3,3] homogeneous matrix
Twist = ArrayLike # [6] twist
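# Example annotation using these aliases (a sketch; `rotate` is hypothetical):
#
#   def rotate(quat: QuatArray, vec: PositionArray) -> PositionArray:
#     ...
#
# Because ArrayLike degrades to Any on NumPy < 1.20, these aliases serve as
# documentation more than as strict static types.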
| dm_robotics-main | py/transformations/_types.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rigid-body transformations including velocities and static forces."""
# pylint: disable=unused-import
from dm_robotics.transformations._transformations import axisangle_to_euler
from dm_robotics.transformations._transformations import axisangle_to_quat
from dm_robotics.transformations._transformations import axisangle_to_rmat
from dm_robotics.transformations._transformations import cross_2d
from dm_robotics.transformations._transformations import cross_mat_from_vec3
from dm_robotics.transformations._transformations import euler_to_axisangle
from dm_robotics.transformations._transformations import euler_to_quat
from dm_robotics.transformations._transformations import euler_to_rmat
from dm_robotics.transformations._transformations import force_transform
from dm_robotics.transformations._transformations import force_transform_2d
from dm_robotics.transformations._transformations import hmat_inv
from dm_robotics.transformations._transformations import hmat_to_pos_quat
from dm_robotics.transformations._transformations import hmat_to_poseuler
from dm_robotics.transformations._transformations import hmat_to_twist
from dm_robotics.transformations._transformations import integrate_hmat
from dm_robotics.transformations._transformations import integrate_quat
from dm_robotics.transformations._transformations import mat_to_quat
from dm_robotics.transformations._transformations import matrix_to_postheta_2d
from dm_robotics.transformations._transformations import pos_quat_to_hmat
from dm_robotics.transformations._transformations import pos_to_hmat
from dm_robotics.transformations._transformations import poseuler_to_hmat
from dm_robotics.transformations._transformations import positive_leading_quat
from dm_robotics.transformations._transformations import postheta_to_matrix_2d
from dm_robotics.transformations._transformations import quat_angle
from dm_robotics.transformations._transformations import quat_axis
from dm_robotics.transformations._transformations import quat_between_vectors
from dm_robotics.transformations._transformations import quat_conj
from dm_robotics.transformations._transformations import quat_diff_active
from dm_robotics.transformations._transformations import quat_diff_passive
from dm_robotics.transformations._transformations import quat_dist
from dm_robotics.transformations._transformations import quat_exp
from dm_robotics.transformations._transformations import quat_inv
from dm_robotics.transformations._transformations import quat_log
from dm_robotics.transformations._transformations import quat_mul
from dm_robotics.transformations._transformations import quat_rotate
from dm_robotics.transformations._transformations import quat_slerp
from dm_robotics.transformations._transformations import quat_to_axisangle
from dm_robotics.transformations._transformations import quat_to_euler
from dm_robotics.transformations._transformations import quat_to_mat
from dm_robotics.transformations._transformations import rmat_to_axisangle
from dm_robotics.transformations._transformations import rmat_to_euler
from dm_robotics.transformations._transformations import rmat_to_hmat
from dm_robotics.transformations._transformations import rmat_to_rot6
from dm_robotics.transformations._transformations import rot6_to_rmat
from dm_robotics.transformations._transformations import rotate_vec6
from dm_robotics.transformations._transformations import rotation_matrix_2d
from dm_robotics.transformations._transformations import rotation_x_axis
from dm_robotics.transformations._transformations import rotation_y_axis
from dm_robotics.transformations._transformations import rotation_z_axis
from dm_robotics.transformations._transformations import twist_to_hmat
from dm_robotics.transformations._transformations import velocity_transform
from dm_robotics.transformations._transformations import velocity_transform_2d
# pytype: disable=import-error
# pylint: disable=g-import-not-at-top,reimported
try:
# Use faster C extension versions if _transformations_quat is available.
from dm_robotics.transformations._transformations_quat import axisangle_to_quat
from dm_robotics.transformations._transformations_quat import hmat_to_pos_quat
from dm_robotics.transformations._transformations_quat import integrate_quat
from dm_robotics.transformations._transformations_quat import mat_to_quat
from dm_robotics.transformations._transformations_quat import pos_quat_to_hmat
from dm_robotics.transformations._transformations_quat import quat_angle
from dm_robotics.transformations._transformations_quat import quat_conj
from dm_robotics.transformations._transformations_quat import quat_dist
from dm_robotics.transformations._transformations_quat import quat_exp
from dm_robotics.transformations._transformations_quat import quat_inv
from dm_robotics.transformations._transformations_quat import quat_log
from dm_robotics.transformations._transformations_quat import quat_mul
from dm_robotics.transformations._transformations_quat import quat_rotate
from dm_robotics.transformations._transformations_quat import quat_slerp
from dm_robotics.transformations._transformations_quat import quat_to_mat
# TODO(benmoran) Consider quaternion implementations of other functions:
# from dm_robotics.transformations._transformations import quat_axis
# from dm_robotics.transformations._transformations \
# import quat_between_vectors
# from dm_robotics.transformations._transformations import quat_diff_active
# from dm_robotics.transformations._transformations import quat_diff_passive
# from dm_robotics.transformations._transformations import quat_to_axisangle
# from dm_robotics.transformations._transformations import quat_to_euler
HAVE_NUMPY_QUATERNION = True
except ImportError:
HAVE_NUMPY_QUATERNION = False
# pytype: enable=import-error
# pylint: enable=g-import-not-at-top,reimported
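# Callers can check which implementation was picked up (a sketch):
#
#   from dm_robotics.transformations import transformations as tr
#   if tr.HAVE_NUMPY_QUATERNION:
#     pass  # Faster C-extension quaternion routines are in use.
#   else:
#     pass  # Pure-NumPy fallbacks are in use.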
| dm_robotics-main | py/transformations/transformations.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rigid-body transformations including velocities and static forces."""
from typing import Optional, Tuple, Union
from absl import logging
from dm_robotics.transformations import _types as types
import numpy as np
# Constants used to determine when a rotation is close to a pole.
_POLE_LIMIT = (1.0 - 1e-6)
_TOL = 1e-10
# Constant used to decide when to use the arccos over the arcsin.
_TOL_ARCCOS = 1 / np.sqrt(2)
_IDENTITY_QUATERNION = np.array([1, 0, 0, 0], dtype=np.float64)
def _clip_within_precision(number: float, low: float,
high: float, precision: float = _TOL):
"""Clips input to the range [low, high], checking precision.
Args:
number: Number to be clipped.
low: Lower bound (inclusive).
high: Upper bound (inclusive).
precision: Tolerance.
Returns:
Input clipped to given range.
Raises:
ValueError: If number is outside given range by more than given precision.
"""
if number < low - precision or number > high + precision:
raise ValueError(
'Input {:.12f} not inside range [{:.12f}, {:.12f}] with precision {}'.
format(number, low, high, precision))
return np.clip(number, low, high)
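# Example behaviour (a sketch): values just outside the range are clipped if
# they are within `precision` of it, and rejected otherwise.
#
#   _clip_within_precision(1.0 + 1e-12, -1., 1.)  # Returns 1.0 (within _TOL).
#   _clip_within_precision(1.1, -1., 1.)          # Raises ValueError.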
def _batch_mm(m1, m2):
"""Batch matrix multiply.
Args:
m1: input lhs matrix with shape (batch, n, m).
m2: input rhs matrix with shape (batch, m, o).
Returns:
product matrix with shape (batch, n, o).
"""
return np.einsum('bij,bjk->bik', m1, m2)
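# Note: this einsum is equivalent to batched `m1 @ m2`; the explicit spelling
# documents the index contraction. For example (a sketch):
#
#   m1 = np.random.rand(4, 3, 2)
#   m2 = np.random.rand(4, 2, 5)
#   assert np.allclose(_batch_mm(m1, m2), m1 @ m2)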
def _rmat_to_euler_xyz(rmat: types.RotationMatrix) -> types.EulerArray['XYZ']:
"""Converts a 3x3 rotation matrix to XYZ euler angles."""
# | r00 r01 r02 | | cy*cz -cy*sz sy |
# | r10 r11 r12 | = | cz*sx*sy+cx*sz cx*cz-sx*sy*sz -cy*sx |
# | r20 r21 r22 | | -cx*cz*sy+sx*sz cz*sx+cx*sy*sz cx*cy |
if rmat[0, 2] > _POLE_LIMIT:
logging.log_every_n_seconds(logging.WARNING, 'Angle at North Pole', 60)
z = np.arctan2(rmat[1, 0], rmat[1, 1])
y = np.pi/2
x = 0.0
return np.array([x, y, z])
if rmat[0, 2] < -_POLE_LIMIT:
logging.log_every_n_seconds(logging.WARNING, 'Angle at South Pole', 60)
z = np.arctan2(rmat[1, 0], rmat[1, 1])
y = -np.pi/2
x = 0.0
return np.array([x, y, z])
z = -np.arctan2(rmat[0, 1], rmat[0, 0])
y = np.arcsin(rmat[0, 2])
x = -np.arctan2(rmat[1, 2], rmat[2, 2])
# order of return is the order of input
return np.array([x, y, z])
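# Round-trip sanity check for the conversion above (a sketch; away from the
# y = +/- pi/2 poles the forward conversion recovers the input angles):
#
#   angles = np.array([0.1, 0.2, 0.3])
#   rmat = euler_to_rmat(angles, ordering='XYZ')  # Defined later in this file.
#   assert np.allclose(_rmat_to_euler_xyz(rmat), angles)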
def _rmat_to_euler_xyx(rmat: types.RotationMatrix) -> types.EulerArray['XYX']:
"""Converts a 3x3 rotation matrix to XYX euler angles."""
# | r00 r01 r02 | | cy sy*sx1 sy*cx1 |
# | r10 r11 r12 | = | sy*sx0 cx0*cx1-cy*sx0*sx1 -cy*cx1*sx0-cx0*sx1 |
# | r20 r21 r22 | | -sy*cx0 cx1*sx0+cy*cx0*sx1 cy*cx0*cx1-sx0*sx1 |
if rmat[0, 0] < 1.0:
if rmat[0, 0] > -1.0:
y = np.arccos(_clip_within_precision(rmat[0, 0], -1., 1.))
x0 = np.arctan2(rmat[1, 0], -rmat[2, 0])
x1 = np.arctan2(rmat[0, 1], rmat[0, 2])
return np.array([x0, y, x1])
else:
# Not a unique solution: x1_angle - x0_angle = atan2(-r12,r11)
y = np.pi
x0 = -np.arctan2(-rmat[1, 2], rmat[1, 1])
x1 = 0.0
return np.array([x0, y, x1])
else:
# Not a unique solution: x1_angle + x0_angle = atan2(-r12,r11)
y = 0.0
x0 = -np.arctan2(-rmat[1, 2], rmat[1, 1])
x1 = 0.0
return np.array([x0, y, x1])
def _rmat_to_euler_zyx(rmat: types.RotationMatrix) -> types.EulerArray['ZYX']:
"""Converts a 3x3 rotation matrix to ZYX euler angles."""
if rmat[2, 0] > _POLE_LIMIT:
logging.warning('Angle at North Pole')
x = np.arctan2(rmat[0, 1], rmat[0, 2])
y = -np.pi/2
z = 0.0
return np.array([z, y, x])
if rmat[2, 0] < -_POLE_LIMIT:
logging.warning('Angle at South Pole')
x = np.arctan2(rmat[0, 1], rmat[0, 2])
y = np.pi/2
z = 0.0
return np.array([z, y, x])
x = np.arctan2(rmat[2, 1], rmat[2, 2])
y = -np.arcsin(rmat[2, 0])
z = np.arctan2(rmat[1, 0], rmat[0, 0])
# order of return is the order of input
return np.array([z, y, x])
def _rmat_to_euler_xzy(rmat: types.RotationMatrix) -> types.EulerArray['XZY']:
"""Converts a 3x3 rotation matrix to XZY euler angles."""
if rmat[0, 1] > _POLE_LIMIT:
logging.warning('Angle at North Pole')
y = np.arctan2(rmat[1, 2], rmat[1, 0])
z = -np.pi/2
x = 0.0
return np.array([x, z, y])
if rmat[0, 1] < -_POLE_LIMIT:
logging.warning('Angle at South Pole')
y = np.arctan2(rmat[1, 2], rmat[1, 0])
z = np.pi/2
x = 0.0
return np.array([x, z, y])
y = np.arctan2(rmat[0, 2], rmat[0, 0])
z = -np.arcsin(rmat[0, 1])
x = np.arctan2(rmat[2, 1], rmat[1, 1])
# order of return is the order of input
return np.array([x, z, y])
def _rmat_to_euler_yzx(rmat: types.RotationMatrix) -> types.EulerArray['YZX']:
"""Converts a 3x3 rotation matrix to YZX euler angles."""
if rmat[1, 0] > _POLE_LIMIT:
logging.warning('Angle at North Pole')
x = -np.arctan2(rmat[0, 2], rmat[0, 1])
z = np.pi/2
y = 0.0
return np.array([y, z, x])
if rmat[1, 0] < -_POLE_LIMIT:
logging.warning('Angle at South Pole')
x = -np.arctan2(rmat[0, 2], rmat[0, 1])
z = -np.pi/2
y = 0.0
return np.array([y, z, x])
x = -np.arctan2(rmat[1, 2], rmat[1, 1])
z = np.arcsin(rmat[1, 0])
y = -np.arctan2(rmat[2, 0], rmat[0, 0])
# order of return is the order of input
return np.array([y, z, x])
def _rmat_to_euler_zxy(rmat: types.RotationMatrix) -> types.EulerArray['ZXY']:
"""Converts a 3x3 rotation matrix to ZXY euler angles."""
if rmat[2, 1] > _POLE_LIMIT:
logging.warning('Angle at North Pole')
y = np.arctan2(rmat[0, 2], rmat[0, 0])
x = np.pi/2
z = 0.0
return np.array([z, x, y])
if rmat[2, 1] < -_POLE_LIMIT:
logging.warning('Angle at South Pole')
y = np.arctan2(rmat[0, 2], rmat[0, 0])
x = -np.pi/2
z = 0.0
return np.array([z, x, y])
y = -np.arctan2(rmat[2, 0], rmat[2, 2])
x = np.arcsin(rmat[2, 1])
z = -np.arctan2(rmat[0, 1], rmat[1, 1])
# order of return is the order of input
return np.array([z, x, y])
def _rmat_to_euler_yxz(rmat: types.RotationMatrix) -> types.EulerArray['YXZ']:
"""Converts a 3x3 rotation matrix to YXZ euler angles."""
if rmat[1, 2] > _POLE_LIMIT:
logging.warning('Angle at North Pole')
z = -np.arctan2(rmat[0, 1], rmat[0, 0])
x = -np.pi/2
y = 0.0
return np.array([y, x, z])
if rmat[1, 2] < -_POLE_LIMIT:
logging.warning('Angle at South Pole')
z = -np.arctan2(rmat[0, 1], rmat[0, 0])
x = np.pi/2
y = 0.0
return np.array([y, x, z])
z = np.arctan2(rmat[1, 0], rmat[1, 1])
x = -np.arcsin(rmat[1, 2])
y = np.arctan2(rmat[0, 2], rmat[2, 2])
# order of return is the order of input
return np.array([y, x, z])
def _rmat_to_euler_xzx(rmat: types.RotationMatrix) -> types.EulerArray['XZX']:
"""Converts a 3x3 rotation matrix to XZX euler angles."""
# | r00 r01 r02 | | cz -sz*cx1 sz*sx1 |
# | r10 r11 r12 | = | cx0*sz cx0*cz*cx1-sx0*sx1 -sx0*cx1-cx0*cz*sx1 |
# | r20 r21 r22 | | sx0*sz sx0*cz*cx1+cx0*sx1 cx0*cx1-sx0*cz*sx1 |
if rmat[0, 0] < 1.0:
if rmat[0, 0] > -1.0:
z = np.arccos(_clip_within_precision(rmat[0, 0], -1., 1.))
x0 = np.arctan2(rmat[2, 0], rmat[1, 0])
x1 = np.arctan2(rmat[0, 2], -rmat[0, 1])
return np.array([x0, z, x1])
else:
# Not a unique solution: x0_angle - x1_angle = atan2(r12,r11)
z = np.pi
x0 = np.arctan2(rmat[1, 2], rmat[1, 1])
x1 = 0.0
return np.array([x0, z, x1])
else:
# Not a unique solution: x0_angle + x1_angle = atan2(-r12, r11)
z = 0.0
x0 = np.arctan2(-rmat[1, 2], rmat[1, 1])
x1 = 0.0
return np.array([x0, z, x1])
def _rmat_to_euler_yxy(rmat: types.RotationMatrix) -> types.EulerArray['YXY']:
"""Converts a 3x3 rotation matrix to YXY euler angles."""
# | r00 r01 r02 | = | -sy0*sy1*cx+cy0*cy1 sx*sy0 sy0*cx*cy1+sy1*cy0 |
# | r10 r11 r12 | = | sx*sy1, cx -sx*cy1 |
# | r20 r21 r22 | = | -sy0*cy1-sy1*cx*cy0 sx*cy0 -sy0*sy1+cx*cy0*cy1 |
if rmat[1, 1] < 1.0:
if rmat[1, 1] > -1.0:
x = np.arccos(_clip_within_precision(rmat[1, 1], -1., 1.))
y0 = np.arctan2(rmat[0, 1], rmat[2, 1])
y1 = np.arctan2(rmat[1, 0], -rmat[1, 2])
return np.array([y0, x, y1])
else:
# Not a unique solution: y0_angle - y1_angle = atan2(r02, r22)
x = np.pi
y0 = np.arctan2(rmat[0, 2], rmat[2, 2])
y1 = 0.0
return np.array([y0, x, y1])
else:
# Not a unique solution: y0_angle + y1_angle = atan2(r02, r22)
x = 0.0
y0 = np.arctan2(rmat[0, 2], rmat[2, 2])
y1 = 0.0
return np.array([y0, x, y1])
def _rmat_to_euler_yzy(rmat: types.RotationMatrix) -> types.EulerArray['YZY']:
"""Converts a 3x3 rotation matrix to YZY euler angles."""
# | r00 r01 r02 | = | -sy0*sy1+cy0*cy1*cz -sz*cy0 sy0*cy1+sy1*cy0*cz |
# | r10 r11 r12 | = | sz*cy1 cz sy1*sz |
# | r20 r21 r22 | = | -sy0*cy1*cz-sy1*cy0 sy0*sz -sy0*sy1*cz+cy0*cy1 |
if rmat[1, 1] < 1.0:
if rmat[1, 1] > -1.0:
z = np.arccos(_clip_within_precision(rmat[1, 1], -1., 1.))
y0 = np.arctan2(rmat[2, 1], -rmat[0, 1])
y1 = np.arctan2(rmat[1, 2], rmat[1, 0])
return np.array([y0, z, y1])
else:
# Not a unique solution: y0_angle - y1_angle = atan2(r02, r22)
z = np.pi
y0 = np.arctan2(rmat[0, 2], rmat[2, 2])
y1 = 0.0
return np.array([y0, z, y1])
else:
# Not a unique solution: y0_angle + y1_angle = atan2(r02, r22)
z = 0.0
y0 = np.arctan2(rmat[0, 2], rmat[2, 2])
y1 = 0.0
return np.array([y0, z, y1])
def _rmat_to_euler_zxz(rmat: types.RotationMatrix) -> types.EulerArray['ZXZ']:
"""Converts a 3x3 rotation matrix to ZXZ euler angles."""
# | r00 r01 r02 | = | -sz0*sz1*cx+cz0*cz1 -sz0*cx*cz1-sz1*cz0 sx*sz0 |
# | r10 r11 r12 | = | sz0*cz1+sz1*cx*cz0 -sz0*sz1+cx*cz0*cz1 -sx*cz0 |
# | r20 r21 r22 | = | sx*sz1 sx*cz1 cx |
if rmat[2, 2] < 1.0:
if rmat[2, 2] > -1.0:
x = np.arccos(_clip_within_precision(rmat[2, 2], -1., 1.))
z0 = np.arctan2(rmat[0, 2], -rmat[1, 2])
z1 = np.arctan2(rmat[2, 0], rmat[2, 1])
return np.array([z0, x, z1])
else:
# Not a unique solution: z0_angle - z1_angle = atan2(r10, r00)
x = np.pi
z0 = np.arctan2(rmat[1, 0], rmat[0, 0])
z1 = 0.0
return np.array([z0, x, z1])
else:
# Not a unique solution: z0_angle + z1_angle = atan2(r10, r00)
x = 0.0
z0 = np.arctan2(rmat[1, 0], rmat[0, 0])
z1 = 0.0
return np.array([z0, x, z1])
def _rmat_to_euler_zyz(rmat: types.RotationMatrix) -> types.EulerArray['ZYZ']:
"""Converts a 3x3 rotation matrix to ZYZ euler angles."""
# | r00 r01 r02 | = | -sz0*sz1+cy*cz0*cz1 -sz0*cz1-sz1*cy*cz0 sy*cz0 |
# | r10 r11 r12 | = | sz0*cy*cz1+sz1*cz0 -sz0*sz1*cy+cz0*cz1 sy*sz0 |
# | r20 r21 r22 | = | -sy*cz1 sy*sz1 cy |
if rmat[2, 2] < 1.0:
if rmat[2, 2] > -1.0:
y = np.arccos(_clip_within_precision(rmat[2, 2], -1., 1.))
z0 = np.arctan2(rmat[1, 2], rmat[0, 2])
z1 = np.arctan2(rmat[2, 1], -rmat[2, 0])
return np.array([z0, y, z1])
else:
# Not a unique solution: z0_angle - z1_angle = atan2(r10, r00)
y = np.pi
z0 = np.arctan2(rmat[1, 0], rmat[0, 0])
z1 = 0.0
return np.array([z0, y, z1])
else:
# Not a unique solution: z0_angle + z1_angle = atan2(r10, r00)
y = 0.0
z0 = np.arctan2(rmat[1, 0], rmat[0, 0])
z1 = 0.0
return np.array([z0, y, z1])
def _axis_rotation(theta, full: bool):
"""Returns the theta dim, cos and sin, and blank matrix for axis rotation."""
n = 1 if np.isscalar(theta) else len(theta)
ct = np.cos(theta)
st = np.sin(theta)
if full:
rmat = np.zeros((n, 4, 4))
rmat[:, 3, 3] = 1.
else:
rmat = np.zeros((n, 3, 3))
return n, ct, st, rmat
# map from full rotation orderings to euler conversion functions
_eulermap = {
'XYZ': _rmat_to_euler_xyz,
'XYX': _rmat_to_euler_xyx,
'XZY': _rmat_to_euler_xzy,
'ZYX': _rmat_to_euler_zyx,
'YZX': _rmat_to_euler_yzx,
'ZXY': _rmat_to_euler_zxy,
'YXZ': _rmat_to_euler_yxz,
'XZX': _rmat_to_euler_xzx,
'YXY': _rmat_to_euler_yxy,
'YZY': _rmat_to_euler_yzy,
'ZXZ': _rmat_to_euler_zxz,
'ZYZ': _rmat_to_euler_zyz,
}
def cross_mat_from_vec3(v):
"""Returns the skew-symmetric matrix cross-product operator.
Args:
v: A 3x1 vector.
Returns:
A matrix cross-product operator P (3x3) for the vector v = [x,y,z]^T,
such that v x b = Pb for any 3-vector b
"""
x, y, z = v[0], v[1], v[2]
return np.array([[0, -z, y],
[z, 0, -x],
[-y, x, 0]])
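# Illustrative usage sketch (added for exposition, not part of the original
# source): the matrix operator reproduces the numpy cross product.
#   v, b = np.array([1., 2., 3.]), np.array([4., 5., 6.])
#   assert np.allclose(cross_mat_from_vec3(v) @ b, np.cross(v, b))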
def axisangle_to_euler(axisangle: types.AxisAngleArray,
ordering: str = 'XYZ') -> types.SomeEulerArray:
"""Returns euler angles corresponding to the exponential coordinates.
Args:
axisangle: A 3x1 numpy array describing the axis of rotation, with angle
encoded by its length.
ordering: Desired euler angle ordering.
  Returns:
    A euler triple.
"""
rmat = axisangle_to_rmat(axisangle)
return rmat_to_euler(rmat, ordering)
def axisangle_to_rmat(axisangle: types.AxisAngleArray) -> types.RotationMatrix:
"""Returns rotation matrix corresponding to the exponential coordinates.
See Murray1994: A Mathematical Introduction to Robotic Manipulation
Args:
axisangle: A 3x1 numpy array describing the axis of rotation, with angle
encoded by its length.
  Returns:
    R: a 3x3 numpy array describing the rotation
"""
theta = np.linalg.norm(axisangle)
if np.allclose(theta, 0):
s_theta = cross_mat_from_vec3(axisangle)
return np.eye(3) + s_theta + s_theta.dot(s_theta) * 0.5
else:
wn = axisangle / theta
s = cross_mat_from_vec3(wn)
return np.eye(3) + s * np.sin(theta) + s.dot(s) * (1-np.cos(theta))
# LINT.IfChange
def axisangle_to_quat(axisangle: types.AxisAngleArray) -> types.QuatArray:
"""Returns the quaternion corresponding to the provided axis-angle vector.
Args:
axisangle: A 3x1 numpy array describing the axis of rotation, with angle
encoded by its length
Returns:
quat: A quaternion [w, i, j, k]
"""
theta = np.linalg.norm(axisangle)
if np.allclose(theta, 0):
return _IDENTITY_QUATERNION
else:
wn = axisangle/theta
return np.hstack([np.cos(theta/2), wn * np.sin(theta/2)])
# LINT.ThenChange(_transformations_quat.py)
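# Illustrative sketch (not part of the original source): a rotation of pi/2
# about the Z-axis maps to the expected unit quaternion.
#   q = axisangle_to_quat(np.array([0., 0., np.pi / 2]))
#   assert np.allclose(q, [np.cos(np.pi / 4), 0., 0., np.sin(np.pi / 4)])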
def euler_to_axisangle(euler_vec: types.SomeEulerArray,
ordering: str = 'XYZ') -> types.AxisAngleArray:
"""Returns the euler angles corresponding to the provided axis-angle vector.
Args:
euler_vec: The euler angle rotations.
ordering: Desired euler angle ordering.
Returns:
axisangle: A 3x1 numpy array describing the axis of rotation, with angle
encoded by its length
"""
rmat = euler_to_rmat(euler_vec, ordering=ordering)
return rmat_to_axisangle(rmat)
def euler_to_quat(euler_vec: types.SomeEulerArray,
ordering: str = 'XYZ') -> types.QuatArray:
"""Returns the quaternion corresponding to the provided euler angles.
Args:
euler_vec: The euler angle rotations.
ordering: Desired euler angle ordering.
Returns:
quat: A quaternion [w, i, j, k]
"""
mat = euler_to_rmat(euler_vec, ordering=ordering)
return mat_to_quat(mat)
def euler_to_rmat(
euler_vec: types.SomeEulerArray,
ordering: str = 'ZXZ',
full: bool = False,
extrinsic: bool = False
) -> Union[types.HomogeneousMatrix, types.RotationMatrix]:
"""Returns rotation matrix (or transform) for the given Euler rotations.
Euler*** methods compose a Rotation matrix corresponding to the given
rotations r1, r2, r3 following the given rotation ordering.
This operation follows the INTRINSIC rotation convention, i.e. defined w.r.t
the axes of the rotating system. Intrinsic rotations are evaluated in the
order provided. E.g. for XYZ we return rotX(r1) * rotY(r2) * rotZ(r3).
This is equivalent to ZYX extrinsic, because rotZ is evaluated first in the
fixed frame, which is then transformed by rotY and rotX.
From Wikipedia: http://en.wikipedia.org/wiki/Euler_angles
  Any intrinsic rotation is equivalent to an extrinsic rotation by the same
  angles but with inverted order of elemental rotations, and vice-versa. For
  instance, the intrinsic rotations x-y'-z" by angles alpha, beta, gamma are
  equivalent to the extrinsic rotations z-y-x by angles gamma, beta, alpha.
Args:
euler_vec: The euler angle rotations.
ordering: euler angle ordering string (see _euler_orderings).
full: If true, returns a full 4x4 transform.
extrinsic: Whether to use the extrinsic or intrinsic rotation convention.
Returns:
The rotation matrix or homogeneous transform corresponding to the given
Euler rotation.
"""
# map from partial rotation orderings to rotation functions
rotmap = {'X': rotation_x_axis, 'Y': rotation_y_axis, 'Z': rotation_z_axis}
rotations = [rotmap[c] for c in ordering]
if extrinsic:
rotations.reverse()
euler_vec = np.atleast_2d(euler_vec)
rots = []
for i in range(len(rotations)):
rots.append(rotations[i](euler_vec[:, i], full))
if rots[0].ndim == 3:
result = _batch_mm(_batch_mm(rots[0], rots[1]), rots[2])
return result.squeeze()
else:
return (rots[0].dot(rots[1])).dot(rots[2])
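# Illustrative sketch (not part of the original source): for orderings present
# in `_eulermap`, `rmat_to_euler` should invert `euler_to_rmat` away from
# singular configurations.
#   e = np.array([0.1, 0.2, 0.3])
#   r = euler_to_rmat(e, ordering='XYZ')
#   assert np.allclose(rmat_to_euler(r, ordering='XYZ'), e)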
def positive_leading_quat(quat: types.QuatArray) -> types.QuatArray:
"""Returns the positive leading version of the quaternion.
This function supports inputs with or without leading batch dimensions.
Args:
quat: A quaternion [w, i, j, k].
Returns:
The equivalent quaternion [w, i, j, k] with w > 0.
"""
# Ensure quat is an np.array in case a tuple or a list is passed
quat = np.asarray(quat)
quat = np.where(np.tile(quat[..., 0:1] < 0, quat.shape[-1]), -quat, quat)
return quat
# LINT.IfChange
def quat_conj(quat: types.QuatArray) -> types.QuatArray:
"""Return conjugate of quaternion.
This function supports inputs with or without leading batch dimensions.
Args:
quat: A quaternion [w, i, j, k].
Returns:
A quaternion [w, -i, -j, -k] representing the inverse of the rotation
defined by `quat` (not assuming normalization).
"""
# Ensure quat is an np.array in case a tuple or a list is passed
quat = np.asarray(quat)
return np.stack(
[quat[..., 0], -quat[..., 1],
-quat[..., 2], -quat[..., 3]], axis=-1).astype(np.float64)
# LINT.ThenChange(_transformations_quat.py)
# LINT.IfChange
def quat_inv(quat: types.QuatArray) -> types.QuatArray:
"""Return inverse of quaternion.
This function supports inputs with or without leading batch dimensions.
Args:
quat: A quaternion [w, i, j, k].
Returns:
A quaternion representing the inverse of the original rotation.
"""
# Ensure quat is an np.array in case a tuple or a list is passed
quat = np.asarray(quat)
return quat_conj(quat) / np.sum(quat * quat, axis=-1, keepdims=True)
# LINT.ThenChange(_transformations_quat.py)
# LINT.IfChange
def quat_mul(quat1: types.QuatArray, quat2: types.QuatArray) -> types.QuatArray:
"""Multiply quaternions.
This function supports inputs with or without leading batch dimensions.
Args:
quat1: A quaternion [w, i, j, k].
quat2: A quaternion [w, i, j, k].
Returns:
The quaternion product, aka hamiltonian product.
"""
# Ensure quats are np.arrays in case a tuple or a list is passed
quat1, quat2 = np.asarray(quat1), np.asarray(quat2)
# Construct a 4x4 matrix representation of quat1 for use with matmul
w1, x1, y1, z1 = [quat1[..., i] for i in range(4)]
qmat = np.stack(
[np.stack([w1, -x1, -y1, -z1], axis=-1),
np.stack([x1, w1, -z1, y1], axis=-1),
np.stack([y1, z1, w1, -x1], axis=-1),
np.stack([z1, -y1, x1, w1], axis=-1)],
axis=-2)
# Compute (batched) hamiltonian product
qdot = qmat @ np.expand_dims(quat2, axis=-1)
return np.squeeze(qdot, axis=-1)
# LINT.ThenChange(_transformations_quat.py)
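# Illustrative sketch (not part of the original source): composing a quaternion
# with its inverse yields the identity rotation.
#   q = axisangle_to_quat(np.array([0.1, -0.2, 0.3]))
#   assert np.allclose(quat_mul(q, quat_inv(q)), [1., 0., 0., 0.])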
def quat_diff_passive(source_quat: types.QuatArray,
target_quat: types.QuatArray) -> types.QuatArray:
"""Passive quaternion difference between source and target quaternions.
This quaternion difference is used when one is trying to find the quaternion
that brings a vector expressed in the target frame to the same vector
expressed in the source frame.
Note: `source_quat` and `target_quat` should represent world-frame
orientations, i.e. both should rotate a vector expressed in their respective
frames to world.
This is the passive quaternion difference as the vector is not moving, only
the frame in which it is expressed.
For more information on active/passive rotations please refer to:
https://www.tu-chemnitz.de/informatik/KI/edu/robotik/ws2016/lecture-02_p2.pdf
This function supports inputs with or without leading batch dimensions.
Args:
source_quat: A unit quaternion [w, i, j, k] representing a passive rotation
from the source frame to the world frame.
target_quat: A unit quaternion [w, i, j, k] representing a passive rotation
from the target frame to the world frame.
Returns:
A normalized quaternion representing the rotation that brings a vector
expressed in the target frame to the same vector being expressed in the
source frame.
"""
# Ensure quats are np.arrays in case a tuple or a list is passed
source_quat, target_quat = np.asarray(source_quat), np.asarray(target_quat)
quat = quat_mul(quat_conj(source_quat), target_quat)
return quat / np.linalg.norm(quat, axis=-1, keepdims=True)
def quat_diff_active(source_quat: types.QuatArray,
target_quat: types.QuatArray) -> types.QuatArray:
"""Active quaternion difference between source and target quaternions.
Given the unit vectors of the source frame (expressed in the world frame),
this function gives the quaternion that rotates these vectors into the unit
vectors of the target frame (expressed in the world frame).
Note: `source_quat` and `target_quat` should represent active rotations of
vectors, i.e. both should rotate the unit vectors of the world frame
into the unit vectors of their respective frame (expressed in the world
frame).
This is the active quaternion difference as the vectors are being rotated
while the reference frame they are expressed in stays the same.
For more information on active/passive rotations please refer to:
https://www.tu-chemnitz.de/informatik/KI/edu/robotik/ws2016/lecture-02_p2.pdf
This function supports inputs with or without leading batch dimensions.
Args:
source_quat: A unit quaternion [w, i, j, k], or multi-dimensional array of
unit quaternions.
target_quat: A unit quaternion [w, i, j, k], or multi-dimensional array of
unit quaternions.
Returns:
A normalized quaternion representing the rotation that brings the source
frame into the target frame.
"""
# Ensure quats are np.arrays in case a tuple or a list is passed
source_quat, target_quat = np.asarray(source_quat), np.asarray(target_quat)
quat = quat_mul(target_quat, quat_conj(source_quat))
return quat / np.linalg.norm(quat, axis=-1, keepdims=True)
# LINT.IfChange
def quat_log(quat: types.QuatArray, tol: float = _TOL) -> types.QuatArray:
"""Log of a quaternion.
This function supports inputs with or without leading batch dimensions.
Args:
quat: A quaternion [w, i, j, k].
tol: numerical tolerance to prevent nan.
Returns:
4D array representing the log of `quat`. This is analogous to
`rmat_to_axisangle`.
"""
# Ensure quat is an np.array in case a tuple or a list is passed
quat = np.asarray(quat)
q_norm = np.linalg.norm(quat + tol, axis=-1, keepdims=True)
a = quat[..., 0:1]
v = np.stack([quat[..., 1], quat[..., 2], quat[..., 3]], axis=-1)
  # `tol` is added inside the norms to keep them strictly positive (avoids nan)
v_new = v / np.linalg.norm(v + tol, axis=-1, keepdims=True) * np.arccos(
a / q_norm)
return np.stack(
[np.log(q_norm[..., 0]), v_new[..., 0], v_new[..., 1], v_new[..., 2]],
axis=-1)
# LINT.ThenChange(_transformations_quat.py)
# LINT.IfChange
def quat_exp(quat: types.QuatArray, tol: float = _TOL) -> types.QuatArray:
"""Exp of a quaternion.
This function supports inputs with or without leading batch dimensions.
Args:
quat: A quaternion [w, i, j, k].
tol: numerical tolerance to prevent nan.
Returns:
Exp of quaternion.
"""
# Ensure quat is an np.array in case a tuple or a list is passed
quat = np.asarray(quat)
a = quat[..., 0:1]
v = np.stack([quat[..., 1], quat[..., 2], quat[..., 3]], axis=-1)
v_norm = np.linalg.norm(v + tol, axis=-1, keepdims=True)
v_new = np.exp(a) * v/v_norm * np.sin(v_norm)
a_new = np.exp(a) * np.cos(v_norm)
return np.stack([a_new[..., 0], v_new[..., 0], v_new[..., 1], v_new[..., 2]],
axis=-1)
# LINT.ThenChange(_transformations_quat.py)
# LINT.IfChange
def quat_dist(source: types.QuatArray, target: types.QuatArray) -> np.ndarray:
"""Computes distance between source and target quaternions.
This function supports inputs with or without leading batch dimensions.
Note: operates on unit quaternions
Args:
source: A unit quaternion [w, i, j, k].
target: A unit quaternion [w, i, j, k].
Returns:
The rotational distance from source to target in radians.
"""
# Ensure quats are np.arrays in case a tuple or a list is passed
source, target = np.asarray(source), np.asarray(target)
quat_err = quat_mul(source, quat_inv(target))
quat_err /= np.linalg.norm(quat_err, axis=-1, keepdims=True)
return quat_angle(quat_err)
# LINT.ThenChange(_transformations_quat.py)
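# Illustrative sketch (not part of the original source): the distance between
# the identity and a pi/2 rotation about X is pi/2 radians.
#   q = np.array([np.cos(np.pi / 4), np.sin(np.pi / 4), 0., 0.])
#   assert np.isclose(quat_dist(np.array([1., 0., 0., 0.]), q), np.pi / 2)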
# LINT.IfChange
def quat_angle(quat: types.QuatArray) -> np.ndarray:
"""Computes the angle of the rotation encoded by the unit quaternion.
This function supports inputs with or without leading batch dimensions.
Args:
quat: A unit quaternion [w, i, j, k]. The norm of this vector should be 1.
Returns:
The angle in radians of the rotation encoded by the quaternion.
"""
# Ensure quat is an np.array in case a tuple or a list is passed
quat = np.asarray(quat)
# Ensure the quaternion is positive leading to get the shortest angle.
quat = positive_leading_quat(quat)
# We have w = cos(angle/2) with w the real part of the quaternion and
# ||Im(q)|| = sin(angle/2) with Im(q) the imaginary part of the quaternion.
# We choose the method that is less sensitive to a noisy evaluation of the
# difference.
condition = quat[..., 0] < _TOL_ARCCOS
angle = np.where(
condition,
2 * np.arccos(quat[..., 0], where=condition),
2 * np.arcsin(np.linalg.norm(quat[..., 1:], axis=-1), where=~condition))
return angle
# LINT.ThenChange(_transformations_quat.py)
# LINT.IfChange
def quat_rotate(quat: types.QuatArray,
vec: types.PositionArray) -> types.PositionArray:
"""Rotate a vector by a unit quaternion.
Args:
quat: A unit quaternion [w, i, j, k]. The norm of this vector should be 1.
vec: A 3-vector representing a position.
Returns:
The rotated vector.
"""
vec = np.atleast_2d(vec)
qvec = np.hstack([np.zeros(vec.shape[0:-1] + (1,)), vec])
return quat_mul(quat_mul(quat, qvec), quat_conj(quat))[:, 1:].squeeze()
# LINT.ThenChange(_transformations_quat.py)
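# Illustrative sketch (not part of the original source): a pi/2 rotation about
# Z maps the X unit vector onto the Y unit vector.
#   q = np.array([np.cos(np.pi / 4), 0., 0., np.sin(np.pi / 4)])
#   assert np.allclose(quat_rotate(q, np.array([1., 0., 0.])), [0., 1., 0.])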
def quat_between_vectors(source_vec: types.PositionArray,
target_vec: types.PositionArray) -> types.QuatArray:
"""Returns the minimal quaternion that rotates `source_vec` to `target_vec`.
An explanation for the math can be found here (under Quaternion Result):
http://www.euclideanspace.com/maths/algebra/vectors/angleBetween/index.htm
The input vectors can be any non-zero vectors. The returned unit quaternion is
the shortest arc rotation from source_vec to target_vec.
Args:
source_vec: A 3-vector representing the source vector.
target_vec: A 3-vector representing the target vector.
Returns:
A quaternion rotation between source and target vectors, such that
quat_rotate(quat, source_vec) == target_vec.
"""
if np.linalg.norm(source_vec) == 0 or np.linalg.norm(target_vec) == 0:
raise ValueError('Source or target vector is a 0 vector; cannot compute '
'rotation for a vector with no direction.')
dot_product_normalized = np.dot(source_vec / np.linalg.norm(source_vec),
target_vec / np.linalg.norm(target_vec))
# check if source and target vectors are parallel with same direction
if dot_product_normalized > _POLE_LIMIT:
# return identity rotation
return _IDENTITY_QUATERNION
# check if source and target vectors are parallel with opposite direction
elif dot_product_normalized < -_POLE_LIMIT:
# In this case we need to return a 180 degree rotation around any vector
# that is orthogonal to source_vec.
# To compute the orthogonal vector, we can take the cross product of the
# source vector and any other vector that is nonparallel to source_vec.
# To find a nonparallel vector, we can take these 3 unit vectors and find
# which one has the smallest dot product.
unit_vectors = np.eye(3)
min_dotproduct = np.argmin(np.dot(unit_vectors, source_vec))
nonparallel_vector = unit_vectors[min_dotproduct]
# Compute the orthogonal vector.
orthogonal_vector = np.cross(source_vec, nonparallel_vector)
# Return the 180 degree rotation around this orthogonal vector.
return np.concatenate(
([0], orthogonal_vector / np.linalg.norm(orthogonal_vector)))
# compute the i, j, k terms of the quaternion
ijk = np.cross(source_vec, target_vec)
real = np.linalg.norm(source_vec) * np.linalg.norm(target_vec) + np.dot(
source_vec, target_vec)
q_rotation = np.concatenate(([real], ijk))
return q_rotation / np.linalg.norm(q_rotation)
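# Illustrative sketch (not part of the original source): the returned
# quaternion rotates the source vector onto the target direction.
#   q = quat_between_vectors(np.array([1., 0., 0.]), np.array([0., 1., 0.]))
#   assert np.allclose(quat_rotate(q, np.array([1., 0., 0.])), [0., 1., 0.])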
# LINT.IfChange
def quat_slerp(quat0: types.QuatArray, quat1: types.QuatArray,
fraction: float) -> types.QuatArray:
"""Return spherical linear interpolation between two unit quaternions.
Equivalent to:
quat_mul(
quat0, quat_exp(quat_log(quat_diff_passive(quat0, quat1)) * fraction)
)
Args:
quat0: A unit quaternion [w, i, j, k].
quat1: A unit quaternion [w, i, j, k].
fraction: Scalar between 0.0 and 1.0.
Returns:
A unit quaternion `fraction` of the way from quat0 to quat1.
Raises:
ValueError: If invalid fraction passed.
"""
q0 = np.array(quat0)
q1 = np.array(quat1)
if fraction < 0.0 or fraction > 1.0:
raise ValueError('fraction must be between 0 and 1 (inclusive)')
if fraction < _TOL:
return q0
elif fraction > 1.0 - _TOL:
return q1
d = np.dot(q0, q1)
if abs(abs(d) - 1.0) < _TOL:
return q0
if d < 0.0:
# If the dot product is negative, slerp won't take the shorter path.
    # Note that q1 and -q1 represent the same rotation, since the negation is
    # applied to all four components. Fix by negating one quaternion.
d = -d
q1 *= -1.0
angle = np.arccos(_clip_within_precision(d, -1., 1.))
if abs(angle) < _TOL:
return q0
isin = 1.0 / np.sin(angle)
s0 = np.sin((1.0 - fraction) * angle) * isin
s1 = np.sin(fraction * angle) * isin
interp_quat = q0 * s0 + q1 * s1
return interp_quat / np.linalg.norm(interp_quat)
# LINT.ThenChange(_transformations_quat.py)
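# Illustrative sketch (not part of the original source): interpolating halfway
# between the identity and a pi/2 Z-rotation yields a pi/4 Z-rotation.
#   q0 = np.array([1., 0., 0., 0.])
#   q1 = np.array([np.cos(np.pi / 4), 0., 0., np.sin(np.pi / 4)])
#   q_half = quat_slerp(q0, q1, 0.5)
#   assert np.allclose(q_half, [np.cos(np.pi / 8), 0., 0., np.sin(np.pi / 8)])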
def quat_axis(quat: types.QuatArray) -> types.AxisAngleArray:
"""Returns the rotation axis of the corresponding quaternion.
This function supports inputs with or without leading batch dimensions.
Args:
quat: A unit quaternion [w, i, j, k].
Returns:
axisangle: A 3x1 normalized numpy array describing the axis of rotation.
"""
# Ensure quat is an np.array in case a tuple or a list is passed
quat = np.asarray(quat)
return quat[..., 1:4] / np.linalg.norm(quat[..., 1:4], axis=-1, keepdims=True)
def quat_to_axisangle(quat: types.QuatArray) -> types.AxisAngleArray:
"""Returns the axis-angle corresponding to the provided quaternion.
Args:
quat: A unit quaternion [w, i, j, k].
Returns:
axisangle: A 3x1 numpy array describing the axis of rotation, with angle
encoded by its length.
"""
# Ensure quat is an np.array in case a tuple or a list is passed
quat = np.asarray(quat)
# Ensure the quaternion is positive leading to get the shortest angle.
quat = positive_leading_quat(quat)
angle = quat_angle(quat)
if angle < _TOL:
return np.zeros(3)
else:
axis = quat_axis(quat)
return axis * angle
def quat_to_euler(quat: types.QuatArray,
ordering: str = 'XYZ') -> types.SomeEulerArray:
"""Returns the euler angles corresponding to the provided quaternion.
Args:
quat: A unit quaternion [w, i, j, k].
ordering: Desired euler angle ordering.
Returns:
euler_vec: The euler angle rotations.
"""
mat = quat_to_mat(quat)
return rmat_to_euler(mat[0:3, 0:3], ordering=ordering)
# LINT.IfChange
def quat_to_mat(quat: types.QuatArray) -> types.HomogeneousMatrix:
"""Return homogeneous rotation matrix from quaternion.
Args:
quat: A unit quaternion [w, i, j, k].
Returns:
A 4x4 homogeneous matrix with the rotation corresponding to `quat`.
"""
q = np.array(quat, dtype=np.float64, copy=True)
nq = np.dot(q, q)
if nq < _TOL:
return np.identity(4)
q *= np.sqrt(2.0 / nq)
q = np.outer(q, q)
return np.array(
((1.0 - q[2, 2] - q[3, 3], q[1, 2] - q[3, 0], q[1, 3] + q[2, 0], 0.0),
(q[1, 2] + q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] - q[1, 0], 0.0),
(q[1, 3] - q[2, 0], q[2, 3] + q[1, 0], 1.0 - q[1, 1] - q[2, 2], 0.0),
(0.0, 0.0, 0.0, 1.0)),
dtype=np.float64)
# LINT.ThenChange(_transformations_quat.py)
# LINT.IfChange
def pos_quat_to_hmat(pos: types.PositionArray,
quat: types.QuatArray) -> types.HomogeneousMatrix:
"""Returns a 4x4 Homogeneous transform for the given configuration.
Args:
pos: A cartesian position [x, y, z].
quat: A unit quaternion [w, i, j, k].
Returns:
    A 4x4 homogeneous transform as a numpy array.
"""
hmat = quat_to_mat(quat)
hmat[:3, 3] = pos
return hmat
# LINT.ThenChange(_transformations_quat.py)
# LINT.IfChange
def integrate_quat(quat: types.QuatArray,
vel: types.AngVelArray) -> types.QuatArray:
"""Integrates the unit quaternion by the given angular velocity.
For information on this operation see:
https://www.ashwinnarayan.com/post/how-to-integrate-quaternions/
Args:
quat: A unit quaternion [w, i, j, k] to integrate.
vel: The 3D angular velocity used to integrate the orientation. It is
assumed that the angular velocity is given in the same frame as the
quaternion and it has been properly scaled by the timestep over which the
integration is done. In particular the velocity should NOT be given in the
frame of the rotating object.
Returns:
The normalized integrated quaternion.
"""
vel = np.concatenate(([0], vel))
quat = quat + 0.5 * quat_mul(vel, quat)
return quat / np.linalg.norm(quat)
# LINT.ThenChange(_transformations_quat.py)
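# Illustrative sketch (not part of the original source): for a small angular
# velocity the first-order integration approximates the exact rotation.
#   q = integrate_quat(np.array([1., 0., 0., 0.]), np.array([0., 0., 1e-2]))
#   assert np.allclose(q, axisangle_to_quat(np.array([0., 0., 1e-2])),
#                      atol=1e-4)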
def rotation_x_axis(
theta: np.ndarray,
full: bool = False) -> Union[types.RotationMatrix, types.HomogeneousMatrix]:
"""Returns a rotation matrix of a rotation about the X-axis.
Supports vector-valued theta, in which case the returned array is of shape
  (len(theta), 3, 3), or (len(theta), 4, 4) if full=True. If theta is scalar
  the batch
dimension is squeezed out.
Args:
theta: The rotation amount.
    full: If true, returns a full 4x4 transform.
"""
n, ct, st, rmat = _axis_rotation(theta, full)
rmat[:, 0, 0:3] = np.array([[1, 0, 0]])
rmat[:, 1, 0:3] = np.vstack([np.zeros(n), ct, -st]).T
rmat[:, 2, 0:3] = np.vstack([np.zeros(n), st, ct]).T
return rmat.squeeze()
def rotation_y_axis(
theta: np.ndarray,
full: bool = False) -> Union[types.RotationMatrix, types.HomogeneousMatrix]:
"""Returns a rotation matrix of a rotation about the Y-axis.
Supports vector-valued theta, in which case the returned array is of shape
  (len(theta), 3, 3), or (len(theta), 4, 4) if full=True. If theta is scalar
  the batch
dimension is squeezed out.
Args:
theta: The rotation amount.
    full: If true, returns a full 4x4 transform.
"""
n, ct, st, rmat = _axis_rotation(theta, full)
rmat[:, 0, 0:3] = np.vstack([ct, np.zeros(n), st]).T
rmat[:, 1, 0:3] = np.array([[0, 1, 0]])
rmat[:, 2, 0:3] = np.vstack([-st, np.zeros(n), ct]).T
return rmat.squeeze()
def rotation_z_axis(
theta: np.ndarray,
full: bool = False) -> Union[types.RotationMatrix, types.HomogeneousMatrix]:
"""Returns a rotation matrix of a rotation about the z-axis.
Supports vector-valued theta, in which case the returned array is of shape
  (len(theta), 3, 3), or (len(theta), 4, 4) if full=True. If theta is scalar
  the batch
dimension is squeezed out.
Args:
theta: The rotation amount.
    full: If true, returns a full 4x4 transform.
"""
n, ct, st, rmat = _axis_rotation(theta, full)
rmat[:, 0, 0:3] = np.vstack([ct, -st, np.zeros(n)]).T
rmat[:, 1, 0:3] = np.vstack([st, ct, np.zeros(n)]).T
rmat[:, 2, 0:3] = np.array([[0, 0, 1]])
return rmat.squeeze()
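# Illustrative sketch (not part of the original source): a pi/2 Z-rotation
# maps the X unit vector onto the Y unit vector, and a vector-valued theta
# returns a batch of matrices.
#   assert np.allclose(rotation_z_axis(np.pi / 2) @ [1., 0., 0.], [0., 1., 0.])
#   assert rotation_z_axis(np.array([0., np.pi])).shape == (2, 3, 3)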
def rmat_to_axisangle(rmat: types.RotationMatrix) -> types.AxisAngleArray:
"""Returns exponential coordinates (w * theta) for the given rotation matrix.
See Murray1994: A Mathematical Introduction to Robotic Manipulation
Args:
rmat: a 3x3 numpy array describing the rotation.
Returns:
A 3D numpy unit-vector describing the axis of rotation, scaled by the angle
required to rotate about this axis to achieve `rmat`.
"""
theta = np.arccos(
_clip_within_precision((np.trace(rmat) - 1) / 2, -1., 1.))
if np.allclose(theta, 0):
return np.zeros(3)
w = 1./np.sin(theta) * np.array([
rmat[2, 1] - rmat[1, 2],
rmat[0, 2] - rmat[2, 0],
rmat[1, 0] - rmat[0, 1]])
wnorm = np.linalg.norm(w)
if np.allclose(wnorm, 0.):
# rotation matrix is symmetric, fall back to eigen-decomposition
w, v = np.linalg.eig(rmat)
i = np.where(abs(np.real(w) - 1.0) < _TOL)[0][0] # index of eigenvalue=1
return np.real(v[:, i]) * theta
else:
wnormed = w / np.linalg.norm(w)
return wnormed * theta
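# Illustrative sketch (not part of the original source): for rotation angles
# below pi, `rmat_to_axisangle` inverts `axisangle_to_rmat`.
#   aa = np.array([0.1, 0.2, 0.3])
#   assert np.allclose(rmat_to_axisangle(axisangle_to_rmat(aa)), aa)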
def rmat_to_euler(rmat: types.RotationMatrix,
ordering: str = 'ZXZ') -> types.SomeEulerArray:
"""Returns the euler angles corresponding to the provided rotation matrix.
Args:
rmat: The rotation matrix.
ordering: (str) Desired euler angle ordering.
Returns:
Euler angles corresponding to the provided rotation matrix.
"""
return _eulermap[ordering](rmat)
def rmat_to_rot6(rmat: types.RotationMatrix) -> np.ndarray:
"""Projects rotation matrix to 6-dim "Gram-Schmidt-able" representation.
The "rot6" representation is a 6-DOF representation of an orientation that is
homeomorphic with SO(3). It is not minimal like an euler or axis-angle, but
it is smooth over the full range of rotations, unlike eulers, quaternions, and
axis-angle representations. See the original paper for details:
"On the Continuity of Rotation Representations in Neural Networks"
https://arxiv.org/pdf/1812.07035.pdf
Args:
rmat: A 3x3 rotation matrix, or larger rank-2 matrix containing a 3x3
rotation matrix in the leading 3-dimensions (e.g. a homogeneous 4x4).
Returns:
A 6-dimensional array containing the first two columns of `rmat`. This
representation can be mapped back to `rmat` using `rot6_to_rmat`.
"""
return rmat[:3, :2].T.flatten() # concatenate the first 2 columns of `rmat`.
# LINT.IfChange
def mat_to_quat(
mat: Union[types.RotationMatrix,
types.HomogeneousMatrix]) -> types.QuatArray:
"""Return quaternion from homogeneous or rotation matrix.
Args:
mat: A homogeneous transform or rotation matrix
Returns:
A quaternion [w, i, j, k].
"""
if mat.shape == (3, 3):
tmp = np.eye(4)
tmp[0:3, 0:3] = mat
mat = tmp
q = np.empty((4,), dtype=np.float64)
t = np.trace(mat)
if t > mat[3, 3]:
q[0] = t
q[3] = mat[1, 0] - mat[0, 1]
q[2] = mat[0, 2] - mat[2, 0]
q[1] = mat[2, 1] - mat[1, 2]
else:
i, j, k = 0, 1, 2
if mat[1, 1] > mat[0, 0]:
i, j, k = 1, 2, 0
if mat[2, 2] > mat[i, i]:
i, j, k = 2, 0, 1
t = mat[i, i] - (mat[j, j] + mat[k, k]) + mat[3, 3]
q[i + 1] = t
q[j + 1] = mat[i, j] + mat[j, i]
q[k + 1] = mat[k, i] + mat[i, k]
q[0] = mat[k, j] - mat[j, k]
q *= 0.5 / np.sqrt(t * mat[3, 3])
return q
# LINT.ThenChange(_transformations_quat.py)
# LINT.IfChange
def hmat_to_pos_quat(
hmat: types.HomogeneousMatrix
) -> Tuple[types.PositionArray, types.QuatArray]:
"""Return a cartesian position and quaternion from a homogeneous matrix.
Args:
hmat: A homogeneous transform or rotation matrix
Returns:
A tuple containing:
- A cartesian position [x, y, z].
- A quaternion [w, i, j, k].
"""
return hmat[:3, 3], mat_to_quat(hmat)
# LINT.ThenChange(_transformations_quat.py)
def rot6_to_rmat(rot6: np.ndarray) -> np.ndarray:
"""Maps a 6-dim "Gram-Schmidt-able" representation back to a rotation matrix.
The "rot6" representation is a 6-DOF representation of an orientation that is
homeomorphic with SO(3). It is not minimal like an euler or axis-angle, but
it is smooth over the full range of rotations, unlike eulers, quaternions, and
axis-angle representations. See the original paper for details:
"On the Continuity of Rotation Representations in Neural Networks"
https://arxiv.org/pdf/1812.07035.pdf
Args:
rot6: An arbitrary 6-dimensional array representing a rotation. This
representation can be obtained from an `rmat` using `rmat_to_rot6`.
Returns:
A rotation matrix obtained by normalizing and orthogonalizing the contents
of `rot6`.
"""
xu = rot6[0:3]
yu = rot6[3:6]
  tol = 1e-6  # Tolerance below which the rot6 is replaced by a canonical basis.
eps = 1e-5 # Safety factor to avoid zero case.
def safe_interp(v: np.ndarray, b: np.ndarray) -> np.ndarray:
"""Safe interpolation between input vector and a default basis."""
# This function interpolates `v` and `b` a function of the norm of `v`.
# The interpolation has a critical value in which the interpolation can be
# zero if `v = -lambda * b`. We handle this by "jumping" over this value
# for `alpha` with safety-factor `epsilon`. Achieved by defining a function
# for alpha which grows from 0 to `crit - epsilon` over the range [0, tol].
norm = np.linalg.norm(v)
    # Critical value for the `v = -lambda * b` case, considering only the
    # projection along `b` (if there are off-axis components then there is no
    # failure mode).
crit = (1 - eps) / (1 - v @ b)
if norm < tol:
alpha = crit * norm / tol
else:
alpha = 1.
return alpha * v + (1 - alpha) * b
# Interpolate `xu` and `yu` if they're close to zero.
xu = safe_interp(xu, np.array([1, 0, 0], dtype=xu.dtype))
yu = safe_interp(yu, np.array([0, 1, 0], dtype=yu.dtype))
# If xu and yu are parallel, add arbitrary offset to allow orthogonalization.
if np.allclose(np.cross(xu, yu), 0., atol=tol):
yu = np.array([0, 1, 0], dtype=yu.dtype) + yu
# Rotation matrix obtained by orthogonalizing and normalizing.
xn = xu / np.linalg.norm(xu)
zu = np.cross(xn, yu)
zn = zu / np.linalg.norm(zu)
yn = np.cross(zn, xn)
return np.stack([xn, yn, zn], axis=1)
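# Illustrative sketch (not part of the original source): for a valid rotation
# matrix the rot6 mapping is exactly invertible, since the first two columns
# are already orthonormal.
#   r = euler_to_rmat(np.array([0.1, 0.2, 0.3]), ordering='XYZ')
#   assert np.allclose(rot6_to_rmat(rmat_to_rot6(r)), r)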
def hmat_inv(hmat: types.HomogeneousMatrix) -> types.HomogeneousMatrix:
"""Numerically stable inverse of homogeneous transform."""
rot = hmat[0:3, 0:3]
pos = hmat[0:3, 3]
hinv = np.eye(4)
hinv[0:3, 3] = rot.T.dot(-pos)
hinv[0:3, 0:3] = rot.T
return hinv
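# Illustrative sketch (not part of the original source): composing a transform
# with its inverse yields the identity, without inverting a 4x4 numerically.
#   h = pos_quat_to_hmat(np.array([1., 2., 3.]),
#                        axisangle_to_quat(np.array([0.1, 0.2, 0.3])))
#   assert np.allclose(hmat_inv(h) @ h, np.eye(4))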
def hmat_to_poseuler(ht: types.HomogeneousMatrix, ordering: str) -> np.ndarray:
"""Returns a configuration vector for the given homogeneous transform.
Args:
ht: The homogeneous transform.
ordering: Desired euler angle ordering.
Returns:
A 6x1 configuration vector containing the x,y,z position and r1, r2, r3
euler-angles from the provided homogeneous transform ht, following the given
rotation ordering.
"""
return np.hstack([ht[0:3, 3], rmat_to_euler(ht[0:3, 0:3], ordering)])
def hmat_to_twist(ht: types.HomogeneousMatrix) -> np.ndarray:
"""Returns the exponential coordinates for the homogeneous transform H.
See Murray1994: A Mathematical Introduction to Robotic Manipulation
Lynch & Park 2017: Modern Robotics: Mechanics, Planning, and Control
Args:
ht: A 4x4 numpy array containing a homogeneous transform.
Returns:
A 6-vector representing the instantaneous velocity and normalized axis of
rotation, scaled by the magnitude of the twist. Intuitively, if this twist
is integrated for unit time (by `twist_to_hmat`) it will recover `ht`.
"""
r = ht[0:3, 0:3]
p = ht[0:3, 3]
if np.allclose(r, np.eye(3), atol=1e-6):
wn = np.zeros(3)
v = p
theta = 1.
else:
w = rmat_to_axisangle(r)
theta = np.linalg.norm(w)
wn = w/theta
s = cross_mat_from_vec3(wn)
wn2d = np.atleast_2d(wn).T
a = (np.eye(3) - r).dot(s) + wn2d.dot(wn2d.T) * theta
v = np.linalg.pinv(a).dot(p)
xi = np.hstack([v, wn])
return xi * theta
def pos_to_hmat(pos: types.PositionArray) -> types.HomogeneousMatrix:
"""Returns homogeneous translation matrix for the given position.
Args:
pos: 1-dim position vector, or 2-dim tensor of positions with batch in
leading dimension.
"""
pos = np.atleast_2d(pos)
hmat = np.zeros((pos.shape[0], 4, 4))
hmat[:, np.arange(4), np.arange(4)] = 1
hmat[:, 0:3, 3] = pos
return hmat.squeeze()
def rmat_to_hmat(rmat: types.RotationMatrix) -> types.HomogeneousMatrix:
"""Returns homogeneous translation matrix for the given rotation matrix.
Args:
rmat: 2-dim rotation matrix, or 3-dim tensor of matrices with batch in
leading dimension.
"""
if rmat.ndim == 2:
rmat = np.expand_dims(rmat, 0)
hmat = np.zeros((rmat.shape[0], 4, 4))
hmat[:, :3, :3] = rmat
hmat[:, 3, 3] = 1.
return hmat.squeeze()
def poseuler_to_hmat(pe: np.ndarray, ordering: str) -> types.HomogeneousMatrix:
"""Returns a 4x4 Homogeneous transform for the given configuration.
Args:
pe: position (x, y, z) and euler angles (r1, r2, r3) following the
order specified in ordering (e.g. "XYZ").
ordering: The ordering of euler angles in the configuration array.
Returns:
    A 4x4 homogeneous transform as a numpy array.
"""
pos = pe[0:3]
euler_vec = pe[3:]
hmat = euler_to_rmat(euler_vec, ordering, full=True)
hmat[0:3, 3] = pos
return hmat
def velocity_transform(ht: types.HomogeneousMatrix,
vel: Optional[types.ArrayLike] = None) -> np.ndarray:
"""Returns a 6x6 matrix for mapping velocities to the defined frame.
If R is the rotation part of ht, and p the translation, and v the linear
component of the twist and w the angular, this function computes the following
matrix operator:
[R, (p+)R][v]
[0, R ][w]
Where "x" is cross-product, and "p+" is the 3x3 cross-product operator for
3-vector p.
Usage: recall that v is interpreted as the velocity of a point attached to the
origin of some frame A. We can use velocity_transform to determine the
equivalent velocity at a point in frame B relative to A using H_B_A, the
transform from A to B (i.e. the pose of A in frame B). E.g. to compute the
velocity v_orig of the origin at another point in the body frame, we use:
v_pt = velocity_transform(H_point_origin, v_orig)
Where H_point_origin defines the transform from the origin to target point.
Args:
    ht: A transform to the target frame.
vel: If provided, return the transformed velocity, else the full 6x6
transform.
Returns:
A 6x6 matrix for mapping velocities, as 6d twists (vx,vy,vz,wx,wy,wz) to the
frame defined in the homogeneous transform ht.
"""
r = ht[0:3, 0:3]
p = ht[0:3, 3]
pcross = cross_mat_from_vec3(p)
tv = np.vstack([np.hstack([r, pcross.dot(r)]),
np.hstack([np.zeros((3, 3)), r])])
if vel is None:
return tv
else:
return tv.dot(vel)
def twist_to_hmat(xi: np.ndarray) -> types.HomogeneousMatrix:
"""Returns homogeneous transform from exponential coordinates xi=[w, v],theta.
The magnitude of the angle is encoded in the length of w if w is nonzero, else
in the magnitude of v.
See Murray 1994: A Mathematical Introduction to Robotic Manipulation or
Lynch & Park 2017: Modern Robotics: Mechanics, Planning, and Control
Args:
xi: A 6-vector containing:
v - 3-vector representing the instantaneous velocity.
w - 3-vector representing the axis of rotation.
Scaled by the magnitude of the rotation.
Returns:
H: A 4x4 numpy array containing a homogeneous transform.
"""
v = xi[0:3]
w = xi[3:6]
ht = np.eye(4)
if np.allclose(w, 0):
r = np.eye(3)
p = v # assume already scaled by theta
else:
w = xi[3:6]
theta = np.linalg.norm(w)
wn = w/theta
vn = v/theta
s = cross_mat_from_vec3(wn)
r = np.eye(3) + s * np.sin(theta) + s.dot(s) * (1-np.cos(theta))
p = (np.eye(3) - r).dot(s.dot(vn)) + wn * (wn.T.dot(vn)) * theta
ht[0:3, 0:3] = r
ht[0:3, 3] = p
return ht
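# Illustrative sketch (not part of the original source): `twist_to_hmat`
# should invert `hmat_to_twist` for non-degenerate transforms.
#   h = pos_quat_to_hmat(np.array([1., 2., 3.]),
#                        axisangle_to_quat(np.array([0.1, 0.2, 0.3])))
#   assert np.allclose(twist_to_hmat(hmat_to_twist(h)), h)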
#########################
# Control-Support Utils #
#########################
def force_transform(ht: types.HomogeneousMatrix,
wrench: Optional[types.ArrayLike] = None) -> np.ndarray:
"""Returns a 6x6 matrix for mapping forces as 6D wrenches.
If R is the rotation part of H, and p the translation, and f the linear
component of the wrench and t the angular, this function computes the
following matrix operator:
[R, 0][f]
[(p+)R, R][t]
Where x is cross-product, and p+ is the 3x3 cross-product operator for
the 3-vector p.
Args:
    ht: A transform to the target frame.
wrench: If provided, return the transformed wrench, else the full 6x6
transform.
Returns:
A 6x6 transform matrix.
"""
r = ht[0:3, 0:3]
p = ht[0:3, 3]
pcross = cross_mat_from_vec3(p)
tw = np.vstack([np.hstack([r, np.zeros((3, 3))]),
np.hstack([pcross.dot(r), r])])
if wrench is None:
return tw
else:
return tw.dot(wrench)
def rotate_vec6(mat: Union[types.RotationMatrix, types.HomogeneousMatrix],
vec6: np.ndarray) -> np.ndarray:
"""Returns a rotated 6-vector based on rotation component of mat.
Args:
mat: A homogeneous transform or rotation matrix.
vec6: A 6-vector to rotate, e.g. twist, wrench, accel, etc.
"""
rmat = mat[0:3, 0:3]
rvec = np.zeros(6)
rvec[0:3] = rmat.dot(vec6[0:3])
rvec[3:6] = rmat.dot(vec6[3:6])
return rvec
def integrate_hmat(
hmat: types.HomogeneousMatrix,
twist: types.Twist,
rotate_twist_to_hmat: bool = True) -> types.HomogeneousMatrix:
"""Integrates hmat by the given twist.
This function is useful for driving a position reference around using a
velocity signal, e.g. a spacenav or joystick.
Args:
hmat: The homogeneous transform to integrate.
twist: A 6-dof twist containing the linear velocity and angular velocity
axis. If the angular velocity is nonzero, the norm of this axis is
interpreted as the angle to integrate both components over. Otherwise
the magnitude of the linear component is used. See `twist_to_hmat`.
rotate_twist_to_hmat: If True, rotate twist into the hmat frame (assumes
twist is defined in the same frame as hmat, e.g. world). Else interpret
twist as local to hmat.
Returns:
hmat_new: The resulting transform.
"""
if rotate_twist_to_hmat:
twist_local = rotate_vec6(hmat.T, twist)
else:
twist_local = twist
hmat_delta = twist_to_hmat(twist_local)
return hmat.dot(hmat_delta)
################
# 2D Functions #
################
def postheta_to_matrix_2d(pose: np.ndarray) -> types.HomogeneousMatrix2d:
"""Converts 2D pose vector (x, y, theta) to 2D homogeneous transform matrix.
Args:
pose: (np.array) Pose vector with x,y,theta elements.
Returns:
A 3x3 transform matrix.
"""
ct = np.cos(pose[2])
st = np.sin(pose[2])
return np.array([
[ct, -st, pose[0]],
[st, ct, pose[1]],
[0., 0., 1.]
])
def matrix_to_postheta_2d(mat: types.HomogeneousMatrix2d) -> np.ndarray:
"""Converts 2D homogeneous transform matrix to a 2D pose vector (x, y, theta).
Args:
mat: (np.array) 3x3 transform matrix.
Returns:
An x,y,theta 2D pose.
"""
return np.array([mat[0, 2], mat[1, 2], np.arctan2(mat[1, 0], mat[0, 0])])
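# Illustrative sketch (not part of the original source): the 2D pose/matrix
# conversions are mutually inverse for theta in (-pi, pi].
#   pose = np.array([1., 2., 0.5])
#   assert np.allclose(matrix_to_postheta_2d(postheta_to_matrix_2d(pose)), pose)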
def rotation_matrix_2d(theta: float) -> types.RotationMatrix2d:
ct = np.cos(theta)
st = np.sin(theta)
return np.array([
[ct, -st],
[st, ct]
])
def cross_2d(a: np.ndarray, b: np.ndarray) -> np.ndarray:
"""Performs a 2D cross product, returning a scalar or vector as appropriate.
Two vectors -> scalar
Vector and scalar -> vector
Scalar and vector -> vector
Args:
a: first argument (scalar or vector).
b: second argument (scalar or vector).
Returns:
A 2D cross product of the two given vectors.
Raises:
Exception: if the vector lengths are incompatible.
"""
va, vb = np.atleast_1d(a), np.atleast_1d(b)
l1, l2 = len(va), len(vb)
if l1 == 2 and l2 == 2:
# Perform the cross product on two vectors.
# In 2D this produces a scalar.
return va[0] * vb[1] - va[1] * vb[0]
elif l1 == 2 and l2 == 1:
# Perform the cross product on a vector and a scalar.
# In 2D this produces a vector.
return np.array([vb[0] * va[1], -vb[0] * va[0]])
elif l1 == 1 and l2 == 2:
# Perform the cross product on a scalar and a vector.
# In 2D this produces a vector.
return np.array([-va[0] * vb[1], va[0] * vb[0]])
else:
raise Exception('Unsupported argument vector lengths')
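# Illustrative sketch (not part of the original source): in 2D the cross
# product of two vectors is the scalar z-component of the 3D cross product,
# and a vector crossed with a scalar gives a vector.
#   assert np.isclose(cross_2d(np.array([1., 0.]), np.array([0., 1.])), 1.0)
#   assert np.allclose(cross_2d(np.array([1., 0.]), 2.0), [0., -2.])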
def velocity_transform_2d(ht: types.HomogeneousMatrix2d,
vel: Optional[types.ArrayLike] = None) -> np.ndarray:
"""Returns a matrix for mapping 2D velocities.
This is a 2-dimensional version of velocity_transform which expects a numpy
homogeneous transform and a numpy velocity array
Args:
ht: A 3x3 numpy homogeneous transform to the target frame.
vel: A numpy velocity vector (3x1 mini-twist). If provided, return the
transformed velocity. Else the full 3x3 transform operator which can be
used to transform velocities.
"""
r = ht[0:2, 0:2]
p = ht[0:2, 2]
# linear part is two columns of rotation and a column of cross product
tv = np.hstack([r, np.array([[p[1], -p[0]]]).T])
  # angular part is identity b/c angular vel is not affected by an in-plane
  # transform
tv = np.vstack([tv, np.array([0., 0., 1.])])
if vel is None:
return tv
else:
return tv.dot(vel)
def force_transform_2d(ht: types.HomogeneousMatrix2d,
force_torque: Optional[types.ArrayLike] = None):
"""Returns a matrix for mapping 2D forces.
This is a 2-dimensional version of force_transform which expects a numpy
homogeneous transform and a numpy force-torque array
Args:
ht: A 3x3 numpy homogeneous transform to the target frame.
force_torque: A numpy force-torque vector (3x1).
Returns:
A 3x3 transform matrix.
"""
# extract position and cos(theta) and sin(theta) from transform
x, y, ct, st = ht[0, 2], ht[1, 2], ht[0, 0], ht[1, 0]
  # linear part is two columns of rotation and a column of zeros
tv = np.hstack([ht[0:2, 0:2], np.zeros((2, 1))])
# angular part needs to compute: [-y, x].T * R * ft_xy + ft_theta
# i.e. angular part is two columns of cross product and one of identity
tv = np.vstack([tv, np.array([[x*st - y*ct, x*ct + y*st, 1]])])
if force_torque is None:
return tv
else:
return tv.dot(force_torque)
| dm_robotics-main | py/transformations/_transformations.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| dm_robotics-main | py/transformations/__init__.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package building script."""
import setuptools
def _get_requirements(requirements_file): # pylint: disable=g-doc-args
"""Returns a list of dependencies for setup() from requirements.txt.
Currently a requirements.txt is being used to specify dependencies. In order
to avoid specifying it in two places, we're going to use that file as the
source of truth.
"""
with open(requirements_file) as f:
return [_parse_line(line) for line in f if line]
def _parse_line(s):
"""Parses a line of a requirements.txt file."""
requirement, *_ = s.split("#")
return requirement.strip()
setuptools.setup(
name="dm_robotics-transformations",
package_dir={"dm_robotics.transformations": ""},
packages=["dm_robotics.transformations"],
version="0.5.0",
license="Apache 2.0",
author="DeepMind",
description="A library for Rigid Body Transformations",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url="https://github.com/deepmind/dm_robotics/tree/main/py/transformations",
setup_requires=["wheel >= 0.31.0"],
install_requires=_get_requirements("requirements.txt"),
extras_require={"quaternion": "numpy-quaternion == 2021.11.4.15.26.3"},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Mathematics",
],
zip_safe=True,
)
| dm_robotics-main | py/transformations/setup.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rigid-body transformations including velocities and static forces."""
from typing import Tuple, Union
from dm_robotics.transformations import _types as types
import numpy as np
import quaternion
_TOL = 1e-10
_IDENTITY = quaternion.from_float_array([1, 0, 0, 0])
# Any functions in this file should always be drop-in replacements for functions
# in _transformations.py.
# LINT.IfChange
def axisangle_to_quat(axisangle: types.AxisAngleArray) -> types.QuatArray:
"""Returns the quaternion corresponding to the provided axis-angle vector.
Args:
axisangle: A [..,3] numpy array describing the axis of rotation, with angle
encoded by its length
Returns:
quat: A quaternion [w, i, j, k]
"""
quat = quaternion.from_rotation_vector(axisangle)
return quaternion.as_float_array(quat)
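# Illustrative sketch (not part of the original source): this should agree
# with the pure-numpy implementation, assuming the package layout declared in
# setup.py, e.g.
#   from dm_robotics.transformations import _transformations as tr
#   aa = np.array([0., 0., np.pi / 2])
#   assert np.allclose(axisangle_to_quat(aa), tr.axisangle_to_quat(aa))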
def quat_conj(quat: types.QuatArray) -> types.QuatArray:
"""Return conjugate of quaternion.
This function supports inputs with or without leading batch dimensions.
Args:
quat: A quaternion [w, i, j, k].
Returns:
A quaternion [w, -i, -j, -k] representing the inverse of the rotation
defined by `quat` (not assuming normalization).
"""
quat = quaternion.from_float_array(quat)
return quaternion.as_float_array(quat.conj())
def quat_inv(quat: types.QuatArray) -> types.QuatArray:
"""Return inverse of quaternion.
This function supports inputs with or without leading batch dimensions.
Args:
quat: A quaternion [w, i, j, k].
Returns:
A quaternion representing the inverse of the original rotation.
"""
# Ensure quat is an np.array in case a tuple or a list is passed
quat = quaternion.from_float_array(quat)
return quaternion.as_float_array(1.0 / quat)
def quat_mul(quat1: types.QuatArray, quat2: types.QuatArray) -> types.QuatArray:
"""Multiply quaternions.
This function supports inputs with or without leading batch dimensions.
Args:
quat1: A quaternion [w, i, j, k].
quat2: A quaternion [w, i, j, k].
Returns:
The quaternion product, aka hamiltonian product.
"""
quat1 = quaternion.from_float_array(quat1)
quat2 = quaternion.from_float_array(quat2)
return quaternion.as_float_array(quat1 * quat2)
def quat_log(quat: types.QuatArray, tol: float = _TOL) -> types.QuatArray:
"""Log of a quaternion.
This function supports inputs with or without leading batch dimensions.
Args:
quat: A quaternion [w, i, j, k].
tol: numerical tolerance to prevent nan.
Returns:
4D array representing the log of `quat`. This is analogous to
`rmat_to_axisangle`.
"""
if tol == 0:
quat = quaternion.from_float_array(quat)
return quaternion.as_float_array(np.log(quat))
# Ensure quat is an np.array in case a tuple or a list is passed
quat = np.asarray(quat)
q_norm = np.linalg.norm(quat + tol, axis=-1, keepdims=True)
a = quat[..., 0:1]
v = np.stack([quat[..., 1], quat[..., 2], quat[..., 3]], axis=-1)
  # `tol` is added inside the norms to keep them strictly positive (avoids nan)
v_new = v / np.linalg.norm(
v + tol, axis=-1, keepdims=True) * np.arccos(a / q_norm)
return np.stack(
[np.log(q_norm[..., 0]), v_new[..., 0], v_new[..., 1], v_new[..., 2]],
axis=-1)
def quat_exp(quat: types.QuatArray, tol: float = _TOL) -> types.QuatArray:
"""Exp of a quaternion.
This function supports inputs with or without leading batch dimensions.
Args:
quat: A quaternion [w, i, j, k].
tol: numerical tolerance to prevent nan.
Returns:
Exp of quaternion.
"""
if tol == 0:
quat = quaternion.from_float_array(quat)
return quaternion.as_float_array(np.exp(quat))
# Ensure quat is an np.array in case a tuple or a list is passed
quat = np.asarray(quat)
a = quat[..., 0:1]
v = np.stack([quat[..., 1], quat[..., 2], quat[..., 3]], axis=-1)
v_norm = np.linalg.norm(v + tol, axis=-1, keepdims=True)
v_new = np.exp(a) * v / v_norm * np.sin(v_norm)
a_new = np.exp(a) * np.cos(v_norm)
return np.stack([a_new[..., 0], v_new[..., 0], v_new[..., 1], v_new[..., 2]],
axis=-1)
def quat_dist(source: types.QuatArray, target: types.QuatArray) -> np.ndarray:
"""Computes distance between source and target quaternions.
This function supports inputs with or without leading batch dimensions.
Note: operates on unit quaternions
Args:
source: An array [...,4] of unit quaternions [w, i, j, k].
target: An array [...,4] of unit quaternions [w, i, j, k].
Returns:
The rotational distance from source to target in radians.
"""
source = quaternion.from_float_array(source)
target = quaternion.from_float_array(target)
return quaternion.rotation_intrinsic_distance(source, target)
def quat_angle(quat: types.QuatArray) -> np.ndarray:
"""Computes the angle of the rotation encoded by the unit quaternion.
This function supports inputs with or without leading batch dimensions.
Args:
quat: A unit quaternion [w, i, j, k]. The norm of this vector should be 1.
Returns:
The angle in radians of the rotation encoded by the quaternion.
"""
quat = quaternion.from_float_array(quat)
return quaternion.rotation_intrinsic_distance(_IDENTITY, quat)
def quat_rotate(quat: types.QuatArray,
vec: types.PositionArray) -> types.PositionArray:
"""Rotate a vector by a unit quaternion.
Args:
quat: A unit quaternion [w, i, j, k]. The norm of this vector should be 1.
vec: A 3-vector representing a position.
Returns:
The rotated vector.
"""
quat = quaternion.from_float_array(quat)
vec = quaternion.from_vector_part(vec)
return quaternion.as_vector_part(quat * vec * quat.conj())
def quat_slerp(quat0: types.QuatArray, quat1: types.QuatArray,
fraction: float) -> types.QuatArray:
"""Return spherical linear interpolation between two unit quaternions.
Equivalent to:
quat_mul(
quat0, quat_exp(quat_log(quat_diff_passive(quat0, quat1)) * fraction)
)
Args:
quat0: A unit quaternion [w, i, j, k].
quat1: A unit quaternion [w, i, j, k].
fraction: Scalar between 0.0 and 1.0.
Returns:
A unit quaternion `fraction` of the way from quat0 to quat1.
Raises:
ValueError: If invalid fraction passed.
"""
quat0 = quaternion.from_float_array(quat0)
quat1 = quaternion.from_float_array(quat1)
quat = quaternion.slerp_evaluate(quat0, quat1, fraction)
return quaternion.as_float_array(quat)
def quat_to_mat(quat: types.QuatArray) -> types.HomogeneousMatrix:
"""Return homogeneous rotation matrix from quaternion.
Args:
quat: A unit quaternion [w, i, j, k].
Returns:
A 4x4 homogeneous matrix with the rotation corresponding to `quat`.
"""
quat = quaternion.from_float_array(quat)
mat = np.eye(4, dtype=np.float64)
mat[:3, :3] = quaternion.as_rotation_matrix(quat)
return mat
def pos_quat_to_hmat(pos: types.PositionArray,
quat: types.QuatArray) -> types.HomogeneousMatrix:
"""Returns a 4x4 Homogeneous transform for the given configuration.
Args:
pos: A cartesian position [x, y, z].
quat: A unit quaternion [w, i, j, k].
Returns:
    A 4x4 homogeneous transform as a numpy array.
"""
hmat = quat_to_mat(quat)
hmat[:3, 3] = pos
return hmat
def integrate_quat(quat: types.QuatArray,
vel: types.AngVelArray) -> types.QuatArray:
"""Integrates the unit quaternion by the given angular velocity.
For information on this operation see:
https://www.ashwinnarayan.com/post/how-to-integrate-quaternions/
Args:
quat: A unit quaternion [w, i, j, k] to integrate.
vel: The 3D angular velocity used to integrate the orientation. It is
assumed that the angular velocity is given in the same frame as the
quaternion and it has been properly scaled by the timestep over which the
integration is done. In particular the velocity should NOT be given in the
frame of the rotating object.
Returns:
The normalized integrated quaternion.
"""
vel = np.concatenate(([0], vel))
quat = quat + 0.5 * quat_mul(vel, quat)
return quat / np.linalg.norm(quat)
def mat_to_quat(
mat: Union[types.RotationMatrix,
types.HomogeneousMatrix]) -> types.QuatArray:
"""Return quaternion from homogeneous or rotation matrix.
Args:
mat: A homogeneous transform or rotation matrix
Returns:
A quaternion [w, i, j, k].
"""
quat = quaternion.from_rotation_matrix(mat)
return quaternion.as_float_array(quat)
def hmat_to_pos_quat(
hmat: types.HomogeneousMatrix
) -> Tuple[types.PositionArray, types.QuatArray]:
"""Return a cartesian position and quaternion from a homogeneous matrix.
Args:
hmat: A homogeneous transform or rotation matrix
Returns:
A tuple containing:
- A cartesian position [x, y, z].
- A quaternion [w, i, j, k].
"""
return hmat[:3, 3], mat_to_quat(hmat)
# LINT.ThenChange(_transformations.py)
| dm_robotics-main | py/transformations/_transformations_quat.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A manual test runner that runs each test file in a separate process.
This is needed because there is global state in spec_utils used to switch off
validation after some time (for performance on real robots). This automatic
switch-off causes the validation test to fail because validation is already
switched off by the time it runs, unless each test starts in a new process.
"""
import os
import subprocess
import sys
_MODULE = "dm_robotics.manipulation"
_EXCLUDED_PATHS = ["build", "./build", ".tox", "./.tox", "venv", "./venv"]
def test_file_paths(top_dir):
"""Yields the path to the test files in the given directory."""
def excluded_path(name):
return any(name.startswith(path) for path in _EXCLUDED_PATHS)
for dirpath, dirnames, filenames in os.walk(top_dir):
# do not search tox or other hidden directories:
remove_indexes = [
i for i, name in enumerate(dirnames) if excluded_path(name)
]
for index in reversed(remove_indexes):
del dirnames[index]
for filename in filenames:
if filename.endswith("test.py"):
yield os.path.join(dirpath, filename)
def module_name_from_file_path(pathname):
  # pathname will be like: "./file.py", "./dir/file.py" or "./dir1/dir2/file.py"
# convert this to a module.name:
submodule_name = pathname.replace("./", "").replace("/", ".")[0:-3]
return _MODULE + "." + submodule_name
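# Example trace: for pathname "./props/mesh_object_test.py" the replacements
# and the ".py" strip yield "props.mesh_object_test", so the returned module
# name is "dm_robotics.manipulation.props.mesh_object_test".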
def run_test(test_module_name):
return subprocess.call([sys.executable, "-m", test_module_name]) == 0
if __name__ == "__main__":
dir_to_search = sys.argv[1]
success = True
for test_path in test_file_paths(dir_to_search):
module_name = module_name_from_file_path(test_path)
success &= run_test(module_name)
sys.exit(0 if success else 1)
| dm_robotics-main | py/manipulation/run_tests.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| dm_robotics-main | py/manipulation/__init__.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package building script."""
import setuptools
def _get_requirements(requirements_file): # pylint: disable=g-doc-args
"""Returns a list of dependencies for setup() from requirements.txt.
Currently a requirements.txt is being used to specify dependencies. In order
to avoid specifying it in two places, we're going to use that file as the
source of truth.
Lines starting with -r will be ignored. If the requirements are split across
multiple files, call this function multiple times instead and sum the results.
"""
def line_should_be_included(line):
return line and not line.startswith("-r")
with open(requirements_file) as f:
return [_parse_line(line) for line in f if line_should_be_included(line)]
def _parse_line(s):
"""Parses a line of a requirements.txt file."""
requirement, *_ = s.split("#")
return requirement.strip()
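# Example trace (requirement string is illustrative):
# _parse_line("numpy >= 1.16  # pinned for tests") drops the trailing comment
# and surrounding whitespace, returning "numpy >= 1.16".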
setuptools.setup(
name="dm_robotics-manipulation",
package_dir={"dm_robotics.manipulation": ""},
packages=[
"dm_robotics.manipulation",
"dm_robotics.manipulation.props",
"dm_robotics.manipulation.props.parametric_object",
"dm_robotics.manipulation.props.parametric_object.rgb_objects",
"dm_robotics.manipulation.props.rgb_objects",
"dm_robotics.manipulation.props.utils",
"dm_robotics.manipulation.standard_cell",
],
version="0.5.0",
license="Apache 2.0",
author="DeepMind",
description="Parametrically defined mesh objects",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url="https://github.com/deepmind/dm_robotics/tree/main/py/manipulation",
python_requires=">=3.7, <3.11",
setup_requires=["wheel >= 0.31.0"],
install_requires=(_get_requirements("requirements.txt") +
_get_requirements("requirements_external.txt")),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries",
"Topic :: Scientific/Engineering",
],
zip_safe=True,
include_package_data=True,
)
| dm_robotics-main | py/manipulation/setup.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mesh_object.py."""
import os
from absl.testing import absltest
from absl.testing import parameterized
from dm_robotics.manipulation.props import mesh_object
import numpy as np
# Internal file import.
# Internal resources import.
_TEST_ASSETS_PATH = os.path.join(
os.path.dirname(__file__),
'utils/test_assets')
def _create_texture(img_size):
img = np.random.normal(loc=0, scale=1, size=img_size)
return img
def _create_texture_file(texture_filename):
# create custom texture and save it.
img_size = [4, 4]
texture = _create_texture(img_size=img_size)
with open(texture_filename, 'wb') as f:
f.write(texture)
class MeshObjectTest(parameterized.TestCase):
def test_create_object(self):
mesh_file = os.path.join(_TEST_ASSETS_PATH, 'octahedron.obj')
prop_name = 'p1'
prop = mesh_object.MeshProp(name=prop_name, visual_meshes=[mesh_file])
self.assertEqual(prop.name, prop_name)
self.assertEmpty(prop.textures)
def test_create_with_custom_texture(self):
mesh_file = os.path.join(_TEST_ASSETS_PATH, 'octahedron.obj')
out_dir = self.create_tempdir().full_path
texture_file = os.path.join(out_dir, 'tmp.png')
_create_texture_file(texture_file)
prop_name_1 = 'p1_custom_texture'
prop_1 = mesh_object.MeshProp(
name=prop_name_1, visual_meshes=[mesh_file], texture_file=texture_file)
self.assertEqual(prop_1.name, prop_name_1)
texture_1 = prop_1.textures[0]
self.assertNotEmpty(texture_1)
prop_name_2 = 'p2_custom_texture'
prop_2 = mesh_object.MeshProp(
name=prop_name_2, visual_meshes=[mesh_file], texture_file=texture_file)
self.assertEqual(prop_2.name, prop_name_2)
texture_2 = prop_2.textures[0]
self.assertNotEmpty(texture_2)
self.assertSequenceEqual(texture_1, texture_2)
if __name__ == '__main__':
absltest.main()
| dm_robotics-main | py/manipulation/props/mesh_object_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A prop originated in mesh files.
A "prop" is any object within an environment which the robot can manipulate. A
`MeshProp` allows users to use objects modeled in CAD within their Mujoco
simulations.
"""
import os
from typing import Optional, Sequence, Union
from dm_control import mjcf
from dm_robotics.manipulation.props.utils import mesh_formats_utils
from dm_robotics.moma import prop
# Internal file import.
# The default value of '1 1 1 1' has the effect of leaving the texture
# unchanged. Refer to http://www.mujoco.org/book/XMLreference.html#material .
DEFAULT_COLOR_RGBA = '1 1 1 1'
MIN_MASS = 0.001
_MUJOCO_SUPPORTED_MESH_TYPES = ('.stl', '.msh')
_MUJOCO_TEXTURE_TYPES = ('.png',)  # Trailing comma makes this a tuple.
_DEFAULT_SIZE = 0.005
_DEFAULT_POS = 0
_DEFAULT_FRICTION = (0.5, 0.005, 0.0001)
class MeshProp(prop.Prop):
"""Represents an object originated in XML and meshes."""
def _build_meshes_from_list(self,
mesh_list: Union[Sequence[str],
Sequence[Sequence[float]]],
mesh_prefix: str = 'visual') -> int:
"""Creates mesh assets from mesh files.
Args:
mesh_list: list of mesh files or pre-loaded meshes.
mesh_prefix: prefix for asset names.
Returns:
Number of processed meshes.
"""
mesh_idx = 0
for mesh_source in mesh_list:
name = 'mesh_%s_%s_%02d' % (mesh_prefix, self.name, mesh_idx)
if isinstance(mesh_source, str):
_, extension = os.path.splitext(mesh_source)
if extension in _MUJOCO_SUPPORTED_MESH_TYPES:
with open(mesh_source, 'rb') as f:
self._mjcf_root.asset.add(
'mesh',
name=name,
scale=self._size,
file=mjcf.Asset(f.read(), extension))
mesh_idx += 1
elif extension == '.obj':
msh_strings = mesh_formats_utils.obj_file_to_mujoco_msh(mesh_source)
for msh_string in msh_strings:
self._mjcf_root.asset.add(
'mesh',
name=name,
scale=self._size,
file=mjcf.Asset(msh_string, '.msh'))
mesh_idx += 1
else:
raise ValueError(f'Unsupported object extension: {extension}')
else: # TODO(b/195733842): add tests.
meshes, faces = mesh_source
for vertices, face in zip(meshes, faces):
self._mjcf_root.asset.add(
'mesh', name=name, scale=self._size, vertex=vertices, face=face)
mesh_idx += 1
return mesh_idx
def _build(self,
visual_meshes: Sequence[str],
collision_meshes: Optional[Sequence[str]] = None,
texture_file: Optional[str] = None,
name: Optional[str] = 'mesh_object',
size: Optional[Sequence[float]] = None,
color: Optional[str] = None,
pos: Optional[Sequence[float]] = None,
masses: Optional[Sequence[float]] = None,
mjcf_model_export_dir: Optional[str] = None) -> None:
"""Creates mesh assets from mesh files.
Args:
visual_meshes: list of paths to mesh files for a single asset.
collision_meshes: list of mesh files to use as collision volumes.
texture_file: path to the texture file of the mesh.
name: name of the mesh in MuJoCo.
size: scaling value for the object size.
color: an RGBA color in `str` format (from MuJoCo, for example '1 0 0 1'
for red). A color will overwrite any object texture. `None` (default)
will either use the texture, if provided, or the default color defined
in DEFAULT_COLOR_RGBA.
pos: initial position of the mesh. If not set, defaults to the origin.
masses: masses of the mesh files.
      mjcf_model_export_dir: directory path where the mjcf.model is saved in
        MJCF (XML) format.
"""
root = mjcf.element.RootElement(model=name)
root.worldbody.add('body', name='prop_root')
super()._build(name=name, mjcf_root=root, prop_root='prop_root')
collision_meshes = collision_meshes or visual_meshes
self._size = size or [_DEFAULT_SIZE] * 3
self._pos = pos or [_DEFAULT_POS] * 3
self._color_to_replace_texture = color
self._mjcf_model_export_dir = mjcf_model_export_dir
self._visual_mesh_count = self._build_meshes_from_list(
visual_meshes, mesh_prefix='visual')
self._collision_mesh_count = self._build_meshes_from_list(
collision_meshes, mesh_prefix='collision')
self._visual_dclass = self._mjcf_root.default.add(
'default', dclass='visual')
self._visual_dclass.geom.group = 1
self._visual_dclass.geom.conaffinity = 0
self._visual_dclass.geom.contype = 0
if not masses:
self._visual_dclass.geom.mass = MIN_MASS
self._visual_dclass.geom.rgba = (
DEFAULT_COLOR_RGBA if color is None else list(color))
self._collision_dclass = self.mjcf_model.default.add(
'default', dclass='collision')
self._collision_dclass.geom.group = 4
self._collision_dclass.geom.conaffinity = 1
self._collision_dclass.geom.contype = 1
self._collision_dclass.geom.solref = (.004, 1)
self._collision_dclass.geom.condim = 6
self._collision_dclass.geom.friction = _DEFAULT_FRICTION
if not masses:
self._collision_dclass.geom.mass = 0.2 / self._collision_mesh_count
self._masses = masses
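    # Axis-aligned bounding-box corners; note that only self._size[0] is
    # used, i.e. a uniform scale across the three axes is assumed.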
self._bbox_coords_axisp = [[
-self._size[0] * 0.5, -self._size[0] * 0.5, -self._size[0] * 0.5
], [self._size[0] * 0.5, self._size[0] * 0.5, self._size[0] * 0.5]]
if texture_file:
with open(texture_file, 'rb') as f:
self._mjcf_root.asset.add(
'texture',
name='tex_object',
type='2d',
file=mjcf.Asset(f.read(), '.png'))
self._main_mat = self._mjcf_root.asset.add(
'material', name='mat_texture', texture='tex_object')
self._make_model()
def _add(self, kind, parent=None, **kwargs):
parent = parent or self._mjcf_root.worldbody
result = parent.add(kind, **kwargs)
return result
def _add_geom(self, parent=None, **kwargs):
return self._add(kind='geom', parent=parent, **kwargs)
def _make_model(self):
# make visual geoms
for i in range(self._visual_mesh_count):
geom_name = 'mesh_%s_%02d_visual' % (self.name, i)
mesh_ref = 'mesh_visual_%s_%02d' % (self.name, i)
if self._color_to_replace_texture:
self._add_geom( # 'color' is used for visual mesh.
name=geom_name,
type='mesh',
mesh=mesh_ref,
pos=self._pos,
dclass=self._visual_dclass,
rgba=self._color_to_replace_texture)
else: # textured material will be used instead of color.
self._add_geom(
name=geom_name,
type='mesh',
mesh=mesh_ref,
pos=self._pos,
dclass=self._visual_dclass,
material='mat_texture')
# make collision geoms
for i in range(self._collision_mesh_count):
geom = self._add_geom(
name='mesh_%s_%02d_collision' % (self.name, i),
type='mesh',
mesh='mesh_collision_%s_%02d' % (self.name, i),
pos=self._pos,
dclass=self._collision_dclass)
if self._masses:
geom.mass = self._masses[i]
if self._mjcf_model_export_dir:
mjcf.export_with_assets(self.mjcf_model, self._mjcf_model_export_dir)
@property
def color(self):
return self._visual_dclass.geom.rgba
@property
def textures(self) -> Sequence[bytes]:
# Extract textures from the object.
textures = []
assets = self.mjcf_model.get_assets()
for asset_name, asset_data in assets.items():
_, file_extension = os.path.splitext(asset_name)
if file_extension.lower() in _MUJOCO_TEXTURE_TYPES:
textures.append(asset_data)
return textures
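# Example sketch (the mesh path is hypothetical; the pattern mirrors
# mesh_object_test.py):
#
#   prop = MeshProp(
#       name='my_prop',
#       visual_meshes=['/path/to/mesh.obj'],
#       color=[1, 0, 0, 1])  # Solid red, overriding any texture.
#
# The resulting `prop.mjcf_model` can then be attached to a larger model.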
| dm_robotics-main | py/manipulation/props/mesh_object.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manage collections of props."""
import collections
from typing import Any, Callable, Union
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
VersionedSequence = collections.namedtuple('VersionedSequence',
['version', 'ids'])
class PropSetDict(dict):
"""A dictionary that supports a function evaluation on every key access.
Extends the standard dictionary to provide dynamic behaviour for object sets.
"""
def __getitem__(self, key: Any) -> VersionedSequence:
# The method is called during [] access.
# Provides a collection of prop names.
return self._evaluate(dict.__getitem__(self, key))
def __repr__(self) -> str:
return f'{type(self).__name__}({super().__repr__()})'
def get(self, key) -> VersionedSequence:
return self.__getitem__(key)
def values(self):
values = super().values()
return [self._evaluate(x) for x in values]
def items(self):
new_dict = {k: self._evaluate(v) for k, v in super().items()}
return new_dict.items()
def _evaluate(
self, sequence_or_function: Union[VersionedSequence,
Callable[[], VersionedSequence]]
) -> VersionedSequence:
"""Based on the type of an argument, execute different actions.
    Supports static sequence containers or functions that create them. When the
    argument is a container, the function returns the argument as is. When a
    callable is provided, it is evaluated to create a container.
Args:
sequence_or_function: A sequence or a function that creates a sequence.
Returns:
A versioned set of names.
"""
if isinstance(sequence_or_function, VersionedSequence):
return sequence_or_function
new_sequence = sequence_or_function()
return new_sequence
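# Example sketch (keys, version and ids are hypothetical): values may be
# static `VersionedSequence` instances or zero-argument callables producing
# one; `PropSetDict` resolves both transparently on every access.
def _example_prop_set_dict() -> PropSetDict:
  return PropSetDict({
      'static_set': VersionedSequence(version='v1', ids=('a1', 'b2')),
      'dynamic_set': lambda: VersionedSequence(version='v1', ids=('c3',)),
  })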
| dm_robotics-main | py/manipulation/props/object_collection.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""RGB Objects datasets.
RGB-object datasets are created by applying a set of transformations to a cube.
The first collection contains 15 objects with assigned colors (red, green or
blue). The naming scheme reflects a color and a group; for example, 'r1' is a
red object from group 1. All names come from the set
(r, g, b) x (1, 2, 3, 5, 6), where group 4 is missing from the training set.
We also call them "single-dimensional objects" as they have a single axis of
deformation. Real objects were 3D printed in specific colors following this
naming convention. In addition, 5 object triplets have been defined from these
objects, each one containing 1 red, 1 green and 1 blue object.
The second collection has been generated starting from a seed object and a set
of maximally deformed objects r2, r3, r5, r6, r7, r8. All other objects are
generated by interpolation. 10 objects are inserted by linearly sampling
between the seed and 'rn': dn, fn, en, gn, hn, xn, ln, bn, mn, yn. Further
objects 'Omn' are added with the usual algebra Omn = (Om + On)//2.
A total of 152 objects are in the dataset.
Parameter bounds and the specific values used in object creation can be found
in manipulation/props/parametric_object/rgb_objects/parametric_rgb_object.py
"""
import collections
import dataclasses
import enum
import functools
import os
import typing
from typing import Callable, Dict, Iterable, Optional, Sequence, Tuple
from dm_robotics.manipulation.props import mesh_object
from dm_robotics.manipulation.props import object_collection
from dm_robotics.manipulation.props.parametric_object.rgb_objects import parametric_rgb_object
from dm_robotics.manipulation.props.parametric_object.rgb_objects import rgb_object_names
import numpy as np
# Internal resources import.
if typing.TYPE_CHECKING:
from dm_robotics.manipulation.props.parametric_object import parametric_object
# Workaround for invalid undefined-variable error in PyLint 2.5.
_Singleton = object_collection.Singleton
PropsSetType = object_collection.VersionedSequence
@enum.unique
class PropsVersion(enum.Enum):
"""Supported revisions of the RGB-objects.
Several iterations of RGB-objects have been designed. Listed below are those
we support in simulation and for real robot manipulation.
"""
# placeholder line for pre-release internal version number.
RGB_OBJECTS_V1_3 = parametric_rgb_object.RgbVersion.v1_3
V1_3 = PropsVersion.RGB_OBJECTS_V1_3 # Shortened version name.
@dataclasses.dataclass
class PropsDatasetType:
"""Class to describe access to a mesh dataset."""
version: PropsVersion # RGB-object dataset version.
ids: Iterable[str] # List of object names.
mesh_paths: Iterable[str] # List of directories to fetch mesh files from.
scale: float # Mesh scaling factor.
# A function to get an object name from a mesh file name.
get_object_id_func: Callable[[str], str]
_RESOURCES_ROOT_DIR = (
os.path.join(os.path.dirname(__file__), 'assets'))
## RGB-objects definitions.
_RGB_OBJECTS_MESHES = [
os.path.join(_RESOURCES_ROOT_DIR, 'rgb_v1.3/meshes/test_triplets'),
os.path.join(_RESOURCES_ROOT_DIR, 'rgb_v1.3/meshes/train'),
os.path.join(_RESOURCES_ROOT_DIR, 'rgb_v1.3/meshes/heldout')
]
_RGB_OBJECTS_PARAMS = rgb_object_names.RgbObjectsNames(
parametric_rgb_object.RgbVersion.v1_3).nicknames
# Objects whose names start with the letter 'd' are excluded from the dataset;
# they are sampled too closely in the parametric space to the seed object and
# so have little additional value.
_D_OBJECTS = [x for x in _RGB_OBJECTS_PARAMS if x.startswith('d')]
# s0 is the seed object which is also used in the test triplets.
RGB_OBJECTS_TEST_SET = sorted([
's0', 'r2', 'r3', 'r5', 'r6', 'b2', 'b3', 'b5', 'b6', 'g2', 'g3', 'g5', 'g6'
])
RGB_OBJECTS_FULL_SET = list((set(_RGB_OBJECTS_PARAMS) - set(_D_OBJECTS)).union(
set(RGB_OBJECTS_TEST_SET)))
# Held-out set consists of all objects with a single axis of deformation, e.g.
# 't2' etc.
_SINGLE_DEFORMATION_OBJECTS = [x for x in RGB_OBJECTS_FULL_SET if len(x) == 2]
# All single-deformation objects are intended for validation.
RGB_OBJECTS_HELDOUT_SET = sorted(_SINGLE_DEFORMATION_OBJECTS)
RGB_OBJECTS_TRAIN_SET = list(
set(RGB_OBJECTS_FULL_SET) - set(RGB_OBJECTS_HELDOUT_SET) -
set(_D_OBJECTS) - set(RGB_OBJECTS_TEST_SET))
# Arbitrary single letters to use in object naming.
_DEFORMATION_VALUES = ['f', 'e', 'h', 'x', 'l', 'm', 'y', 'r', 'u', 'v']
# 1 and 4 are intentionally excluded.
DEFORMATION_HELDOUT_AXES = ['2', '3', '5', '6']
DEFORMATION_TRAIN_AXES = [
'23', '25', '26', '35', '36', '37', '38', '56', '57', '58', '67'
]
DEFORMATION_AXES = (DEFORMATION_TRAIN_AXES + DEFORMATION_HELDOUT_AXES)
def _define_deformation_axes() -> Dict[str, Iterable[str]]:
"""Defines object sets for each axis of deformation."""
rgb_objects_dim = {}
for a in DEFORMATION_AXES:
rgb_objects_dim[a] = []
for v in _DEFORMATION_VALUES:
obj_id = f'{v}{a}'
# There are excluded objects we need to check for.
if obj_id in RGB_OBJECTS_FULL_SET:
rgb_objects_dim[a].append(f'{v}{a}')
return rgb_objects_dim
RGB_OBJECTS_DIM = _define_deformation_axes()
# Filename format is <object_id>_<set of parameters>.<mesh file extension>
_RGB_OBJECTS_ID_FROM_FILE_FUNC = lambda filename: filename.split('_')[0]
RGB_OBJECTS_MESH_SCALE = 1.0
PROP_FEATURES: Dict[PropsVersion, PropsDatasetType] = {
V1_3:
PropsDatasetType(
version=V1_3,
ids=RGB_OBJECTS_FULL_SET,
mesh_paths=_RGB_OBJECTS_MESHES,
scale=RGB_OBJECTS_MESH_SCALE,
get_object_id_func=_RGB_OBJECTS_ID_FROM_FILE_FUNC),
}
DEFAULT_COLOR_SET: Dict[str, Sequence[int]] = {
'RED': [1, 0, 0, 1],
'GREEN': [0, 1, 0, 1],
'BLUE': [0, 0, 1, 1]
}
def random_triplet(
rgb_version: PropsVersion = V1_3,
id_list: Optional[Iterable[str]] = None,
id_list_red: Optional[Iterable[str]] = None,
id_list_green: Optional[Iterable[str]] = None,
id_list_blue: Optional[Iterable[str]] = None) -> PropsSetType:
"""Get a triplet of 3 randomly chosen props.
  The function provides a set of 3 prop names. The user can then use
  each one of the names to instantiate an `RgbObjectProp` object and provide
  the desired color in the constructor.
Args:
rgb_version: RGB-Objects version.
id_list: A list of ids to restrict sampling of objects from.
id_list_red: A list of ids for the red object. It overrides id_list.
id_list_green: A list of ids for the green object. It overrides id_list.
id_list_blue: A list of ids for the blue object. It overrides id_list.
Returns:
    Random triplet of prop names, one each for red, green and blue.
"""
if id_list:
for object_id in id_list:
if object_id not in PROP_FEATURES[rgb_version].ids:
raise ValueError(f'id_list includes {object_id} which is not part of '
f'{rgb_version}')
else:
id_list = PROP_FEATURES[rgb_version].ids
if not id_list_red:
id_list_red = id_list
if not id_list_green:
id_list_green = id_list
if not id_list_blue:
id_list_blue = id_list
# Placeholder line for version used pre-release.
prop_ids = [
np.random.choice(id_list_red, 1)[0],
np.random.choice(id_list_green, 1)[0],
np.random.choice(id_list_blue, 1)[0]
]
return PropsSetType(version=rgb_version, ids=prop_ids)
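# Example sketch (object ids are from the v1.3 set): constraining each color
# channel to a single id makes the sampled triplet deterministic.
def _example_random_triplet() -> PropsSetType:
  return random_triplet(
      rgb_version=V1_3,
      id_list_red=['x5'],
      id_list_green=['y3'],
      id_list_blue=['v23'])  # ids -> ['x5', 'y3', 'v23'] (red, green, blue).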
def fixed_random_triplet(rgb_version: PropsVersion = V1_3) -> PropsSetType:
"""Gets one of the predefined triplets randomly.
Args:
rgb_version: RGB-Objects version. Only v1.3 supported.
Returns:
Triplet of object names from a predefined set.
"""
if rgb_version == V1_3:
obj_triplet = np.random.choice(
[s for s in PROP_TRIPLETS_TEST if s.startswith('rgb_test_triplet')])
return PROP_TRIPLETS_TEST[obj_triplet]
else:
raise ValueError(
        'Sampling predefined triplets of objects is not implemented for %s' %
rgb_version.name)
def _define_blue_prop_triplets(
base_str='rgb_blue_dim',
id_list=None,
axes=tuple(DEFORMATION_AXES)) -> Dict[str, functools.partial]:
"""Defines object sets for each axis of deformation."""
blue_obj_triplets = {}
for a in axes:
# Blue objects according to axes of deformation. Red and green are
# random from the full set.
blue_obj_triplets[f'{base_str}{a}'] = functools.partial(
random_triplet,
rgb_version=V1_3,
id_list=id_list,
id_list_blue=RGB_OBJECTS_DIM[a])
return blue_obj_triplets
PROP_TRIPLETS_TEST: Dict[str, PropsSetType] = {
# Object groups as per 'Triplets v1.0':
# https://github.com/deepmind/dm_robotics/blob/main/py/manipulation/props/rgb_objects/README.md#rgb-objects--for-robotic-manipulation
'rgb_test_triplet1': PropsSetType(V1_3, ('r3', 's0', 'b2')),
'rgb_test_triplet2': PropsSetType(V1_3, ('r5', 'g2', 'b3')),
'rgb_test_triplet3': PropsSetType(V1_3, ('r6', 'g3', 'b5')),
'rgb_test_triplet4': PropsSetType(V1_3, ('s0', 'g5', 'b6')),
'rgb_test_triplet5': PropsSetType(V1_3, ('r2', 'g6', 's0')),
}
# Placeholder line for triplets used pre-release.
RANDOM_PROP_TRIPLETS_FUNCTIONS = object_collection.PropSetDict({
# Return changing triplets on every access.
'rgb_train_random':
functools.partial(
random_triplet, rgb_version=V1_3, id_list=RGB_OBJECTS_TRAIN_SET),
'rgb_heldout_random':
functools.partial(
random_triplet, rgb_version=V1_3, id_list=RGB_OBJECTS_HELDOUT_SET),
'rgb_test_random': # Randomly loads one of the 5 test triplets.
functools.partial(fixed_random_triplet, rgb_version=V1_3),
# Placeholder line for test/train sets used pre-release.
})
PROP_TRIPLETS = object_collection.PropSetDict({
**PROP_TRIPLETS_TEST,
**RANDOM_PROP_TRIPLETS_FUNCTIONS,
})
def _get_all_meshes(rgb_version: PropsVersion = V1_3) -> Iterable[str]:
"""Get all mesh files from a list of directories."""
meshes = []
mesh_paths = PROP_FEATURES[rgb_version].mesh_paths
for path in mesh_paths:
for _, _, filenames in os.walk(path):
meshes.extend(
os.path.join(path, f)
for f in filenames
if f.endswith('.stl'))
return meshes
class RgbObjectParameters:
"""Parameters and bounds for RGB objects from version 1.0 and above.
Methods and variables have "generated" in their names to refer to the CAD
  pipeline from which the mesh dataset originated.
"""
supported_versions = PropsVersion
_dataset_generated_params = {} # contains CAD params for all RGB versions.
for ver in supported_versions:
_dataset_generated_params[ver] = rgb_object_names.RgbObjectsNames(
ver.value).nicknames # dictionary of CAD params per dataset version.
@classmethod
def min_max(
cls, rgb_version
) -> Tuple['collections.OrderedDict[str, rgb_object_names.ParametersDict]',
'collections.OrderedDict[str, rgb_object_names.ParametersDict]']:
"""Calculates min and max values for all dataset parameters."""
all_params = RgbObjectParameters._dataset_generated_params[rgb_version]
params_min = next(iter(all_params.values())).copy() # OrderedDict init.
params_max = next(iter(all_params.values())).copy() # OrderedDict init.
for param_dict in all_params.values():
for k, v in param_dict.items():
params_min[k] = min(params_min[k], v)
params_max[k] = max(params_max[k], v)
return (params_min, params_max)
def __init__(self,
rgb_version: PropsVersion = V1_3,
obj_id: Optional[str] = None):
self._rgb_version = rgb_version
if rgb_version not in self.supported_versions:
raise ValueError('Object %s. Version %s not supported. Supported: %s' %
(obj_id, rgb_version, self.supported_versions))
self._generated_params = RgbObjectParameters._dataset_generated_params[
rgb_version][obj_id]
self._parametric_object = parametric_rgb_object.RgbObject(rgb_version.value)
@property
def rgb_version(self):
return self._rgb_version
@property
def generated_params(self) -> 'parametric_object.ParametersDict':
"""Returns CAD params in {'sds': 4, 'shr': 0, ...} format."""
return self._generated_params
@property
def parametric_object(self) -> 'parametric_object.ParametricObject':
"""Returns parametric_rgb_object.RgbObject with object generation info.
Example how to validate parameters with parametric_object.ParametricObject
instance:
      parametric_object = ... # get instantiated object.
params = {'sds': 4, 'shr': 0, ...} # get or create params.
parametric_object.shape.check_instance(params) # returns True/False
"""
return self._parametric_object
class RgbDataset(metaclass=_Singleton):
"""A single instance of a dataset exists per process."""
def __init__(self):
self._dataset_stl_paths = {i: None for i in PropsVersion}
def clear_cache(self):
self._dataset_stl_paths = {i: None for i in PropsVersion}
def __call__(self, rgb_version: PropsVersion = V1_3):
if not self._dataset_stl_paths[rgb_version]:
self._dataset_stl_paths[rgb_version] = _get_all_meshes(rgb_version)
return self._dataset_stl_paths[rgb_version]
class RgbObjectProp(mesh_object.MeshProp):
"""RGB-Object from meshes."""
def _build(self,
rgb_version: PropsVersion = V1_3,
obj_id: Optional[str] = None,
name: str = 'rgb_object',
size: Optional[Sequence[float]] = None,
color: Optional[str] = None,
pos: Optional[Sequence[float]] = None,
randomize_size: bool = False,
mjcf_model_export_dir: Optional[str] = None):
stl_paths = RgbDataset()(rgb_version)
if size is None:
size = [PROP_FEATURES[rgb_version].scale] * 3
if randomize_size:
      # Size of the prop is randomly scaled by a factor in [0.8, 1.1)
# of the initial value. Size randomization encourages a more conservative
# agent behaviour.
size = np.array(size)
size *= np.random.random() * .3 + .8
size = list(size)
for path in stl_paths:
object_id_from_file = PROP_FEATURES[rgb_version].get_object_id_func(
os.path.basename(path))
if obj_id == object_id_from_file:
if rgb_version in RgbObjectParameters.supported_versions:
self._object_params = RgbObjectParameters(rgb_version, obj_id)
else:
self._object_params = None
return super()._build(
visual_meshes=[path],
name=name,
size=size,
color=color,
pos=pos,
mjcf_model_export_dir=mjcf_model_export_dir)
raise ValueError('Object ID %s does not exist in directories %s' %
(obj_id, PROP_FEATURES[rgb_version].mesh_paths))
@property
def object_params(self) -> Optional[RgbObjectParameters]:
"""None for unsupported versions."""
return self._object_params
class RandomRgbObjectProp(mesh_object.MeshProp):
"""Uniformly sample one object mesh from a mesh directory.
Currently, the picked random object could belong to any of train/test sets.
The planned open source version will provide random objects from the train set
only.
"""
def _build(self,
rgb_version: PropsVersion = V1_3,
name: str = 'rgb_object',
size: Optional[Sequence[float]] = None,
color: Optional[str] = None,
pos: Optional[Sequence[float]] = None,
mjcf_model_export_dir: Optional[str] = None):
mesh_dataset = RgbDataset()
stl_path = np.random.choice(mesh_dataset(rgb_version))
if size is None:
size = [PROP_FEATURES[rgb_version].scale] * 3
object_id_from_file = PROP_FEATURES[rgb_version].get_object_id_func(
os.path.basename(stl_path))
if rgb_version in RgbObjectParameters.supported_versions:
self._object_params = RgbObjectParameters(rgb_version,
object_id_from_file)
else:
self._object_params = None
return super()._build(
visual_meshes=[stl_path],
name=name,
size=size,
color=color,
pos=pos,
mjcf_model_export_dir=mjcf_model_export_dir)
@property
def object_params(self) -> Optional[RgbObjectParameters]:
"""None for unsupported versions."""
return self._object_params
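# Example sketch (mirrors rgb_object_test.py): building a single red seed
# object and reading back its CAD generation parameters.
def _example_build_rgb_object() -> RgbObjectProp:
  prop = RgbObjectProp(
      rgb_version=V1_3, obj_id='s0', name='s0',
      color=DEFAULT_COLOR_SET['RED'])
  _ = prop.object_params.generated_params  # CAD params as a ParametersDict.
  return prop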
| dm_robotics-main | py/manipulation/props/rgb_objects/rgb_object.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for rgb_object.py."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
from dm_robotics.manipulation.props import object_collection
from dm_robotics.manipulation.props.rgb_objects import rgb_object
import numpy as np
class RgbObjectTest(parameterized.TestCase):
@parameterized.named_parameters(("rgb_v1_3", rgb_object.V1_3, 152))
def test_all_rgb_objects_creation(self, rgb_version, num_objects):
self.assertLen(rgb_object.PROP_FEATURES[rgb_version].ids, num_objects)
colors = list(rgb_object.DEFAULT_COLOR_SET.values())
for (i, obj_id) in enumerate(rgb_object.PROP_FEATURES[rgb_version].ids):
color = colors[i % len(colors)]
prop = rgb_object.RgbObjectProp(
rgb_version=rgb_version, obj_id=obj_id, name=obj_id, color=color)
self.assertEqual(prop.name, obj_id)
np.testing.assert_array_equal(prop.color, color)
@parameterized.named_parameters(("rgb_v1_3", rgb_object.V1_3))
def test_random_props_creation(self, rgb_version):
num_runs = 40
for i in range(num_runs):
obj_id = str(i)
prop = rgb_object.RandomRgbObjectProp(
rgb_version=rgb_version, name=obj_id)
self.assertEqual(prop.name, obj_id)
def test_triplets_creation(self):
for prop_triplet in rgb_object.PROP_TRIPLETS.values():
for obj_id in prop_triplet.ids:
prop = rgb_object.RgbObjectProp(
rgb_version=prop_triplet.version, obj_id=obj_id, name=obj_id)
self.assertEqual(prop.name, obj_id)
@parameterized.named_parameters(("rgb_v1_3", rgb_object.V1_3))
def test_dynamic_triplets_creation(self, rgb_version):
# Test on a new dictionary.
names = ["a1", "a2", "a3"]
(id_list_red, id_list_green, id_list_blue) = [[x] for x in names]
d = object_collection.PropSetDict({
"s1_tuple":
rgb_object.PropsSetType(rgb_version, tuple(names)),
"s2_list":
rgb_object.PropsSetType(rgb_version, list(names)),
"s3_dynamic":
functools.partial(
rgb_object.random_triplet,
rgb_version=rgb_object.V1_3,
id_list_red=id_list_red,
id_list_green=id_list_green,
id_list_blue=id_list_blue),
})
for v in d.values():
self.assertSequenceEqual(v.ids, names)
def test_random_prop_triplet(self):
for triplet in rgb_object.RANDOM_PROP_TRIPLETS_FUNCTIONS.values():
for obj_id in triplet.ids:
prop = rgb_object.RgbObjectProp(
rgb_version=triplet.version, obj_id=obj_id, name=obj_id)
self.assertEqual(prop.name, obj_id)
@parameterized.named_parameters(("rgb_v1_3", rgb_object.V1_3))
def test_random_triplet(self, rgb_version):
for _ in range(20):
prop_triplet = rgb_object.random_triplet(rgb_version).ids
self.assertIsInstance(prop_triplet, list)
self.assertLen(prop_triplet, 3)
for _ in range(10):
prop_triplet = rgb_object.random_triplet().ids
self.assertIsInstance(prop_triplet, list)
self.assertLen(prop_triplet, 3)
@parameterized.named_parameters(("rgb_v1_3", rgb_object.V1_3))
def test_generated_params(self, rgb_version):
for obj_id in rgb_object.PROP_FEATURES[rgb_version].ids:
prop = rgb_object.RgbObjectProp(rgb_version=rgb_version, obj_id=obj_id)
self.assertEqual(prop.object_params.rgb_version, rgb_version)
generated_params = prop.object_params.generated_params
generated_param_names = (
prop.object_params.parametric_object.shape.param_names)
self.assertEqual(len(generated_params), len(generated_param_names))
self.assertEqual(
list(generated_params.keys()), list(generated_param_names))
# Validity of shape bounds. 2 available methods.
self.assertTrue(
prop.object_params.parametric_object.shape.check_instance(
generated_params))
param_bounds_validator = prop.object_params.parametric_object.shape_bounds
self.assertTrue(param_bounds_validator(generated_params))
@parameterized.named_parameters(("rgb_v1_3", rgb_object.V1_3))
def test_min_max(self, rgb_version):
(params_min,
params_max) = rgb_object.RgbObjectParameters.min_max(rgb_version)
self.assertEqual(params_min["sds"], 4)
self.assertEqual(params_max["sds"], 10)
def test_random_set_id_not_in_list_error(self):
with self.assertRaisesWithLiteralMatch(
ValueError, "id_list includes g13 which is not part of "
"PropsVersion.RGB_OBJECTS_V1_3"):
_ = rgb_object.random_triplet(
rgb_version=rgb_object.V1_3, id_list=["x2", "m3", "g13"]).ids
def test_random_triplet_id_list(self):
for _ in range(20):
prop_triplet = rgb_object.random_triplet(
rgb_version=rgb_object.V1_3,
id_list_red=["x5"],
id_list_green=["y3"],
id_list_blue=["v23"],
).ids
self.assertEqual(set(prop_triplet), set(["x5", "y3", "v23"]))
for _ in range(20):
prop_triplet = rgb_object.random_triplet(
rgb_version=rgb_object.V1_3,
id_list_red=rgb_object.RGB_OBJECTS_DIM["6"],
id_list_green=rgb_object.RGB_OBJECTS_DIM["23"],
id_list_blue=rgb_object.RGB_OBJECTS_DIM["67"],
).ids
self.assertIn(prop_triplet[0], rgb_object.RGB_OBJECTS_DIM["6"])
self.assertIn(prop_triplet[1], rgb_object.RGB_OBJECTS_DIM["23"])
self.assertIn(prop_triplet[2], rgb_object.RGB_OBJECTS_DIM["67"])
for _ in range(20):
prop_triplet = rgb_object.random_triplet(
rgb_version=rgb_object.V1_3,
id_list=rgb_object.RGB_OBJECTS_DIM["3"],
).ids
self.assertIn(prop_triplet[0], rgb_object.RGB_OBJECTS_DIM["3"])
self.assertIn(prop_triplet[1], rgb_object.RGB_OBJECTS_DIM["3"])
self.assertIn(prop_triplet[2], rgb_object.RGB_OBJECTS_DIM["3"])
def test_random_triplet_id_list_only_blue(self):
outside_56_once = False
for _ in range(20):
prop_triplet = rgb_object.random_triplet(
rgb_version=rgb_object.V1_3,
id_list_blue=rgb_object.RGB_OBJECTS_DIM["56"],
).ids
if prop_triplet[0] not in rgb_object.RGB_OBJECTS_DIM["56"]:
outside_56_once = True
self.assertIn(prop_triplet[0], rgb_object.RGB_OBJECTS_FULL_SET)
self.assertIn(prop_triplet[1], rgb_object.RGB_OBJECTS_FULL_SET)
self.assertIn(prop_triplet[2], rgb_object.RGB_OBJECTS_DIM["56"])
# If the red object is always from dim 56, there is a bug.
self.assertTrue(outside_56_once)
def test_object_set_is_locked(self):
self.assertEqual(
set(rgb_object.RGB_OBJECTS_FULL_SET),
set([ # sorted alphabetically.
"b2", "b3", "b5", "b6",
"e2", "e23", "e25", "e26", "e3", "e35", "e36", "e37", "e38", "e5",
"e56", "e57", "e58", "e6", "e67",
"f2", "f23", "f26", "f3", "f35", "f36", "f37", "f38", "f5", "f56",
"f57", "f58", "f6", "f67",
"g2", "g3", "g5", "g6",
"h2", "h23", "h25", "h26", "h3", "h35", "h36", "h37", "h38", "h5",
"h56", "h57", "h58", "h6", "h67",
"l2", "l23", "l25", "l26", "l3", "l35", "l37", "l38", "l5", "l56",
"l57", "l58", "l6", "l67",
"m2", "m23", "m25", "m26", "m3", "m35", "m37", "m38", "m5", "m56",
"m57", "m58", "m6", "m67",
"r2", "r23", "r25", "r26", "r3", "r35", "r37", "r38", "r5", "r56",
"r57", "r58", "r6",
"s0",
"u2", "u23", "u25", "u26", "u3", "u35", "u36", "u37", "u38", "u5",
"u56", "u57", "u58", "u6", "u67",
"v2", "v23", "v25", "v26", "v3", "v35", "v37", "v38", "v5", "v56",
"v57", "v58", "v6", "v67",
"x2", "x23", "x25", "x26", "x3", "x35", "x36", "x37", "x38", "x5",
"x56", "x57", "x58", "x6", "x67",
"y2", "y23", "y25", "y26", "y3", "y35", "y37", "y38", "y5", "y56",
"y57", "y58", "y6", "y67"
]))
self.assertEqual(
set(rgb_object.RGB_OBJECTS_TRAIN_SET),
set([ # sorted alphabetically.
"e23", "e25", "e26", "e35", "e36", "e37", "e38", "e56", "e57",
"e58", "e67",
"f23", "f26", "f35", "f36", "f37", "f38", "f56", "f57", "f58",
"f67",
"h23", "h25", "h26", "h35", "h36", "h37", "h38", "h56", "h57",
"h58", "h67",
"l23", "l25", "l26", "l35", "l37", "l38", "l56", "l57", "l58",
"l67",
"m23", "m25", "m26", "m35", "m37", "m38", "m56", "m57", "m58",
"m67",
"r23", "r25", "r26", "r35", "r37", "r38", "r56", "r57", "r58",
"u23", "u25", "u26", "u35", "u36", "u37", "u38", "u56", "u57",
"u58", "u67",
"v23", "v25", "v26", "v35", "v37", "v38", "v56", "v57", "v58",
"v67",
"x23", "x25", "x26", "x35", "x36", "x37", "x38", "x56",
"x57", "x58", "x67",
"y23", "y25", "y26", "y35", "y37", "y38", "y56", "y57", "y58",
"y67",
]))
self.assertEqual(
set(rgb_object.RGB_OBJECTS_TEST_SET),
set([ # sorted alphabetically.
"b2", "b3", "b5", "b6", "g2", "g3", "g5", "g6", "r2", "r3",
"r5", "r6", "s0"
]))
self.assertEqual(
set(rgb_object.RGB_OBJECTS_TEST_SET),
set([ # sorted alphabetically.
"b2", "b3", "b5", "b6", "g2", "g3", "g5", "g6", "r2", "r3",
"r5", "r6", "s0",
]))
@parameterized.named_parameters(("rgb_v1_3", rgb_object.V1_3))
def test_dataset(self, rgb_version):
stl_paths = rgb_object.RgbDataset()(rgb_version)
self.assertNotEmpty(stl_paths)
if __name__ == "__main__":
absltest.main()
| dm_robotics-main | py/manipulation/props/rgb_objects/rgb_object_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class that defines parametric objects.
The class ParametricObject is used to collect the object shape (ParametricShape)
and texture (ParametricTexture). Both shape and texture are represented as a
collection of parameters. Parameters live in a space which is the union of
bounding box conditions (e.g. m <= param_1 <= M); bounding boxes can also
be specified in terms of other parameters (e.g. m <= param_1 <= param_2).
"""
import collections
import enum
import logging
import re
from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union
@enum.unique
class ParametersUnits(enum.Enum):
MILLIMETER = 'mm'
CENTIMETER = 'cm'
METER = 'm'
DEGREE = 'deg'
RADIAN = 'rad'
ADIMENSIONAL = ''
@enum.unique
class ParametersTypes(enum.Enum):
INTEGER = int
FLOAT = float
class ParametersDict(collections.UserDict):
"""A dictionary that allows some arithmetic on dictionaries.
  This class inherits from `UserDict` and overloads the operators `+`,
`-`, `*`, `//` to allow some arithmetic operations on dictionaries that
describe `ParametricProperties`. Additions and subtractions operate on
two dictionaries (`A` and `B`) key by key adding/subtracting the
corresponding values (e.g. `A-B`). Multiplications and divisions operate
with a scalar float or int (`s`) on a dictionary (`A`) from the right-hand
side (e.g. `A*s` or `A//s`) and return a dictionary where all values have
been multiplied or divided by the scalar. With multiplications and
divisions, parameters are casted according to their type as specified in
the constructor (default types are assumed int).
"""
def __init__(self,
other: Optional[Mapping[str, Any]], *,
param_types: Optional[Tuple[ParametersTypes]] = None):
super().__init__()
if param_types is None:
self._param_types = (ParametersTypes.INTEGER,) * len(other)
else:
self._param_types = param_types
self.update(**other)
def __add__(self, other):
r = dict()
if isinstance(other, self.__class__):
if self.param_types != other.param_types:
        raise ValueError(
            'The added ParametersDict have non-matching param_types.')
for self_key, other_key in zip(self, other):
if self_key != other_key:
raise ValueError('The added ParametersDict have non-matching keys.')
r[self_key] = self[self_key] + other[other_key]
return ParametersDict(r, param_types=self.param_types)
else:
raise TypeError(f'unsupported __add__ operand type(s) '
f'for +: {self.__class__} and {type(other)}')
def __sub__(self, other):
r = dict()
if isinstance(other, self.__class__):
if self.param_types != other.param_types:
        raise ValueError(
            'Subtracted ParametersDict have non-matching param_types.')
for self_key, other_key in zip(self, other):
if self_key != other_key:
          raise ValueError(
              'The subtracted ParametersDict have non-matching keys.')
r[self_key] = self[self_key] - other[other_key]
return ParametersDict(r, param_types=self.param_types)
else:
raise TypeError(f'unsupported __sub__ operand type(s) '
f'for -: {self.__class__} and {type(other)}')
def __mul__(self, scale):
r = dict()
if isinstance(scale, float) or isinstance(scale, int):
for self_key, type_key in zip(self, self._param_types):
r[self_key] = type_key.value(self[self_key] * scale)
return ParametersDict(r, param_types=self.param_types)
else:
raise TypeError(f'unsupported __mul__ operand type(s) '
f'for *: {self.__class__} and {type(scale)} ')
def __floordiv__(self, scale):
r = dict()
if isinstance(scale, float) or isinstance(scale, int):
for self_key in self:
r[self_key] = self[self_key] // scale
return ParametersDict(r, param_types=self.param_types)
else:
raise TypeError(f'unsupported __floordiv__ operand type(s) '
f'for //: {self.__class__} and {type(scale)} ')
def __truediv__(self, scale):
r = dict()
if isinstance(scale, float) or isinstance(scale, int):
for self_key, type_key in zip(self, self._param_types):
r[self_key] = type_key.value(self[self_key]/scale)
return ParametersDict(r, param_types=self.param_types)
else:
raise TypeError(f'unsupported __truediv__ operand type(s) '
f'for /: {self.__class__} and {type(scale)} ')
def distance(self, other):
r = 0
if isinstance(other, self.__class__):
for self_key, other_key in zip(self, other):
if self_key != other_key:
          raise ValueError(
              'The compared ParametersDict have non-matching keys.')
r = r + (self[self_key] - other[other_key]) ** 2
return r
else:
raise TypeError(f'unsupported operand type(s) '
f'for `distance`: {self.__class__} and {type(other)}')
@property
def param_types(self) -> Tuple[ParametersTypes]:
"""Returns the tuple that contains the types of the parameters.
Returns:
tuple with the types of the parameters.
"""
return self._param_types
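# Example sketch (parameter names and values are hypothetical): the
# overloaded operators work key by key, and divisions cast results back to
# the declared parameter types (int by default).
def _example_parameters_dict_arithmetic() -> ParametersDict:
  a = ParametersDict({'sds': 4, 'shr': 10})
  b = ParametersDict({'sds': 6, 'shr': 20})
  midpoint = (a + b) / 2  # -> ParametersDict({'sds': 5, 'shr': 15}).
  assert a.distance(b) == (4 - 6)**2 + (10 - 20)**2
  return midpoint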
class ParametricMinMaxBounds():
"""A class to parametrically describe the parametric properties of an object.
Each parameter is specified with three quantities which correspond
to the name of the parameters, their minimum and their maximum value. The
name of each parameter is a string, the minimum and maximum can be either a
integer or a string, the latter option if they refer to the value of another
parameter. The minimum and maximum values are specified as tuple of lists
with minimum and maximum values. Each list specifies a subset for min and max
values of each parameter, the range of values for parameters is union of
all the subsets. Parameters can be either integers or floats. For each
parameter we have a string `si` (alphabetic only), a K-tuple of min (`mi1`,
..., `miK`) and a K-tuple for max (`Mi1`, ..., `MiK`) values. The string is
used to specify the name of the parameters (`s1`, ..., `sN`). The K-tuples
contain the mins ((`m11`, ..., `mN1`), ..., (`m1K`, ..., `mNK`)) and the max
((`M11`, ...,`MN1`), ..., (`M1K`, ..., `MNK`)). Both min and max can be
integers or strings; if a string then it has to coincide with the name of
another parameter which will be used as the min or max value. The valid set of
parameters is the union of K-subsets, each subset being (`m1k <= s1 <= M1k`,
..., `mNk <= sN <= MNk`) with `k` = `1`, ..., `K`.
"""
def __init__(
self,
param_names_bounds: Dict[str, Sequence[List[Union[int, str]]]],
param_dict_types: Optional[Dict[str, ParametersTypes]] = None) -> None:
"""The ParametricShape class initialiazer.
Args:
param_names_bounds: a dictionary with keys corresponding to the parameters
        names. Values need to be a list of lists. These lists have exactly
two elements. The first element is the min value for the given parameter.
The second element is the max value for the given parameter.
param_dict_types: (optional) a dictionary with keys corresponding to the
        parameters names and values corresponding to the type of parameters
        (currently only int and float are supported).
"""
param_names = tuple(param_names_bounds.keys())
param_bounds = tuple(param_names_bounds.values())
# Check the same number of min-max is provided
if param_bounds:
result = all(len(elem) == len(param_bounds[0]) for elem in param_bounds)
if not result:
        raise ValueError('The provided bounds do not have the same '
'dimension for all provided parameters')
else:
raise ValueError('The provided sequences of bounds are empty.')
# Check all min-max values are present
for n in param_names:
for b in param_names_bounds[n]:
if len(b) != 2:
raise ValueError(f'The provided bounds for {n} have wrong size. '
f'Expecting size 2 but I have found size {len(b)}')
number_bounds = len(param_bounds[0])
param_mins, param_maxs = [], []
for j in range(0, number_bounds):
param_mins.append(tuple([d[j][0] for d in param_bounds]))
param_maxs.append(tuple([d[j][1] for d in param_bounds]))
param_mins, param_maxs = tuple(param_mins), tuple(param_maxs)
for param_min, param_max in zip(param_mins, param_maxs):
if len(param_names) != len(param_min) or len(param_min) != len(
param_max):
raise ValueError('ParametricProperties '
'initialized with different sizes.\n'
f'param_names length is: {len(param_names)}\n'
f'param_min length is: {len(param_min)}\n'
f'param_max length is: {len(param_max)}\n')
for n in param_names:
if not n.isalpha():
raise ValueError('Property names should be alphabetic only. ' +
                         (f'Found a non-alphabetic character: {n}. ') +
                         'Numbers are reserved for parameter values. '
                         'See the `get_name` method of this class.')
self._param_mins = param_mins
self._param_maxs = param_maxs
self._param_names = param_names
if param_dict_types is None:
self._param_types = tuple([])
else:
self._param_types = tuple(param_dict_types.values())
def __call__(self, values: ParametersDict) -> bool:
"""A function to check the validity of the provided parameters.
The function requires a dictionary which contain all parameter values.
The dictionary is structured as follows:
{'par1': values[0], ..., 'parN': values[N-1]}
Args:
values: a dictionary ['params': value] for property.
Returns:
      true/false depending on whether the instantiated values are within bounds.
"""
check_belong_subset_union = False
check_belong_subset_error = ''
for param_min, param_max in zip(self._param_mins, self._param_maxs):
# Variable to check parameters belong to at least one subset
check_belong_subset = True
# Check that provided dictionary has all properties names.
if not all(name in values for name in self._param_names):
raise ValueError('The provided dictionary misses some parameters. '
f'Class instantianted with: {self._param_names}. '
f'Parameter provided: {values.keys()}.')
# Check that provided dictionary are compatible with max and min values
for p, lb, ub in zip(self._param_names, param_min, param_max):
value = values[p]
# Define the lowerbound, conditional on `lb` being a `str` or an `int`
if isinstance(lb, int):
lower = lb
elif isinstance(lb, str):
lower = values[lb]
# Define the upperbound, conditional on `ub` being a `str` or an `int`
if isinstance(ub, int):
upper = ub
elif isinstance(ub, str):
upper = values[ub]
if not lower <= value <= upper:
check_belong_subset = False
check_belong_subset_error = check_belong_subset_error + (
f'{p} : '
f'{lower} <= {value} <= {upper}\n')
if not check_belong_subset:
check_belong_subset_error = check_belong_subset_error +(
f'Checking min values: {param_min} \n' +
f'and max values: {param_max} \n' +
f'for the object parameters: {values} \n')
check_belong_subset_union = check_belong_subset_union or check_belong_subset
if not check_belong_subset_union:
logging.info('Wrong object configuration:\n%s', check_belong_subset_error)
return False
# Testing the provided values are of the right type
if self._param_types:
for p, t in zip(self._param_names, self._param_types):
if not isinstance(values[p], t.value):
logging.info('Types for parameters %s are wrong: value %s not %s',
self._param_names, values[p], t)
logging.info('Parameter types are: %s. ', self._param_types)
logging.info('Parameter instance is: %s. ', values)
return False
return True
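# Example sketch (parameter names are hypothetical): bounds may reference
# other parameters by name; here 0 <= mn <= mx and 0 <= mx <= 10.
def _example_min_max_bounds() -> None:
  bounds = ParametricMinMaxBounds({'mn': [[0, 'mx']], 'mx': [[0, 10]]})
  assert bounds(ParametersDict({'mn': 3, 'mx': 5}))
  assert not bounds(ParametersDict({'mn': 7, 'mx': 5}))  # mn exceeds mx.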
class ParametricProperties:
"""A class to parametrically describe the parametric properties of an object.
ParametricProperties class manipulates parametric properties. A parametric
property is the collection of two things: (1) the parameters names in the form
of a N-tuple of string (e.g. ('param1_name', ..., 'paramN_name')); (2) a
function which takes a dict (e.g. {'param1_name': val1, ...,
'paramN_name': valN}) and checks if the given values are valid. Both the
param names and the function are specified in the constructor. A simple
example of a parametric object can be a cube which has only one parameter
(the length of the cube edge); the validity check is for this parameter to be
  greater than zero.
"""
def __init__(
self,
param_names: Tuple[str],
param_bounds_function: ParametricMinMaxBounds,
param_units: Optional[Tuple[ParametersUnits]] = None,
param_types: Optional[Tuple[ParametersTypes]] = None) -> None:
"""The ParametricShape class initialiazer.
Args:
param_names: a tuple containing the parameter names
param_bounds_function: a function that returns a bool given a dictionary
which defines an instance of a parametric object. This function returns
true if the given instance is valid (i.e. inside the bounds), false
        otherwise.
param_units: a tuple containing the unit of measure for the provided
parameters. If the parameter is a length accepted units are
millimeter, centimeter and meter (i.e. 'mm', 'cm', 'm'). If the
parameter is an angle accepted units are degree and radian (i.e. 'deg',
'rad'). If the parameter is adimensional then an empty string should be
provided. If no param_units is provided all parameters are
considered adimensional.
param_types: a tuple containing the types (int/float) for the provided
parameters. If no param_types is provided all parameters are
considered integers.
"""
self._param_bounds_function = param_bounds_function
self._param_names = param_names
if param_units is None:
self._param_units = (ParametersUnits.ADIMENSIONAL,) * len(
self._param_names)
else:
self._param_units = param_units
if param_types is None:
self._param_types = (ParametersTypes.INTEGER,) * len(self._param_names)
else:
self._param_types = param_types
def check_instance(self, values: ParametersDict) -> bool:
"""A function to check the validity of the provided parameters.
The function requires a dictionary which contain all parameter values.
The dictionary is structured as follows:
{'par1': values[0], ..., 'parN': values[N-1]}
Args:
values: a dictionary ['params': value] for property.
Returns:
      true/false depending on whether the instantiated values are valid.
"""
return self._param_bounds_function(values)
def get_name(self, property_dict: ParametersDict) -> str:
"""Creates a string which can be used as name for a parametric property.
The input dictionary should have the following structure:
{'par1': values[0], ..., 'parN': values[N-1]}
Args:
property_dict: a dictionary containing ['params': value].
Returns:
string with the names of the parameters separated by '_'.
"""
self.check_instance(property_dict)
return '_'.join('{}{}'.format(k, v) for k, v in property_dict.items())
@property
def param_names(self) -> Tuple[str, ...]:
"""Returns the tuple that contains the names of the parameters.
Returns:
tuple with the names of the parameters.
"""
return self._param_names
@property
def param_types(self) -> Tuple[ParametersTypes]:
"""Returns the tuple that contains the types of the parameters.
Returns:
tuple with the types of the parameters.
"""
return self._param_types
@property
def param_units(self) -> Tuple[ParametersUnits, ...]:
"""Returns the tuple that contains the units of the parameters.
Returns:
tuple with the units of the parameters.
"""
return self._param_units
def get_dict(self, param_string: str) -> ParametersDict:
"""Creates a dict given the string that describes the parametric properties.
The input string should have the following structure:
'par1values[0]_ ... _parNvalues[N-1]'
The output dict will have the following structure:
{'par1': values[0], ..., 'parN': values[N-1]}
Numeric values (i.e. values[0], ..., values[N-1]) can be `int` or `float`.
If `float` we assume the number is represented with a string of the form x.y
(i.e. `first0.01_second3_third12.25` is converted to {'first': 0.01,
'second': 3, 'third': 12.25}).
Args:
param_string: a string that describes the parametric property.
Returns:
a dict that describes the parametric property.
"""
array_names = re.findall('[a-zA-Z]+', param_string)
array_values = re.findall(r'(\d+(?:\.\d+)?)', param_string)
array = param_string.split('_')
if len(array_names) != len(array_values):
raise ValueError(f'Something wrong converting: {param_string}.')
if len(array_names) != len(array):
raise ValueError(f'Something wrong with delimiters: {param_string}.')
shape_dict = ParametersDict({})
iter_names = iter(self.param_names)
for value, name in zip(array_values, array_names):
if name != next(iter_names):
raise ValueError(f'Wrong parameters in given string: {param_string}.')
try:
shape_dict[name] = int(value)
except ValueError:
try:
shape_dict[name] = float(value)
except ValueError as exc:
raise ValueError(f'Cannot convert {value} to int or float.') from exc
try:
next(iter_names)
except StopIteration:
return shape_dict
raise ValueError('Error: not all parameters were provided.')
class ParametricObject:
"""A class to parametrically describe an object.
The object class is a collection of a parametric shape and a parametric
texture.
"""
def __init__(self,
object_shape: ParametricProperties,
object_texture: ParametricProperties):
"""The ParametricObject class initialiazer.
Args:
object_shape: the ParametricProperties to describe the object shape.
object_texture: the ParametricProperties to describe the object texture.
"""
self.shape = object_shape
self.texture = object_texture
def check_instance(self,
shape_dict: ParametersDict,
texture_dict: ParametersDict) -> bool:
"""A function to check the validity of the provided parameters.
Args:
shape_dict: a dictionary containing {'param_name': value} pairs for the shape.
texture_dict: a dictionary containing {'param_name': value} pairs for the texture.
Returns:
true if and only if both shape_dict and texture_dict are valid.
"""
check_shape = self.shape.check_instance(shape_dict)
check_texture = self.texture.check_instance(texture_dict)
return check_shape and check_texture
def get_name(
self,
shape_dict: ParametersDict,
texture_dict: ParametersDict) -> str:
"""Creates a string which can be used as name of the parametric object.
The input dictionary should have the following structure:
{'par1': values[0], ..., 'parN': values[N-1]}
The output string will have the following structure:
'par1values[0]_ ... _parNvalues[N-1]'
Args:
shape_dict: a dictionary describing the parametric shape.
texture_dict: a dictionary describing the parametric texture.
Returns:
a string which is the concatenation of shape and texture.
"""
shape_name = self.shape.get_name(shape_dict)
texture_name = self.texture.get_name(texture_dict)
return shape_name + '_' + texture_name
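# A minimal usage sketch (illustrative, not part of the original module): the
# cube example from the ParametricProperties docstring, with a hypothetical
# single 'edge' parameter. Any callable accepting a parameters dict works as
# `param_bounds_function`; a plain lambda stands in for a ParametricMinMaxBounds
# instance here.
if __name__ == '__main__':
  cube = ParametricProperties(
      param_names=('edge',),
      param_bounds_function=lambda values: values['edge'] > 0)
  assert cube.check_instance({'edge': 50})
  assert cube.get_name({'edge': 50}) == 'edge50'
  assert cube.get_dict('edge50') == {'edge': 50}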
| dm_robotics-main | py/manipulation/props/parametric_object/parametric_object.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for parametric_object.py."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_robotics.manipulation.props.parametric_object import parametric_object
class PropertyTest(parameterized.TestCase):
def test_size_mismatch_in_init(self):
with self.assertRaises(ValueError):
_ = parametric_object.ParametricMinMaxBounds({
'p': [[0, 255]], 'q': [[0, 255]], 'r': [[1, 2, 3]]})
with self.assertRaises(ValueError):
_ = parametric_object.ParametricMinMaxBounds({
'p': [[0, 255]], 'q': [[0, 255]], 'r': [[1]]})
with self.assertRaises(ValueError):
_ = parametric_object.ParametricMinMaxBounds({
'p': [[0, 255]], 'q': [[0, 255]], 'r': [[]]})
with self.assertRaises(ValueError):
_ = parametric_object.ParametricMinMaxBounds({
'p': [[0, 255]], 'q': [[0, 255]], 'r': []})
def test_check_instance_assertions(self):
param_names = ('p', 'q', 'r')
param_check = parametric_object.ParametricMinMaxBounds({
'p': [[0, 255]], 'q': [[0, 255]], 'r': [[0, 255]]})
prop = parametric_object.ParametricProperties(param_names, param_check)
prop.check_instance({'p': 122, 'q': 122, 'r': 122})
self.assertEqual(prop.param_names, ('p', 'q', 'r'))
self.assertEqual(len(prop.param_units), len(prop.param_names))
self.assertEqual(len(prop.param_types), len(prop.param_names))
reply = prop.check_instance({'p': 500, 'q': 0, 'r': 0})
self.assertEqual(False, reply)
reply = prop.check_instance({'p': 0, 'q': -500, 'r': 0})
self.assertEqual(False, reply)
param_check = parametric_object.ParametricMinMaxBounds({
'p': [[0, 255]], 'q': [['p', 'r']], 'r': [[0, 255]]})
prop = parametric_object.ParametricProperties(param_names, param_check)
prop.check_instance({'p': 0, 'q': 122, 'r': 255})
reply = prop.check_instance({'p': 0, 'q': 255, 'r': 122})
self.assertEqual(False, reply)
reply = prop.check_instance({'p': 122, 'q': 0, 'r': 255})
self.assertEqual(False, reply)
with self.assertRaises(ValueError):
prop.check_instance({'p': 0, 'q': 255})
param_names = ('p0', 'p1', 'p2')
with self.assertRaises(ValueError):
param_check = parametric_object.ParametricMinMaxBounds({
'p0': [[0, 255]], 'p1': [[0, 255]], 'p2': [[0, 255]]}).check_instance
def test_get_dict(self):
names = ('first', 'second', 'third')
checks = parametric_object.ParametricMinMaxBounds({
'first': [[0, 255]],
'second': [[0, 255]],
'third': [[0, 255]]})
prop = parametric_object.ParametricProperties(names, checks)
_ = prop.get_dict('first0_second0_third0')
with self.assertRaises(ValueError):
_ = prop.get_dict('first0_second0')
with self.assertRaises(ValueError):
_ = prop.get_dict('first0_second0_fourth0')
with self.assertRaises(ValueError):
_ = prop.get_dict('first0_second0_')
def test_set_types(self):
names = ('first', 'second', 'third')
types = {'first': parametric_object.ParametersTypes.INTEGER,
'second': parametric_object.ParametersTypes.INTEGER,
'third': parametric_object.ParametersTypes.INTEGER}
checks = parametric_object.ParametricMinMaxBounds({
'first': [[0, 255]],
'second': [[0, 255]],
'third': [[0, 255]]}, types)
prop = parametric_object.ParametricProperties(names, checks)
reply = prop.check_instance({'first': 0, 'second': 255, 'third': 122})
self.assertEqual(True, reply)
reply = prop.check_instance({'first': 0.0, 'second': 0.0, 'third': 0.0})
self.assertEqual(False, reply)
prop_shape = parametric_object.ParametricProperties(names, checks)
prop_texture = parametric_object.ParametricProperties(names, checks)
prop = parametric_object.ParametricObject(prop_shape, prop_texture)
reply = prop.check_instance({'first': 0, 'second': 255, 'third': 122},
{'first': 0, 'second': 255, 'third': 122})
self.assertEqual(True, reply)
names = ('first', 'second', 'third')
types = {'first': parametric_object.ParametersTypes.FLOAT,
'second': parametric_object.ParametersTypes.FLOAT,
'third': parametric_object.ParametersTypes.FLOAT}
checks = parametric_object.ParametricMinMaxBounds({
'first': [[0, 255]],
'second': [[0, 255]],
'third': [[0, 255]]}, types)
prop = parametric_object.ParametricProperties(names, checks)
_ = prop.check_instance({'first': 0.0, 'second': 0.0, 'third': 0.0})
reply = prop.check_instance({'first': 0, 'second': 255, 'third': 122})
self.assertEqual(False, reply)
types = {'first': parametric_object.ParametersTypes.FLOAT,
'second': parametric_object.ParametersTypes.INTEGER,
'third': parametric_object.ParametersTypes.FLOAT}
checks = parametric_object.ParametricMinMaxBounds({
'first': [[0, 255]],
'second': [[0, 255]],
'third': [[0, 255]]}, types)
prop = parametric_object.ParametricProperties(names, checks)
_ = prop.check_instance({'first': 0.0, 'second': 0, 'third': 0.0})
reply = prop.check_instance({'first': 0, 'second': 255, 'third': 122})
self.assertEqual(False, reply)
names = ('p', 'q', 'r')
checks = parametric_object.ParametricMinMaxBounds({
'p': [[0, 255]], 'q': [[0, 255]], 'r': [[0, 255]]})
prop = parametric_object.ParametricProperties(names, checks)
_ = prop.check_instance({'p': 0.0, 'q': 0, 'r': 0.0})
_ = prop.check_instance({'p': 0, 'q': 255.0, 'r': 122})
def test_parameters_min_max_tuples(self):
# 0 <= p0, p1, p2 <=1
# 3 <= p0, p1, p2 <=4
names = ('first', 'second', 'third')
checks = parametric_object.ParametricMinMaxBounds({
'first': [[0, 1], [3, 4]],
'second': [[0, 1], [3, 4]],
'third': [[0, 1], [3, 4]]})
prop = parametric_object.ParametricProperties(names, checks)
_ = prop.check_instance({'first': 0, 'second': 0, 'third': 0})
_ = prop.check_instance({'first': 1, 'second': 1, 'third': 1})
_ = prop.check_instance({'first': 3, 'second': 3, 'third': 3})
_ = prop.check_instance({'first': 4, 'second': 4, 'third': 4})
reply = prop.check_instance({'first': 2, 'second': 2, 'third': 2})
self.assertEqual(False, reply)
reply = prop.check_instance({'first': 2, 'second': 3, 'third': 3})
self.assertEqual(False, reply)
reply = prop.check_instance({'first': 2, 'second': 3, 'third': 2})
self.assertEqual(False, reply)
reply = prop.check_instance({'first': 3, 'second': 3, 'third': 2})
self.assertEqual(False, reply)
reply = prop.check_instance({'first': 5, 'second': 3, 'third': 3})
self.assertEqual(False, reply)
reply = prop.check_instance({'first': 1, 'second': 3, 'third': 3})
self.assertEqual(False, reply)
# if a == 2, 0 <= b <= c, 0 <= c <=10
# if 3 <= a <= 10, 0 <= b <= 10, 0 <= c <=10
names = ('a', 'b', 'c')
checks = parametric_object.ParametricMinMaxBounds({
'a': [[2, 2], [3, 10]],
'b': [[0, 'c'], [0, 10]],
'c': [[0, 10], [0, 10]]})
prop = parametric_object.ParametricProperties(names, checks)
# if a == 2, 0 <= b <= c, 0 <= c <=10
# if 3 <= a <= 10, 0 <= b <= 10, 0 <= c <=10
with self.assertRaises(ValueError):
checks = parametric_object.ParametricMinMaxBounds({
'a': [[2, 2], [3, 10]],
'b': [[0, 'c'], [0, 10]],
'c': [[0, 10]]})
_ = prop.check_instance({'a': 2, 'b': 2, 'c': 10})
_ = prop.check_instance({'a': 3, 'b': 5, 'c': 2})
reply = prop.check_instance({'a': 2, 'b': 5, 'c': 2})
self.assertEqual(False, reply)
def test_add_parametric_dict(self):
a = parametric_object.ParametersDict({'k1': 1, 'k2': 2})
b = parametric_object.ParametersDict({'k1': 3, 'k2': 4})
c = parametric_object.ParametersDict({'k3': 5, 'k4': 6})
d = parametric_object.ParametersDict({'k1': 7, 'k4': 8})
r = a + b
self.assertEqual(r['k1'], 4)
self.assertEqual(r['k2'], 6)
with self.assertRaises(TypeError):
r = a + 1
with self.assertRaises(ValueError):
r = a + c
with self.assertRaises(ValueError):
r = a + d
def test_sub_parametric_dict(self):
a = parametric_object.ParametersDict({'k1': 1, 'k2': 2})
b = parametric_object.ParametersDict({'k1': 3, 'k2': 4})
c = parametric_object.ParametersDict({'k3': 5, 'k4': 6})
d = parametric_object.ParametersDict({'k1': 7, 'k4': 8})
r = a - b
self.assertEqual(r['k1'], -2)
self.assertEqual(r['k2'], -2)
with self.assertRaises(TypeError):
r = a - 1
with self.assertRaises(ValueError):
r = a - c
with self.assertRaises(ValueError):
r = a - d
def test_mult_parametric_dict(self):
a = parametric_object.ParametersDict({'k1': 1, 'k2': 2})
b = parametric_object.ParametersDict({'k1': 3, 'k2': 4})
r = a * 0.5
self.assertEqual(r['k1'], int(a['k1']*1/2))
self.assertEqual(r['k2'], int(a['k2']*1/2))
with self.assertRaises(TypeError):
r = a * b
with self.assertRaises(TypeError):
r = 0.5 * b
def test_floordiv_parametric_dict(self):
a = parametric_object.ParametersDict({'k1': 1, 'k2': 2})
b = parametric_object.ParametersDict({'k1': 3, 'k2': 4})
r = a // 2
self.assertEqual(r['k1'], int(a['k1'] // 2))
self.assertEqual(r['k2'], int(a['k2'] // 2))
with self.assertRaises(TypeError):
r = a // b
with self.assertRaises(TypeError):
r = 0.5 // b
def test_types_conversion(self):
names = ('first', 'second', 'third')
checks = parametric_object.ParametricMinMaxBounds({
'first': [[0, 255]],
'second': [[0, 255]],
'third': [[0, 255]]})
prop = parametric_object.ParametricProperties(names, checks)
dictionary = {'first': 0, 'second': 3, 'third': 2}
param_dict = parametric_object.ParametersDict(dictionary)
name = prop.get_name(param_dict)
self.assertEqual(name, 'first0_second3_third2')
dictionary = {'first': 0.0, 'second': 0.1, 'third': 2.0}
param_dict = parametric_object.ParametersDict(dictionary)
name = prop.get_name(param_dict)
self.assertEqual(name, 'first0.0_second0.1_third2.0')
dictionary = {'first': 1.0, 'second': 3.0, 'third': 4}
param_dict = parametric_object.ParametersDict(dictionary)
name = prop.get_name(param_dict)
reconstruction = prop.get_dict(name)
self.assertEqual(dictionary, reconstruction)
def test_types_algebra(self):
dictionary = {'first': 0, 'second': 3, 'third': 2}
types = (parametric_object.ParametersTypes.INTEGER,)*3
param_dict = parametric_object.ParametersDict(dictionary, param_types=types)
param_scaled = param_dict * 1.1
self.assertAlmostEqual(param_scaled['first'], 0)
self.assertAlmostEqual(param_scaled['second'], 3)
self.assertAlmostEqual(param_scaled['third'], 2)
types = (parametric_object.ParametersTypes.FLOAT,)*3
param_dict = parametric_object.ParametersDict(dictionary, param_types=types)
param_scaled = param_dict * 1.1
self.assertAlmostEqual(param_scaled['first'], 0)
self.assertAlmostEqual(param_scaled['second'], 3.3)
self.assertAlmostEqual(param_scaled['third'], 2.2)
dictionary = {'first': 0, 'second': 3, 'third': 2}
types = (parametric_object.ParametersTypes.INTEGER,)*3
param_dict = parametric_object.ParametersDict(dictionary, param_types=types)
param_scaled = param_dict / 3
self.assertAlmostEqual(param_scaled['first'], 0)
self.assertAlmostEqual(param_scaled['second'], int(3/3))
self.assertAlmostEqual(param_scaled['third'], int(2/3))
types = (parametric_object.ParametersTypes.FLOAT,)*3
param_dict = parametric_object.ParametersDict(dictionary, param_types=types)
param_scaled = param_dict / 3
self.assertAlmostEqual(param_scaled['first'], 0)
self.assertAlmostEqual(param_scaled['second'], float(3/3))
self.assertAlmostEqual(param_scaled['third'], float(2/3))
if __name__ == '__main__':
absltest.main()
| dm_robotics-main | py/manipulation/props/parametric_object/parametric_object_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for parametric_rgb_object.py."""
import logging
from absl.testing import absltest
from absl.testing import parameterized
from dm_robotics.manipulation.props.parametric_object.rgb_objects import parametric_rgb_object
from dm_robotics.manipulation.props.parametric_object.rgb_objects import rgb_object_names
RgbVersion = parametric_rgb_object.RgbVersion
class RgbObjectTest(parameterized.TestCase):
@parameterized.named_parameters(
('cylinder', {'sds': 2, 'scx': 50, 'scy': 50, 'scz': 50, 'shr': 0,
'shx': 0, 'shy': 0, 'hlw': 0, 'drf': 0}),
('cube', {'sds': 4, 'scx': 50, 'scy': 50, 'scz': 50, 'shr': 0,
'shx': 0, 'shy': 0, 'hlw': 0, 'drf': 0}),
('triangle', {'sds': 3, 'scx': 50, 'scy': 50, 'scz': 50, 'shr': 0,
'shx': 0, 'shy': 0, 'hlw': 0, 'drf': 0}))
def test_rgb_shape_init_and_instances(self, shape):
my_rgb = parametric_rgb_object.RgbObject()
self.assertTrue(my_rgb.shape.check_instance(shape),
f'The provided RGB-object is invalid: {shape}.')
def test_rgb_object(self):
# Retrieve the RGB-objects specifications by creating an instance of the class.
my_rgb = parametric_rgb_object.RgbObject()
# Checking the shape tuple
rgb_shape_names = ('sds', 'shr', 'drf', 'hlw', 'shx', 'shy', 'scx', 'scy',
'scz')
self.assertTupleEqual(my_rgb.shape.param_names, rgb_shape_names)
# Checking the texture tuple
rgb_texture_tuple = ('r', 'g', 'b')
self.assertTupleEqual(my_rgb.texture.param_names, rgb_texture_tuple)
# Checking the shape tuple with iterators
names = iter(my_rgb.shape.param_names)
for ni, si in zip(names, rgb_shape_names):
self.assertEqual(ni, si)
with self.assertRaises(StopIteration):
next(names)
@parameterized.named_parameters(
('octagon', {'sds': 8, 'shr': 0, 'drf': 0, 'hlw': 0,
'shx': 0, 'shy': 0, 'scx': 50, 'scy': 50, 'scz': 50}),
('cube', {'sds': 4, 'shr': 0, 'drf': 0, 'hlw': 0,
'shx': 0, 'shy': 0, 'scx': 50, 'scy': 50, 'scz': 50}),
('triangle', {'sds': 3, 'shr': 0, 'drf': 0, 'hlw': 0,
'shx': 0, 'shy': 0, 'scx': 50, 'scy': 50, 'scz': 50}))
def test_check_instance(self, shape):
my_rgb = parametric_rgb_object.RgbObject()
texture = {'r': 0, 'g': 122, 'b': 255}
self.assertTrue(my_rgb.check_instance(shape, texture),
f'The provided RGB-object is invalid: {shape}, {texture}.')
def test_param_names(self):
my_rgb = parametric_rgb_object.RgbObject()
# Checking the shape tuple
rgb_shape_tuple = ('sds', 'shr', 'drf', 'hlw', 'shx', 'shy', 'scx', 'scy',
'scz')
self.assertTupleEqual(my_rgb.shape.param_names, rgb_shape_tuple)
# Checking the texture tuple
rgb_texture_tuple = ('r', 'g', 'b')
self.assertTupleEqual(my_rgb.texture.param_names, rgb_texture_tuple)
# Checking the shape tuple with iterators
names = iter(my_rgb.shape.param_names)
for ni, si in zip(names, rgb_shape_tuple):
self.assertEqual(ni, si)
with self.assertRaises(StopIteration):
next(names)
def test_param_types(self):
my_rgb = parametric_rgb_object.RgbObject()
# Checking the types tuple
reply = my_rgb.shape.check_instance({'sds': 4, 'shr': 0, 'drf': 0,
'hlw': 20, 'shx': 0, 'shy': 0,
'scx': 50, 'scy': 50, 'scz': 50})
self.assertEqual(True, reply)
reply = my_rgb.shape.check_instance({'sds': 4.0, 'shr': 0.0, 'drf': 0.0,
'hlw': 20.0, 'shx': 0.0, 'shy': 0.0,
'scx': 50.0, 'scy': 50.0, 'scz': 50.0})
self.assertEqual(False, reply)
@parameterized.parameters(
('b1', 'sds4_shr0_drf0_hlw20_shx0_shy0_scx50_scy50_scz50'),
('b2', 'sds8_shr0_drf0_hlw0_shx0_shy0_scx45_scy45_scz50'),
('b3', 'sds4_shr48_drf0_hlw0_shx0_shy0_scx46_scy49_scz63'),
('b5', 'sds4_shr0_drf0_hlw0_shx0_shy31_scx50_scy50_scz50'),
('b6', 'sds4_shr0_drf0_hlw0_shx0_shy0_scx32_scy48_scz96'),
('g1', 'sds4_shr0_drf0_hlw15_shx0_shy0_scx50_scy50_scz50'),
('g2', 'sds6_shr0_drf0_hlw0_shx0_shy0_scx46_scy46_scz50'),
('g3', 'sds4_shr25_drf0_hlw0_shx0_shy0_scx51_scy51_scz60'),
('g5', 'sds4_shr0_drf0_hlw0_shx0_shy20_scx50_scy50_scz50'),
('g6', 'sds4_shr0_drf0_hlw0_shx0_shy0_scx40_scy56_scz80'),
('r1', 'sds4_shr0_drf0_hlw35_shx0_shy0_scx50_scy50_scz50'),
('r2', 'sds10_shr0_drf0_hlw0_shx0_shy0_scx45_scy45_scz50'),
('r3', 'sds4_shr75_drf0_hlw0_shx0_shy0_scx41_scy49_scz71'),
('r5', 'sds4_shr0_drf0_hlw0_shx0_shy42_scx50_scy50_scz50'),
('r6', 'sds4_shr0_drf0_hlw0_shx0_shy0_scx29_scy29_scz150'))
def test_get_name_v1_0(self, nickname, full_name):
my_rgb = parametric_rgb_object.RgbObject()
rgb_objects_versioned_names = rgb_object_names.RgbObjectsNames()
shape_params = rgb_objects_versioned_names.nicknames[nickname]
self.assertEqual(my_rgb.shape.get_name(shape_params), full_name)
def test_v1_3(self):
my_rgb = parametric_rgb_object.RgbObject(version=RgbVersion.v1_3)
rgb_objects_versioned_names = rgb_object_names.RgbObjectsNames(
version=RgbVersion.v1_3)
for params in rgb_objects_versioned_names.nicknames.values():
self.assertTrue(my_rgb.shape.check_instance(params))
def test_versions(self):
_ = parametric_rgb_object.RgbObject(version=RgbVersion.v1_0)
_ = parametric_rgb_object.RgbObject(version=RgbVersion.v1_3)
v1_0 = rgb_object_names.RgbObjectsNames(version=RgbVersion.v1_0)
v1_3 = rgb_object_names.RgbObjectsNames(version=RgbVersion.v1_3)
logging.info('Version 1.0 has %s objects', len(v1_0.nicknames))
logging.info('Version 1.3 has %s objects', len(v1_3.nicknames))
self.assertIn('s0', v1_3.nicknames)
self.assertNotIn('d2', v1_3.nicknames)
if __name__ == '__main__':
absltest.main()
| dm_robotics-main | py/manipulation/props/parametric_object/rgb_objects/parametric_rgb_object_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define some common object shortcuts for RGB objects.
We can configure generative objects somewhat freely in parameters, but in
current experiments, we use a discrete set of nicknamed objects. These objects
are defined with a per-object constant set of parameters. For easier use, these
are specified here.
The initial version of RGB-objects (named RGB30) was created manually and is not
a part of the current parametric object pipeline.
There is a visualization of the objects and more information can be found here:
https://sites.google.com/corp/google.com/rgb--stacking#h.p_Hbvm_ijsde_K
"""
import collections
import copy
import itertools
from typing import Dict, Tuple
from dm_robotics.manipulation.props.parametric_object import parametric_object
from dm_robotics.manipulation.props.parametric_object.rgb_objects import parametric_rgb_object
# RGB-objects v1.0 are created with 3 deformations of a seed object (a cube with
# a 50mm side): G minor deformation, B average deformation, R major
# deformation. Deformations are chosen by sampling independently 5 parameters of
# the RGB-shapes. We have chosen: 1 - hollowness; 2 - number of sides;
# 3 - shrinking; 4 - not used; 5 - shear; 6 - form factor.
ParametersDict = parametric_object.ParametersDict
RgbVersion = parametric_rgb_object.RgbVersion
# Breaking these over > 100 lines does not help visibility so:
# pylint: disable=line-too-long, bad-whitespace
# pyformat: disable
_OBJECTS_V1_0 = collections.OrderedDict({
"g1": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 15, "shx": 0, "shy": 0, "scx": 50, "scy": 50, "scz": 50 }),
"b1": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 20, "shx": 0, "shy": 0, "scx": 50, "scy": 50, "scz": 50 }),
"r1": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 35, "shx": 0, "shy": 0, "scx": 50, "scy": 50, "scz": 50 }),
"g2": ParametersDict({ "sds": 6, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 46, "scy": 46, "scz": 50 }),
"b2": ParametersDict({ "sds": 8, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 45, "scy": 45, "scz": 50 }),
"r2": ParametersDict({ "sds": 10, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 45, "scy": 45, "scz": 50 }),
"g3": ParametersDict({ "sds": 4, "shr": 25, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 51, "scy": 51, "scz": 60 }),
"b3": ParametersDict({ "sds": 4, "shr": 48, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 46, "scy": 49, "scz": 63 }),
"r3": ParametersDict({ "sds": 4, "shr": 75, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 41, "scy": 49, "scz": 71 }),
"g5": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 20, "scx": 50, "scy": 50, "scz": 50 }),
"b5": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 31, "scx": 50, "scy": 50, "scz": 50 }),
"r5": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 42, "scx": 50, "scy": 50, "scz": 50 }),
"g6": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 40, "scy": 56, "scz": 80 }),
"b6": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 32, "scy": 48, "scz": 96 }),
"r6": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 29, "scy": 29, "scz": 150 })})
# pylint: enable=line-too-long, bad-whitespace
# pyformat: enable
# RGB-objects v1.3 adds two deformation axes to v1.1 (axis 7 and axis 8). The
# axis 6 in v1.1 is the deformation of the form factor by increasing the size
# along the z-axis. With v1.3 we introduce deformations which correspond to
# scaling along the x-axis (axis 7) and along the y-axis (axis 8). Moreover,
# we generate all objects as interpolations from a seed object s and the six
# objects r2, r3, r5, r6, r7, r8, which are the maximum deformations of the
# seed object along the available axes 2, 3, 5, 6, 7, 8. Finally, v1.3 also
# defines the objects Omn with Omn = (Om + On)//2.
# pylint: disable=line-too-long, bad-whitespace
# pyformat: disable
_OBJECTS_V1_3 = collections.OrderedDict({
"s": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 50, "scy": 50, "scz": 50}),
"r2": ParametersDict({ "sds": 10, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 45, "scy": 45, "scz": 50}),
"r3": ParametersDict({ "sds": 4, "shr": 75, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 41, "scy": 49, "scz": 71}),
"r5": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 42, "scx": 50, "scy": 50, "scz": 50}),
"r6": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 29, "scy": 29, "scz": 150}),
"r7": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 29, "scy": 150, "scz": 29}),
"r8": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 150, "scy": 29, "scz": 29})})
# Internal TODO A
_OBJECTS_V1_3_NON_GRASPABLE = ("l36", "m36", "y36", "r36", "r67", "v36")
_OBJECTS_V1_3_NON_UNIQUE = ("f25", "h27", "l27", "m27", "r27", "r68", "u27", "v27", "x27", "y27", "h28", "l28", "m28", "r28", "u28", "v28", "x28", "y28", "r78")
# pylint: enable=line-too-long, bad-whitespace
# pyformat: enable
def parameters_interpolations(
params_dict1: ParametersDict,
params_dict2: ParametersDict,
interpolation_length: int = 1,
interpolation_keys: Tuple[str, ...] = ()
) -> "collections.OrderedDict[str, ParametersDict]":
"""Function to interpolate in between two parametersDict.
This function can be used to interpolate in between parametersDicts. The
function takes as input two parameterDicts. The function interpolates in
between the parametersDicts generating a given number of equally-spaced
samples. By default, only one sample is added, corresponding to an element
in between the two provided parameterDicts (e.g. m = (s+e)/2). Generated
parameterDicts are returned in a collection. By default, associated labels are
m1, m2, ... or otherwise specified by the user through a tuple.
Args:
params_dict1: the first parameterDict.
params_dict2: the second parameterDict.
interpolation_length: the number of interpolation samples.
interpolation_keys: the keys used in the resulting collection.
Returns:
the collection of combinations
"""
result_dictionary = collections.OrderedDict({})
# Creating intermediate objects from two adjacent ones.
if not interpolation_keys:
interpolation_keys = tuple(
"m" + str(i) for i in range(1, interpolation_length + 1))
for i in range(1, interpolation_length+1):
obj_nickname = interpolation_keys[i - 1]
step = i / (interpolation_length + 1)
obj_values = params_dict1 + (params_dict2 - params_dict1) * step
result_dictionary.update({obj_nickname: obj_values})
return result_dictionary
def parameters_numeric_combinations(
params_dict_collection: "collections.OrderedDict[str, ParametersDict]",
labels_alphabetic_keys: Tuple[str, ...],
labels_numeric_keys: Tuple[str, ...],
combination_length: int = 2
) -> "collections.OrderedDict[str, ParametersDict]":
"""Function to combine collections of parametersDict with alphanumeric keys.
This function can be used to create combinations of parametersDict. The
function takes as input a collection of parameterDicts each labelled with an
alphanumeric string (e.g. e1, e2, e3, g1, g2, g3). The function combines the
parametersDicts taking the set of alphabetic keys (e.g. {e, g}) and the set of
numeric keys (e.g. {1, 2, 3}). By default, for each alphabetic key all
2-combinations of numeric keys are created using the parameterDicts algebra.
In the example above we have: e12 = (e1 + e2) // 2, e13 = (e1 + e3) // 2,
e23 = (e2 + e3) // 2, g12 = (g1 + g2) // 2, g13 = (g1 + g3) // 2,
g23 = (g2 + g3) // 2. Otherwise, a specific combination length can be
specified. If 3-combination is specified then the following parameterDicts
are created: e123 = (e1 + e2 + e3) // 3 and g123 = (g1 + g2 + g3) // 3.
Args:
params_dict_collection: a collection of parametersDict. The keys associated
to each parametersDict should be alphanumeric.
labels_alphabetic_keys: the alphabetic part of the key labels.
labels_numeric_keys: the numeric part of the key labels.
combination_length: the length of created combinations.
Returns:
the collection of combinations
"""
result_dictionary = collections.OrderedDict({})
# Creating intermediate objects from two adjacent ones.
for alpha in labels_alphabetic_keys:
for num in itertools.combinations(labels_numeric_keys, combination_length):
obj_nickname = alpha
obj_nickname = obj_nickname + num[0]
obj_values = params_dict_collection[alpha + num[0]]
for i in range(1, combination_length):
obj_nickname = obj_nickname + num[i]
obj_values = obj_values + params_dict_collection[alpha + num[i]]
obj_values = obj_values // combination_length
result_dictionary.update({obj_nickname: obj_values})
return result_dictionary
def parameters_equispaced_combinations(
params_dict_collection: "collections.OrderedDict[str, ParametersDict]",
coefficients: Tuple[int,
...]) -> "collections.OrderedDict[str, ParametersDict]":
"""Function to create equispaced combinations.
This function can be used to create equispaced distributed combinations of
parametersDict. The function takes as input a collection of alphabetically
tagged parameterDicts (e.g. a, .. z). The function combines the given
parametersDicts to create new parametersDicts constructed as a*ca + ..
+ z*cz with ca + .. + cz = 1. The number of generated parametersDicts is
controlled by fixing the valid values for the coefficients cn. The resulting
objects are named by concatenating, for each nonzero cn, the index of cn in
`coefficients` followed by the input key (e.g. '2a2b' for ca = cb = 0.5).
Args:
params_dict_collection: a collection of parametersDict.
coefficients: the valid coefficients (tuple of int) expressed as integer
percentage, (0, 25, 50, 75, 100) corresponds to (0, 0.25, 0.5, 0.75, 1).
Returns:
the collection of combinations
"""
result_dictionary = collections.OrderedDict({})
n = len(params_dict_collection)
# Creating valid combinations
valid_combinations = [
s for s in itertools.product(coefficients, repeat=n) if sum(s) == 100
]
# Creating convex combinations of objects
result_dictionary = collections.OrderedDict({})
for valid_combination in valid_combinations:
obj_nickname = ""
obj_values = None
p = params_dict_collection
for kn, vn, cn in zip(p.keys(), p.values(), valid_combination):
if obj_values is None:
obj_values = vn * cn
if cn != 0:
obj_nickname = str(coefficients.index(cn)) + kn # pytype: disable=attribute-error
else:
obj_values = obj_values + vn * cn
if cn != 0:
obj_nickname = obj_nickname + str(coefficients.index(cn)) + kn
result_dictionary.update({obj_nickname: obj_values//100})
return result_dictionary
class RgbObjectsNames:
"""A class to define the RGB-objects names according to different versions.
Args:
version: string to describe the RGB-objects version.
"""
def __init__(self, version: RgbVersion = RgbVersion.v1_0):
self.__version__ = version
self._nicknames = collections.OrderedDict({})
if version == RgbVersion.v1_0:
self._nicknames.update(_OBJECTS_V1_0)
if version == RgbVersion.v1_3:
self._nicknames = collections.OrderedDict(copy.deepcopy(_OBJECTS_V1_3))
# Adding dn, fn, en, un, hn, xn, ln, vn, mn, yn by linearly interpolating
# 10 objects in between "s" and "rn".
for n in ("2", "3", "5", "6", "7", "8"):
self._nicknames.update(parameters_interpolations(
_OBJECTS_V1_3["s"],
_OBJECTS_V1_3["r" + n],
10, ("d"+n, "f"+n, "e"+n, "u"+n, "h"+n,
"x"+n, "l"+n, "v"+n, "m"+n, "y"+n)))
# Updating the seed object name.
self._nicknames["s0"] = self._nicknames.pop("s")
# Creating intermediate Omn = (Om + On)//2.
self._nicknames.update(parameters_numeric_combinations(
self._nicknames,
("d", "f", "e", "h", "x", "l", "m", "y", "r", "u", "v"),
("2", "3", "5", "6", "7", "8"),
2))
# Remove 'd2' object which is identical to the seed 's0', both are cubes.
self._nicknames.pop("d2")
# Remove non-graspable and non-unique
for o in _OBJECTS_V1_3_NON_GRASPABLE + _OBJECTS_V1_3_NON_UNIQUE:
self._nicknames.pop(o, None)
# Add RGB v1.0 objects, except for the hollow ones.
self._nicknames.update(_OBJECTS_V1_0)
for o in ["r1", "b1", "g1"]:
self._nicknames.pop(o, None)
# This is necessary to guarantee one-to-one mapping: parameters <-> shapes
for v in self._nicknames.values():
if (v["shr"], v["drf"], v["hlw"], v["shx"], v["shy"]) == (0,)*5:
ordered_scale = sorted((v["scx"], v["scy"], v["scz"]))
v["scx"] = ordered_scale[0]
v["scy"] = ordered_scale[1]
v["scz"] = ordered_scale[2]
# Look for duplicate shapes and remove them, recording the duplicate groups.
my_rgb = parametric_rgb_object.RgbObject(version)
uniques, duplicates = set(), set()
self._duplicates_groups = dict()
for obj_nick, obj_dict in self.nicknames.items():
obj_name = my_rgb.shape.get_name(obj_dict)
if obj_name in uniques:
duplicates.add(obj_nick)
self._duplicates_groups[
obj_name] = self._duplicates_groups[obj_name] + (obj_nick,)
else:
uniques.add(obj_name)
self._duplicates_groups.update({obj_name: (obj_nick,)})
if duplicates:
for o in duplicates:
self._nicknames.pop(o, None)
@property
def nicknames(self) -> Dict[str, ParametersDict]:
# Dictionary of creation parameters sorted by object names.
return collections.OrderedDict(sorted(self._nicknames.items()))
@property
def duplicates(self) -> Dict[str, Tuple[str, ...]]:
# Dictionary of object names and associated nicknames.
return self._duplicates_groups
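# A minimal usage sketch (illustrative): build one midpoint between the v1.3
# seed object and its maximum axis-6 deformation, then list the v1.0 names.
if __name__ == "__main__":
  midpoints = parameters_interpolations(
      _OBJECTS_V1_3["s"], _OBJECTS_V1_3["r6"], interpolation_length=1)
  print(midpoints)  # One entry under the default label "m1".
  print(sorted(RgbObjectsNames(version=RgbVersion.v1_0).nicknames))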
| dm_robotics-main | py/manipulation/props/parametric_object/rgb_objects/rgb_object_names.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""A class that defines an rgb_objects as an instance of parametric_object.
An rgb_object is a specific instance of a parametric object. An rgb_object
has 9 parameters named: 'sds', 'shr', 'scx', 'scy', 'scz', 'shx', 'shy', 'hlw',
'drf'.
The meaning of these parameters is the following:
sds: Number of sides; 2 approximates a circle
shr: Shrink of the 2D shape before extruding
drf: Draft (pyramidizing) angle [deg]
hlw: % hollow; 0 is solid
shx: Shear in X [deg]
shy: Shear in Y [deg]
scx: Scale in X
scy: Scale in Y
scz: Scale in Z
These parameters are constrained as follows and this guarantees that the
mapping from parameters to shape is one-to-one. For RGB-objects version
1.0 the constraints are:
/ 2 <= sds <= 10
| 0 <= shr <= 90
| 0 <= drf <= 45
| 0 <= hlw <= 90
< 0 <= shx <= shy
| shx <= shy <= 45
| 10 <= scx <= scy
| scx <= scy <= scz
\ scy <= scz <= 150
"""
import enum
from dm_robotics.manipulation.props.parametric_object import parametric_object
@enum.unique
class RgbVersion(enum.Enum):
v1_0 = '1.0'
v1_3 = '1.3'
# pylint: disable=bad-whitespace
_RGB_SHAPE_BOUNDS = {}
_RGB_SHAPE_BOUNDS[RgbVersion.v1_0] = {
'sds': [[ 2, 10]],
'shr': [[ 0, 90]],
'drf': [[ 0, 45]],
'hlw': [[ 0, 90]],
'shx': [[ 0, 'shy']],
'shy': [['shx', 45]],
'scx': [[ 10, 'scy']],
'scy': [['scx', 'scz']],
'scz': [['scy', 150]]}
_RGB_SHAPE_BOUNDS[RgbVersion.v1_3] = {
'sds': [[ 2, 4 ],[ 2, 4 ],[ 2, 4 ],[ 2, 4 ],[ 5, 10 ]],
'shr': [[ 0, 90 ],[ 1, 90 ],[ 0, 90],[ 0, 90 ],[ 0, 90 ]],
'drf': [[ 0, 60 ],[ 0, 60 ],[ 1, 60],[ 0, 60 ],[ 0, 60 ]],
'hlw': [[ 0, 90 ],[ 0, 90 ],[ 0, 90],[ 0, 90 ],[ 0, 90 ]],
'shx': [[ 0, 'shy'],[ 0, 'shy'],[ 0, 'shy'],[ 0, 'shy'],[ 0, 'shy']],
'shy': [['shx', 60 ],[ 0, 60 ],[ 0, 60],[ 1, 60 ],[ 0, 60 ]],
'scx': [[ 10, 'scy'],[10, 150],[10, 150],[10, 150],[10, 150]],
'scy': [['scx', 'scz'],[10, 150],[10, 150],[10, 150],[10, 150]],
'scz': [['scy', 150],[10, 150],[10, 150],[10, 150],[10, 150]]}
# pylint: enable=bad-whitespace
_RGB_SHAPE_NAMES_TYPES = {
'sds': parametric_object.ParametersTypes.INTEGER,
'shr': parametric_object.ParametersTypes.INTEGER,
'drf': parametric_object.ParametersTypes.INTEGER,
'hlw': parametric_object.ParametersTypes.INTEGER,
'shx': parametric_object.ParametersTypes.INTEGER,
'shy': parametric_object.ParametersTypes.INTEGER,
'scx': parametric_object.ParametersTypes.INTEGER,
'scy': parametric_object.ParametersTypes.INTEGER,
'scz': parametric_object.ParametersTypes.INTEGER}
_RGB_TEXTURE_BOUNDS = {
'r': [[0, 255]],
'g': [[0, 255]],
'b': [[0, 255]]}
_RGB_TEXTURE_NAMES_TYPES = {
'r': parametric_object.ParametersTypes.INTEGER,
'g': parametric_object.ParametersTypes.INTEGER,
'b': parametric_object.ParametersTypes.INTEGER}
class RgbObject(parametric_object.ParametricObject):
"""A class to parametrically describe an RGB-object.
Args:
version: a string describing the RGB version to be used.
"""
def __init__(self, version: RgbVersion = RgbVersion.v1_0) -> None:
shape_names = tuple(_RGB_SHAPE_NAMES_TYPES.keys())
try:
self._shape_bounds = parametric_object.ParametricMinMaxBounds(
_RGB_SHAPE_BOUNDS[RgbVersion(version)],
_RGB_SHAPE_NAMES_TYPES)
except KeyError as key_error:
raise ValueError('Invalid `version` for RGB-objects.\n'
f'RgbObject initialized with version: {version}\n'
f'Available versions are: {list(RgbVersion)}\n') from key_error
shape = parametric_object.ParametricProperties(
shape_names, self._shape_bounds)
texture_names = tuple(_RGB_TEXTURE_NAMES_TYPES.keys())
texture_bounds = parametric_object.ParametricMinMaxBounds(
_RGB_TEXTURE_BOUNDS, _RGB_TEXTURE_NAMES_TYPES)
texture = parametric_object.ParametricProperties(
texture_names, texture_bounds)
super().__init__(shape, texture)
@property
def shape_bounds(self):
return self._shape_bounds
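# A minimal usage sketch (illustrative): validate a cube-like shape with a red
# texture and print its canonical name.
if __name__ == '__main__':
  rgb = RgbObject(version=RgbVersion.v1_0)
  shape = {'sds': 4, 'shr': 0, 'drf': 0, 'hlw': 0,
           'shx': 0, 'shy': 0, 'scx': 50, 'scy': 50, 'scz': 50}
  texture = {'r': 255, 'g': 0, 'b': 0}
  assert rgb.check_instance(shape, texture)
  print(rgb.get_name(shape, texture))  # sds4_..._scz50_r255_g0_b0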
| dm_robotics-main | py/manipulation/props/parametric_object/rgb_objects/parametric_rgb_object.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to process different mesh files formats."""
import itertools
import struct
import sys
from typing import Any, Sequence
# Internal file import.
DEFAULT_ENCODING = sys.getdefaultencoding()
def _flatten(list_of_lists: Sequence[Sequence[Any]]) -> Sequence[Any]:
return list(itertools.chain.from_iterable(list_of_lists))
def _relabel_obj_to_mj(vertices, faces, texcoords, normals):
"""Remapping elemets from obj to mujoco compatible format.
In normal obj we specify a list of 3D coordinates and texture coordinates.
Then when defining faces we can choose a different index for each one. This
way a single 3D location which has 2 different texture coordinates on 2
different faces can in obj be representated by defining a single 3D location
and 2 texture coords. Than when defining faces we match these up. However in
mujoco this is not possible as when face indexes into the array it uses the
same index for all. Therefore here we need to create a new vertex for every
used combination of position, texture coordinare and normal.
Args:
vertices: (vertex, 3) float list
faces: (faces, 3) int list
texcoords: (texcoords, 2) float list
normals: (normals, 3) float list
Returns:
A tuple of:
* vertices: (nvertex, 3) float list
* faces: (faces, 3) int list
* texcoords: (nvertex, 2) float list
* normals: (nvertex, 3) float list, or an empty list
"""
unique_triples_mapping = {}
remapped_vertices = []
remapped_faces = []
remapped_texcoords = []
remapped_normals = []
for face in faces:
this_face = []
for vertex in face:
if vertex not in unique_triples_mapping:
unique_triples_mapping[vertex] = len(remapped_vertices)
remapped_vertices.append(vertices[vertex[0]])
remapped_texcoords.append(texcoords[vertex[1]])
if normals:
remapped_normals.append(normals[vertex[2]])
this_face.append(unique_triples_mapping[vertex])
remapped_faces.append(this_face)
flat_remapped_vertices = _flatten(remapped_vertices)
flat_remapped_faces = _flatten(remapped_faces)
flat_remapped_texcoords = _flatten(remapped_texcoords)
flat_remapped_normals = _flatten(remapped_normals)
return (flat_remapped_vertices, flat_remapped_faces, flat_remapped_texcoords,
flat_remapped_normals)
def _parse_obj(path):
"""Parses obj from a path into a list of meshes."""
with open(path) as f:
obj_lines = f.readlines()
parsed_objects = []
vertices = []
faces = []
texcoords = []
normals = []
for l in obj_lines:
l = l.strip()
token = l.split(' ')
k = token[0]
v = token[1:]
if k == 'o':
if vertices:
parsed_objects.append((vertices, faces, texcoords, normals))
vertices = []
faces = []
texcoords = []
normals = []
elif k == 'v':
vertices.append(tuple(float(x) for x in v))
elif k == 'f':
v = [tuple(int(n) - 1 for n in b.split('/')) for b in v]
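      # Triangulate quads: split along the v[0]-v[2] diagonal into the
      # triangles (v2, v3, v0) here and (v0, v1, v2) below.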
if len(v) == 4:
faces.append([v[2], v[3], v[0]])
v = v[:3]
faces.append(v)
elif k == 'vt':
raw_texcoords = tuple(float(x) for x in v)
# There seems to be an inconsistency between Katamari and MuJoCo in the
# way the texture coordinates are defined, hence the flipped v coordinate.
texcoords.append((raw_texcoords[0], 1 - raw_texcoords[1]))
elif k == 'vn':
normals.append(tuple(float(x) for x in v))
parsed_objects.append((vertices, faces, texcoords, normals))
return parsed_objects
def object_to_msh_format(vertices, faces, texcoords, normals):
"""Coverts a mesh from lists to a binary MSH format."""
nvertex = len(vertices) // 3
nnormal = len(normals) // 3
ntexcoord = len(texcoords) // 2
nface = len(faces) // 3
# Convert to binary format according to:
# http://mujoco.org/book/XMLreference.html#mesh
msh_string = bytes()
msh_string += struct.pack('4i', nvertex, nnormal, ntexcoord, nface)
msh_string += struct.pack(str(3 * nvertex) + 'f', *vertices)
if nnormal:
msh_string += struct.pack(str(3 * nnormal) + 'f', *normals)
if ntexcoord:
msh_string += struct.pack(str(2 * ntexcoord) + 'f', *texcoords)
msh_string += struct.pack(str(3 * nface) + 'i', *faces)
return msh_string
def obj_file_to_mujoco_msh(mesh_file):
msh_strings = [
object_to_msh_format(*_relabel_obj_to_mj(*parsed_object))
for parsed_object in _parse_obj(mesh_file)
]
return msh_strings
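# A minimal usage sketch (illustrative; paths are hypothetical): convert an OBJ
# file into MuJoCo binary .msh blobs, one per 'o' object, and save them.
if __name__ == '__main__':
  for i, msh in enumerate(obj_file_to_mujoco_msh('/tmp/example.obj')):
    with open(f'/tmp/example_{i}.msh', 'wb') as f:
      f.write(msh)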
| dm_robotics-main | py/manipulation/props/utils/mesh_formats_utils.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_robotics.manipulation.standard_cell.rgb_basket."""
from absl.testing import absltest
from dm_control import mjcf
from dm_robotics.manipulation.standard_cell import rgb_basket
class RGBBasketTest(absltest.TestCase):
def test_initialize(self):
basket = rgb_basket.RGBBasket()
physics = mjcf.Physics.from_mjcf_model(basket.mjcf_model)
# Check if we can call step the basket.
physics.step()
def test_collision_geom_group_with_primitive_collisions_enabled(self):
basket = rgb_basket.RGBBasket()
self.assertNotEmpty(basket.collision_geom_group)
if __name__ == '__main__':
absltest.main()
| dm_robotics-main | py/manipulation/standard_cell/rgb_basket_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RGB basket."""
import os
from typing import List, Optional
from dm_control import composer
from dm_control import mjcf
from dm_robotics.moma.models import types
from dm_robotics.moma.models import utils as models_utils
RESOURCES_ROOT_DIR = (
os.path.join(os.path.dirname(__file__), 'rgb_basket_assets')
)
RGB_BASKET_XML_PATH = os.path.join(RESOURCES_ROOT_DIR, 'rgb_basket.xml')
DEFAULT_CAMERA_KWARGS = {
'basket_front_left':
dict(
fovy=35.,
pos=(0.970, -0.375, 0.235),
quat=(0.7815, 0.4900, 0.2050, 0.3272),
),
'basket_front_right':
dict(
fovy=35.,
pos=(0.970, 0.375, 0.235),
quat=(0.3272, 0.2050, 0.4900, 0.7815),
),
'basket_back_left':
dict(
fovy=30.,
pos=(0.060, -0.260, 0.390),
quat=(0.754, 0.373, -0.250, -0.480)),
}
# Define simplified collision geometry for better contacts and motion planning.
_CAMERAS_AND_CAMERA_STRUTS_GEOMS_COLLISIONS_KWARGS = [{
'name': 'basket_front_left_camera_strut_CollisionGeom',
'type': 'capsule',
'fromto': '0.42 -0.42 0.15 0.42 -0.42 0.25',
'size': '0.06'
}, {
'name': 'basket_front_left_camera_CollisionGeom',
'type': 'capsule',
'fromto': '0.43 -0.43 0.28 0.36 -0.36 0.24',
'size': '0.04'
}, {
'name': 'basket_front_right_camera_strut_CollisionGeom',
'type': 'capsule',
'fromto': '-0.42 -0.42 0.15 -0.42 -0.42 0.25',
'size': '0.06'
}, {
'name': 'basket_front_right_camera_CollisionGeom',
'type': 'capsule',
'fromto': '-0.43 -0.43 0.28 -0.36 -0.36 0.24',
'size': '0.04'
}, {
'name': 'basket_back_camera_vertical_strut_CollisionGeom',
'type': 'capsule',
'fromto': '-0.28 0.57 0.10 -0.28 0.57 0.35',
'size': '0.06'
}, {
'name': 'basket_back_camera_horizontal_strut_CollisionGeom',
'type': 'capsule',
'fromto': '-0.28 0.57 0.15 -0.28 0.43 0.15',
'size': '0.06'
}, {
'name': 'basket_back_camera_CollisionGeom',
'type': 'capsule',
'fromto': '-0.285 0.6 0.42 -0.255 0.53 0.36',
'size': '0.04'
}]
_BASKET_STRUTS_GEOMS_COLLISIONS_KWARGS = [{
'name': 'basket_back_strut_CollisionGeom',
'type': 'capsule',
'fromto': '0.4 0.42 0.15 -0.4 0.42 0.15',
'size': '0.05'
}, {
'name': 'basket_front_strut_CollisionGeom',
'type': 'capsule',
'fromto': '0.4 -0.42 0.15 -0.4 -0.42 0.15',
'size': '0.05'
}, {
'name': 'basket_right_strut_CollisionGeom',
'type': 'capsule',
'fromto': '-0.42 0.4 0.15 -0.42 -0.4 0.15',
'size': '0.05'
}, {
'name': 'basket_left_strut_CollisionGeom',
'type': 'capsule',
'fromto': '0.42 0.4 0.15 0.42 -0.4 0.15',
'size': '0.05'
}]
_BASKET_SURFACE_GEOMS_COLLISIONS_KWARGS = [
{
'name': 'basket_base_surface_CollisionGeom',
'type': 'box',
'pos': '0.0 0.0 -0.02',
'size': '0.25 0.25 0.02'
},
{
'name': 'basket_sloped_side_surfaces_CollisionGeom1',
'type': 'box',
'pos': '-0.28 0 0.07',
'size': '0.20 0.40 0.005',
'axisangle': '0 1 0 0.44506' # slope angle 25.5 deg
},
{
'name': 'basket_sloped_side_surfaces_CollisionGeom2',
'type': 'box',
'pos': '0.28 0.0 0.07',
'size': '0.20 0.40 0.005',
'axisangle': '0 1 0 2.69653' # slope angle 154.5 deg
},
{
'name': 'basket_sloped_side_surfaces_CollisionGeom3',
'type': 'box',
'pos': '0.0 -0.28 0.07',
'size': '0.40 0.20 0.005',
'axisangle': '1 0 0 2.69653' # slope angle 154.5 deg
},
{
'name': 'basket_sloped_side_surfaces_CollisionGeom4',
'type': 'box',
'pos': '0.0 0.28 0.07',
'size': '0.40 0.20 0.005',
'axisangle': '1 0 0 0.44506' # slope angle 25.5 deg
}
]
# Dictionary mapping body names to a list of their collision geoms
_CAMERAS_AND_CAMERA_STRUTS_COLLISION_GEOMS_DICT = {
'collision_basket': _CAMERAS_AND_CAMERA_STRUTS_GEOMS_COLLISIONS_KWARGS,
}
_BASKET_STRUTS_COLLISION_GEOMS_DICT = {
'collision_basket': _BASKET_STRUTS_GEOMS_COLLISIONS_KWARGS,
}
_BASKET_SURFACE_COLLISION_GEOMS_DICT = {
'collision_basket': _BASKET_SURFACE_GEOMS_COLLISIONS_KWARGS,
}
class RGBBasket(composer.Arena):
"""An arena corresponding to the basket used in the RGB setup."""
def _build(self, name: Optional[str] = None):
"""Initializes this arena.
Args:
name: (optional) A string, the name of this arena. If `None`, use the
model name defined in the MJCF file.
"""
super()._build(name)
self._mjcf_root.include_copy(
mjcf.from_path(RGB_BASKET_XML_PATH), override_attributes=True)
self._set_to_non_colliding_geoms()
self._add_cameras()
self._add_collision_geoms()
def _set_to_non_colliding_geoms(self):
"""Set mesh geoms of the basket to be non-colliding."""
for geom in self._mjcf_root.find_all('geom'):
geom.contype = 0
geom.conaffinity = 0
def _add_cameras(self):
"""Add basket cameras."""
camera_body = self.mjcf_model.find('body', 'rgb_basket').add(
'body', name='camera_ref', pos='0 0.6 0', quat='0.707 0 0 -0.707')
self._cameras = []
for camera_name, mjcf_camera_kwargs in DEFAULT_CAMERA_KWARGS.items():
self._cameras.append(
camera_body.add('camera', name=camera_name, **mjcf_camera_kwargs))
def _add_collision_geoms(self):
"""Add collision geoms."""
self.mjcf_model.worldbody.add(
'body', name='collision_basket', pos='0 0 0', quat='0.707 0 0 0.707')
self._cameras_and_camera_struts_collision_geoms = (
models_utils.attach_collision_geoms(
self.mjcf_model, _CAMERAS_AND_CAMERA_STRUTS_COLLISION_GEOMS_DICT))
self._basket_struts_collision_geoms = models_utils.attach_collision_geoms(
self.mjcf_model, _BASKET_STRUTS_COLLISION_GEOMS_DICT)
# Enable collision with the surface collision geoms of the basket.
collision_geoms_kwargs = models_utils.default_collision_geoms_kwargs()
collision_geoms_kwargs['contype'] = 1
collision_geoms_kwargs['conaffinity'] = 1
collision_geoms_kwargs['rgba'] = '1 1 0 0.3'
self._basket_surface_collision_geoms = models_utils.attach_collision_geoms(
self.mjcf_model, _BASKET_SURFACE_COLLISION_GEOMS_DICT,
collision_geoms_kwargs)
@property
def collision_geom_group(self):
collision_geom_group = (
self.cameras_and_camera_struts_collision_geom_group +
self.basket_struts_collision_geom_group +
self.basket_surface_collision_geom_group)
return collision_geom_group
@property
def cameras_and_camera_struts_collision_geom_group(self):
collision_geom_group = [
geom.full_identifier
for geom in self._cameras_and_camera_struts_collision_geoms
]
return collision_geom_group
@property
def basket_struts_collision_geom_group(self):
collision_geom_group = [
geom.full_identifier for geom in self._basket_struts_collision_geoms
]
return collision_geom_group
@property
def basket_surface_collision_geom_group(self):
collision_geom_group = [
geom.full_identifier for geom in self._basket_surface_collision_geoms
]
return collision_geom_group
@property
def cameras(self) -> List[types.MjcfElement]:
"""Basket cameras."""
return self._cameras
@property
def mjcf_model(self) -> mjcf.RootElement:
"""Returns the `mjcf.RootElement` object corresponding to this basket."""
return self._mjcf_root
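# A minimal usage sketch (illustrative): build the basket, compile the physics
# and render from the first attached camera. The rendering parameters below are
# assumptions, not part of the original module.
if __name__ == '__main__':
  basket = RGBBasket()
  physics = mjcf.Physics.from_mjcf_model(basket.mjcf_model)
  pixels = physics.render(height=240, width=320, camera_id=0)
  print(pixels.shape)  # (240, 320, 3)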
| dm_robotics-main | py/manipulation/standard_cell/rgb_basket.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module defining the interface for an image detector."""
import abc
from typing import Callable, Tuple
from dmr_vision import types
import numpy as np
Signature = Callable[[np.ndarray], Tuple[types.Centers, types.Detections]]
class ImageDetector(abc.ABC):
"""Image-based blob detector."""
@abc.abstractmethod
def __call__(self,
image: np.ndarray) -> Tuple[types.Centers, types.Detections]:
"""Detects something of interest in an image.
Args:
image: the input image.
Returns:
A dictionary mapping each detection name to
- the (u, v) coordinate of its barycenter, if found;
- `None` otherwise;
and a dictionary mapping each detection name to
- its contour superimposed on the input image, if produced;
- `None` otherwise.
"""
raise NotImplementedError
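# A minimal sketch of a conforming implementation (illustrative only): a
# trivial detector that reports no detection for a single hypothetical key.
if __name__ == '__main__':

  class NullDetector(ImageDetector):
    """A detector that never finds anything."""

    def __call__(
        self, image: np.ndarray) -> Tuple[types.Centers, types.Detections]:
      return {'target': None}, {'target': None}

  centers, detections = NullDetector()(np.zeros((4, 4, 3), dtype=np.uint8))
  assert centers == {'target': None}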
| dm_robotics-main | py/vision/detector.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `utils.py`."""
from absl.testing import absltest
from dmr_vision import robot_config
from dmr_vision import types
from dmr_vision import utils
import numpy as np
class PoseValidatorTest(absltest.TestCase):
def setUp(self):
super().setUp()
r_config = robot_config.get_robot_config("STANDARD_SAWYER")
center = np.append(r_config.basket_center, r_config.basket_height)
self.limits = types.PositionLimit(
upper=center + np.array([0.45, 0.45, 0.20]),
lower=center + np.array([-0.45, -0.45, -0.02]),
)
self.pose_validator = utils.PoseValidator(self.limits)
def testIsValid(self):
eps = np.array([1e-4, 0., 0.])
pos_slightly_above = self.limits.upper + eps
self.assertFalse(self.pose_validator.is_valid(pos_slightly_above))
pos_slightly_below = self.limits.lower - eps
self.assertFalse(self.pose_validator.is_valid(pos_slightly_below))
pos_in_limits = self.limits.upper - eps
self.assertTrue(self.pose_validator.is_valid(pos_in_limits))
if __name__ == "__main__":
absltest.main()
| dm_robotics-main | py/vision/utils_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module defining a color-based blob detector for camera images."""
from typing import Mapping, Optional, Tuple
from absl import logging
import cv2
from dmr_vision import detector
from dmr_vision import types
import numpy as np
class BlobDetector(detector.ImageDetector):
"""Color-based blob detector."""
def __init__(self,
color_ranges: Mapping[str, types.ColorRange],
scale: float = (1. / 6.),
min_area: int = 230,
mask_points: Optional[types.MaskPoints] = None,
visualize: bool = False,
toolkit: bool = False):
"""Constructs a `BlobDetector` instance.
Args:
color_ranges: A mapping between a given blob name and the range of YUV
color used to segment it from an image.
scale: Image rescaling factor. Used for increasing the frame rate, at the
cost of reducing the precision of the blob barycenter and contour.
min_area: The minimum area the detected blob must have.
      mask_points: (u, v) coordinates defining closed regions of interest in
        the image where the blob detector will not look for blobs.
visualize: Whether to output a visualization of the detected blob or not.
toolkit: Whether to display a YUV GUI toolkit for parameter tuning.
        Enabling this implicitly sets `visualize = True`.
"""
self._color_ranges = color_ranges
self._scale = np.array(scale)
self._min_area = min_area
self._mask_points = mask_points if mask_points is not None else ()
self._visualize = visualize
self._mask = None
self._toolkit = toolkit
if self._toolkit:
self._visualize = True
self._window_name = "UV toolkit"
self._window_size = (800, 1000)
cv2.namedWindow(
self._window_name,
cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_EXPANDED)
cv2.resizeWindow(self._window_name, self._window_size)
self._trackbar_scale = 1000
num_colors = len(self._color_ranges.keys())
if num_colors > 1:
cv2.createTrackbar("Color selector", self._window_name, 0,
len(self._color_ranges.keys()) - 1,
self._callback_change_color)
cv2.createTrackbar("Subsampling", self._window_name, 5, 10,
lambda x: None)
cv2.setTrackbarMin("Subsampling", self._window_name, 1)
self._u_range_trackbar = CreateRangeTrackbar(self._window_name, "U min",
"U max", self._color_ranges,
"U", self._trackbar_scale)
self._v_range_trackbar = CreateRangeTrackbar(self._window_name, "V min",
"V max", self._color_ranges,
"V", self._trackbar_scale)
self._callback_change_color(0)
def __del__(self):
if self._toolkit:
cv2.destroyAllWindows()
def __call__(self,
image: np.ndarray) -> Tuple[types.Centers, types.Detections]:
"""Finds color blobs in the image.
Args:
image: the input image.
Returns:
A dictionary mapping a blob name with
- the (u, v) coordinate of its barycenter, if found;
- `None`, otherwise;
and a dictionary mapping a blob name with
- its contour superimposed on the input image;
- `None`, if `BlobDetector` is run with `visualize == False`.
"""
# Preprocess the image.
image = self._preprocess(image)
# Convert the image to YUV.
yuv_image = cv2.cvtColor(image.astype(np.float32) / 255., cv2.COLOR_RGB2YUV)
# Find blobs.
blob_centers = {}
blob_visualizations = {}
for name, color_range in self._color_ranges.items():
blob = self._find_blob(yuv_image, color_range)
blob_centers[name] = blob.center * (1. / self._scale) if blob else None
blob_visualizations[name] = (
self._draw_blobs(image, blob) if self._visualize else None)
if self._toolkit:
self._update_gui_toolkit(yuv_image, image)
return blob_centers, blob_visualizations
def _preprocess(self, image: np.ndarray) -> np.ndarray:
"""Preprocesses an image for color-based blob detection."""
# Resize the image to make all other operations faster.
size = np.round(image.shape[:2] * self._scale).astype(np.int32)
resized = cv2.resize(image, (size[1], size[0]))
if self._mask is None:
self._setup_mask(resized)
# Denoise the image.
denoised = cv2.fastNlMeansDenoisingColored(
src=resized, h=7, hColor=7, templateWindowSize=3, searchWindowSize=5)
return cv2.multiply(denoised, self._mask)
def _setup_mask(self, image: np.ndarray) -> None:
"""Initialises an image mask to explude pixels from blob detection."""
self._mask = np.ones(image.shape, image.dtype)
for mask_points in self._mask_points:
cv2.fillPoly(self._mask, np.int32([mask_points * self._scale]), 0)
def _find_blob(self, yuv_image: np.ndarray,
color_range: types.ColorRange) -> Optional[types.Blob]:
"""Find the largest blob matching the YUV color range.
Args:
yuv_image: An image in YUV color space.
color_range: The YUV color range used for segmentation.
Returns:
If found, the (u, v) coordinate of the barycenter and the contour of the
segmented blob. Otherwise returns `None`.
"""
# Threshold the image in YUV color space.
lower = color_range.lower
upper = color_range.upper
mask = cv2.inRange(yuv_image.copy(), lower, upper)
    # Find contours. Note: the three-value return matches the OpenCV 3.x API;
    # OpenCV 4.x returns only (contours, hierarchy).
    _, contours, _ = cv2.findContours(
        image=mask, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_SIMPLE)
if not contours:
return None
# Find the largest contour.
max_area_contour = max(contours, key=cv2.contourArea)
# If the blob's area is too small, ignore it.
correction_factor = np.square(1. / self._scale)
normalized_area = cv2.contourArea(max_area_contour) * correction_factor
if normalized_area < self._min_area:
return None
# Compute the centroid.
moments = cv2.moments(max_area_contour)
if moments["m00"] == 0:
return None
cx, cy = moments["m10"] / moments["m00"], moments["m01"] / moments["m00"]
return types.Blob(center=np.array([cx, cy]), contour=max_area_contour)
def _draw_blobs(self, image: np.ndarray, blob: types.Blob) -> np.ndarray:
"""Draws the controuer of the detected blobs."""
frame = image.copy()
if blob:
# Draw center.
cv2.drawMarker(
img=frame,
position=(int(blob.center[0]), int(blob.center[1])),
color=(255, 0, 0),
markerType=cv2.MARKER_CROSS,
markerSize=7,
thickness=1,
line_type=cv2.LINE_AA)
# Draw contours.
cv2.drawContours(
image=frame,
contours=[blob.contour],
contourIdx=0,
color=(0, 0, 255),
thickness=1)
return frame
def _callback_change_color(self, color_index: int) -> None:
"""Callback for YUV GUI toolkit trackbar.
    Reads the current trackbar value and selects the associated color. Indices
    map to colors following the insertion order of the `color_ranges`
    dictionary.
Args:
color_index: The current value of the trackbar. Passed automatically.
"""
colors = list(self._color_ranges.keys())
selected_color = colors[color_index]
    color_range = self._color_ranges[selected_color]
    lower = color_range.lower
    upper = color_range.upper
self._u_range_trackbar.set_trackbar_pos(lower[1], upper[1])
self._v_range_trackbar.set_trackbar_pos(lower[2], upper[2])
cv2.setWindowTitle(self._window_name,
self._window_name + " - Color: " + selected_color)
def _update_gui_toolkit(self, image_yuv: np.ndarray,
image_rgb: np.ndarray) -> None:
"""Updates the YUV GUI toolkit.
Creates and shows the UV representation of the current image.
Args:
image_yuv: The current image in YUV color space.
image_rgb: The current image in RGB color space.
"""
subsample = cv2.getTrackbarPos("Subsampling", self._window_name)
img_u = image_yuv[0::subsample, 0::subsample, 1]
img_v = 1.0 - image_yuv[0::subsample, 0::subsample, 2]
pixel_color = image_rgb[0::subsample, 0::subsample, :]
pixel_color = pixel_color.reshape(np.prod(img_u.shape[0:2]), -1)
img_u = img_u.ravel()
img_v = img_v.ravel()
fig_size = 300
fig = np.full(shape=(fig_size, fig_size, 3), fill_value=255, dtype=np.uint8)
cv2.arrowedLine(
img=fig,
pt1=(0, fig_size),
pt2=(fig_size, fig_size),
color=(0, 0, 0),
thickness=2,
tipLength=0.03)
cv2.arrowedLine(
img=fig,
pt1=(0, fig_size),
pt2=(0, 0),
color=(0, 0, 0),
thickness=2,
tipLength=0.03)
cv2.putText(
img=fig,
text="U",
org=(int(0.94 * fig_size), int(0.97 * fig_size)),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.5,
color=(0, 0, 0),
thickness=2)
cv2.putText(
img=fig,
text="V",
org=(int(0.03 * fig_size), int(0.06 * fig_size)),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.5,
color=(0, 0, 0),
thickness=2)
for i in range(img_u.size):
color = tuple(int(p) for p in pixel_color[i, ::-1])
position = (int(img_u[i] * fig_size), int(img_v[i] * fig_size))
cv2.drawMarker(
img=fig,
position=position,
color=color,
markerType=cv2.MARKER_SQUARE,
markerSize=3,
thickness=2)
u_min, u_max = self._u_range_trackbar.get_trackbar_pos()
u_min = int(u_min * fig_size)
u_max = int(u_max * fig_size)
v_min, v_max = self._v_range_trackbar.get_trackbar_pos()
v_min = int((1.0 - v_min) * fig_size)
v_max = int((1.0 - v_max) * fig_size)
cv2.line(
img=fig,
pt1=(u_min, v_max),
pt2=(u_min, v_min),
color=(0, 0, 0),
thickness=2)
cv2.line(
img=fig,
pt1=(u_max, v_max),
pt2=(u_max, v_min),
color=(0, 0, 0),
thickness=2)
cv2.line(
img=fig,
pt1=(u_min, v_min),
pt2=(u_max, v_min),
color=(0, 0, 0),
thickness=2)
cv2.line(
img=fig,
pt1=(u_min, v_max),
pt2=(u_max, v_max),
color=(0, 0, 0),
thickness=2)
cv2.imshow(self._window_name, fig)
cv2.waitKey(1)
class CreateRangeTrackbar:
"""Class to create and control, on an OpenCV GUI, two trackbars representing a range of values."""
def __init__(self,
window_name: str,
trackbar_name_lower: str,
trackbar_name_upper: str,
color_ranges: Mapping[str, types.ColorRange],
color_code: str,
trackbar_scale: int = 1000):
"""Initializes the class.
Args:
window_name: Name of the window that will be used as a parent of the
created trackbar.
trackbar_name_lower: The name of the trackbar implementing the lower bound
of the range.
trackbar_name_upper: The name of the trackbar implementing the upper bound
of the range.
color_ranges: A mapping between a given blob name and the range of YUV
color used to segment it from an image.
color_code: The color code to change in `color_ranges`. Shall be "U" or
"V".
trackbar_scale: The trackbar scale to recover the real value from the
current trackbar position.
"""
self._window_name = window_name
self._trackbar_name_lower = trackbar_name_lower
self._trackbar_name_upper = trackbar_name_upper
self._color_ranges = color_ranges
self._color_code = color_code
self._trackbar_scale = trackbar_scale
self._trackbar_reset = False
# pylint: disable=g-long-lambda
cv2.createTrackbar(
self._trackbar_name_lower, self._window_name, 0,
self._trackbar_scale, lambda x: self._callback_update_threshold(
"lower", "lower", self._color_code, x))
cv2.createTrackbar(
self._trackbar_name_upper, self._window_name, 0,
self._trackbar_scale, lambda x: self._callback_update_threshold(
"upper", "upper", self._color_code, x))
# pylint: enable=g-long-lambda
def set_trackbar_pos(self, lower_value: float, upper_value: float) -> None:
"""Sets the trackbars to specific values."""
if lower_value > upper_value:
      logging.error(
          "Wrong values for setting range trackbars. The lower value "
          "must be less than the upper value. Provided lower: %f. "
          "Provided upper: %f.", lower_value, upper_value)
return
# To change the trackbar values avoiding the consistency check enforced by
# the callback to implement a range of values with two sliders, we set the
# variable self._trackbar_reset to `True` and then bring it back to
# `False`.
self._trackbar_reset = True
cv2.setTrackbarPos(self._trackbar_name_lower, self._window_name,
int(lower_value * self._trackbar_scale))
cv2.setTrackbarPos(self._trackbar_name_upper, self._window_name,
int(upper_value * self._trackbar_scale))
self._trackbar_reset = False
def get_trackbar_pos(self, normalized: bool = True) -> Tuple[float, float]:
"""Gets the trackbars lower and upper values."""
lower = cv2.getTrackbarPos(self._trackbar_name_lower, self._window_name)
upper = cv2.getTrackbarPos(self._trackbar_name_upper, self._window_name)
if normalized:
return lower / self._trackbar_scale, upper / self._trackbar_scale
else:
return lower, upper
def _callback_update_threshold(self, lower_or_upper: str, attribute: str,
color_code: str, value: int) -> None:
"""Callback for YUV GUI toolkit trackbar.
Reads current trackbar value and updates the associated U or V threshold.
    This callback assumes that two trackbars, `trackbar_name_lower` and
    `trackbar_name_upper`, form a range of values. As a consequence, when one
    of the two trackbars is moved, a consistency check ensures that the range
    remains valid (i.e. the lower value stays below the upper value and vice
    versa).
Typical usage example:
To pass it to an OpenCV/Qt trackbar, use this function in a lambda
as follows:
cv2.createTrackbar("Trackbar lower", ..., lambda x:
class_variable._callback_update_threshold("lower", "lower", "U", x))
Args:
lower_or_upper: The behaviour of this callback for the range. Shall be
`lower` or `upper`.
attribute: The name of the threshold in `self._color_ranges` for the
current selected color.
color_code: The color code to change. Shall be "U" or "V".
value: The current value of the trackbar.
"""
if not self._trackbar_reset:
if lower_or_upper == "lower":
limiting_value = cv2.getTrackbarPos(self._trackbar_name_upper,
self._window_name)
if value > limiting_value:
cv2.setTrackbarPos(self._trackbar_name_lower, self._window_name,
limiting_value)
return
elif lower_or_upper == "upper":
limiting_value = cv2.getTrackbarPos(self._trackbar_name_lower,
self._window_name)
if value < limiting_value:
cv2.setTrackbarPos(self._trackbar_name_upper, self._window_name,
limiting_value)
return
selected_color_index = cv2.getTrackbarPos("Color selector",
self._window_name)
colors = list(self._color_ranges.keys())
selected_color = colors[selected_color_index]
updated_value = value / self._trackbar_scale
color_threshold = getattr(self._color_ranges[selected_color], attribute)
if color_code == "U":
color_threshold[1] = updated_value
elif color_code == "V":
color_threshold[2] = updated_value
else:
logging.error(
"Wrong trackbar name. No U/V color code correspondence."
"Provided: `%s`.", color_code)
return
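# A minimal, hedged usage sketch (not used by the library itself). The YUV
# bounds below are arbitrary placeholders; calibrated ranges live in
# `blob_tracker_object_defs.PROP_SPEC`.
def _example_usage():  # pragma: no cover - illustrative only.
  color_ranges = {
      "example_blob":
          types.ColorRange(
              lower=np.array([0., 0.25, 0.25]),
              upper=np.array([1., 0.75, 0.75])),
  }
  blob_detector = BlobDetector(color_ranges=color_ranges, visualize=True)
  image = np.zeros((1200, 1920, 3), dtype=np.uint8)  # Stand-in RGB frame.
  centers, visualizations = blob_detector(image)
  return centers, visualizations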
| dm_robotics-main | py/vision/blob_detector.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for the blob detector node."""
import dataclasses
from typing import Mapping
from dmr_vision import types
@dataclasses.dataclass(init=False)
class BlobDetectorConfig:
"""Data class for the blob triangulation node.
Attributes:
node_name: the name of the ROS node.
input_queue_size: the input data queue size (see ROS documentation).
output_queue_size: the output data queue size (see ROS documentation).
topic_by_camera_name: a camera name to ROS topic mapping.
mask_by_camera_name: (u, v) coordinates defining closed regions of interest
in the image where the blob detector will not look for blobs.
scale: image scaling factor to increase speed and frame rate.
min_area: minimum size in pixels above which a blob is deemed valid.
"""
node_name: str
input_queue_size: int
output_queue_size: int
topic_by_camera_name: Mapping[str, str]
mask_by_camera_name: Mapping[str, types.MaskPoints]
scale: float
min_area: int
def get_config() -> BlobDetectorConfig:
"""Returns the parameters for running ROS blob detector node."""
## Base configs
config = BlobDetectorConfig()
## ROS node configuration
config.node_name = "blob_detector"
config.input_queue_size = 1
config.output_queue_size = 1
config.topic_by_camera_name = {
"basket_front_left": "/pylon_basket_front_left/image_raw",
"basket_front_right": "/pylon_basket_front_right/image_raw",
"basket_back_left": "/pylon_basket_back_left/image_raw",
}
config.mask_by_camera_name = {
"basket_front_left": [[
(0, 0),
(0, 320),
(976, 176),
(1920, 360),
(1920, 0),
]],
"basket_front_right": [[
(0, 0),
(0, 360),
(944, 176),
(1920, 400),
(1920, 0),
]],
"basket_back_left": [],
}
config.scale = 1. / 8.
config.min_area = 1000
return config
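# Hedged sketch of the mask format: each mask is a sequence of closed (u, v)
# polygons, one list of vertex tuples per excluded region. The rectangle below
# is an arbitrary example, not a calibrated region.
def _example_extra_mask() -> types.MaskPoints:  # pragma: no cover
  return [[(0, 0), (0, 100), (100, 100), (100, 0)]]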
| dm_robotics-main | py/vision/config_blob_detector.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch a ROS node to detect colored blobs from images."""
from absl import app
from absl import flags
from absl import logging
from dmr_vision import blob_detector
from dmr_vision import blob_tracker_object_defs
from dmr_vision import config_blob_detector
from dmr_vision import detector_node
import rospy
_CAMERA = flags.DEFINE_string(
name="camera",
default=None,
help=("The camera to use."
"Must be one of the keys in `cameras` in the configuration file."))
_PROPS = flags.DEFINE_list(
name="props",
default=[
blob_tracker_object_defs.Props.GREEN.value,
blob_tracker_object_defs.Props.RED.value,
blob_tracker_object_defs.Props.BLUE.value,
],
help="The names of the props to track.")
_VISUALISE = flags.DEFINE_boolean(
name="visualize",
default=False,
help="Whether to publish helper images of the detected blobs or not.",
)
_TOOLKIT = flags.DEFINE_boolean(
name="toolkit",
default=False,
help=("Whether to display a YUV GUI toolkit to find good YUV parameters to "
"detect blobs or not. Sets `visualize = True`."),
)
def main(_):
logging.info("Collecting configuration parameters.")
config = config_blob_detector.get_config()
try:
topic = config.topic_by_camera_name[_CAMERA.value]
except KeyError as ke:
raise ValueError("Please provide the name of one of the cameras listed in "
"the config `camera_namespaces` attribute. "
f"Provided: {_CAMERA.value}. Available: "
f"{[cam for cam in config.topic_by_camera_name]}.")
color_ranges = {}
for name in _PROPS.value:
prop = blob_tracker_object_defs.Props(name.lower())
color_ranges[name] = blob_tracker_object_defs.PROP_SPEC[prop]
logging.info("Initializing blob detector ROS node.")
rospy.init_node(name=config.node_name, anonymous=True)
detector = blob_detector.BlobDetector(
color_ranges=color_ranges,
scale=config.scale,
min_area=config.min_area,
mask_points=config.mask_by_camera_name[_CAMERA.value],
visualize=_VISUALISE.value,
toolkit=_TOOLKIT.value,
)
ros_node = detector_node.DetectorNode(
topic=topic,
detector=detector,
input_queue_size=config.input_queue_size,
output_queue_size=config.output_queue_size,
)
logging.info("Spinning ROS node.")
ros_node.spin()
logging.info("ROS node terminated.")
ros_node.close()
if __name__ == "__main__":
flags.mark_flag_as_required("camera")
app.run(main)
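# Example invocation (the script path is an assumption about your checkout):
#   python launch_blob_detector.py --camera=basket_front_left \
#     --props=green,red --visualize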
| dm_robotics-main | py/vision/launch_blob_detector.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common definitions of blob detector based objects."""
import enum
from dmr_vision import types
import numpy as np
@enum.unique
class Props(enum.Enum):
RED = "red"
GREEN = "green"
BLUE = "blue"
PROP_SPEC = {
Props.RED:
types.ColorRange(
lower=np.array([0., 0., 0.669]), upper=np.array([1., 0.518, 1.])),
Props.GREEN:
types.ColorRange(
lower=np.array([0., 0., 0.]), upper=np.array([1., 0.427, 0.479])),
Props.BLUE:
types.ColorRange(
lower=np.array([0., 0.568, 0.]), upper=np.array([1., 1., 0.590])),
}
ROS_PROPS = {
Props.RED: "/blob/red/pose",
Props.GREEN: "/blob/green/pose",
Props.BLUE: "/blob/blue/pose",
}
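# Hedged lookup sketch: resolving a prop name (e.g. as passed on the command
# line) to its YUV segmentation range and ROS pose topic.
def _example_lookup(name: str):  # pragma: no cover - illustrative only.
  prop = Props(name.lower())
  return PROP_SPEC[prop], ROS_PROPS[prop]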
| dm_robotics-main | py/vision/blob_tracker_object_defs.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module defining utility classes for ROS."""
import dataclasses
import threading
from typing import Optional
from absl import logging
import numpy as np
import rospy
from geometry_msgs import msg as geometry_msgs
from sensor_msgs import msg as sensor_msgs
import cv_bridge
from std_msgs import msg as std_msgs
@dataclasses.dataclass(frozen=True)
class PointData:
"""ROS data representing a point.
Attributes:
data: a Cartesian [x, y, z] point.
    frame_id: the frame id associated with the data.
    stamp: the time stamp associated with the data.
"""
data: np.ndarray
frame_id: str
stamp: rospy.Time
class CameraInfoHandler:
"""Handler for receiving camera info."""
def __init__(
self,
topic: str,
queue_size: int = 1,
):
"""Constructs a CameraInfoHandler instance.
Args:
topic: The ROS topic to subscribe to.
queue_size: The ROS subscriber queue size.
"""
self._topic = topic
self._queue_size = queue_size
self._lock = threading.Condition(threading.RLock())
self._subscriber = rospy.Subscriber(
name=self._topic,
data_class=sensor_msgs.CameraInfo,
callback=self.__call__,
queue_size=self._queue_size,
tcp_nodelay=True,
)
logging.info("Waiting for the first message on topic %s", topic)
rospy.wait_for_message(topic, sensor_msgs.CameraInfo)
def close(self) -> None:
"""Gently cleans up CameraInfoHandler and closes ROS topics."""
logging.info("Unregistering subscriber.")
with self._lock:
self._subscriber.unregister()
def wait(self) -> None:
"""Waits for next camera matrix and distortion params to be available."""
with self._lock:
self._lock.wait()
@property
def camera_matrix(self) -> np.ndarray:
"""The latest received camera matrix."""
with self._lock:
return self._camera_matrix
@property
def distortion_parameters(self) -> Optional[np.ndarray]:
"""The latest received camera distortions."""
with self._lock:
return self._distortion_parameters
@property
def stamp(self) -> rospy.Time:
"""The latest received timestamp."""
with self._lock:
return self._stamp
def __call__(self, camera_info_msg: sensor_msgs.CameraInfo) -> None:
with self._lock:
self._stamp = camera_info_msg.header.stamp
self._camera_matrix = np.array(camera_info_msg.K).reshape((3, 3))
self._distortion_parameters = np.array(camera_info_msg.D)
self._lock.notify()
def __enter__(self) -> bool:
return self._lock.__enter__()
def __exit__(self, *args, **kwargs) -> Optional[bool]:
return self._lock.__exit__(*args, **kwargs)
class ImageHandler:
"""Handler for receiving and decoding images."""
def __init__(
self,
topic: str,
encoding: str = "rgb8",
queue_size: int = 1,
):
"""Constructs a `ImageHandler` instance.
Args:
topic: The topic to subscribe to.
encoding: The desired encoding of the image.
queue_size: The queue size to use.
"""
self._encoding = encoding
self._bridge = cv_bridge.CvBridge()
self._lock = threading.Condition(threading.RLock())
self._subscriber = rospy.Subscriber(
name=topic,
data_class=sensor_msgs.Image,
callback=self.__call__,
queue_size=queue_size,
tcp_nodelay=True,
)
logging.info("Waiting for the first message on topic %s", topic)
rospy.wait_for_message(topic, sensor_msgs.Image)
def close(self) -> None:
"""Gently cleans up ImageHandler and closes ROS topics."""
logging.info("Unregistering subscriber.")
with self._lock:
self._subscriber.unregister()
def wait(self) -> None:
"""Waits for the next image to be available."""
with self._lock:
self._lock.wait()
@property
def data(self) -> np.ndarray:
"""The latest received image data."""
with self._lock:
return self._data
@property
def frame_id(self) -> str:
"""The latest received frame id."""
with self._lock:
return self._frame_id
@property
def stamp(self) -> rospy.Time:
"""The latest received timestamp."""
with self._lock:
return self._stamp
def __call__(self, image_msg: sensor_msgs.Image):
"""Grabs an image frame and convert it into OpenCV (i.e. NumPy) image."""
data = self._bridge.imgmsg_to_cv2(image_msg, self._encoding)
with self._lock:
self._data = data
self._frame_id = image_msg.header.frame_id
self._stamp = image_msg.header.stamp
self._lock.notify()
def __enter__(self) -> bool:
return self._lock.__enter__()
def __exit__(self, *args, **kwargs) -> Optional[bool]:
return self._lock.__exit__(*args, **kwargs)
class ImagePublisher:
"""Publisher for OpenCV images."""
def __init__(self,
topic: str,
encoding: str = "rgb8",
frame_id: Optional[str] = None,
queue_size: int = 1):
"""Constructs an `ImagePublisher` instance.
Args:
topic: The topic to publish to.
encoding: The desired encoding.
frame_id: The associated frame id.
queue_size: The queue size to use.
"""
self._encoding = encoding
self._frame_id = frame_id
self._bridge = cv_bridge.CvBridge()
self._publisher = rospy.Publisher(
name=topic,
data_class=sensor_msgs.Image,
queue_size=queue_size,
tcp_nodelay=True)
def publish(self,
image: np.ndarray,
stamp: Optional[rospy.Time] = None) -> None:
"""Publishes the image.
Args:
image: The image to publish.
stamp: A ROS timestamp.
"""
message = self._bridge.cv2_to_imgmsg(image, encoding=self._encoding)
message.header.frame_id = self._frame_id
message.header.stamp = stamp
self._publisher.publish(message)
def close(self) -> None:
"""Gently cleans up ImagePublisher and closes ROS topics."""
logging.info("Unregistering publisher.")
self._publisher.unregister()
class PointHandler:
"""Handler for receiving point data."""
def __init__(self, topic: str, queue_size: int = 1):
"""Constructs a `PointHandler` instance.
Args:
topic: The ROS topic to subscribe to.
queue_size: The ROS subscriber queue size.
"""
self._lock = threading.Condition(threading.RLock())
self._subscriber = rospy.Subscriber(
name=topic,
data_class=geometry_msgs.PointStamped,
callback=self.__call__,
queue_size=queue_size,
tcp_nodelay=True)
logging.info("Waiting for the first message on topic %s", topic)
try:
rospy.wait_for_message(topic, geometry_msgs.PointStamped, timeout=10.)
except rospy.exceptions.ROSException:
      logging.warning(
          "Did not receive a message on topic %s; the object may be "
          "occluded or the colors may be poorly calibrated.", topic)
def close(self) -> None:
"""Gently cleans up `PointHandler` and closes ROS topics."""
logging.info("Unregistering subscriber.")
with self._lock:
self._subscriber.unregister()
def wait(self) -> None:
"""Waits for the next point to be available."""
with self._lock:
self._lock.wait()
@property
def point_data(self) -> PointData:
"""The latest received point."""
with self._lock:
return self._point_data
@property
def stamp(self) -> rospy.Time:
"""The latest received timestamp."""
with self._lock:
return self._stamp
def __call__(self, point_msg: geometry_msgs.PointStamped) -> None:
"""Callback used by ROS subscriber."""
data = np.array([point_msg.point.x, point_msg.point.y, point_msg.point.z])
with self._lock:
self._stamp = point_msg.header.stamp
self._point_data = PointData(
data=data,
frame_id=point_msg.header.frame_id,
stamp=point_msg.header.stamp)
self._lock.notify()
class PointMessage(geometry_msgs.PointStamped):
"""Simplifies constructions of `PointStamped` messages."""
def __init__(self,
point: np.ndarray,
frame_id: Optional[str] = None,
stamp: Optional[rospy.Time] = None):
"""Constructs a `PointMessage` instance.
Args:
point: The point.
frame_id: The associated frame id.
stamp: A timestamp.
"""
super().__init__()
self.header = std_msgs.Header()
self.header.frame_id = frame_id
self.header.stamp = stamp
if len(point) == 2:
(self.point.x, self.point.y) = point
else:
(self.point.x, self.point.y, self.point.z) = point
class PointPublisher:
"""Publisher for 2D / 3D points."""
def __init__(self,
topic: str,
frame_id: Optional[str] = None,
queue_size: int = 1):
"""Constructs a `PointPublisher` instance.
Args:
topic: The topic to publish to.
frame_id: The associated frame id.
queue_size: The queue size to use.
"""
self._frame_id = frame_id
self._publisher = rospy.Publisher(
name=topic,
data_class=geometry_msgs.PointStamped,
queue_size=queue_size,
tcp_nodelay=True)
def publish(self,
point: np.ndarray,
stamp: Optional[rospy.Time] = None) -> None:
"""Publishes the point.
Args:
point: The point.
stamp: A ROS timestamp.
"""
message = PointMessage(point, frame_id=self._frame_id, stamp=stamp)
self._publisher.publish(message)
def close(self) -> None:
"""Gently cleans up PointPublisher and closes ROS topics."""
logging.info("Unregistering publisher.")
self._publisher.unregister()
class PoseMessage(geometry_msgs.PoseStamped):
"""Simplifies constructions of `PoseStamped` messages."""
def __init__(self,
pose: np.ndarray,
frame_id: Optional[str] = None,
stamp: Optional[rospy.Time] = None):
"""Constructs a `PoseMessage` instance.
Args:
pose: The pose.
frame_id: The associated frame id.
stamp: A ROS timestamp.
"""
super().__init__()
self.header = std_msgs.Header()
self.header.frame_id = frame_id
self.header.stamp = stamp
(self.pose.position.x, self.pose.position.y, self.pose.position.z,
self.pose.orientation.x, self.pose.orientation.y, self.pose.orientation.z,
self.pose.orientation.w) = pose
class PosePublisher:
"""Publisher for object poses."""
def __init__(self,
topic: str,
frame_id: Optional[str] = None,
queue_size: int = 1):
"""Constructs a `PosePublisher` instance.
Args:
topic: The topic to publish to.
frame_id: The associated frame id.
queue_size: The queue size to use.
"""
self._publisher = rospy.Publisher(
name=topic,
data_class=geometry_msgs.PoseStamped,
queue_size=queue_size,
tcp_nodelay=True)
self._frame_id = frame_id
def publish(self,
pose: np.ndarray,
stamp: Optional[rospy.Time] = None) -> None:
"""Publishes the pose.
Args:
pose: The pose expressed as 3D position and unit quaternion.
stamp: A ROS timestamp.
"""
message = PoseMessage(pose, frame_id=self._frame_id, stamp=stamp)
self._publisher.publish(message)
def close(self) -> None:
"""Gently cleans up PosePublisher and closes ROS topics."""
logging.info("Unregistering publisher.")
self._publisher.unregister()
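# Hedged end-to-end sketch: receive frames from one topic and republish them.
# Topic names are placeholders, and `rospy.init_node` is assumed to have been
# called by the surrounding program.
def _example_relay():  # pragma: no cover - illustrative only.
  handler = ImageHandler(topic="/pylon_example/image_raw")
  publisher = ImagePublisher(topic="/example/relay", frame_id="camera")
  while not rospy.is_shutdown():
    with handler:
      handler.wait()  # Block until a new frame arrives.
      image, stamp = handler.data, handler.stamp
    publisher.publish(image, stamp=stamp)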
| dm_robotics-main | py/vision/ros_utils.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| dm_robotics-main | py/vision/__init__.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module defining robot configurations."""
import dataclasses
import enum
from typing import Mapping, Optional, Tuple
from dmr_vision import types
DEFAULT_SAWYER_BASKET_CENTER = (0.6, 0.)
DEFAULT_BASKET_HEIGHT = 0.0498
@dataclasses.dataclass(frozen=True)
class RobotConfig:
"""Robot parameters.
Attributes:
name: unique robot name.
cameras: collection of cameras.
basket_center: center of playground relative to the robot base frame in the
xy plane.
basket_height: displacement of the playground from the robot base frame.
base_frame_name: the name (or id) that identifies the robot ROS base frame.
"""
name: str
cameras: Mapping[str, types.Camera]
basket_center: Optional[Tuple[float, float]] = None
basket_height: Optional[float] = None
base_frame_name: Optional[str] = None
@enum.unique
class RobotType(enum.Enum):
STANDARD_SAWYER = RobotConfig(
name="STANDARD_SAWYER",
cameras={
"basket_front_left":
types.Camera(
width=1920,
height=1200,
extrinsics=types.Extrinsics(
pos_xyz=(0.973, -0.375, 0.299),
quat_xyzw=(0.783, 0.329, -0.196, -0.489),
)),
"basket_front_right":
types.Camera(
width=1920,
height=1200,
extrinsics=types.Extrinsics(
pos_xyz=(0.978, 0.375, 0.294),
quat_xyzw=(0.332, 0.774, -0.496, -0.213),
)),
"basket_back_left":
types.Camera(
width=1920,
height=1200,
extrinsics=types.Extrinsics(
pos_xyz=(0.059, -0.251, 0.441),
quat_xyzw=(0.759, -0.482, 0.235, -0.370),
)),
},
basket_center=DEFAULT_SAWYER_BASKET_CENTER,
basket_height=DEFAULT_BASKET_HEIGHT,
base_frame_name="base",
)
def get_robot_config(robot_type: str) -> RobotConfig:
"""Retrieves robot configration."""
try:
return RobotType[robot_type.upper()].value
except KeyError as ke:
raise ValueError("No robot configuration available for given robot type. "
f"Can be one among {[robot.name for robot in RobotType]}. "
f"Provided robot type: {robot_type}.") from ke
| dm_robotics-main | py/vision/robot_config.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common types used in DeepMind Robotics vision package.
This module centralizes common data types, simplifying their creation and
manipulation and facilitating type hints.
"""
import dataclasses
from typing import Mapping, Optional, Sequence, Tuple
import numpy as np
MaskPoints = Sequence[Sequence[Tuple[int, int]]]
Centers = Mapping[str, Optional[np.ndarray]]
Detections = Mapping[str, Optional[np.ndarray]]
@dataclasses.dataclass(frozen=True)
class Intrinsics:
"""Camera intrinsics.
Attributes:
camera_matrix: intrinsic camera matrix for the raw (distorted) images: K =
[[fx 0 cx], [ 0 fy cy], [ 0 0 1]]. Projects 3D points in the camera
coordinate frame to 2D pixel coordinates using the focal lengths (fx, fy)
and principal point (cx, cy).
distortion_parameters: the distortion parameters, size depending on the
distortion model. For example, the "plumb_bob" model has 5 parameters (k1,
k2, t1, t2, k3).
"""
camera_matrix: np.ndarray
distortion_parameters: np.ndarray
@dataclasses.dataclass(frozen=True)
class Extrinsics:
"""Camera extrinsics.
Attributes:
pos_xyz: camera position in the world reference frame.
quat_xyzw: camera unit quaternion in the world reference frame.
"""
pos_xyz: Tuple[float, float, float]
quat_xyzw: Tuple[float, float, float, float]
@dataclasses.dataclass(frozen=True)
class Blob:
"""An image blob.
Attributes:
    center: (u, v) coordinates of the blob barycenter.
contour: Matrix of (u, v) coordinates of the blob contour.
"""
center: np.ndarray
contour: np.ndarray
@dataclasses.dataclass(frozen=True)
class Camera:
"""Camera parameters.
Attributes:
width: image width.
height: image height.
extrinsics: camera extrinsics.
intrinsics: camera intrinsics.
"""
width: int
height: int
extrinsics: Optional[Extrinsics] = None
intrinsics: Optional[Intrinsics] = None
@dataclasses.dataclass(frozen=True)
class ValueRange:
"""A generic N-dimensional range of values in terms of lower and upper bounds.
Attributes:
lower: A ND array with the lower values of the range.
upper: A ND array with the upper values of the range.
"""
lower: np.ndarray
upper: np.ndarray
@dataclasses.dataclass(frozen=True)
class ColorRange(ValueRange):
"""A range of colors in terms of lower and upper bounds.
Typical usage example:
# A YUV color range (cuboid)
    ColorRange(lower=np.array([0., 0.25, 0.25]),
               upper=np.array([1., 0.75, 0.75]))
Attributes:
lower: A 3D array with the lower values of the color range.
upper: A 3D array with the upper values of the color range.
"""
@dataclasses.dataclass(frozen=True)
class PositionLimit(ValueRange):
"""A range of Cartesian position in terms of lower and upper bounds.
Typical usage example:
# Define a position limit in Cartesian space (cuboid)
    PositionLimit(lower=np.array([-0.5, -0.5, 0.]),
                  upper=np.array([0.5, 0.5, 0.5]))
Attributes:
lower: An [x, y, z] array with the lower values of the position limit.
upper: An [x, y, z] array with the upper values of the position limit.
"""
@dataclasses.dataclass(frozen=True)
class Plane:
"""Parameterization of a 3d plane.
Attributes:
point: 3d point which lies in the plane.
normal: 3d vector normal to the plane.
"""
point: np.ndarray
normal: np.ndarray
| dm_robotics-main | py/vision/types.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for working in safety with robot setup."""
from typing import Mapping, Optional
from dmr_vision import types
import numpy as np
class PoseValidator:
"""Helper class for validating poses inside pre-defined limits and deadzones."""
def __init__(self,
limits: types.PositionLimit,
deadzones: Optional[Mapping[str, types.PositionLimit]] = None):
"""Constructs a `PoseValidator` instance.
Args:
limits: A range of Cartesian position in terms of lower and upper bounds.
deadzones: A mapping specifying deadzones with their limits, specified in
the same terms of `limits`.
"""
if len(limits.lower) != 3 or len(limits.upper) != 3:
raise ValueError("Upper/lower limits need to be of length 3 (cartesian)")
self._limits = limits
self._deadzones = deadzones
def is_valid(self, pose: np.ndarray) -> bool:
"""Checks if a pose is valid by checking it against limits and deadzones."""
position = pose[0:3]
if not self._within_zone(position, self._limits):
return False
if self._deadzones is not None:
for zone in self._deadzones.values():
if self._within_zone(position, zone):
return False
return True
def _within_zone(self, position: np.ndarray,
limits: types.PositionLimit) -> bool:
"""Checks if position is within a zone defined by limits."""
if ((position < limits.lower).any() or (position > limits.upper).any()):
return False
return True
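# Hedged usage sketch mirroring `utils_test.py`: a cuboid workspace with a
# deadzone carved out around an arbitrary obstacle.
def _example_usage() -> bool:  # pragma: no cover - illustrative only.
  limits = types.PositionLimit(
      lower=np.array([-0.5, -0.5, 0.0]), upper=np.array([0.5, 0.5, 0.5]))
  deadzones = {
      "obstacle":
          types.PositionLimit(
              lower=np.array([0.1, 0.1, 0.0]),
              upper=np.array([0.2, 0.2, 0.2])),
  }
  validator = PoseValidator(limits=limits, deadzones=deadzones)
  # Pose is [x, y, z, qx, qy, qz, qw]; only the position is checked.
  return validator.is_valid(np.array([0.3, 0.0, 0.1, 0.0, 0.0, 0.0, 1.0]))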
| dm_robotics-main | py/vision/utils.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch a ROS node to traingulate the barycenter of colored blobs from images."""
import math
from absl import app
from absl import flags
from absl import logging
from dmr_vision import blob_tracker_object_defs
from dmr_vision import blob_triangulation_node
from dmr_vision import config_blob_triangulation
from dmr_vision import ros_utils
from dmr_vision import types
import numpy as np
import rospy
_ROBOT = flags.DEFINE_string(
name="robot",
default="STANDARD_SAWYER",
help=("The name of the robot."
"Must be one of the enums in the robot configuration file."))
_PROPS = flags.DEFINE_list(
name="props",
default=[
blob_tracker_object_defs.Props.GREEN.value,
blob_tracker_object_defs.Props.RED.value,
blob_tracker_object_defs.Props.BLUE.value,
],
help="The names of the objects to track.")
_MAX_INVALID_CAMINFO = flags.DEFINE_float(
name="max_invalid_caminfo",
default=math.inf,
help=("Number of tentatives after receiving an invalid camera info message "
"before raising an exception. Setting this to `1` means to never "
"wait for a valid message after receiving a wrong one, while setting "
"it to `math.inf` means to never raise an exception and keep on "
"trying until a healthy message is received."))
def main(_):
logging.info("Collecting configuration parameters.")
config = config_blob_triangulation.get_config(_ROBOT.value)
extrinsics = {}
intrinsics = {}
for cam_topic, cam_extrinsics in config.extrinsics.items():
extrinsics[cam_topic] = cam_extrinsics
camera_info = ros_utils.CameraInfoHandler(
topic=f"{cam_topic}/camera_info",
queue_size=config.input_queue_size,
)
    invalid_caminfo_counter = 0
    while invalid_caminfo_counter < _MAX_INVALID_CAMINFO.value:
      with camera_info:
        if np.count_nonzero(camera_info.camera_matrix) == 0:
          invalid_caminfo_counter += 1
          logging.log_every_n_seconds(
              logging.INFO, "Received an all-zero camera matrix from topic %s. "
              "Attempt number %d. Discarding the message and not updating "
              "camera matrix and distortion parameters. If the problem "
              "persists, consider restarting the camera driver, checking the "
              "camera's calibration file, or the provided intrinsics.", 12,
              cam_topic, invalid_caminfo_counter)
          camera_info.wait()
          continue
        else:
          intrinsics[cam_topic] = types.Intrinsics(
              camera_matrix=camera_info.camera_matrix,
              distortion_parameters=camera_info.distortion_parameters)
          # Stop polling once a valid message has been stored.
          break
camera_info.close()
    if invalid_caminfo_counter >= _MAX_INVALID_CAMINFO.value:
camera_info.close()
raise ValueError(
"Received an all-zero camera matrix for more than "
f"{_MAX_INVALID_CAMINFO.value} time(s). Please restart the camera "
"driver, check the camera's calibration file, or the provided "
"intrinsics if the issue persists.")
logging.info("Initializing blob triangulation ROS node.")
rospy.init_node(name=config.node_name, anonymous=True)
ros_node = blob_triangulation_node.BlobTriangulationNode(
prop_names=_PROPS.value,
extrinsics=extrinsics,
intrinsics=intrinsics,
limits=config.limits,
deadzones=config.deadzones,
base_frame=config.base_frame,
rate=config.rate,
input_queue_size=config.input_queue_size,
output_queue_size=config.output_queue_size,
)
logging.info("Spinning ROS node.")
ros_node.spin()
logging.info("ROS node terminated.")
if __name__ == "__main__":
app.run(main)
| dm_robotics-main | py/vision/launch_blob_triangulation.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `triangulation.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import cv2
from dmr_vision import triangulation
from dmr_vision import types
import numpy as np
from tf import transformations as tf
class TriangulationTest(parameterized.TestCase):
def setUp(self):
super().setUp()
# Test parameters are taken from random real world camera calibrations.
self.camera_matrices = [
np.array([
[1418.1731532081515, 0.0, 951.1884329781567],
[0.0, 1418.7685215216047, 582.078606694917],
[0.0, 0.0, 1.0],
]),
np.array([
[1408.1781154186797, 0.0, 969.3781055884897],
[0.0, 1409.3923562102611, 600.0994341386963],
[0.0, 0.0, 1.0],
]),
np.array([
[1401.351249806934, 0.0, 979.9264199776977],
[0.0, 1402.777176503527, 615.5376637166237],
[0.0, 0.0, 1.0],
]),
]
# Test parameters are taken from one real robot Lego cell (cell 1).
self.extrinsics = [
(0.978, 0.363, 0.225, 0.332, 0.750, -0.523, -0.231),
(0.975, -0.362, 0.222, -0.754, -0.305, 0.179, 0.554),
(0.123, -0.203, 0.324, 0.717, -0.520, 0.230, -0.403),
]
# Taken from random cameras, not corresponding to projection matrices.
self.distortions = [
(-0.1716, 0.1010, -0.000725, -0.000551, 0.0),
(-0.1645, 0.0945, 0.000205, 0.000527, 0.0),
(-0.1676, 0.0935, -0.000168, -0.000842, 0.0),
]
# Roughly the Lego basket center, expressed in the robot base frame.
self.point_3d = np.array([0.605, 0.0, 0.05])
# Theoretic, distorted pixel measurements corresponding to the point and
# the camera settings above.
self.pixels = [
(905.87075049, 541.33127149),
(837.30620605, 582.11931161),
(993.52079234, 440.15403317),
]
# A plane which contains the above point.
self.plane = types.Plane(point=np.array([0., 0., self.point_3d[2]]),
normal=np.array([0., 0., 1.]))
@parameterized.parameters(True, False)
def test_undistorted(self, use_plane_constraint):
planar_constraint = self.plane if use_plane_constraint else None
point_3d_triangulated, residual = self._run_triangulation(
self.camera_matrices, None, self.extrinsics, self.point_3d,
planar_constraint=planar_constraint)
self.assertSequenceAlmostEqual(point_3d_triangulated.flatten(),
self.point_3d)
self.assertAlmostEqual(residual.item(), 0)
@parameterized.parameters(True, False)
def test_distorted(self, use_plane_constraint):
planar_constraint = self.plane if use_plane_constraint else None
point_3d_triangulated, residual = self._run_triangulation(
self.camera_matrices, self.distortions, self.extrinsics, self.point_3d,
planar_constraint=planar_constraint)
self.assertSequenceAlmostEqual(point_3d_triangulated.flatten(),
self.point_3d)
self.assertAlmostEqual(residual.item(), 0)
def test_from_single_viewpoint_with_plane_constraint(self):
point_3d_triangulated, _ = self._run_triangulation(
self.camera_matrices[0:1], self.distortions[0:1], self.extrinsics[0:1],
self.point_3d, planar_constraint=self.plane)
self.assertSequenceAlmostEqual(point_3d_triangulated.flatten(),
self.point_3d)
def test_from_two_viewpoints(self):
point_3d_triangulated, residual = self._run_triangulation(
self.camera_matrices[0:2], self.distortions[0:2], self.extrinsics[0:2],
self.point_3d)
self.assertSequenceAlmostEqual(point_3d_triangulated.flatten(),
self.point_3d)
self.assertAlmostEqual(residual.item(), 0)
def test_from_same_viewpoints(self):
camera_matrices = [self.camera_matrices[0], self.camera_matrices[0]]
distortions = [self.distortions[0], self.distortions[0]]
extrinsics = [self.extrinsics[0], self.extrinsics[0]]
with self.assertRaises(ValueError):
self._run_triangulation(camera_matrices, distortions, extrinsics,
self.point_3d)
def test_single_observation(self):
with self.assertRaises(ValueError):
self._run_triangulation([
self.camera_matrices[0],
], [
self.distortions[0],
], [
self.extrinsics[0],
], self.point_3d)
def test_behind_camera(self):
with self.assertRaises(ValueError):
self._run_triangulation(self.camera_matrices, None, self.extrinsics,
np.array([10., 0., 0.]))
def test_distorted_pixel_measurements(self):
point_3d_triangulated, residual = self._run_triangulation_from_pixels(
self.camera_matrices, self.distortions, self.extrinsics, self.pixels)
self.assertSequenceAlmostEqual(point_3d_triangulated.flatten(),
self.point_3d)
self.assertAlmostEqual(residual.item(), 0)
def _run_triangulation(self, camera_matrices, distortions,
extrinsics, point, planar_constraint=None):
triangulator = triangulation.Triangulation(
camera_matrices, distortions, extrinsics,
planar_constraint=planar_constraint)
# pylint: disable=invalid-name
pixel_measurements = []
for i in range(len(camera_matrices)):
extrinsics_mat = tf.quaternion_matrix(extrinsics[i][3:7])[0:3, 0:3]
extrinsics_pos = np.array(extrinsics[i][0:3])
point_3d_list = np.array([
point,
])
# OpenCV expects the camera position expressed in the camera frame.
W_x_C = extrinsics_mat.T.dot(-extrinsics_pos)
distortion = distortions[i] if distortions else np.zeros(4)
point_projected, _ = cv2.projectPoints(point_3d_list, extrinsics_mat.T,
W_x_C, camera_matrices[i],
distortion)
pixel_measurements.append(point_projected[0][0])
return triangulator.triangulate(pixel_measurements)
# pylint: enable=invalid-name
def _run_triangulation_from_pixels(self, camera_matrices, distortions,
extrinsics, pixel_measurements):
triangulator = triangulation.Triangulation(camera_matrices, distortions,
extrinsics)
return triangulator.triangulate(pixel_measurements)
if __name__ == "__main__":
absltest.main()
| dm_robotics-main | py/vision/triangulation_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module defining a ROS node to triangulate the barycenter of colored blobs."""
import collections
import itertools
from typing import Collection, Mapping, Optional, Tuple
from absl import logging
from dmr_vision import ros_utils
from dmr_vision import triangulation as linear_triangulation
from dmr_vision import types
from dmr_vision import utils
import numpy as np
import rospy
class BlobTriangulationNode:
"""A ROS node for triangulating prop positions in a robot base frame."""
def __init__(
self,
prop_names: Collection[str],
extrinsics: Mapping[str, types.Extrinsics],
intrinsics: Optional[Mapping[str, types.Intrinsics]],
limits: types.PositionLimit,
deadzones: Optional[Mapping[str, types.PositionLimit]] = None,
fuse_tolerance: float = 0.1,
planar_constraint: Optional[Mapping[str, types.Plane]] = None,
base_frame: str = "base",
input_queue_size: int = 1,
output_queue_size: int = 1,
rate: int = 20,
):
"""Constructs a `BlobTriangulationNode` instance.
Args:
prop_names: The names of the props to use.
      extrinsics: A mapping from camera names to extrinsic parameters (a 7D
        pose vector of the camera relative to a common reference frame).
intrinsics: A mapping from camera names to intrinsics parameters. If a
camera is not present in this mapping, the node will attempt to collect
the intrinsics from the camera ROS driver `camera_info` topic.
limits: The robot playground limits, specified in terms of upper and lower
positions, i.e. a cuboid.
deadzones: A mapping specifying deadzones with their limits, specified in
the same terms of `limits`.
fuse_tolerance: Maximum time interval between fused data points.
planar_constraint: An optional mapping of prop names to planes (in global
frame) that the blob must lie in. This is useful for example for
tracking a ball which is guaranteed to be on the ground plane. If
provided then a single camera is enough for "triangulation".
base_frame: The frame id to use when publishing poses.
input_queue_size: The size of input queues.
output_queue_size: The size of output queues.
rate: The frequency with which to spin the node.
"""
self._prop_names = prop_names
self._camera_names = list(extrinsics.keys())
self._extrinsics = extrinsics
self._pose_validator = utils.PoseValidator(
limits=limits, deadzones=deadzones)
self._fuse_tolerance = fuse_tolerance
self._planar_constraint = planar_constraint or {}
self._intrinsics = intrinsics
self._base_frame = base_frame
self._input_queue_size = input_queue_size
self._output_queue_size = output_queue_size
self._rate = rospy.Rate(rate)
self._pose_publishers = {}
# Setup subscribers for receiving blob centers.
self._point_handler = collections.defaultdict(dict)
for prop_name in self._prop_names:
for camera_name in self._camera_names:
point_topic = f"{camera_name}/blob/{prop_name}/center"
self._point_handler[prop_name][camera_name] = ros_utils.PointHandler(
topic=point_topic, queue_size=input_queue_size)
def spin(self) -> None:
"""Loops the node until shutdown."""
while not rospy.is_shutdown():
centers, most_recent_stamp = self._get_blob_centers()
poses = self._fuse(centers)
self._publish_poses(poses, most_recent_stamp)
self._rate.sleep()
def _fuse(
self,
centers: Mapping[str, Mapping[str, np.ndarray]],
) -> Mapping[str, np.ndarray]:
"""Fuse the detected center points by triangulation."""
prop_poses = {}
for prop_name in self._prop_names:
# Skip, if there's no data for the prop.
if prop_name not in centers:
continue
# List the cameras in which the prop is visible.
available_cameras = list(centers[prop_name].keys())
# If there are not enough measurements then skip.
planar_constraint = self._planar_constraint.get(prop_name, None)
min_num_cameras = 2 if planar_constraint is None else 1
if len(available_cameras) < min_num_cameras:
continue
available_cameras_powerset = self._powerset(
available_cameras, min_cardinality=min_num_cameras)
position = None
residual = None
for camera_set in available_cameras_powerset:
# Setup the triangulation module for the camera subset.
triangulation = linear_triangulation.Triangulation(
camera_matrices=[
self._intrinsics[name].camera_matrix for name in camera_set
],
distortions=[
self._intrinsics[name].distortion_parameters
for name in camera_set
],
extrinsics=[
np.append(self._extrinsics[name].pos_xyz,
self._extrinsics[name].quat_xyzw)
for name in camera_set
],
planar_constraint=planar_constraint)
# Create a list of blob centers ordered by source camera.
blob_centers = [
centers[prop_name][camera_name] for camera_name in camera_set
]
# Triangulate the prop's position.
current_position, current_residual = triangulation.triangulate(
blob_centers)
if residual is None or current_residual < residual:
position = current_position
residual = current_residual
# Append a default orientation.
prop_poses[prop_name] = np.append(position, [0, 0, 0, 1])
return prop_poses
def close(self) -> None:
"""Gently cleans up BlobTriangulationNode and closes ROS topics."""
logging.info("Closing ROS topics.")
for prop_name in self._prop_names:
for camera_name in self._camera_names:
self._point_handler[prop_name][camera_name].close()
for pose_publishers in self._pose_publishers.values():
pose_publishers.close()
def _powerset(self, iterable, min_cardinality=1, max_cardinality=None):
"""Creates an iterable with all the powerset elements of `iterable`.
Example:
The powerset of the list [1,2,3] is (1,), (2,), (3,), (1, 2), (1, 3),
(2, 3), (1, 2, 3).
Args:
iterable: An `iterable` object.
min_cardinality: an `int`. The minimum cardinality in the powerset.
      max_cardinality: an `int`. The maximum cardinality in the powerset.
Returns:
An `iterable` of the powerset elements as tuples.
"""
if max_cardinality is None:
max_cardinality = len(iterable)
if min_cardinality > max_cardinality:
raise ValueError("The minimum cardinality of a pawerset cannot be "
" greater than its maximum cardinality. "
f"Provided minimum: {min_cardinality}. "
f"Provided maximum: {max_cardinality}")
iterable_list = list(iterable)
return itertools.chain.from_iterable(
itertools.combinations(iterable_list, r)
for r in range(min_cardinality, max_cardinality + 1))
def _get_blob_centers(
self
) -> Tuple[Mapping[str, Mapping[str, np.ndarray]], Optional[rospy.Time]]:
"""Get the most recent, timely coherent set of center points."""
points_and_stamps = collections.defaultdict(dict)
most_recent_stamp = None
# Collect all centers and track their timestamps.
for prop_name in self._prop_names:
for camera_name in self._camera_names:
point_data = self._point_handler[prop_name][camera_name].point_data
if point_data is not None:
points_and_stamps[prop_name][camera_name] = (point_data.data[:2],
point_data.stamp)
if most_recent_stamp is None or point_data.stamp > most_recent_stamp:
most_recent_stamp = point_data.stamp
else:
logging.warning("No data received yet ('%s', '%s').", camera_name,
prop_name)
continue
# No blob center received yet.
if most_recent_stamp is None:
return {}, most_recent_stamp
# Remove outdated points.
filtered_points = collections.defaultdict(dict)
for prop_name, cameras in points_and_stamps.items():
for camera_name, info_tuple in cameras.items():
center, stamp = info_tuple
if (most_recent_stamp - stamp).to_sec() > self._fuse_tolerance:
logging.warning("Discarding outdated data ('%s', '%s').", camera_name,
prop_name)
continue
filtered_points[prop_name][camera_name] = center
return filtered_points, most_recent_stamp
def _publish_poses(self, poses: Mapping[str, np.ndarray],
stamp: rospy.Time) -> None:
for prop_name, pose in poses.items():
if pose is not None:
if not self._pose_validator.is_valid(pose):
continue
if prop_name not in self._pose_publishers:
self._setup_pose_publisher(prop_name, "pose")
self._pose_publishers[prop_name].publish(pose, stamp=stamp)
def _setup_pose_publisher(self, prop_name: str, topic: str) -> None:
topic = f"/blob/{prop_name}/{topic}"
self._pose_publishers[prop_name] = ros_utils.PosePublisher(
topic=topic,
frame_id=self._base_frame,
queue_size=self._output_queue_size)
| dm_robotics-main | py/vision/blob_triangulation_node.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module defining a ROS node to detect blobs on camera images."""
from typing import Callable, Mapping, Optional
from absl import logging
from dmr_vision import detector as vision_detector
from dmr_vision import ros_utils
from dmr_vision import types
import numpy as np
import rospy
class DetectorNode:
"""A ROS node for generic image-based detection."""
def __init__(
self,
topic: str,
detector: vision_detector.Signature,
input_queue_size: int = 1,
output_queue_size: int = 1,
image_optimizer: Optional[Callable[[], bool]] = None,
):
"""Constructs a `DetectorNode` instance.
Args:
topic: the camera ROS topic.
detector: the detector to use.
input_queue_size: the size of input queues.
output_queue_size: the size of output queues.
image_optimizer: a function that can be used to trigger specific options
on the camera. For example, this function may call camera APIs to adjust
the brightness, gamma values, etc.
Raises:
      EnvironmentError: if `image_optimizer` fails and returns `False`.
"""
self._topic = topic
self._namespace = "/" + topic.split("/")[1]
self._detector = detector
self._input_queue_size = input_queue_size
self._output_queue_size = output_queue_size
self._point_publishers = {}
self._visualization_publishers = {}
if image_optimizer and not image_optimizer():
raise EnvironmentError("Provided `image_optimizer` failed execution.")
self._image_handler = ros_utils.ImageHandler(
topic=self._topic,
queue_size=input_queue_size,
)
def spin(self) -> None:
"""Loops the node until shutdown."""
stamp = None
while not rospy.is_shutdown():
# Get the most recent data.
with self._image_handler:
while self._image_handler.stamp == stamp:
self._image_handler.wait()
image = self._image_handler.data
frame_id = self._image_handler.frame_id
stamp = self._image_handler.stamp
# Run the blob detector.
centers, detections = self._detector(image)
# Publish detection results.
self._publish_centers(centers, frame_id, stamp)
self._publish_detections(detections, frame_id, stamp)
def close(self) -> None:
"""Gently cleans up DetectorNode and closes ROS topics."""
logging.info("Closing ROS topics.")
self._image_handler.close()
for point_publisher in self._point_publishers.values():
point_publisher.close()
for visualization_publisher in self._visualization_publishers.values():
visualization_publisher.close()
def _publish_centers(self, centers: Mapping[str, Optional[np.ndarray]],
frame_id: str, stamp: rospy.Time) -> None:
for blob_name, center in centers.items():
if center is not None:
if blob_name not in self._point_publishers:
self._setup_point_publisher(blob_name, "center", frame_id)
self._point_publishers[blob_name].publish(center, stamp=stamp)
def _publish_detections(self, detections: types.Detections, frame_id: str,
stamp: rospy.Time) -> None:
for blob_name, visualization in detections.items():
if visualization is not None:
if blob_name not in self._visualization_publishers:
self._setup_image_publisher(blob_name, "visualization", frame_id)
publisher = self._visualization_publishers[blob_name]
publisher.publish(visualization, stamp=stamp)
def _setup_point_publisher(self, blob_name: str, topic: str,
frame_id: str) -> None:
topic = f"{self._namespace}/blob/{blob_name}/{topic}"
self._point_publishers[blob_name] = ros_utils.PointPublisher(
topic=topic, frame_id=frame_id, queue_size=self._output_queue_size)
def _setup_image_publisher(self, blob_name: str, topic: str,
frame_id: str) -> None:
topic = f"{self._namespace}/blob/{blob_name}/{topic}"
self._visualization_publishers[blob_name] = ros_utils.ImagePublisher(
topic=topic, frame_id=frame_id, queue_size=self._output_queue_size)
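# A minimal usage sketch (assumptions: a ROS master is running, images are
# published on the topic below, and the trivial detector here stands in for a
# real `vision_detector.Signature` implementation). Illustrative only, not
# part of the library API.
if __name__ == "__main__":
  rospy.init_node("example_detector")
  def _center_detector(image: np.ndarray):
    # Reports the image center for a single pseudo-blob, with no
    # visualization image.
    center = np.array([image.shape[1] / 2.0, image.shape[0] / 2.0])
    return {"blob": center}, {"blob": None}
  example_node = DetectorNode(
      topic="/pylon_front_left/image_raw", detector=_center_detector)
  try:
    example_node.spin()
  finally:
    example_node.close()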
| dm_robotics-main | py/vision/detector_node.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for the blob triangulation node."""
import dataclasses
from typing import Mapping, Optional
from dmr_vision import robot_config
from dmr_vision import types
import numpy as np
ROS_TOPIC_CAM_PREFIX = "/pylon_"
@dataclasses.dataclass(init=False)
class BlobTriangulationConfig:
"""Data class for the blob triangulation node.
Attributes:
node_name: the name of the ROS node.
rate: the desired frame rate.
input_queue_size: the input data queue size (see ROS documentation).
output_queue_size: the output data queue size (see ROS documentation).
fuse_tolerance: time in seconds after which data is considered outdated and
not used for triangulation.
extrinsics: extrinsic camera parameters.
limits: Cartesian limits over which points are considered as outliers.
base_frame: the name of the robot base frame.
deadzones: additional Cartesian limits excluding volumes of the robot
operative space where points are discarded.
"""
node_name: str
rate: int
input_queue_size: int
output_queue_size: int
fuse_tolerance: float
extrinsics: Mapping[str, types.Extrinsics]
limits: types.PositionLimit
base_frame: str
deadzones: Optional[Mapping[str, types.PositionLimit]] = None
def get_config(robot_type: str) -> BlobTriangulationConfig:
"""Returns the parameters for running ROS blob triangulation node.
Args:
robot_type: the name of a robot among the ones listed in `robot_config`.
Returns:
The configuration parameters for the blob triangulation ROS node.
"""
## Base configs
r_config = robot_config.get_robot_config(robot_type)
config = BlobTriangulationConfig()
## ROS node configuration
config.node_name = "blob_triangulation"
config.rate = 60
config.input_queue_size = 3
config.output_queue_size = 1
config.fuse_tolerance = 0.2
## Robot configuration
config.extrinsics = {
ROS_TOPIC_CAM_PREFIX + cam_name: cam.extrinsics
for cam_name, cam in r_config.cameras.items()
}
center = np.append(r_config.basket_center, r_config.basket_height)
config.limits = types.PositionLimit(
upper=center + np.array([0.45, 0.45, 0.20]),
lower=center + np.array([-0.45, -0.45, -0.02]),
)
config.deadzones = {
"front_left":
types.PositionLimit(
upper=center + np.array([0.45, 0.27, 0.20]),
lower=center + np.array([0.27, -0.45, -0.02]),
),
"front_right":
types.PositionLimit(
upper=center + np.array([0.45, 0.45, 0.20]),
lower=center + np.array([0.27, 0.27, -0.02]),
),
}
config.base_frame = r_config.base_frame_name
return config
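# Illustrative usage (the robot type string is a placeholder; valid names are
# whatever `robot_config.get_robot_config` accepts):
#   config = get_config("sawyer")
#   print(config.node_name)          # "blob_triangulation"
#   print(config.extrinsics.keys())  # keyed by "/pylon_<camera_name>"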
| dm_robotics-main | py/vision/config_blob_triangulation.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module performing triangulation of 2D points from multiple views."""
from typing import Optional, Sequence, Tuple
import cv2
from dmr_vision import types
import numpy as np
from tf import transformations as tf
def _plane_basis_from_normal(normal: np.ndarray):
"""Returns a 3x2 matrix with orthonormal columns tangent to normal."""
norm = np.linalg.norm(normal)
if norm < 1e-6:
raise ValueError("The norm of the normal vector is less than 1e-6. "
"Consider rescaling it or passing a unit vector.")
normal = normal / norm
# Choose some vector which is guaranteed not to be collinear with the normal.
if normal[0] > 0.8:
tangent_0 = np.array([0., 1., 0.])
else:
tangent_0 = np.array([1., 0., 0.])
# Make it orthonormal to normal.
tangent_0 = tangent_0 - normal * np.dot(tangent_0, normal)
tangent_1 = np.cross(normal, tangent_0)
return np.stack([tangent_0, tangent_1], axis=1)
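# For example (illustrative): for the unit normal [0., 0., 1.] the returned
# 3x2 basis is [[1, 0], [0, 1], [0, 0]], i.e. the world x- and y-axes spanning
# the horizontal plane.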
class Triangulation:
"""Triangulates points in 3D space from multiple 2D image observations.
We assume that at least two observations from two different view points
corresponding to the same 3D point are available. This triangulation will
work both with fixed or moving cameras, as long as the camera pose at
measurement time is known.
In its current implementation, triangulation will not work for points at
or near infinity.
"""
def __init__(self, camera_matrices: Sequence[np.ndarray],
distortions: Optional[Sequence[np.ndarray]],
extrinsics: Sequence[np.ndarray],
planar_constraint: Optional[types.Plane] = None) -> None:
"""Initializes the class given the scene configuration.
Args:
camera_matrices: List of 3x3 camera (projection) matrices. Length of this
list corresponds to number of measurements. If multiple measurements are
taken from the same camera, the camera matrix needs to be provided
multiple times.
distortions: None or List of distortion parameters corresponding to each
measurement. If None, measurements are assumed to be taken from
rectified images. Distortion parameters can be of any type supported by
OpenCV.
extrinsics: List of camera extrinsics (i.e. poses) expressed in the
        'world' frame in which we want to estimate the 3D point. Each pose
        must be provided as a NumPy array of size (7,) where the first three
        entries are position and the last four are the quaternion [x, y, z, w].
planar_constraint: An optional plane (in global frame) that the
triangulated point must lie on.
Raises:
ValueError: Will be raised if dimensions between parameters mismatch.
"""
self._camera_matrices = camera_matrices
self._distortions = distortions
self._extrinsics = extrinsics
# Optional plane constraint parameterization.
self._has_planar_constraint = planar_constraint is not None
self._offset = None
self._basis = None
if self._has_planar_constraint:
self._offset = planar_constraint.point
self._basis = _plane_basis_from_normal(planar_constraint.normal)
if len(self._camera_matrices) != len(self._extrinsics):
raise ValueError("Number of camera matrices and extrinsics should match.")
    if distortions is not None:
      if len(self._camera_matrices) != len(distortions):
        raise ValueError(
            "Number of camera matrices and distortion parameters should match.")
def triangulate(
self,
pixel_measurements: Sequence[np.ndarray]) -> Tuple[np.ndarray, float]:
"""Triangulates a 3D point from multiple 2D observations.
Args:
pixel_measurements: List of 2D pixel measurements (u, v) corresponding to
the observation. If distortion parameters were set in the `__init__`
        method, these pixel measurements should be distorted as well.
Returns:
A NumPy array of size 3 with [x, y, z] coordinates of the triangulated
point and a NumPy array of size 1 with the residual of the triangulated
point.
Raises:
ValueError: Will be raised if number of configured cameras and
observations do not match.
"""
if len(self._camera_matrices) != len(pixel_measurements):
raise ValueError(
"Number of camera matrices and measurements should match.")
undistorted_points = []
for i in range(len(pixel_measurements)):
distortion = self._distortions[i] if self._distortions else np.zeros(4)
# OpenCV expects the following convoluted format.
pixel_list = np.array([np.array([
np.array(pixel_measurements[i]),
],)])
undistorted_points.append(
cv2.undistortPoints(pixel_list, self._camera_matrices[i], distortion))
return self._linear_triangulate(undistorted_points)
def _linear_triangulate(
self,
undistorted_points: Sequence[np.ndarray]) -> Tuple[np.ndarray, float]:
"""Implements linear triangulation using least-squares.
Args:
undistorted_points: List of 2D image points in undistorted, normalized
camera coordinates (not pixel coordinates).
Returns:
3D position of triangulation as NumPy array of size (3,).
Residual of the 3D position of triangulation of size (1,) or an empty
array if the 3D position can be reconstructed exactly from the
measurements and constraints.
Raises:
ValueError: Will be raised if observations do not allow triangulation.
np.linalg.LinAlgError: Raised if numeric errors occur.
Implementation according to 5.1 in
Hartley, Richard I., and Peter Sturm. "Triangulation."
Computer vision and image understanding 68.2 (1997): 146-157.
https://perception.inrialpes.fr/Publications/1997/HS97/HartleySturm-cviu97.pdf
    While the article above talks about two cameras only, the multi-camera
    equivalent can be understood very intuitively: the 3D point to triangulate
    needs to lie on the line constructed from the camera's focal point (its
    position) and the detected point on the image plane. This line is given as:
l = x_c1 + s_1 * v_c1 (1)
    where x_c1 is the position of camera 1 and v_c1 the detected point. Given
    that the 3D point lies somewhere along this line at an unknown distance s_1,
the intersection equation is given as
p - l = 0 (2)
    where p is the position of the 3D point we want to triangulate. To carry
out this computation, we need a coherent reference frame for all quantities.
Since we want to triangulate our 3D point in a global frame (called W),
equation (2) becomes:
      W_p - W_l = 0 (3)
The camera position in equation (1) is already expressed in the global
frame. Hence we only need to rotate v_c1, which is usually expressed in the
camera frame, into the global frame
W_v_c1 = R_W_C * C_v_c1 (4)
where R_W_C is the orientation of the camera.
Substituting (4) into (3), we get:
      W_p - (W_x_c1 + s_1 * W_v_c1) = 0 (5)
We can then replicate equation (5) for all cameras and form it as a
linear system of equations in the form of Ax = b where
A = [I, W_v_c1
I, W_v_c2
... ]
x = [W_p_x, W_p_y, W_p_z, s_1, s_2, ...]
b = [-W_x_c1, -W_x_c2, ...]
According to the article above, we then solve this system using the SVD
and discarding the scale.
If a planar constraint is provided, then we reparameterize
[W_p_x, W_p_y, W_p_z] using 2d coordinates in this plane, solve
for these coordinates, and then transform back to 3d.
This can be improved in two ways:
- implement the iterative method outlined in 5.2 in the article above
- implement a Levenberg-Marquardt implementation, also outlined in 5.2
LM is available, so this is relatively easy to do.
"""
num_measurements = len(undistorted_points)
if num_measurements < 2 and not self._has_planar_constraint:
raise ValueError("We need at least two measurements to triangulate.")
# For linear algebra as well as expressing reference frames, it really
# improves readability, if we allow variable names to start with capital
# letters.
# pylint: disable=invalid-name
rows = 3 * num_measurements
cols = 3 + num_measurements
A = np.zeros([rows, cols])
b = np.zeros([rows, 1])
for i in range(num_measurements):
# Normalize/convert to 3D.
C_v = np.append(undistorted_points[i], 1.)
# Transform measured point to global frame.
R_W_C = tf.quaternion_matrix(self._extrinsics[i][3:7])[0:3, 0:3]
W_v = R_W_C.dot(C_v)
# Fill A and b for the given observation, using the formulas above.
A[3 * i:3 * (i + 1), 0:3] = np.eye(3)
A[3 * i:3 * (i + 1), 3 + i] = -W_v
b[3 * i:3 * (i + 1), 0] = np.array(self._extrinsics[i][0:3]).T
# Maybe add a planar constraint.
if self._has_planar_constraint:
# Reparametrize x = Zx' + y -> AZx' = b - Ay.
Z = np.zeros([cols, cols - 1])
y = np.zeros([cols, 1])
Z[3:, 2:] = np.eye(num_measurements)
Z[:3, :2] = self._basis
y[:3, 0] = self._offset
orig_A = A
      A = np.matmul(orig_A, Z)  # [3 * num_measurements, 2 + num_measurements]
b = b - np.matmul(orig_A, y)
# Solve for the 3D point and the scale.
try:
x, residual, rank, _ = np.linalg.lstsq(A, b, rcond=None)
    except np.linalg.LinAlgError as err:
      # Exceptions have no `.message` attribute in Python 3, so re-raise a new
      # error with an augmented message instead of mutating the caught one.
      raise np.linalg.LinAlgError(
          "Triangulation failed, possibly due to invalid data provided. "
          f"Numeric error: {err}") from err
if self._has_planar_constraint:
# Map from 2d plane to 3d world coordinates.
scales = x[2:]
pose = (np.matmul(self._basis, x[:2]) +
np.expand_dims(self._offset, axis=-1))
num_free_variables = 3 + num_measurements - 1
else:
scales = x[3:]
pose = x[:3]
num_free_variables = 3 + num_measurements
# Verify the rank to ensure visibility.
if rank < num_free_variables:
raise ValueError("Insufficient observations to triangulate.")
# Verify that scaling factors are positive.
if (scales < 0).any():
raise ValueError("3D point lies behind at least one camera")
return pose, np.sqrt(residual)
# pylint: enable=invalid-name
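# A minimal usage sketch (illustrative, not part of the library): two cameras
# with identity intrinsics and identity orientation, one metre apart along x,
# observe a point at (0.5, 0, 2). The measurements below are hand-computed
# normalized image coordinates for that geometry.
if __name__ == "__main__":
  identity_intrinsics = np.eye(3)
  # Poses are [x, y, z, qx, qy, qz, qw]; [0, 0, 0, 1] is the identity rotation.
  cam0_pose = np.array([0., 0., 0., 0., 0., 0., 1.])
  cam1_pose = np.array([1., 0., 0., 0., 0., 0., 1.])
  triangulator = Triangulation(
      camera_matrices=[identity_intrinsics, identity_intrinsics],
      distortions=None,
      extrinsics=[cam0_pose, cam1_pose])
  # The point projects to (0.25, 0) in camera 0 and (-0.25, 0) in camera 1.
  position, residual = triangulator.triangulate(
      [np.array([0.25, 0.]), np.array([-0.25, 0.])])
  print("triangulated position:", position.ravel())  # ~[0.5, 0.0, 2.0]
  print("residual:", residual)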
| dm_robotics-main | py/vision/triangulation.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A manual test runner that runs each test file in a separate process.
This is needed because there is global state in spec_utils used to switch off
validation after some time (for performance on real robots). This automatic
switch-off causes the validation test to fail because validation is switched
off when it comes to run, unless each test starts in a new process.
"""
import os
import subprocess
import sys
_MODULE = "dm_robotics.agentflow"
_EXCLUDED_PATHS = ["build", "./build", ".tox", "./.tox", "venv", "./venv"]
def test_file_paths(top_dir):
"""Yields the path to the test files in the given directory."""
def excluded_path(name):
return any(name.startswith(path) for path in _EXCLUDED_PATHS)
for dirpath, dirnames, filenames in os.walk(top_dir):
# do not search tox or other hidden directories:
remove_indexes = [
i for i, name in enumerate(dirnames) if excluded_path(name)
]
for index in reversed(remove_indexes):
del dirnames[index]
for filename in filenames:
if filename.endswith("test.py"):
yield os.path.join(dirpath, filename)
def module_name_from_file_path(pathname):
# dirname will be like: "./file.py", "./dir/file.py" or "./dir1/dir2/file.py"
# convert this to a module.name:
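  # e.g. "./options/basic_options_test.py" (illustrative path) becomes
  # "dm_robotics.agentflow.options.basic_options_test".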
submodule_name = pathname.replace("./", "").replace("/", ".")[0:-3]
return _MODULE + "." + submodule_name
def run_test(test_module_name):
return subprocess.call([sys.executable, "-m", test_module_name]) == 0
if __name__ == "__main__":
dir_to_search = sys.argv[1]
success = True
for test_path in test_file_paths(dir_to_search):
module_name = module_name_from_file_path(test_path)
success &= run_test(module_name)
sys.exit(0 if success else 1)
| dm_robotics-main | py/agentflow/run_tests.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Utility functions, currently for terminal logging."""
import contextlib
import sys
from absl import logging
from dm_robotics.agentflow import core
# ANSI color codes for pretty-printing
ANSI_COLORS = {
'pink': '\033[95m',
'blue': '\033[94m',
'green': '\033[92m',
'yellow': '\033[93m',
'red': '\033[91m',
'bold': '\033[1m',
'underline': '\033[4m',
'end': '\033[0m'
}
def log_info(string, color=None):
if color is None or not sys.stderr.isatty():
logging.info(string)
else:
logging.info('%s%s%s', ANSI_COLORS[color], string, ANSI_COLORS['end'])
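# Example: `log_info("Option succeeded.", color="green")` prints in green when
# stderr is a TTY, and falls back to plain `logging.info` otherwise.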
def log_termination_reason(cur_option: core.Option,
option_result: core.OptionResult):
"""Pretty-print a status update."""
termination_reason = option_result.termination_reason
if termination_reason == core.TerminationType.SUCCESS:
text = 'Option \"{}\" successful. {}'.format(cur_option.name,
option_result.termination_text)
color = option_result.termination_color or 'green'
elif termination_reason == core.TerminationType.FAILURE:
text = 'Option \"{}\" failed. {}'.format(cur_option.name,
option_result.termination_text)
color = option_result.termination_color or 'red'
elif termination_reason == core.TerminationType.PREEMPTED:
text = 'Option \"{}\" preempted. {}'.format(cur_option.name,
option_result.termination_text)
color = option_result.termination_color or 'yellow'
else:
raise ValueError('Unknown exit code from subtask.')
log_info(text, color)
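# `contextlib.nullcontext` only exists from Python 3.7 onwards; on older
# versions fall back to `contextlib.suppress`, which with no arguments is
# likewise a no-op context manager.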
if hasattr(contextlib, 'nullcontext'):
nullcontext = contextlib.nullcontext # pylint: disable=invalid-name
else:
nullcontext = contextlib.suppress # pylint: disable=invalid-name
| dm_robotics-main | py/agentflow/util.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Tests for dm_robotics.agentflow.spec_utils."""
import copy
from absl.testing import absltest
from absl.testing import parameterized
from dm_env import specs
from dm_robotics.agentflow import action_spaces
from dm_robotics.agentflow import spec_utils
from dm_robotics.agentflow import testing_functions
import numpy as np
_rand_spec = testing_functions.random_array_spec
_valid_val = testing_functions.valid_value
random_array_spec = testing_functions.random_array_spec
random_observation_spec = testing_functions.random_observation_spec
random_discount_spec = testing_functions.random_discount_spec
random_reward_spec = testing_functions.random_reward_spec
random_timestep_spec = testing_functions.random_timestep_spec
valid_value = testing_functions.valid_value
class ValidationTest(parameterized.TestCase):
def assert_invalid_observation(self, spec, value, msg_substring=None):
try:
spec_utils.validate_observation(spec, value)
self.fail('Expected validation failure')
except ValueError as expected:
actual_msg = str(expected)
if msg_substring:
if msg_substring not in actual_msg:
self.fail('Got message "{}", expected to find "{}" in it'.format(
actual_msg, msg_substring))
def test_Float32(self):
spec = specs.BoundedArray(
shape=(), dtype=np.float32, minimum=0., maximum=1., name='foo')
spec_utils.validate(
spec, np.float32(0.5), ignore_nan=False, ignore_ranges=False)
try:
spec_utils.validate(
spec, float(0.5), ignore_nan=False, ignore_ranges=False)
self.fail('Expected exception')
except ValueError as unused_but_expected:
del unused_but_expected
try:
spec_utils.validate(
spec, np.float64(0.5), ignore_nan=False, ignore_ranges=False)
self.fail('Expected exception')
except ValueError as unused_but_expected:
del unused_but_expected
@absltest.skip('dm_env StringArray incompatible with numpy 1.24')
def test_StringArray(self):
test_string = 'test string'
spec = specs.StringArray(shape=(), string_type=str, name='foo')
spec_utils.validate(
spec, test_string, ignore_nan=False, ignore_ranges=False)
with self.assertRaises(ValueError):
spec_utils.validate(
spec,
test_string.encode('ASCII'),
ignore_nan=False,
ignore_ranges=False)
# Test that StringArray is amenable to maximum/minimum. This is not of
# obvious utility, but arises due to the occasional need to derive a spec
# from a sample value, e.g. in AddObservation.
string_minimum = spec_utils.minimum(spec)
string_maximum = spec_utils.maximum(spec)
spec.validate(string_minimum)
spec.validate(string_maximum)
def test_NoneAlwaysAccepted(self):
spec_utils.validate(
random_array_spec(), None, ignore_nan=False, ignore_ranges=False)
def test_Observation_MissingKeysOk(self):
spec1 = random_array_spec()
spec2 = random_array_spec()
value1 = valid_value(spec1)
value2 = valid_value(spec2)
spec_utils.validate_observation({'foo': spec1}, {'foo': value1})
spec_utils.validate_observation({'foo': spec1}, {'foo': None})
spec_utils.validate_observation({'foo': spec1}, {})
spec_utils.validate_observation({
'foo': spec1,
'bar': spec2
}, {'bar': value2})
def test_Observation_ExtraKeysFail(self):
spec = random_array_spec()
value = valid_value(spec)
spec_utils.validate_observation({'foo': spec}, {'foo': value})
self.assert_invalid_observation({'foo': spec}, {'bar': value})
self.assert_invalid_observation({}, {'': value})
def test_Observation_Success(self):
spec1, spec2 = random_array_spec(), random_array_spec()
val1, val2 = valid_value(spec1), valid_value(spec2)
spec_utils.validate_observation({'a': spec1}, {'a': val1})
spec_utils.validate_observation({'a': spec1, 'b': spec2}, {'a': val1})
spec_utils.validate_observation({'a': spec1, 'b': spec2}, {'b': val2})
spec_utils.validate_observation({
'a': spec1,
'b': spec2
}, {
'a': val1,
'b': val2
})
@parameterized.parameters(float, np.float32, np.float64)
def test_IgnoreNan_Scalar_ArraySpec(self, dtype):
spec = specs.Array(shape=(), dtype=dtype)
value = dtype('nan')
spec_utils.validate(spec, value, ignore_nan=True, ignore_ranges=False)
try:
spec_utils.validate(spec, value, ignore_nan=False, ignore_ranges=False)
self.fail('Validation failure expected.')
except ValueError as unused_but_expected:
del unused_but_expected
@parameterized.parameters(float, np.float32, np.float64)
def test_IgnoreNan_Scalar_BoundedArraySpec(self, dtype):
# In this case, validation against the min and max will fail for nans.
spec = specs.BoundedArray(
shape=(),
dtype=dtype,
minimum=np.asarray(dtype(0.0)),
maximum=np.asarray(dtype(1.0)))
value = dtype('nan')
spec_utils.validate(spec, value, ignore_nan=True, ignore_ranges=False)
try:
spec_utils.validate(spec, value, ignore_nan=False, ignore_ranges=False)
self.fail('Validation failure expected.')
except ValueError as unused_but_expected:
del unused_but_expected
@parameterized.parameters(float, np.float32, np.float64)
def test_IgnoreNan_Array(self, dtype):
spec = specs.Array(shape=(2,), dtype=dtype)
value = np.asarray([dtype('nan'), dtype('nan')], dtype=dtype)
spec_utils.validate(spec, value, ignore_nan=True, ignore_ranges=False)
try:
spec_utils.validate(spec, value, ignore_nan=False, ignore_ranges=False)
self.fail('Validation failure expected.')
except ValueError as unused_but_expected:
del unused_but_expected
@parameterized.parameters(float, np.float32, np.float64)
def test_IgnoreNan_BoundedArray(self, dtype):
    # NaN values in a bounded spec pass only when ignore_nan is set.
spec = specs.BoundedArray(
shape=(2,),
dtype=dtype,
minimum=np.asarray([dtype(0.0), dtype(1.0)]),
maximum=np.asarray([dtype(2.0), dtype(3.0)]))
value = np.asarray([dtype('nan'), dtype('nan')], dtype=dtype)
spec_utils.validate(spec, value, ignore_nan=True, ignore_ranges=False)
try:
spec_utils.validate(spec, value, ignore_nan=False, ignore_ranges=False)
self.fail('Validation failure expected.')
except ValueError as unused_but_expected:
del unused_but_expected
@parameterized.parameters(float, np.float32, np.float64)
def test_IgnoreNan_BoundedArray_RespectsLimits(self, dtype):
    # ignore_nan must not mask values that are genuinely out of range.
spec = specs.BoundedArray(
shape=(2,),
dtype=dtype,
minimum=np.asarray([dtype(0.0), dtype(1.0)]),
maximum=np.asarray([dtype(2.0), dtype(3.0)]))
oob_value = np.asarray([dtype('nan'), dtype(4.0)], dtype=dtype)
try:
spec_utils.validate(spec, oob_value, ignore_nan=True, ignore_ranges=False)
self.fail('Validation failure expected.')
except ValueError as unused_but_expected:
del unused_but_expected
try:
spec_utils.validate(
spec, oob_value, ignore_nan=False, ignore_ranges=False)
self.fail('Validation failure expected.')
except ValueError as unused_but_expected:
del unused_but_expected
@parameterized.parameters(float, np.float32, np.float64)
def test_IgnoreRanges_And_IgnoreNan(self, dtype):
    # A value that is both nan and out of range needs both flags to pass.
spec = specs.BoundedArray(
shape=(2,),
dtype=dtype,
minimum=np.asarray([dtype(0.0), dtype(1.0)]),
maximum=np.asarray([dtype(2.0), dtype(3.0)]))
oob_value = np.asarray([dtype('nan'), dtype(4.0)], dtype=dtype)
spec_utils.validate(spec, oob_value, ignore_nan=True, ignore_ranges=True)
try:
spec_utils.validate(spec, oob_value, ignore_nan=True, ignore_ranges=False)
self.fail('Validation failure expected.')
except ValueError as unused_but_expected:
del unused_but_expected
@parameterized.parameters(float, np.float32, np.float64)
def test_IgnoreRanges(self, dtype):
    # Out-of-range (but non-nan) values pass only when ignore_ranges is set.
spec = specs.BoundedArray(
shape=(2,),
dtype=dtype,
minimum=np.asarray([dtype(0.0), dtype(1.0)]),
maximum=np.asarray([dtype(2.0), dtype(3.0)]))
oob_value = np.asarray([dtype(1.0), dtype(4.0)], dtype=dtype)
spec_utils.validate(spec, oob_value, ignore_ranges=True, ignore_nan=False)
try:
spec_utils.validate(
spec, oob_value, ignore_ranges=False, ignore_nan=False)
self.fail('Validation failure expected.')
except ValueError as unused_but_expected:
del unused_but_expected
@parameterized.parameters(float, np.float32, np.float64)
def test_TypeCheck_Scalar(self, dtype):
spec = specs.Array(shape=(), dtype=dtype)
spec_utils.validate(spec, dtype(1.0), ignore_ranges=False, ignore_nan=False)
def test_EmptySpec(self):
empty_spec = spec_utils.merge_specs([])
spec_utils.validate(empty_spec, [])
class EnsureSpecCompatibilityTest(absltest.TestCase):
def test_EqualSpecs(self):
timestep_spec = random_timestep_spec()
spec_utils.ensure_spec_compatibility(
sub_specs=timestep_spec, full_specs=timestep_spec)
def test_DifferentReward(self):
observation_spec = random_observation_spec()
discount_spec = random_discount_spec()
reward_subspec = random_reward_spec(shape=(2,), dtype=np.float32)
reward_fullspec = random_reward_spec(shape=(5,), dtype=np.int8)
timestep_subspec = random_timestep_spec(
observation_spec=observation_spec,
discount_spec=discount_spec,
reward_spec=reward_subspec)
timestep_fullspec = random_timestep_spec(
observation_spec=observation_spec,
discount_spec=discount_spec,
reward_spec=reward_fullspec)
try:
spec_utils.ensure_spec_compatibility(timestep_subspec, timestep_fullspec)
self.fail('Validation failure expected.')
except ValueError as unused_but_expected:
del unused_but_expected
def test_DifferentDiscount(self):
observation_spec = random_observation_spec()
reward_spec = random_reward_spec()
discount_subspec = random_discount_spec(minimum=-5., maximum=5.)
discount_fullspec = random_discount_spec(minimum=0., maximum=1.)
timestep_subspec = random_timestep_spec(
observation_spec=observation_spec,
discount_spec=discount_subspec,
reward_spec=reward_spec)
timestep_fullspec = random_timestep_spec(
observation_spec=observation_spec,
discount_spec=discount_fullspec,
reward_spec=reward_spec)
try:
spec_utils.ensure_spec_compatibility(timestep_subspec, timestep_fullspec)
self.fail('Validation failure expected.')
except ValueError as unused_but_expected:
del unused_but_expected
def test_SubsetObservation(self):
reward_spec = random_reward_spec()
discount_spec = random_discount_spec()
observation_subspec = random_observation_spec()
observation_fullspec = copy.deepcopy(observation_subspec)
observation_fullspec['extra_obs'] = random_array_spec((8, 2), 'extra_obs',
np.float64)
timestep_subspec = random_timestep_spec(
observation_spec=observation_subspec,
discount_spec=discount_spec,
reward_spec=reward_spec)
timestep_fullspec = random_timestep_spec(
observation_spec=observation_fullspec,
discount_spec=discount_spec,
reward_spec=reward_spec)
spec_utils.ensure_spec_compatibility(timestep_subspec, timestep_fullspec)
def test_MissingObservation(self):
reward_spec = random_reward_spec()
discount_spec = random_discount_spec()
observation_subspec = random_observation_spec()
observation_fullspec = copy.deepcopy(observation_subspec)
observation_fullspec.pop(list(observation_fullspec.keys())[-1])
timestep_subspec = random_timestep_spec(
observation_spec=observation_subspec,
discount_spec=discount_spec,
reward_spec=reward_spec)
timestep_fullspec = random_timestep_spec(
observation_spec=observation_fullspec,
discount_spec=discount_spec,
reward_spec=reward_spec)
try:
spec_utils.ensure_spec_compatibility(timestep_subspec, timestep_fullspec)
self.fail('Validation failure expected.')
except KeyError as unused_but_expected:
del unused_but_expected
def test_WrongObservation(self):
reward_spec = random_reward_spec()
discount_spec = random_discount_spec()
observation_subspec = random_observation_spec()
observation_fullspec = copy.deepcopy(observation_subspec)
# Modify the first op:
obs0 = observation_fullspec[list(observation_fullspec.keys())[0]]
new_shape = tuple((d + 1 for d in obs0.shape))
new_obs = random_array_spec(
shape=new_shape, name=obs0.name, dtype=obs0.dtype)
observation_fullspec[obs0.name] = new_obs
timestep_subspec = random_timestep_spec(
observation_spec=observation_subspec,
discount_spec=discount_spec,
reward_spec=reward_spec)
timestep_fullspec = random_timestep_spec(
observation_spec=observation_fullspec,
discount_spec=discount_spec,
reward_spec=reward_spec)
try:
spec_utils.ensure_spec_compatibility(timestep_subspec, timestep_fullspec)
self.fail('Validation failure expected.')
except ValueError as unused_but_expected:
del unused_but_expected
class CastTest(absltest.TestCase):
def _assert_array(self, actual: np.ndarray, expected: np.ndarray):
self.assertEqual(actual.dtype, expected.dtype)
np.testing.assert_almost_equal(actual, expected)
def test_CastArrayToFloat64(self):
spec = specs.Array(shape=(2,), dtype=np.float64, name='64bitspec')
expected = np.array([1, 2], dtype=np.float64)
input_float = np.array([1, 2], dtype=float)
input_float32 = np.array([1, 2], dtype=np.float32)
input_float64 = np.array([1, 2], dtype=np.float64)
self._assert_array(spec_utils.cast(spec, input_float), expected)
self._assert_array(spec_utils.cast(spec, input_float32), expected)
self._assert_array(spec_utils.cast(spec, input_float64), expected)
def test_CastArrayToFloat32(self):
    spec = specs.Array(shape=(2,), dtype=np.float32, name='32bitspec')
expected = np.array([1, 2], dtype=np.float32)
input_float = np.array([1, 2], dtype=float)
input_float32 = np.array([1, 2], dtype=np.float32)
input_float64 = np.array([1, 2], dtype=np.float64)
self._assert_array(spec_utils.cast(spec, input_float), expected)
self._assert_array(spec_utils.cast(spec, input_float32), expected)
self._assert_array(spec_utils.cast(spec, input_float64), expected)
def test_CastArrayToInt32(self):
spec = specs.Array(shape=(2,), dtype=np.int32, name='32bitspec')
expected = np.array([1, 2], dtype=np.int32)
input_int = np.array([1, 2], dtype=int)
input_int32 = np.array([1, 2], dtype=np.int32)
input_int64 = np.array([1, 2], dtype=np.int64)
self._assert_array(spec_utils.cast(spec, input_int), expected)
self._assert_array(spec_utils.cast(spec, input_int32), expected)
self._assert_array(spec_utils.cast(spec, input_int64), expected)
def test_CastScalarToFloat64(self):
spec = specs.Array(shape=(), dtype=np.float64)
def check_value(value):
assert type(value) == np.float64 # pylint: disable=unidiomatic-typecheck
check_value(spec_utils.cast(spec, float(1.2)))
check_value(spec_utils.cast(spec, np.float32(1.2)))
check_value(spec_utils.cast(spec, np.float64(1.2)))
check_value(spec_utils.cast(spec, float('nan')))
check_value(spec_utils.cast(spec, np.float32('nan')))
check_value(spec_utils.cast(spec, np.float64('nan')))
def test_CastScalarToFloat32(self):
spec = specs.Array(shape=(), dtype=np.float32)
def check_value(value):
assert type(value) == np.float32 # pylint: disable=unidiomatic-typecheck
check_value(spec_utils.cast(spec, float(1.2)))
check_value(spec_utils.cast(spec, np.float32(1.2)))
check_value(spec_utils.cast(spec, np.float64(1.2)))
check_value(spec_utils.cast(spec, float('nan')))
check_value(spec_utils.cast(spec, np.float32('nan')))
check_value(spec_utils.cast(spec, np.float64('nan')))
def test_CastScalarToInt32(self):
spec = specs.Array(shape=(), dtype=np.int32)
def check_value(value):
assert type(value) == np.int32 # pylint: disable=unidiomatic-typecheck
check_value(spec_utils.cast(spec, int(12)))
check_value(spec_utils.cast(spec, np.int32(12)))
check_value(spec_utils.cast(spec, np.int64(12)))
class MergeTest(absltest.TestCase):
def test_MergePrimitives(self):
# Can only merge a value with None, cannot merge two primitives
# Well, we /could/ do, but that would require an aggregation function
# E.g. SUM, MIN, MAX, MEAN etc.
val1 = np.asarray([1, 2, np.nan, np.nan, 5, 6])
val2 = np.asarray([np.nan, np.nan, 3, 4, np.nan, np.nan])
testing_functions.assert_value(spec_utils.merge_primitives([val1]), val1)
testing_functions.assert_value(
np.asarray([1, 2, 3, 4, 5, 6]),
spec_utils.merge_primitives([val1, val2]))
try:
spec_utils.merge_primitives(
[np.asarray([np.nan, 1, 2]),
np.asarray([np.nan, np.nan, 2])])
self.fail('Exception expected')
except ValueError as unused_but_expected:
pass
def test_MergeSpecs(self):
with self.subTest('with_same_dtypes'):
spec1 = specs.BoundedArray(shape=(3,), dtype=np.int32,
minimum=np.zeros((3,), np.int32),
maximum=np.ones((3,), np.int32))
spec2 = specs.BoundedArray(shape=(2,), dtype=np.int32,
minimum=np.ones((2,), np.int32),
maximum=np.ones((2,), np.int32) * 2)
expected_spec = specs.BoundedArray(
shape=(5,), dtype=np.int32,
minimum=np.asarray([0, 0, 0, 1, 1], dtype=np.int32),
maximum=np.asarray([1, 1, 1, 2, 2], dtype=np.int32))
self.assertEqual(spec_utils.merge_specs([spec1, spec2]), expected_spec)
with self.subTest('with_different_dtypes'):
spec1 = specs.BoundedArray(shape=(1,), dtype=np.int32,
minimum=np.zeros((1,), np.int32),
maximum=np.ones((1,), np.int32))
spec2 = specs.BoundedArray(shape=(1,), dtype=np.float32,
minimum=np.ones((1,), np.float32),
maximum=np.ones((1,), np.float32) * 2)
# Defaults to float64 if there are no matching dtypes.
expected_spec = specs.BoundedArray(
shape=(2,), dtype=np.float64, minimum=np.asarray([0., 1.]),
maximum=np.asarray([1., 2.]))
self.assertEqual(spec_utils.merge_specs([spec1, spec2]), expected_spec)
with self.subTest('skips_empty_specs'):
spec1 = specs.BoundedArray(shape=(1,), dtype=np.int32,
minimum=np.zeros((1,), np.int32),
maximum=np.ones((1,), np.int32))
empty_spec = specs.BoundedArray(shape=(), dtype=np.int32,
minimum=0, maximum=0)
self.assertEqual(spec_utils.merge_specs([spec1, empty_spec]), spec1)
with self.subTest('returns_empty_spec_if_no_inputs'):
empty_spec = specs.BoundedArray(shape=(0,), dtype=np.float64,
minimum=[], maximum=[])
self.assertEqual(spec_utils.merge_specs([]), empty_spec)
class ShrinkToFitTest(absltest.TestCase):
def test_primitive(self):
spec = specs.BoundedArray(
shape=(3,),
dtype=float,
minimum=[0.0, 0.0, 0.0],
maximum=[20.0, 100.0, 20.0])
val1 = np.asarray([21.0, 5.0, 21.0]) # over-max, under-min, over-max
factor1 = 20.0 / 21.0
expected1 = np.asarray([20.0, 5.0 * factor1, 20.0])
testing_functions.assert_value(
spec_utils.shrink_to_fit(val1, spec), expected1)
val2 = np.asarray([1.0, 200.0, 21.0]) # ok, over-max, over-max
# factor2 = 0.5 # 100 / 200
expected2 = np.asarray([0.5, 100.0, 10.5])
testing_functions.assert_value(
spec_utils.shrink_to_fit(val2, spec), expected2)
def test_zero_inside_bounds(self):
spec = specs.BoundedArray(
shape=(1,), dtype=np.float32, minimum=[-10.0], maximum=[10.0])
val1 = np.asarray([0.0])
expected1 = np.copy(val1)
testing_functions.assert_value(
spec_utils.shrink_to_fit(val1, spec), expected1)
def test_negative(self):
spec = specs.BoundedArray(
shape=(3,),
dtype=float,
minimum=[-10.0, 0.0, 0.0],
maximum=[10.0, 100.0, 20.0])
val = np.asarray([-20.0, 50.0, 10.0])
# Values are halved to make -20 -> -10.
expected = np.asarray([-10.0, 25.0, 5.0])
testing_functions.assert_value(
spec_utils.shrink_to_fit(val, spec), expected)
class ClipTest(absltest.TestCase):
def test_primitive(self):
spec = specs.BoundedArray(
shape=(3,),
dtype=float,
minimum=[0.0, 10.0, 20.0],
maximum=[20.0, 100.0, 20.0])
val1 = np.asarray([21.0, 5.0, 21.0]) # over-max, under-min, over-max
expected1 = np.asarray([20.0, 10.0, 20.0])
testing_functions.assert_value(spec_utils.clip(val1, spec), expected1)
val2 = np.asarray([1.0, 200.0, 21.0]) # ok, over-max, over-max
expected2 = np.asarray([1.0, 100.0, 20.0])
testing_functions.assert_value(spec_utils.clip(val2, spec), expected2)
class PrefixSlicerTest(absltest.TestCase):
def test_ArmJoints(self):
spec = specs.BoundedArray(
shape=(3,),
dtype=np.float32,
minimum=np.asarray([1.0, 2.0, 3.0]),
maximum=np.asarray([11.0, 22.0, 33.0]),
name='name/sawyer/j0\tname/sawyer/j1\tname/sawyer/gripper')
action_space = action_spaces.prefix_slicer(spec, prefix='.*/j[0-9]+$')
    # Verify the returned spec.
expected_spec = specs.BoundedArray(
shape=(2,),
dtype=np.float32,
minimum=np.asarray([1.0, 2.0]),
maximum=np.asarray([11.0, 22.0]),
name='name/sawyer/j0\tname/sawyer/j1')
spec_utils.verify_specs_equal_bounded(expected_spec, action_space.spec())
# Verify the returned action space.
np.testing.assert_array_almost_equal(
np.asarray([1.0, 2.0, np.nan]),
action_space.project(np.asarray([1.0, 2.0])))
def test_Gripper(self):
spec = specs.BoundedArray(
shape=(3,),
dtype=np.float32,
minimum=np.asarray([1.0, 2.0, 3.0]),
maximum=np.asarray([11.0, 22.0, 33.0]),
name='name/sawyer/j0\tname/sawyer/j1\tname/sawyer/gripper')
action_space = action_spaces.prefix_slicer(spec, prefix='.*/gripper$')
    # Verify the returned spec.
expected_spec = specs.BoundedArray(
shape=(1,),
dtype=np.float32,
minimum=np.asarray([3.0]),
maximum=np.asarray([33.0]),
name='name/sawyer/gripper')
spec_utils.verify_specs_equal_bounded(expected_spec, action_space.spec())
# Verify the returned action space.
np.testing.assert_array_almost_equal(
np.asarray([np.nan, np.nan, 3.0]),
action_space.project(np.asarray([3.0])))
def test_NonContiguousMatches(self):
spec = specs.BoundedArray(
shape=(4,),
dtype=np.float32,
minimum=np.asarray([1.0, 2.0, 3.0, 4.0]),
maximum=np.asarray([11.0, 22.0, 33.0, 44.0]),
name='h1\tm1\th2\tm2')
action_space = action_spaces.prefix_slicer(spec, prefix='h.$')
expected_spec = specs.BoundedArray(
shape=(2,),
dtype=np.float32,
minimum=np.asarray([1.0, 3.0]),
maximum=np.asarray([11.0, 33.0]),
name='h1\th2')
spec_utils.verify_specs_equal_bounded(expected_spec, action_space.spec())
np.testing.assert_array_almost_equal(
np.asarray([1.0, np.nan, 2.0, np.nan]),
action_space.project(np.asarray([1.0, 2.0])))
def test_EmptySpec(self):
spec = specs.BoundedArray(
shape=(0,),
dtype=np.float32,
minimum=np.asarray([]),
maximum=np.asarray([]),
name='')
action_space = action_spaces.prefix_slicer(spec, prefix='a')
spec_utils.verify_specs_equal_bounded(spec, action_space.spec())
np.testing.assert_array_almost_equal(
np.asarray([]), action_space.project(np.asarray([], dtype=np.float32)))
def test_AllMatch(self):
spec = specs.BoundedArray(
shape=(2,),
dtype=np.float32,
minimum=np.asarray([1.0, 2.0]),
maximum=np.asarray([11.0, 22.0]),
name='h1\th2')
action_space = action_spaces.prefix_slicer(spec, prefix='h.$')
spec_utils.verify_specs_equal_bounded(spec, action_space.spec())
np.testing.assert_array_almost_equal(
np.asarray([1.0, 2.0]), action_space.project(np.asarray([1.0, 2.0])))
def test_NoneMatch(self):
spec = specs.BoundedArray(
shape=(2,),
dtype=np.float32,
minimum=np.asarray([1.0, 2.0]),
maximum=np.asarray([11.0, 22.0]),
name='h1\th2')
action_space = action_spaces.prefix_slicer(spec, prefix='m.$')
expected_spec = specs.BoundedArray(
shape=(0,),
dtype=np.float32,
minimum=np.asarray([]),
maximum=np.asarray([]),
name='')
spec_utils.verify_specs_equal_bounded(expected_spec, action_space.spec())
np.testing.assert_array_almost_equal(
np.asarray([np.nan, np.nan]), action_space.project(np.asarray([])))
def test_Defaulting(self):
spec = specs.BoundedArray(
shape=(4,),
dtype=np.float32,
minimum=np.asarray([1.0, 2.0, 3.0, 4.0]),
maximum=np.asarray([11.0, 22.0, 33.0, 44.0]),
name='h1\tm1\th2\tm2')
action_space = action_spaces.prefix_slicer(
spec, prefix='h.$', default_value=99.0)
np.testing.assert_array_almost_equal(
np.asarray([1.0, 99.0, 2.0, 99.0]),
action_space.project(np.asarray([1.0, 2.0])))
class TimeStepSpecTest(parameterized.TestCase):
def test_EqualSpecs(self):
array = specs.BoundedArray(
shape=(2,),
dtype=np.float32,
minimum=np.asarray([-1.0, -2.0]),
maximum=np.asarray([1.0, 2.0]),
name='bounded_array')
obs_spec = {'obs1': array}
spec = spec_utils.TimeStepSpec(
observation_spec=obs_spec, reward_spec=array, discount_spec=array)
self.assertEqual(spec, copy.deepcopy(spec))
def test_NonEqualSpecs(self):
array = specs.BoundedArray(
shape=(2,),
dtype=np.float32,
minimum=np.asarray([-1.0, -2.0]),
maximum=np.asarray([1.0, 2.0]),
name='bounded_array')
obs_spec = {'obs1': array}
spec1 = spec_utils.TimeStepSpec(
observation_spec=obs_spec, reward_spec=array, discount_spec=array)
array2 = specs.BoundedArray(
shape=(2,),
dtype=np.float32,
minimum=np.asarray([-3.0, -4.0]),
maximum=np.asarray([3.0, 4.0]),
name='bounded_array2')
obs_spec2 = {'obs2': array2}
spec2 = spec_utils.TimeStepSpec(
observation_spec=obs_spec2, reward_spec=array2, discount_spec=array2)
self.assertNotEqual(spec1, spec2)
@parameterized.parameters(float, np.int8)
def test_minimum(self, dtype):
array = specs.BoundedArray(
shape=(2,),
dtype=dtype,
minimum=np.asarray([-1.0, -2.0]),
maximum=np.asarray([1.0, 2.0]),
name='bounded_array')
obs_spec = {'obs1': array}
spec = spec_utils.TimeStepSpec(
observation_spec=obs_spec, reward_spec=array, discount_spec=array)
expected_minimum_obs = {'obs1': np.array([-1.0, -2.0], dtype=dtype)}
expected_minimum_reward = np.array([-1.0, -2.0], dtype=dtype)
expected_minimum_discount = np.array([-1.0, -2.0], dtype=dtype)
minimum_timestep = spec.minimum()
if issubclass(dtype, np.inexact):
assert_fn = np.testing.assert_almost_equal
else:
assert_fn = np.testing.assert_equal
assert_fn(minimum_timestep.reward, expected_minimum_reward)
assert_fn(minimum_timestep.discount, expected_minimum_discount)
self.assertEqual(set(minimum_timestep.observation.keys()),
set(expected_minimum_obs.keys()))
for key in expected_minimum_obs:
assert_fn(minimum_timestep.observation[key],
expected_minimum_obs[key])
@parameterized.parameters(float, np.int8)
def test_maximum(self, dtype):
array = specs.BoundedArray(
shape=(2,),
dtype=dtype,
minimum=np.asarray([-1.0, -2.0]),
maximum=np.asarray([1.0, 2.0]),
name='bounded_array')
obs_spec = {'obs1': array}
spec = spec_utils.TimeStepSpec(
observation_spec=obs_spec, reward_spec=array, discount_spec=array)
expected_maximum_obs = {'obs1': np.array([1.0, 2.0], dtype=dtype)}
expected_maximum_reward = np.array([1.0, 2.0], dtype=dtype)
expected_maximum_discount = np.array([1.0, 2.0], dtype=dtype)
maximum_timestep = spec.maximum()
if issubclass(dtype, np.inexact):
assert_fn = np.testing.assert_almost_equal
else:
assert_fn = np.testing.assert_equal
assert_fn(maximum_timestep.reward, expected_maximum_reward)
assert_fn(maximum_timestep.discount, expected_maximum_discount)
self.assertEqual(set(maximum_timestep.observation.keys()),
set(expected_maximum_obs.keys()))
for key in expected_maximum_obs:
assert_fn(maximum_timestep.observation[key],
expected_maximum_obs[key])
if __name__ == '__main__':
absltest.main()
| dm_robotics-main | py/agentflow/spec_utils_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""SubTask and its observer, see class doc-strings."""
import abc
from typing import Any, Mapping, Optional, Sequence, Text, Tuple
import dm_env
from dm_env import specs
from dm_robotics.agentflow import core
from dm_robotics.agentflow import spec_utils
from dm_robotics.agentflow import util # pylint: disable=unused-import
from dm_robotics.agentflow.decorators import overrides
import numpy as np
# Internal profiling
class SubTask(abc.ABC, core.Renderable):
"""A SubTask defines a task for subtask policies, given a parent task.
SubTask is used to define learning problems for agents used inside a larger
policy. SubTask defines everything required by `Option` except for the
policy itself, and is combined with a subtask policy in `SubTaskOption`.
Its primary use-case is to build Options which can internally train an RL
agent. From the agent's perspective it lives in a regular RL environment, but
from AgentFlow's perspective it's just another option node that can be plugged
into a graph.
In addition to training internally, a SubTaskOption can provide an `arg_spec`
to define an abstract action-space to a parent `MetaOption` (any `Option` can
  do this). If a SubTask does this, it should expect an observation with
  that key and spec, and do something appropriate (presumably condition the
  policy).
In summary, SubTask can be used in two modes:
1) As a stand-alone task-definition which trains up a sub-policy to be used
by the parent as a black-box option. E.g. an insertion task, which will
be invoked by the parent at the appropriate time.
2) As an interface between a parent and child policy for learning a
parameterized policy, e.g. a reaching task parameterized by some
representation of goal-pose.
In either case, SubTask's job is to pack an observation during each timestep
that contains any relevant features or sensor information, as with any task.
It must also implement a reward function, and the standard life-cycle methods
for an `Option` controlling initiation and termination.
For case (2), `SubTask` must also define `arg_spec`, which is passed to both
the parent agent for initializing its arg-generating module, and to
the child agent for initializing its arg-consuming module. The parent arg
is then parsed out of the observation at each step, and passed along to the
child.
All methods on SubTask that take a timestep, expect that timestep to be
from the environment aka 'parent'.
All Option-related methods also take `arg_key`, selected by SubTaskOption, in
order to allow a given SubTask to train multiple Policies.
"""
def __init__(self, name: Optional[Text] = None) -> None:
"""Initialize SubTask.
Args:
name: (optional) A name for the subtask.
"""
self._name = name or self.__class__.__name__
self._uid = core.uid_generator()
self._key_prefix = "{}_{}".format(self._name, self._uid)
self._default_arg_key = "{}_arg".format(self._key_prefix)
@property
def name(self) -> Text:
return self._name
def get_arg_key(self, policy: Optional[core.Policy]) -> Text:
"""The key under which the SubTask can find args from the parent.
Note: if controlling an `Option` this key should match `Option.arg_key`.
Args:
policy: The policy that this subtask is wrapping. By default SubTask uses
the arg_key of this policy, if available.
Returns:
A string key into timestep.observation under which args can be found.
"""
if hasattr(policy, "arg_key"):
return policy.arg_key
elif not hasattr(self, "_default_arg_key"):
raise AttributeError("arg_key not defined. Did you forget to call "
"`super` on your SubTask's __init__()?")
return self._default_arg_key
@abc.abstractmethod
def observation_spec(self) -> Mapping[Text, specs.Array]:
"""Defines the observation seen by agents for trained on this subtask."""
pass
@abc.abstractmethod
def arg_spec(self) -> Optional[specs.Array]:
"""Defines the arg to be passed by the parent task during each step."""
pass
@abc.abstractmethod
def action_spec(self) -> specs.BoundedArray:
"""Defines the action spec seen by agents that run on this subtask."""
pass
def reward_spec(self) -> specs.Array:
"""Describes the reward returned by the environment.
By default this is assumed to be a single float.
Returns:
An `Array` spec, or a nested dict, list or tuple of `Array` specs.
"""
return specs.Array(shape=(), dtype=np.float32, name="reward")
def discount_spec(self) -> specs.Array:
"""Describes the discount returned by the environment.
By default this is assumed to be a single float between 0 and 1.
Returns:
An `Array` spec, or a nested dict, list or tuple of `Array` specs.
"""
return specs.BoundedArray(
shape=(), dtype=np.float32, minimum=0., maximum=1., name="discount")
@abc.abstractmethod
def agent_to_parent_action(self, agent_action: np.ndarray) -> np.ndarray:
"""Convert an action from the agent to the parent task."""
pass
def reset(self, parent_timestep: dm_env.TimeStep):
"""Called when a new episode is begun."""
pass
@abc.abstractmethod
def parent_to_agent_timestep(self, parent_timestep: dm_env.TimeStep,
own_arg_key: Text) -> dm_env.TimeStep:
"""Converts a timestep from the parent to one consumable by the agent.
This method should not modify parent_timestep. This is the right place to
compute pterm.
Args:
parent_timestep: A TimeStep from the parent task or environment.
own_arg_key: A string key into parent_timestep.observation in which the
arg for the current Option can be found.
Returns:
A timestep for the agent that this subtask encloses.
"""
pass
@abc.abstractmethod
def pterm(self, parent_timestep: dm_env.TimeStep, own_arg_key: Text) -> float:
"""Returns the termination probability for the current state.
Args:
parent_timestep: A TimeStep from the parent task or environment.
own_arg_key: A string key into parent_timestep.observation in which the
arg for the current Option can be found.
"""
pass
def subtask_result(self, parent_timestep: dm_env.TimeStep,
own_arg_key: Text) -> core.OptionResult:
"""Return an OptionResult conditional on the timestep and arg."""
del parent_timestep
del own_arg_key
return core.OptionResult(core.TerminationType.SUCCESS, data=None)
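# A minimal sketch of a concrete SubTask (illustrative only; the observation
# names, shapes, and the 4-DOF/7-DOF split below are assumptions, not library
# API):
#
#   class ReachSubTask(SubTask):
#     """Exposes a 4-DOF reach problem on top of a 7-DOF parent action."""
#
#     def observation_spec(self):
#       return {"tcp_pos": specs.Array(shape=(3,), dtype=np.float32)}
#
#     def arg_spec(self):
#       return None  # Stand-alone mode: no parameterization from the parent.
#
#     def action_spec(self):
#       return specs.BoundedArray(
#           shape=(4,), dtype=np.float32, minimum=-1., maximum=1.)
#
#     def agent_to_parent_action(self, agent_action):
#       # Pad the agent action with NaNs for the parent DOFs it ignores.
#       return np.append(agent_action, np.full(3, np.nan))
#
#     def parent_to_agent_timestep(self, parent_timestep, own_arg_key):
#       agent_obs = {"tcp_pos": parent_timestep.observation["tcp_pos"]}
#       return parent_timestep._replace(observation=agent_obs)
#
#     def pterm(self, parent_timestep, own_arg_key):
#       return 0.0  # Never requests termination.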
class SubTaskObserver(abc.ABC):
"""An observer that can be attached to SubTaskOption.
The observer is called on every step with both the environment-side and
agent-side timesteps and actions, this allows us to (for example) log the
environment side observations and the agent-side actions. This is useful to
log the raw observations and the actions that the agent took (rather than the
projection of that action to the environment's action space).
"""
@abc.abstractmethod
def step(self, parent_timestep: dm_env.TimeStep,
parent_action: Optional[np.ndarray], agent_timestep: dm_env.TimeStep,
agent_action: Optional[np.ndarray]) -> None:
"""Steps the observer.
    The relationship between the timestep and the action is that the action
    is a response to the timestep.
Args:
parent_timestep: The timestep output by the underlying base task, prior
to any TimestepPreprocessors being applied.
parent_action: The agent action as seen by the base task. A None action is
given if the agent timestep is a LAST timestep.
      agent_timestep: The timestep seen by the agent, prior to any
TimestepPreprocessors being applied.
agent_action: The action output by the agent. A None action is given if
the agent timestep is a LAST timestep.
"""
pass
class SubTaskOption(core.Option):
"""An option composed of a `SubTask` and a `Policy`.
The SubTask is responsible for:
Defining option-specific lifecycle methods (pterm),
Defining the environment the agent operates in.
The Agent is responsible for:
Defining the policy (through step)
Defining agent lifecycle methods (begin_episode, step, end_episode,
tear_down).
"""
def __init__(self,
sub_task: SubTask,
agent: Any,
observers: Optional[Sequence[SubTaskObserver]] = None,
name: Text = "SubTaskOption"):
"""Builds a SubTaskOption.
Args:
sub_task: A SubTask object defining life-cycle and agent-interface.
agent: Any object with a `step(environment_timestep)` method.
observers: Observers to invoke from step after actions are determined.
name: An arbitrary name for the resulting Option.
"""
super().__init__(name=name)
self._task = sub_task
self._agent = agent
self._observers = []
if observers:
self._observers.extend(observers)
# Spec expected by agents running in the subtask.
self._task_timestep_spec = spec_utils.TimeStepSpec(
observation_spec=self._task.observation_spec(),
reward_spec=self._task.reward_spec(),
discount_spec=self._task.discount_spec())
@property
def subtask(self) -> SubTask:
"""Returns the underlying subtask for this SubTaskOption."""
return self._task
@property
def agent(self) -> Any: # Should we just assume these are always `Policy`?
"""Returns the underlying policy for this SubTaskOption."""
return self._agent
@overrides(core.Option)
def arg_spec(self):
return self._task.arg_spec()
@property
@overrides(core.Option)
def arg_key(self) -> Text:
"""The key under which the SubTask can find args from the parent.
Returns:
A string key into timestep.observation under which args can be found.
"""
return self._task.get_arg_key(self._agent)
@overrides(core.Option)
# Profiling for .wrap("SubTaskOption.on_selected")
def on_selected(self,
parent_timestep: dm_env.TimeStep,
prev_option_result=None) -> None:
"""Process first timestep and delegate to agent."""
if parent_timestep.first():
self._task.reset(parent_timestep)
if isinstance(self._agent, core.Option):
agent_timestep = self._task.parent_to_agent_timestep(
parent_timestep, self.arg_key)
self._agent.on_selected(agent_timestep, prev_option_result)
@overrides(core.Option)
# Profiling for .wrap("SubTaskOption.step")
def step(self, parent_timestep: dm_env.TimeStep) -> np.ndarray:
"""Delegate step to policy, and run subtask hooks."""
if parent_timestep.first():
self._task.reset(parent_timestep)
    # pyformat: disable
    # pylint: disable=line-too-long
# Create timestep for agent, cache SubTask termination signal.
with util.nullcontext(): # create agent timestep
agent_timestep = self._create_agent_timestep(parent_timestep)
# Get action from the agent, and validate it.
with util.nullcontext(): # step agent
agent_action = self._agent.step(agent_timestep)
spec_utils.validate(self._task.action_spec(), agent_action, ignore_nan=True)
with util.nullcontext(): # agent_to_parent_action
# Subtask converts the agent action to an action for the environment.
parent_action = self._task.agent_to_parent_action(agent_action)
with util.nullcontext(): # run observers
for obs in self._observers:
obs.step(
parent_timestep=parent_timestep,
parent_action=parent_action,
agent_timestep=agent_timestep,
agent_action=agent_action)
    # pyformat: enable
    # pylint: enable=line-too-long
return parent_action
def _create_agent_timestep(
self, parent_timestep: dm_env.TimeStep) -> dm_env.TimeStep:
"""Generates a timestep for the agent."""
agent_timestep = self._task.parent_to_agent_timestep(
parent_timestep, self.arg_key)
# Check that the timestep we pass the agent matches the task spec, which
# tells the agent what to expect in the timestep.
spec_utils.validate_timestep(self._task_timestep_spec, agent_timestep)
return agent_timestep
@overrides(core.Option)
# Profiling for .wrap("SubTaskOption.pterm")
def pterm(self, parent_timestep: dm_env.TimeStep) -> float:
"""Delegate pterm to subtask."""
return self._task.pterm(parent_timestep, self.arg_key)
@overrides(core.Option)
# Profiling for .wrap("SubTaskOption.result")
def result(self, parent_timestep: dm_env.TimeStep) -> core.OptionResult:
return self._task.subtask_result(parent_timestep, self.arg_key)
@overrides(core.Option)
# Profiling for .wrap("SubTaskOption.render_frame")
def render_frame(self, canvas) -> None:
if hasattr(self._task, "render_frame"):
self._task.render_frame(canvas)
if hasattr(self._agent, "render_frame"):
self._agent.render_frame(canvas)
def child_policies(self) -> Tuple[core.Policy]:
return (self._agent,)
def add_observer(self, observer: SubTaskObserver) -> None:
self._observers.append(observer)
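# Usage sketch: wiring a SubTaskOption together. `my_subtask`, `my_agent` and
# `my_observer` are hypothetical placeholders; any concrete `SubTask`, any
# object with a `step(timestep)` method, and any `SubTaskObserver` will do:
#
#   option = SubTaskOption(
#       sub_task=my_subtask,
#       agent=my_agent,
#       observers=[my_observer])
#   parent_action = option.step(parent_timestep)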
| dm_robotics-main | py/agentflow/subtask.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Tests for subtask."""
from typing import List, NamedTuple, Optional, Text
from typing import Union
from unittest import mock
from absl.testing import absltest
import dm_env
from dm_env import specs
import dm_robotics.agentflow as af
from dm_robotics.agentflow import spec_utils
from dm_robotics.agentflow import testing_functions
import numpy as np
valid_value = testing_functions.valid_value
random_timestep = testing_functions.random_timestep
def random_step_type():
return np.random.choice(list(dm_env.StepType))
def _random_timestep(obs_spec: Union[None, specs.Array,
spec_utils.ObservationSpec] = None):
if obs_spec is None:
key = testing_functions.random_string(3)
spec = testing_functions.random_array_spec()
obs_val = valid_value(spec)
observation = {key: obs_val}
else:
observation = valid_value(obs_spec)
return dm_env.TimeStep(
step_type=random_step_type(),
reward=np.float32(np.random.random()),
discount=np.float32(np.random.random()),
observation=observation)
def _random_result():
termination_reason = np.random.choice(
[af.TerminationType.SUCCESS, af.TerminationType.FAILURE])
data = np.random.random(size=(5,)).astype(np.float32)
return af.OptionResult(termination_reason, data)
# A custom comparison function because nan != nan and our timesteps contain nan
# when there is no reward.
def _timestep_equals(lhs, rhs):
# If not iterable do normal comparison.
try:
iter(lhs)
except TypeError:
return lhs == rhs
  for field_a, field_b in zip(lhs, rhs):
    if field_a == field_b:
      continue
    if np.isnan(field_a) and np.isnan(field_b):
      continue
    # A field differs and is not mutually-NaN: the timesteps are not equal.
    return False
  return True
class StubSubTask(af.SubTask):
def __init__(self, observation_spec: spec_utils.ObservationSpec,
action_spec: specs.Array):
self._observation_spec = observation_spec
self._action_spec = action_spec
# A list of the agent actions passed to agent_to_parent_action.
self.actual_agent_actions = [] # type: List[np.ndarray]
# A list of parent actions to return from agent_to_parent_action.
# This list is popped from as agent_to_parent_action is called.
self.parent_actions = [] # type: List[np.ndarray]
# Timesteps received by parent_to_agent_timestep
self.actual_parent_timesteps = [] # type: List[dm_env.TimeStep]
# Timesteps to return from parent_to_agent_timestep.
# This list is popped from as parent_to_agent_timestep is called.
self.agent_timesteps = [] # type: List[dm_env.TimeStep]
def observation_spec(self) -> spec_utils.ObservationSpec:
return self._observation_spec
def arg_spec(self) -> Optional[specs.Array]:
return None
def action_spec(self) -> specs.BoundedArray:
return self._action_spec
def agent_to_parent_action(self, agent_action: np.ndarray) -> np.ndarray:
self.actual_agent_actions.append(np.copy(agent_action))
if not self.parent_actions:
raise ValueError("No more actions to return.")
return self.parent_actions.pop(0)
def parent_to_agent_timestep(self, parent_timestep: dm_env.TimeStep,
arg_key: Text) -> dm_env.TimeStep:
self.actual_parent_timesteps.append(parent_timestep)
if not self.agent_timesteps:
raise ValueError("no more agent timesteps")
return self.agent_timesteps.pop(0)
def pterm(self, parent_timestep: dm_env.TimeStep,
own_arg_key: Text) -> float:
return 0.
ObserverStep = NamedTuple("ObserverStep", [("parent_timestep", dm_env.TimeStep),
("parent_action", np.ndarray),
("agent_timestep", dm_env.TimeStep),
("agent_action", np.ndarray)])
class SpySubTaskObserver(af.SubTaskObserver):
def __init__(self):
self.steps = [] # type: List[ObserverStep]
def step(self, parent_timestep: dm_env.TimeStep, parent_action: np.ndarray,
agent_timestep: dm_env.TimeStep, agent_action: np.ndarray) -> None:
self.steps.append(
ObserverStep(parent_timestep, parent_action, agent_timestep,
agent_action))
class SubTaskOptionTest(absltest.TestCase):
def testTaskDefinesOptionArgSpec(self):
agent = mock.MagicMock(spec=af.Policy)
task = mock.MagicMock(spec=af.SubTask)
spec = testing_functions.random_array_spec()
task.arg_spec.return_value = spec
option = af.SubTaskOption(task, agent)
actual_arg_spec = option.arg_spec()
self.assertEqual(actual_arg_spec, spec)
def testTaskDelegatesArgKeyToOptionIfPossible(self):
policy = mock.MagicMock(spec=af.Policy)
option = mock.MagicMock(spec=af.Option)
random_action_spec = testing_functions.random_array_spec(shape=(5,))
random_observation_spec = testing_functions.random_array_spec(shape=(10,))
task = testing_functions.IdentitySubtask(
observation_spec=random_observation_spec,
action_spec=random_action_spec,
steps=100)
task_arg_key = testing_functions.random_string()
option_arg_key = testing_functions.random_string()
type(option).arg_key = mock.PropertyMock(return_value=option_arg_key)
task._default_arg_key = task_arg_key
sto_wrapping_policy = af.SubTaskOption(task, policy)
sto_wrapping_option = af.SubTaskOption(task, option)
self.assertEqual(sto_wrapping_option.arg_key, option_arg_key)
self.assertEqual(sto_wrapping_policy.arg_key, task_arg_key)
def testPtermTakenFromAgentTimestep(self):
# pterm of the SubTaskOption should delegate to the SubTask.
# 1. Arrange:
task_action_spec = testing_functions.random_array_spec(shape=(5,))
task_obs_spec = testing_functions.random_observation_spec()
agent_action = valid_value(task_action_spec)
parent_action = np.random.random(size=(5,)).astype(np.float32)
subtask_timestep = _random_timestep(task_obs_spec)
task = mock.MagicMock(spec=af.SubTask)
task.parent_to_agent_timestep.return_value = subtask_timestep
task.pterm.return_value = 0.2
task.action_spec.return_value = task_action_spec
task.agent_to_parent_action.return_value = parent_action
timestep = random_timestep(observation={})
task.observation_spec.return_value = task_obs_spec
task.reward_spec.return_value = specs.Array(
shape=(), dtype=np.float32, name="reward")
task.discount_spec.return_value = specs.Array(
shape=(), dtype=np.float32, name="discount")
agent = mock.MagicMock(spec=af.Policy)
agent.step.return_value = agent_action
option = af.SubTaskOption(task, agent)
# 2. Act:
option.step(timestep)
# 3. Assert:
self.assertEqual(option.pterm(timestep), 0.2)
def testStepTimestepFromSubtask(self):
# The timestep the agent sees in begin_episode should come from the task.
# 1. Arrange:
task_action_spec = testing_functions.random_array_spec(shape=(5,))
task_obs_spec = testing_functions.random_observation_spec(shape=(4,))
agent_action = valid_value(task_action_spec)
parent_action = np.random.random(size=(5,)).astype(np.float32)
parent_timestep = _random_timestep()
parent_timestep_without_reward = parent_timestep._replace(
reward=np.float32("nan"))
subtask_timestep = _random_timestep(task_obs_spec)
pterm = 0.7
task = mock.MagicMock(spec=af.SubTask)
task.parent_to_agent_timestep.return_value = subtask_timestep
task.pterm.return_value = pterm
task.action_spec.return_value = task_action_spec
task.agent_to_parent_action.return_value = parent_action
task.observation_spec.return_value = task_obs_spec
task.reward_spec.return_value = specs.Array(
shape=(), dtype=np.float32, name="reward")
task.discount_spec.return_value = specs.Array(
shape=(), dtype=np.float32, name="discount")
agent = mock.MagicMock(spec=af.Policy)
agent.step.return_value = agent_action
option = af.SubTaskOption(task, agent)
# 2. Act:
actual_option_action = option.step(parent_timestep)
# 3. Assert:
# Check that the task was given the correct timestep to pack.
testing_functions.assert_calls(
task.parent_to_agent_timestep,
[(parent_timestep_without_reward, option.arg_key)],
equals_fn=_timestep_equals)
# Check that the agent was given the timestep from the task.
testing_functions.assert_calls(agent.step, [(subtask_timestep,)])
    # Check that the task was given the agent action to convert to an action
# for the parent environment.
testing_functions.assert_calls(task.agent_to_parent_action,
[(agent_action,)])
# Check that this parent environment action is the one that's returned.
np.testing.assert_equal(actual_option_action, parent_action)
def testObservable(self):
# Arrange:
env_def = testing_functions.EnvironmentSpec.random()
subtask = StubSubTask(
observation_spec=testing_functions.random_observation_spec(),
action_spec=testing_functions.random_array_spec())
subtask_def = testing_functions.EnvironmentSpec.for_subtask(subtask)
# Agent definition (agent operates 'in' the SubTask):
agent_action1 = subtask_def.create_action()
agent_action2 = subtask_def.create_action()
agent = af.Sequence([af.FixedOp(agent_action1), af.FixedOp(agent_action2)])
# Observer - this is the class under test (CUT / SUT).
observer = SpySubTaskObserver()
# This is the option that the observer is observing.
subtask_option = af.SubTaskOption(subtask, agent, [observer])
# Define how the subtask will behave (two parts):
# Part 1 - The timesteps it will pass to the agent
agent_timestep1 = subtask_def.create_timestep(
step_type=dm_env.StepType.FIRST)
agent_timestep2 = subtask_def.create_timestep(step_type=dm_env.StepType.MID)
subtask.agent_timesteps.append(agent_timestep1)
subtask.agent_timesteps.append(agent_timestep2)
# Part 2 - The actions it will return to the parent.
env_action1 = env_def.create_action()
env_action2 = env_def.create_action()
subtask.parent_actions.append(env_action1)
subtask.parent_actions.append(env_action2)
# Act:
# Drive the subtask_option. This should result in our listener being
# invoked twice (once per step). Each invocation should contain the
# env-side timestep and action and the subtask-side timestep and action.
env_timestep1 = env_def.create_timestep(step_type=dm_env.StepType.FIRST)
env_timestep2 = env_def.create_timestep(step_type=dm_env.StepType.MID)
actual_parent_action1 = subtask_option.step(env_timestep1)
actual_parent_action2 = subtask_option.step(env_timestep2)
# Assert:
# Check that the observer was passed the expected values.
np.testing.assert_almost_equal(env_action1, actual_parent_action1)
np.testing.assert_almost_equal(env_action2, actual_parent_action2)
# Check the timesteps and actions that were given to the listener.
self.assertLen(observer.steps, 2)
step = observer.steps[0]
testing_functions.assert_timestep(env_timestep1, step.parent_timestep)
testing_functions.assert_value(env_action1, step.parent_action)
testing_functions.assert_timestep(agent_timestep1, step.agent_timestep)
testing_functions.assert_value(agent_action1, step.agent_action)
step = observer.steps[1]
testing_functions.assert_timestep(env_timestep2, step.parent_timestep)
testing_functions.assert_value(env_action2, step.parent_action)
testing_functions.assert_timestep(agent_timestep2, step.agent_timestep)
testing_functions.assert_value(agent_action2, step.agent_action)
if __name__ == "__main__":
absltest.main()
| dm_robotics-main | py/agentflow/subtask_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Top-level names for agentflow."""
from dm_robotics.agentflow.action_spaces import CastActionSpace # pytype: disable=pyi-error
from dm_robotics.agentflow.action_spaces import CompositeActionSpace # pytype: disable=pyi-error
from dm_robotics.agentflow.action_spaces import FixedActionSpace # pytype: disable=pyi-error
from dm_robotics.agentflow.action_spaces import prefix_slicer # pytype: disable=pyi-error
from dm_robotics.agentflow.action_spaces import SequentialActionSpace # pytype: disable=pyi-error
from dm_robotics.agentflow.action_spaces import ShrinkToFitActionSpace # pytype: disable=pyi-error
from dm_robotics.agentflow.core import ActionSpace
from dm_robotics.agentflow.core import Arg
from dm_robotics.agentflow.core import ArgSpec
from dm_robotics.agentflow.core import IdentityActionSpace
from dm_robotics.agentflow.core import MetaOption
from dm_robotics.agentflow.core import Option
from dm_robotics.agentflow.core import OptionResult
from dm_robotics.agentflow.core import Policy
from dm_robotics.agentflow.core import TerminationType
from dm_robotics.agentflow.loggers.subtask_logger import Aggregator
from dm_robotics.agentflow.loggers.subtask_logger import EpisodeReturnAggregator
from dm_robotics.agentflow.loggers.subtask_logger import SubTaskLogger
from dm_robotics.agentflow.meta_options.control_flow.cond import Cond
from dm_robotics.agentflow.meta_options.control_flow.loop_ops import Repeat
from dm_robotics.agentflow.meta_options.control_flow.loop_ops import While
from dm_robotics.agentflow.meta_options.control_flow.sequence import Sequence
from dm_robotics.agentflow.options.basic_options import all_terminate
from dm_robotics.agentflow.options.basic_options import any_terminates
from dm_robotics.agentflow.options.basic_options import ArgAdaptor
from dm_robotics.agentflow.options.basic_options import ConcurrentOption
from dm_robotics.agentflow.options.basic_options import DelegateOption
from dm_robotics.agentflow.options.basic_options import FixedOp
from dm_robotics.agentflow.options.basic_options import LambdaOption
from dm_robotics.agentflow.options.basic_options import OptionAdapter
from dm_robotics.agentflow.options.basic_options import options_terminate
from dm_robotics.agentflow.options.basic_options import PadOption
from dm_robotics.agentflow.options.basic_options import PolicyAdapter
from dm_robotics.agentflow.options.basic_options import RandomOption
from dm_robotics.agentflow.preprocessors.timestep_preprocessor import PreprocessorTimestep
from dm_robotics.agentflow.preprocessors.timestep_preprocessor import TimestepPreprocessor
from dm_robotics.agentflow.subtask import SubTask
from dm_robotics.agentflow.subtask import SubTaskObserver
from dm_robotics.agentflow.subtask import SubTaskOption
from dm_robotics.agentflow.util import log_info
from dm_robotics.agentflow.util import log_termination_reason
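# Usage sketch built only from the names exported above. The action values are
# hypothetical placeholders for arrays matching the task's action spec:
#
#   import dm_robotics.agentflow as af
#
#   reach = af.FixedOp(reach_action, name='Reach')
#   grasp = af.FixedOp(grasp_action, name='Grasp')
#   pick_up = af.Sequence([reach, grasp], name='PickUp')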
| dm_robotics-main | py/agentflow/__init__.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Core AgentFlow types."""
import abc
import enum
import functools
from typing import Any, Generic, Iterable, Optional, Text, TypeVar
import dm_env
from dm_env import specs
from dm_robotics.agentflow import spec_utils
from dm_robotics.agentflow.decorators import register_class_properties
from dm_robotics.agentflow.decorators import register_property
import numpy as np
# Internal usage logging import.
Arg = np.ndarray # pylint: disable=invalid-name
ArgSpec = specs.Array
class TerminationType(enum.IntEnum):
# The episode ended in an ordinary (internal) terminal state.
SUCCESS = 0
# The episode ended in failure.
FAILURE = 1
# The episode was preempted by an upstream signal.
PREEMPTED = 2
class UidGenerator(object):
"""Generates hashable unique identifiers for options."""
def __init__(self):
self._next_id = 1
def __call__(self):
return_value = self._next_id
self._next_id += 1
return return_value
# A shared module-level UidGenerator. We don't enforce a strict singleton
# pattern to allow UidGenerator to be used in local contexts as well.
uid_generator = UidGenerator()
class OptionResult(object):
"""The result of an Option, encapsulating the termination reason and data."""
def __init__(self,
termination_reason: TerminationType,
data: Optional[Any] = None,
termination_text: Optional[Text] = None,
termination_color: Optional[Text] = None):
assert termination_reason is not None
self._termination_reason = termination_reason
self._data = data
self.termination_text = termination_text or ''
self.termination_color = termination_color or ''
@property
def termination_reason(self) -> TerminationType:
return self._termination_reason
@termination_reason.setter
def termination_reason(self, reason: TerminationType) -> None:
self._termination_reason = reason
@property
def data(self):
return self._data
@classmethod
def success_result(cls):
return cls(termination_reason=TerminationType.SUCCESS)
@classmethod
def failure_result(cls):
return cls(termination_reason=TerminationType.FAILURE)
def __str__(self):
return 'OptionResult({}, {}, {})'.format(self.termination_reason,
self.termination_text, self.data)
def __eq__(self, other: 'OptionResult'):
return ((self._termination_reason == other.termination_reason) and
(self._data == other.data))
def __hash__(self):
return hash((self._termination_reason, self._data))
Spec = TypeVar('Spec', bound=specs.Array)
class ActionSpace(Generic[Spec], abc.ABC):
"""A mapping between actions; for example from cartesian to joint space.
An action space defines a `spec` which actions 'in' the space must adhere to,
and a `project` method that converts actions from that to another space.
"""
@property
@abc.abstractmethod
def name(self) -> Text:
"""Returns the name of this action space."""
raise NotImplementedError()
@abc.abstractmethod
def spec(self) -> Spec:
"""Spec of values that can be passed to `project`."""
raise NotImplementedError()
@abc.abstractmethod
def project(self, action: np.ndarray) -> np.ndarray:
"""Project input action (which adheres to `spec()`) to another action."""
raise NotImplementedError()
class IdentityActionSpace(ActionSpace[Spec]):
"""Identity action space."""
def __init__(self, spec: Spec, name: Text = 'identity'):
self._spec = spec
self._name = name
@property
def name(self) -> Text:
return self._name
def spec(self) -> Spec:
return self._spec
def project(self, action: np.ndarray) -> np.ndarray:
spec_utils.validate(self._spec, action, ignore_nan=True)
return action
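# A minimal sketch of a non-identity mapping, assuming only the interface
# above. `ScaledActionSpace` is illustrative rather than part of this module:
# it validates the incoming action and rescales it by a constant gain.
class ScaledActionSpace(ActionSpace[specs.BoundedArray]):
  """Example action space that multiplies actions by a fixed gain."""

  def __init__(self, spec: specs.BoundedArray, gain: float,
               name: Text = 'scaled'):
    self._spec = spec
    self._gain = gain
    self._name = name

  @property
  def name(self) -> Text:
    return self._name

  def spec(self) -> specs.BoundedArray:
    return self._spec

  def project(self, action: np.ndarray) -> np.ndarray:
    spec_utils.validate(self._spec, action, ignore_nan=True)
    # Clip so the scaled action still satisfies the (bounded) spec.
    return spec_utils.clip(np.asarray(action * self._gain), self._spec)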
class Renderable:
"""An interface for adding a `render_frame` method to a class."""
def render_frame(self, canvas) -> None:
"""Renders to provided canvas object.
Args:
canvas: An object that instances can draw on. AgentFlow does not
assume any particular interface for the Canvas, but it does forward
calls down the agent-graph from the top-level `Option` in order to allow
users to implement arbitrary drawing logic. On the task-side, it is
also forwarded through subtasks to timestep-preprocessors, which allows
rendering from user-defined tasks.
I.e.:
canvas = MyCanvas()
agent = BigAgentFlowGraph()
agent.render_frame(canvas) # All nodes should see `canvas`.
"""
pass
@register_class_properties
class Policy(abc.ABC, Renderable):
"""Base class for agents."""
def __init__(self, name: Optional[Text] = None) -> None:
"""Initialize Policy.
Args:
name: (optional) A name for the policy.
"""
# Internal usage logging hook.
self._name = name or self.__class__.__name__
@abc.abstractmethod
def step(self, timestep: dm_env.TimeStep) -> np.ndarray:
"""Determine what action to send to the environment.
Args:
timestep: A timestep containing observations, reward etc.
Returns:
      An action that the environment (or SubTask) understands.
"""
raise NotImplementedError
@property
@register_property
def name(self) -> Text:
return self._name if hasattr(self, '_name') else self.__class__.__name__
def child_policies(self) -> Iterable['Policy']:
return []
def setup(self) -> None:
"""Called once, before the run loop starts."""
for child in self.child_policies():
child.setup()
def tear_down(self) -> None:
"""Called once, after the run loop ends."""
for child in self.child_policies():
child.tear_down()
@functools.total_ordering # for __gt__, __le__, __ge__
@register_class_properties
class Option(Policy): # pytype: disable=ignored-metaclass
"""Abstract option class.
Option lifecycle:
If the framework decides to select an option, `on_selected` is called.
Next, the option enters the standard agent lifecycle methods:
`step` is called repeatedly, the step_type of the timesteps that are
passed must follow these rules:
* The first timestep must have step_type of `FIRST`
* The last timestep (before another FIRST) must have step_type of `LAST`
* All other timesteps must have a step type of `MID`.
while an option is being `step`ped, AgentFlow will call `pterm` (which
returns a float [0,1]). This is an advisory signal - returning a pterm of
1.0 does not guarantee that the next step will be a `LAST` step.
When an option is terminated (a `LAST` step is given), an `OptionResult`
is obtained from `result`. AgentFlow will use that result in subsequent
calls to `on_selected`.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, name: Optional[Text] = None) -> None:
"""Initialize Option.
Args:
name: (optional) A name for the option.
"""
super().__init__(name)
self._uid = uid_generator()
self._key_prefix = '{}_{}'.format(self._name, self._uid)
self._arg_key = '{}_arg'.format(self._key_prefix)
@abc.abstractmethod
def step(self, timestep: dm_env.TimeStep) -> np.ndarray:
raise NotImplementedError
def arg_spec(self) -> Optional[ArgSpec]:
"""Returns an arg_spec for the option.
    An arg_spec should be an `Array` (following the v2 convention), allowing a
    parent `MetaOption` to communicate runtime arguments to the option.
"""
return
@property
@register_property
def name(self) -> Text:
return self._name
@name.setter
@register_property
def name(self, value: Text):
self._name = value
@property
@register_property
def key_prefix(self) -> Text:
"""Auto-assigned prefix to ensure no collisions on arg_keys."""
return self._key_prefix
@property
@register_property
def arg_key(self) -> Text:
return self._arg_key
@property
@register_property
def uid(self) -> int:
"""Returns the auto-generated UID.
It's not expected that this property is overridden.
"""
return self._uid
def on_selected(self,
timestep: dm_env.TimeStep,
prev_option_result: Optional[OptionResult] = None) -> None:
pass
def pterm(self, timestep: dm_env.TimeStep) -> float:
"""Returns the termination probability for the current state.
Args:
timestep: an AgentTimeStep namedtuple.
"""
del timestep
return 0.0
def result(self, unused_timestep: dm_env.TimeStep) -> OptionResult:
return OptionResult(termination_reason=TerminationType.SUCCESS)
def __eq__(self, other):
return isinstance(other, Option) and self.uid == other.uid
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
return self.name < other.name
def __hash__(self):
return self.uid
def __str__(self):
return 'uid={}, name=\"{}\"'.format(self.uid, self.name)
def __repr__(self):
# not technically a repr but useful for shell debugging
return '{}({})'.format(self.__class__.__name__, str(self))
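# A minimal sketch of the lifecycle described in the Option docstring above.
# `CountdownOption` is hypothetical (not part of the library): it emits a
# fixed action and advertises termination via `pterm` after `num_steps` steps.
class CountdownOption(Option):
  """Example option that requests termination after a fixed number of steps."""

  def __init__(self, action: np.ndarray, num_steps: int,
               name: Optional[Text] = None) -> None:
    super().__init__(name)
    self._action = action
    self._num_steps = num_steps
    self._steps_taken = 0

  def step(self, timestep: dm_env.TimeStep) -> np.ndarray:
    if timestep.first():  # A FIRST step starts a fresh episode.
      self._steps_taken = 0
    self._steps_taken += 1
    return self._action

  def pterm(self, timestep: dm_env.TimeStep) -> float:
    # Advisory only: returning 1.0 does not guarantee a LAST step next.
    del timestep
    return 1.0 if self._steps_taken >= self._num_steps else 0.0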
class MetaOption(Option):
"""Base class for Options that can use other Options.
This class exists only to define a base interface for meta-Options, such that
users can mix-and-match different mechanisms to drive options in a coherent
pytype-checkable way.
"""
@abc.abstractmethod
def step(self, timestep: dm_env.TimeStep) -> np.ndarray:
raise NotImplementedError
| dm_robotics-main | py/agentflow/core.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Utility functions that are helpful in testing."""
import random
import string
import sys
import typing
from typing import Any, Dict, List, Mapping, NewType, Optional, Text, Tuple, Type, Union
from absl import logging
from dm_control import composer
from dm_control.composer import arena
import dm_env
from dm_env import specs
import dm_robotics.agentflow as af
from dm_robotics.agentflow import spec_utils
from dm_robotics.agentflow.decorators import overrides
import numpy as np
Arg = NewType('Arg', Any) # pylint: disable=invalid-name
# Container for caching the timestep argument to Option methods.
class SpyOpCall(typing.NamedTuple):
pterm: Optional[dm_env.TimeStep]
on_selected: Optional[dm_env.TimeStep]
step: Optional[dm_env.TimeStep]
result: Optional[dm_env.TimeStep]
def _equal_or_close(x: Arg, y: Arg) -> bool:
"""An equals function that can take floats, dicts, or == compatible inputs."""
if isinstance(x, float) or isinstance(x, np.ndarray):
return np.allclose(x, y, equal_nan=True)
elif isinstance(x, dict) and (set(map(type, x)) == {str}):
# If args are themselves dicts with string keys, recurse on the values.
return all(
[_equal_or_close(vx, vy) for vx, vy in zip(x.values(), y.values())])
else:
return x == y
class SpyOp(af.FixedOp):
"""FixedOp that records the timestep it's given."""
def __init__(self,
value,
pterm=None,
result=None,
arg_spec=None,
num_steps: Optional[int] = 0,
name='SpyOp'):
super().__init__(value, num_steps=num_steps, name=name)
self._pterm = pterm
self._result = result
self._timesteps = [] # type: List[SpyOpCall]
self._arg_spec = arg_spec
self._default_call = SpyOpCall(
pterm=None, on_selected=None, step=None, result=None)
def pterm(self, timestep):
self._timesteps.append(self._default_call._replace(pterm=timestep))
if self._pterm is None:
return super().pterm(timestep)
else:
return self._pterm
def result(self, timestep):
self._timesteps.append(self._default_call._replace(result=timestep))
if self._result is None:
return super().result(timestep)
else:
return self._result
def step(self, timestep: dm_env.TimeStep):
self._timesteps.append(self._default_call._replace(step=timestep))
return super().step(timestep)
def on_selected(self, timestep, prev_option_result=None):
self._timesteps.append(self._default_call._replace(on_selected=timestep))
super().on_selected(timestep, prev_option_result)
def arg_spec(self):
if self._arg_spec is None:
return super().arg_spec()
else:
return self._arg_spec
@property
def timestep(self):
if not self._timesteps:
return None
timesteps = self._timesteps[-1]._asdict().values()
return next((t for t in timesteps if t is not None), None)
@property
def timesteps(self):
return self._timesteps
def clear_timesteps(self):
del self._timesteps[:]
class FixedOpWithArg(af.FixedOp):
"""A FixedOp which expects a runtime-argument via the timestep."""
def __init__(self,
action: np.ndarray,
arg_spec: af.ArgSpec,
**kwargs):
super().__init__(action, **kwargs)
self._arg_spec = arg_spec
def arg_spec(self) -> af.ArgSpec:
return self._arg_spec
class ActionSequenceOp(af.Option):
"""An option that returns a fixed sequence of actions."""
def __init__(self, actions, name='ActionSequenceOp'):
super().__init__(name=name)
self._actions = actions
self._next_action_index = 0
@overrides(af.Option)
def step(self, timestep: dm_env.TimeStep):
logging.debug('ActionSequenceOp.step(%s) _next_action_index: %d', timestep,
self._next_action_index)
if timestep.first():
self._next_action_index = 0
index = self._next_action_index
self._next_action_index += 1
if timestep.last():
# This is not ok, it /is/ ok for index == len(self._actions)
# but not to go over that.
assert index <= len(self._actions), 'Too many steps without a LAST step.'
# self._next_action_index = 0
if index == len(self._actions):
return None
return self._actions[index]
def pterm(self, timestep) -> float:
return 0.0 if self._next_action_index < len(self._actions) else 1.0
class IdentitySubtask(af.SubTask):
"""Trivial subtask that does not change the task."""
def __init__(self,
observation_spec: specs.Array,
action_spec: specs.Array,
steps: int,
name: Optional[Text] = None) -> None:
super().__init__(name)
self._max_steps = steps
self._steps_taken = 0
self._observation_spec = observation_spec
self._action_spec = action_spec
@overrides(af.SubTask)
def observation_spec(self):
return self._observation_spec
@overrides(af.SubTask)
def action_spec(self):
return self._action_spec
@overrides(af.SubTask)
def arg_spec(self):
return None
@overrides(af.SubTask)
def agent_to_parent_action(self, agent_action: np.ndarray) -> np.ndarray:
return agent_action
@overrides(af.SubTask)
def reset(self, parent_timestep: dm_env.TimeStep):
self._steps_taken = 0
return parent_timestep
@overrides(af.SubTask)
def parent_to_agent_timestep(
self,
parent_timestep: dm_env.TimeStep,
arg_key: Optional[Text] = None) -> Tuple[dm_env.TimeStep, float]:
self._steps_taken += 1
return parent_timestep, self.pterm(parent_timestep, arg_key)
def pterm(self, parent_timestep: dm_env.TimeStep, arg_key: Text) -> float:
"""Option termination probability.
    Returns 1.0 once the maximum number of steps has been taken, else 0.0.
Args:
parent_timestep: A timestep from the parent.
arg_key: The part of the observation that stores the arg.
Returns:
Option termination probability.
"""
return 1.0 if self._steps_taken >= self._max_steps else 0.0
class SpyEnvironment(dm_env.Environment):
"""An environment for testing."""
def __init__(self, episode_length: int = sys.maxsize):
self.actions_received = [] # type: List[np.ndarray]
self._episode_length = episode_length
self._episode_step = -1
self._global_step = -1
trivial_arena = arena.Arena()
self._task = composer.NullTask(trivial_arena)
def reset(self) -> dm_env.TimeStep:
self._episode_step = 0
self.actions_received = []
return self._timestep()
def step(self, agent_action: np.ndarray) -> dm_env.TimeStep:
self.actions_received.append(agent_action)
self._episode_step += 1
self._global_step += 1
if self._episode_step > self._episode_length or self._episode_step == 0:
return self.reset()
else:
return self._timestep()
def _timestep(self) -> dm_env.TimeStep:
step_type = self._step_type()
observation = self._observation()
return dm_env.TimeStep(
step_type=step_type,
reward=np.full(
shape=self.reward_spec().shape,
fill_value=0,
dtype=self.reward_spec().dtype),
discount=np.full(
self.discount_spec().shape,
fill_value=0,
dtype=self.discount_spec().dtype),
observation=observation,
)
def _step_type(self):
if self._episode_step == 0:
return dm_env.StepType.FIRST
elif self._episode_step >= self._episode_length:
return dm_env.StepType.LAST
else:
return dm_env.StepType.MID
def _observation(self):
if self.actions_received:
last_action = self.actions_received[-1]
else:
last_action = np.full(
shape=self.action_spec().shape,
fill_value=-1,
dtype=self.action_spec().dtype)
return {
'step_count':
np.asarray([self._episode_step], dtype=np.float32),
'global_step_count':
np.asarray([self._global_step], dtype=np.float32),
'last_action':
np.copy(last_action)
}
def observation_spec(self):
return {
'step_count':
specs.BoundedArray(
shape=(1,),
dtype=np.float32,
minimum=[0],
maximum=[sys.maxsize]),
'global_step_count':
specs.BoundedArray(
shape=(1,),
dtype=np.float32,
minimum=[0],
maximum=[sys.maxsize]),
'last_action':
self.action_spec()
}
def action_spec(self):
return specs.BoundedArray(
shape=(0,), dtype=np.float32, minimum=[], maximum=[])
def get_step_count(self, timestep: dm_env.TimeStep):
assert 'step_count' in timestep.observation
step_count_array = timestep.observation['step_count']
assert step_count_array.shape == (1,)
return int(step_count_array[0])
def get_global_step_count(self, timestep: dm_env.TimeStep):
assert 'global_step_count' in timestep.observation
step_count_array = timestep.observation['global_step_count']
assert step_count_array.shape == (1,)
return int(step_count_array[0])
def get_last_action(self, timestep: dm_env.TimeStep):
assert 'last_action' in timestep.observation
return timestep.observation['last_action']
@property
def physics(self):
return None
def task(self):
return self._task
def atomic_option_with_name(name, action_size=2):
return af.FixedOp(
action=random_action(action_size), name=name)
def random_action(action_size=2):
return np.random.random(size=(action_size,))
def random_string(length=None) -> Text:
length = length or random.randint(5, 10)
return ''.join(random.choice(string.ascii_letters) for _ in range(length))
def random_shape(ndims=None) -> Tuple[int, ...]:
ndims = ndims or random.randint(1, 3)
return tuple([random.randint(1, 10) for _ in range(ndims)])
def random_dtype() -> Type[np.floating]:
return random.choice([float, np.float32, np.float64])
def unit_array_spec(shape: Optional[Tuple[int, ...]] = None,
name=None) -> specs.BoundedArray:
shape = shape or random_shape()
dtype = random.choice([np.float32, np.float64])
minimum = np.zeros(shape=shape, dtype=dtype)
maximum = np.ones(shape=shape, dtype=dtype)
name = name or random_string()
return specs.BoundedArray(shape, dtype, minimum, maximum, name)
def random_array_spec(
shape: Optional[Tuple[int, ...]] = None,
name: Optional[Text] = None,
dtype: Optional[Type[np.floating]] = None,
minimum: Optional[np.ndarray] = None,
maximum: Optional[np.ndarray] = None) -> specs.BoundedArray:
"""Create BoundedArray spec with unspecified parts randomized."""
shape = shape or random_shape()
name = name or random_string()
dtype = dtype or random.choice([np.float32, np.float64])
if minimum is None:
minimum = np.random.random(size=shape) * random.randint(0, 10)
minimum = minimum.astype(dtype)
if maximum is None:
maximum = np.random.random(size=shape) * random.randint(0, 10) + minimum
maximum = maximum.astype(dtype)
return specs.BoundedArray(shape, dtype, minimum, maximum, name)
def random_observation_spec(
size: Optional[int] = None,
shape: Optional[Tuple[int, ...]] = None,
dtype: Optional[Type[np.floating]] = None) -> Dict[Text, specs.Array]:
size = random.randint(3, 6) if size is None else size
obs_spec = {}
for _ in range(size):
name = random_string(3)
obs_spec[name] = random_array_spec(shape, name, dtype)
return obs_spec
def random_step_type() -> dm_env.StepType:
return random.choice(list(dm_env.StepType))
def valid_value(spec: Union[specs.Array, spec_utils.ObservationSpec,
spec_utils.TimeStepSpec]):
"""Returns a valid value from the primitive, dict, or timestep spec."""
def valid_primitive(prim_spec):
value = np.random.random(size=prim_spec.shape).astype(prim_spec.dtype)
if isinstance(prim_spec, specs.BoundedArray):
# Clip specs to handle +/- np.inf in the specs.
maximum = np.clip(prim_spec.maximum, -1e10, 1e10)
minimum = np.clip(prim_spec.minimum, -1e10, 1e10)
value *= (maximum - minimum)
value += minimum
else:
value *= 1e10 # Make range / magnitude assumptions unlikely to hold.
return value.astype(prim_spec.dtype)
if isinstance(spec, dict):
return {k: valid_primitive(v) for k, v in spec.items()}
elif isinstance(spec, specs.Array):
return valid_primitive(spec)
elif isinstance(spec, spec_utils.TimeStepSpec):
return dm_env.TimeStep(
step_type=dm_env.StepType.FIRST,
reward=valid_value(spec.reward_spec),
discount=valid_value(spec.discount_spec),
observation=valid_value(spec.observation_spec))
else:
raise ValueError('bad spec, type: {}'.format(type(spec)))
def assert_value(expected, actual, path=None):
"""Fails if the expected and actual values are different."""
if expected is actual:
return
path = path or ''
msg = '\nexpected: {}\ngot: {}'.format(expected, actual)
if isinstance(expected, np.ndarray):
if not isinstance(actual, np.ndarray):
raise AssertionError('array vs not at {}: {}'.format(path, msg))
np.testing.assert_almost_equal(expected, actual)
elif isinstance(expected, dict):
if not isinstance(actual, dict):
raise AssertionError('dict vs not at {}: {}'.format(path, msg))
if sorted(expected.keys()) != sorted(actual.keys()):
raise AssertionError('wrong keys at {}: {}'.format(path, msg))
for key in expected.keys():
assert_value(expected[key], actual[key], path + '/{}'.format(key))
else:
raise AssertionError('Bad type given: {}: {}'.format(type(expected), msg))
def assert_spec(expected: spec_utils.TimeStepSpec,
actual: spec_utils.TimeStepSpec):
return expected == actual
def assert_timestep(lhs: dm_env.TimeStep, rhs: dm_env.TimeStep):
assert_value(lhs.observation, rhs.observation)
np.testing.assert_almost_equal(lhs.reward, rhs.reward)
np.testing.assert_almost_equal(lhs.discount, rhs.discount)
if lhs.step_type != rhs.step_type:
raise AssertionError('step types differ left: {}, right {}'.format(
lhs, rhs))
def _call_string(call):
"""Converts the provided call to string."""
positional_args = call[0]
keyword_args = call[1]
arg_strings = []
if positional_args:
arg_strings.append(', '.join([str(arg) for arg in positional_args]))
if keyword_args:
arg_strings.append(', '.join(
[f'{k}={v}' for (k, v) in keyword_args.items()]))
arg_string = ', '.join(arg_strings)
return f'({arg_string})'
def _call_strings(calls):
return [_call_string(call) for call in calls]
def _args_match(actual_arg, expected_arg, equals_fn):
"""Return True if actual_arg matched expected_arg."""
if actual_arg is expected_arg:
return True
if actual_arg is None or expected_arg is None:
return False # They're not /both/ None.
return equals_fn(actual_arg, expected_arg)
def call_matches(mock_call_obj,
expected_args: Tuple, # pylint: disable=g-bare-generic
expected_kwargs: Dict[Text, Any],
equals_fn=None):
"""Return True if the args and kwargs in mock_call_obj are as expected."""
if equals_fn is None:
equals_fn = _equal_or_close
mock_positional_args = mock_call_obj[0]
mock_keyword_args = mock_call_obj[1]
args_matches = [
_args_match(*args, equals_fn=equals_fn)
for args in zip(mock_positional_args, expected_args)
]
if mock_keyword_args.keys() != expected_kwargs.keys():
return False
aligned_kwargs = [[mock_keyword_args[k], expected_kwargs[k]]
for k in mock_keyword_args.keys()]
kwargs_matches = [
_args_match(*args, equals_fn=equals_fn)
for args in aligned_kwargs
]
return all(args_matches) and all(kwargs_matches)
def assert_calls(mock_obj,
expected_args: Optional[List[Tuple]] = None, # pylint: disable=g-bare-generic
expected_kwargs: Optional[List[Dict[Text, Any]]] = None,
equals_fn=None):
"""Checks that the calls made to the given match the args given.
This function takes a mock function on which function calls may have occurred,
and corresponding lists of args and kwargs for what those calls should have
been.
It then checks that the args and kwargs match the mock call list exactly,
using np.testing.assert_equal for numpy arguments.
It does not check the call order, but does check that there are no extra or
missing calls.
Args:
mock_obj: The mock (function) to check calls on.
expected_args: A list (one per call) of positional argument tuples.
expected_kwargs: A list (one per call) of keyword argument dicts.
equals_fn: Custom comparison function which will be called with potential
argument pairs. If the default (None) is used than we will compare the
objects using a default function which can handle numpy arrays and
string-key specially, and deferrs everything else to ==.
Returns:
None
Raises:
AssertionError: if the calls do not match.
"""
if expected_args is None and expected_kwargs is not None:
expected_args = [()] * len(expected_kwargs)
elif expected_args is not None and expected_kwargs is None:
expected_kwargs = [{}] * len(expected_args)
elif expected_args is not None and expected_kwargs is not None:
assert len(expected_args) == len(expected_kwargs)
expected_calls = list(zip(expected_args, expected_kwargs))
actual_calls = mock_obj.call_args_list
if len(actual_calls) != len(expected_args):
raise AssertionError('Expected {} calls, but got {}'.format(
len(expected_args), len(actual_calls)))
found = [False] * len(actual_calls)
matched = [False] * len(actual_calls)
for expected_index, expected_call in enumerate(expected_calls):
for actual_index, actual_call in enumerate(actual_calls):
if not matched[actual_index] and call_matches(
actual_call,
expected_args=expected_call[0],
expected_kwargs=expected_call[1],
equals_fn=equals_fn):
matched[actual_index] = True
found[expected_index] = True
break
if not all(found):
expected_not_found = [
call for found, call in zip(found, expected_calls) if not found
]
unmatched_actual = [
call for matched, call in zip(matched, actual_calls) if not matched
]
raise AssertionError(('Did not find all expected calls in actual calls.\n'
'Expected but not found:\n{}\n'
'Unmatched, actual calls:\n{}').format(
_call_strings(expected_not_found),
_call_strings(unmatched_actual)))
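# Usage sketch for `assert_calls`, assuming `fn` is a unittest.mock.Mock that
# was called twice (the argument values below are illustrative):
#
#   fn(np.zeros(2), key='a')
#   fn(np.ones(2), key='b')
#   assert_calls(fn,
#                expected_args=[(np.zeros(2),), (np.ones(2),)],
#                expected_kwargs=[{'key': 'a'}, {'key': 'b'}])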
def random_reward_spec(shape: Optional[Tuple[int, ...]] = None,
dtype: Optional[Type[np.floating]] = None,
name: Text = 'reward') -> specs.Array:
if not shape:
shape = ()
if not dtype:
dtype = random_dtype()
return specs.Array(shape=shape, dtype=dtype, name=name)
def random_discount_spec(shape: Optional[Tuple[int, ...]] = None,
dtype: Optional[Type[np.floating]] = None,
minimum: float = 0.,
maximum: float = 1.,
name: Text = 'discount') -> specs.BoundedArray:
"""Generate a discount spec."""
if not shape:
shape = ()
if not dtype:
dtype = random_dtype()
return specs.BoundedArray(
shape=shape, dtype=dtype, minimum=minimum, maximum=maximum, name=name)
def random_timestep_spec(
observation_spec: Optional[Mapping[Text, specs.Array]] = None,
reward_spec: Optional[specs.Array] = None,
discount_spec: Optional[specs.BoundedArray] = None
) -> spec_utils.TimeStepSpec:
"""Generate a timestep spec."""
if not observation_spec:
observation_spec = random_observation_spec()
if not reward_spec:
reward_spec = random_reward_spec()
if not discount_spec:
discount_spec = random_discount_spec()
return spec_utils.TimeStepSpec(
observation_spec=observation_spec,
reward_spec=reward_spec,
discount_spec=discount_spec)
def random_timestep(spec: Optional[spec_utils.TimeStepSpec] = None,
step_type: Optional[dm_env.StepType] = None,
reward: Optional[np.floating] = None,
discount: Optional[np.floating] = None,
observation: Optional[spec_utils.ObservationValue] = None):
"""Create a timestep."""
if spec and not observation:
# Create a valid observation:
observation = valid_value(spec.observation_spec)
else:
observation = observation or {} # no spec => no observation.
if step_type is None:
step_type = random_step_type()
if reward is None:
if spec:
reward = valid_value(spec.reward_spec)
else:
reward = random_dtype()(np.random.random())
if discount is None:
if spec:
discount = valid_value(spec.discount_spec)
else:
discount = random_dtype()(np.random.random())
timestep = dm_env.TimeStep(
step_type=step_type,
reward=reward,
discount=discount,
observation=observation)
  # We should not return an invalid timestep.
if spec:
spec_utils.validate_timestep(spec, timestep)
return timestep
class EnvironmentSpec(object):
"""Convenience class for creating valid timesteps and actions."""
@classmethod
def random(cls):
"""Create an EnvironmentSpec with randomly created specs."""
ts_spec = random_timestep_spec()
action_spec = random_array_spec()
return EnvironmentSpec(timestep_spec=ts_spec, action_spec=action_spec)
@classmethod
def for_subtask(cls, subtask: af.SubTask):
timestep_spec = spec_utils.TimeStepSpec(
observation_spec=subtask.observation_spec(),
reward_spec=subtask.reward_spec(),
discount_spec=subtask.discount_spec())
action_spec = subtask.action_spec()
return EnvironmentSpec(timestep_spec=timestep_spec, action_spec=action_spec)
def __init__(self, timestep_spec: spec_utils.TimeStepSpec,
action_spec: specs.Array):
self.action_spec = action_spec
self.spec = timestep_spec
def create_timestep(
self,
step_type: Optional[dm_env.StepType] = None,
reward: Optional[np.floating] = None,
discount: Optional[np.floating] = None,
observation: Optional[spec_utils.ObservationValue] = None
) -> dm_env.TimeStep:
return random_timestep(self.spec, step_type, reward, discount, observation)
def create_action(self) -> np.ndarray:
val = valid_value(self.action_spec) # type: np.ndarray # pytype: disable=annotation-type-mismatch
return val
def composite_spec(*components: specs.BoundedArray) -> specs.BoundedArray:
"""Create a spec by composing / concatenating the given specs."""
if not components:
raise ValueError('No specs to compose')
for spec in components:
if len(spec.shape) != 1:
raise ValueError('Not creating composite spec: not all shapes are 1-D')
if not all(spec.dtype == components[0].dtype for spec in components):
raise ValueError('not all dtypes match')
shape = (sum(spec.shape[0] for spec in components),)
dtype = components[0].dtype
minimum = np.hstack([spec.minimum for spec in components])
maximum = np.hstack([spec.maximum for spec in components])
name = '\t'.join(spec.name for spec in components)
return specs.BoundedArray(shape, dtype, minimum, maximum, name)
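# Usage sketch for `composite_spec` (spec names and bounds are illustrative):
#
#   arm = specs.BoundedArray((7,), np.float32, -np.ones(7), np.ones(7), 'arm')
#   hand = specs.BoundedArray((1,), np.float32, [0.], [1.], 'hand')
#   merged = composite_spec(arm, hand)
#   # merged.shape == (8,); bounds are the concatenation of both specs.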
| dm_robotics-main | py/agentflow/testing_functions.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for dealing with action and observation specifications.
These specifications can be nested lists and dicts of `Array` and its
subclass `BoundedArray`.
"""
from typing import Any, Mapping, Optional, Sequence, Tuple, Type, TypeVar
from absl import logging
import dm_env
from dm_env import specs
import numpy as np
# Internal profiling
ObservationSpec = Mapping[str, specs.Array]
ObservationValue = Mapping[str, np.ndarray]
ScalarOrArray = TypeVar('ScalarOrArray', np.floating, np.ndarray)
class TimeStepSpec(object):
"""Type specification for a TimeStep."""
def __init__(self, observation_spec: ObservationSpec,
reward_spec: specs.Array, discount_spec: specs.Array):
self._observation_spec = observation_spec
self._reward_spec = reward_spec
self._discount_spec = discount_spec
@property
def observation_spec(self) -> Mapping[str, specs.Array]:
return dict(self._observation_spec)
@property
def reward_spec(self) -> specs.Array:
return self._reward_spec
@property
def discount_spec(self) -> specs.Array:
return self._discount_spec
def validate(self, timestep: dm_env.TimeStep):
validate_observation(self.observation_spec, timestep.observation)
validate(self.reward_spec, timestep.reward)
validate(self.discount_spec, timestep.discount)
def minimum(self) -> dm_env.TimeStep:
"""Return a valid timestep with all minimum values."""
reward = minimum(self._reward_spec)
discount = minimum(self._discount_spec)
observation = {k: minimum(v) for k, v in self._observation_spec.items()}
return dm_env.TimeStep(
step_type=dm_env.StepType.MID,
observation=observation,
discount=discount,
reward=reward)
def maximum(self) -> dm_env.TimeStep:
"""Return a valid timestep with all minimum values."""
reward = maximum(self._reward_spec)
discount = maximum(self._discount_spec)
observation = {k: maximum(v) for k, v in self._observation_spec.items()}
return dm_env.TimeStep(
step_type=dm_env.StepType.MID,
observation=observation,
discount=discount,
reward=reward)
def replace(self,
observation_spec: Optional[Mapping[str, specs.Array]] = None,
reward_spec: Optional[specs.Array] = None,
discount_spec: Optional[specs.Array] = None) -> 'TimeStepSpec':
"""Return a new TimeStepSpec with specified fields replaced."""
if observation_spec is None:
observation_spec = self._observation_spec
if reward_spec is None:
reward_spec = self._reward_spec
if discount_spec is None:
discount_spec = self._discount_spec
return TimeStepSpec(
observation_spec=observation_spec,
reward_spec=reward_spec,
discount_spec=discount_spec)
def __eq__(self, other):
if not isinstance(other, TimeStepSpec):
return False
# All the properties of the spec must be equal.
if self.reward_spec != other.reward_spec:
return False
if self.discount_spec != other.discount_spec:
return False
if len(self.observation_spec) != len(other.observation_spec):
return False
for key in self.observation_spec:
if (key not in other.observation_spec or
self.observation_spec[key] != other.observation_spec[key]):
return False
return True
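# Example: a tiny TimeStepSpec for a scalar-reward task (names illustrative):
#
#   spec = TimeStepSpec(
#       observation_spec={'pos': specs.Array((3,), np.float32, 'pos')},
#       reward_spec=specs.Array((), np.float32, 'reward'),
#       discount_spec=specs.BoundedArray((), np.float32, 0., 1., 'discount'))
#   spec.validate(spec.minimum())  # Both extremes are themselves valid steps.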
def minimum(spec: specs.Array):
if hasattr(spec, 'minimum'):
return clip(np.asarray(spec.minimum, dtype=spec.dtype), spec)
elif np.issubdtype(spec.dtype, np.integer):
return np.full(spec.shape, np.iinfo(spec.dtype).min, dtype=spec.dtype)
elif isinstance(spec, specs.StringArray):
return np.full(spec.shape, spec.string_type(''), dtype=object)
else:
return np.full(spec.shape, np.finfo(spec.dtype).min, dtype=spec.dtype)
def maximum(spec: specs.Array):
if hasattr(spec, 'maximum'):
return clip(np.asarray(spec.maximum, dtype=spec.dtype), spec)
elif np.issubdtype(spec.dtype, np.integer):
return np.full(spec.shape, np.iinfo(spec.dtype).max, dtype=spec.dtype)
elif isinstance(spec, specs.StringArray):
return np.full(spec.shape, spec.string_type(''), dtype=object)
else:
return np.full(spec.shape, np.finfo(spec.dtype).max, dtype=spec.dtype)
def zeros(action_spec: specs.Array) -> np.ndarray:
"""Create a zero value for this Spec."""
return np.zeros(shape=action_spec.shape, dtype=action_spec.dtype)
def cast(spec: specs.Array, value: ScalarOrArray) -> ScalarOrArray:
"""Cast a value to conform to a spec."""
if np.isscalar(value):
return spec.dtype.type(value)
else:
return value.astype(spec.dtype)
def clip(value: np.ndarray, spec: specs.BoundedArray) -> np.ndarray:
"""Clips the given value according to the spec."""
if value is None:
raise ValueError('no value')
  if np.issubdtype(spec.dtype, np.inexact):
eps = np.finfo(spec.dtype).eps * 5.0
else:
eps = 0
min_bound = np.array(spec.minimum, dtype=spec.dtype)
max_bound = np.array(spec.maximum, dtype=spec.dtype)
return np.clip(value, min_bound + eps, max_bound - eps)
def shrink_to_fit(
value: np.ndarray,
spec: specs.BoundedArray,
ignore_nan: Optional[bool] = None,
) -> np.ndarray:
"""Scales the value towards zero to fit within spec min and max values.
Clipping is done after scaling to ensure there are no values that are very
slightly (say 10e-8) out of range.
This, by nature, assumes that min <= 0 <= max for the spec.
Args:
value: np.ndarray to scale towards zero.
spec: Specification for value to scale and clip.
ignore_nan: If True, NaN values will not fail validation. If None, this is
determined by the size of `value`, so that large values are not checked.
Returns:
Scaled and clipped value.
Raises:
ValueError: On missing values or high-dimensional values.
"""
if value is None:
raise ValueError('no value')
if spec is None:
raise ValueError('no spec')
if not isinstance(value, np.ndarray):
raise ValueError('value not numpy array ({})'.format(type(value)))
if len(value.shape) > 1:
raise ValueError('2d values not yet handled')
if not isinstance(spec, specs.BoundedArray):
raise ValueError('Cannot scale to spec: {})'.format(spec))
if np.any(spec.minimum > 0) or np.any(spec.maximum < 0):
raise ValueError('Cannot scale to spec, due to bounds: {})'.format(spec))
factor = 1.0
for val, min_val, max_val in zip(value, spec.minimum, spec.maximum):
if val < min_val:
new_factor = min_val / val
if new_factor < factor and new_factor > 0:
factor = new_factor
if val > max_val:
new_factor = max_val / val
if new_factor < factor and new_factor > 0:
factor = new_factor
scaled = (value * factor).astype(spec.dtype)
clipped = clip(scaled, spec)
try:
validate(spec, clipped, ignore_nan)
except ValueError:
logging.error('Failed to scale %s to %s. Got: %s', value, spec, clipped)
return clipped
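# Worked example for `shrink_to_fit` (values illustrative): with bounds
# [-1, 1] per dimension, the vector [2.0, 0.5] is scaled by 0.5 so that the
# worst violation just fits, preserving direction, and is then clipped:
#
#   spec = specs.BoundedArray((2,), np.float32, [-1., -1.], [1., 1.])
#   shrink_to_fit(np.array([2.0, 0.5], dtype=np.float32), spec)
#   # -> approximately [1.0, 0.25]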
def merge_specs(spec_list: Sequence[specs.BoundedArray]):
"""Merges a list of BoundedArray into one."""
# Check all specs are flat.
for spec in spec_list:
if len(spec.shape) > 1:
raise ValueError('Not merging multi-dimensional spec: {}'.format(spec))
# Filter out no-op specs with no actuators.
spec_list = [spec for spec in spec_list if spec.shape and spec.shape[0]]
dtype = np.find_common_type([spec.dtype for spec in spec_list], [])
num_actions = 0
name = ''
mins = np.array([], dtype=dtype)
maxs = np.array([], dtype=dtype)
for i, spec in enumerate(spec_list):
num_actions += spec.shape[0]
if name:
name += '\t'
name += spec.name or f'spec_{i}'
mins = np.concatenate([mins, spec.minimum])
maxs = np.concatenate([maxs, spec.maximum])
return specs.BoundedArray(
shape=(num_actions,), dtype=dtype, minimum=mins, maximum=maxs, name=name)
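def _example_merge_specs_usage():
  """Illustrative sketch (not part of the original module): `merge_specs`.

  The two flat specs below are arbitrary assumptions; merging concatenates
  their bounds and joins their names with a tab, giving a (3,) spec.
  """
  arm = specs.BoundedArray(
      shape=(2,), dtype=np.float32, minimum=[-1.0, -1.0], maximum=[1.0, 1.0],
      name='arm')
  gripper = specs.BoundedArray(
      shape=(1,), dtype=np.float32, minimum=[0.0], maximum=[255.0],
      name='gripper')
  return merge_specs([arm, gripper])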
def merge_primitives(values: Sequence[np.ndarray],
default_value: Optional[float] = None) -> np.ndarray:
"""Merge the given values (arrays) where NaNs are considered missing.
Args:
values: The values to merge.
default_value: A default value to replace NaNs with, after merging.
Returns:
A merged value.
Raises:
ValueError: On ambiguity, shape/dtype mismatch, or no values.
An ambiguity means >1 arrays have a non-nan value in the same index.
"""
if not values:
raise ValueError('No values to merge')
  # Use the first value to determine the output shape and dtype.
shape = values[0].shape
dtype = values[0].dtype
result = np.ndarray(shape=shape, dtype=dtype)
result.fill(np.nan)
if len(shape) != 1:
raise ValueError('Not implemented for multi-dimensional arrays')
for value in values:
if value.shape != shape:
raise ValueError('Shape mismatch, expect {} got {}. All: {}'.format(
shape, value.shape, [v.shape for v in values]))
if value.dtype != dtype:
raise ValueError('dtype mismatch, expect {} got {}. All: {}'.format(
dtype, value.dtype, [v.dtype for v in values]))
for i in range(shape[0]):
if not np.isnan(value[i]):
if np.isnan(result[i]):
result[i] = value[i]
else:
raise ValueError('Ambiguous merge at index {} with values: {}'.format(
i, values))
if default_value is not None:
result[np.isnan(result)] = default_value
return result
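def _example_merge_primitives_usage():
  """Illustrative sketch (not part of the original module): `merge_primitives`.

  NaN marks a missing entry; each index may be set by at most one input.
  Index 2 is missing from both inputs and is filled with the default.
  """
  left = np.asarray([1.0, 2.0, np.nan, np.nan])
  right = np.asarray([np.nan, np.nan, np.nan, 4.0])
  merged = merge_primitives([left, right], default_value=0.0)
  return merged  # -> [1.0, 2.0, 0.0, 4.0]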
def merge_in_default(value, default_value):
"""Fill in the given value with the parts of the default_value."""
if value is None:
return default_value
if isinstance(default_value, dict):
for key in default_value.keys():
value[key] = merge_in_default(value.get(key, None), default_value[key])
return value
elif isinstance(value, list):
for i in range(len(default_value)):
if i >= len(value):
value.append(default_value[i])
else:
value[i] = merge_in_default(value[i], default_value[i])
return value
else:
return value
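def _example_merge_in_default_usage():
  """Illustrative sketch (not part of the original module): `merge_in_default`.

  Keys missing from `value` are filled in from the default; values that are
  already present win. The dictionaries below are arbitrary assumptions.
  """
  value = {'robot': {'speed': 2.0}}
  default = {'robot': {'speed': 1.0, 'timeout': 5.0}, 'seed': 0}
  merged = merge_in_default(value, default)
  return merged  # -> {'robot': {'speed': 2.0, 'timeout': 5.0}, 'seed': 0}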
def validate_timestep(spec: TimeStepSpec, timestep: dm_env.TimeStep):
  """Validates each field of `timestep` against the corresponding spec."""
validate_observation(spec.observation_spec, timestep.observation)
validate(spec.reward_spec, timestep.reward)
validate(spec.discount_spec, timestep.discount)
def ensure_spec_compatibility(sub_specs: TimeStepSpec,
full_specs: TimeStepSpec):
"""Validates compatibility of 2 timestep specs.
For the observations we only check inclusion of sub_specs in full_specs.
Args:
    sub_specs: The spec that is expected to be contained within `full_specs`.
    full_specs: The full spec that `sub_specs` is checked against.
  Raises:
    ValueError: If the discount_spec, the reward_spec, or any of the
      observation specs do not match.
KeyError: If an observation in sub_specs is not in full_specs.
"""
if sub_specs.discount_spec != full_specs.discount_spec:
raise ValueError('Non matching discount specs.\nDiscount_sub_spec : {} \n'
'Discount_full_specs: {}\n'.format(
sub_specs.discount_spec, full_specs.discount_spec))
if sub_specs.reward_spec != full_specs.reward_spec:
raise ValueError('Non matching reward specs.\nReward_sub_spec : {} \n'
'Reward_spec: {}\n'.format(sub_specs.reward_spec,
full_specs.reward_spec))
for obs_spec_key, obs_spec in sub_specs.observation_spec.items():
if obs_spec_key not in full_specs.observation_spec:
raise KeyError('Missing observation key {} in spec.'.format(obs_spec_key))
if obs_spec != full_specs.observation_spec[obs_spec_key]:
raise ValueError('Non matching observation specs for key {}. \n'
'sub_spec = {} \n spec = {}'.format(
obs_spec_key, obs_spec,
full_specs.observation_spec[obs_spec_key]))
def verify_specs_equal_unbounded(expected: specs.Array, actual: specs.Array):
"""Assert that two specs are equal."""
if expected.shape != actual.shape:
raise ValueError(f'invalid shape for spec {expected.name}: '
f'{expected.shape}, actual shape: {actual.shape}')
if expected.dtype != actual.dtype:
raise ValueError(f'invalid dtype for spec {expected.name}: '
f'{expected.dtype}, actual dtype: {actual.dtype}')
if expected.name != actual.name:
raise ValueError(f'invalid name for spec {expected.name}: '
f'{expected.name}, actual name: {actual.name}')
def verify_specs_equal_bounded(expected: specs.BoundedArray,
actual: specs.BoundedArray):
"""Check specs are equal, raise a ValueError if they are not."""
  if not isinstance(expected, specs.BoundedArray):
    raise ValueError(f'Expected BoundedArray for first spec {expected.name}, '
                     f'got {type(expected)}')
  if not isinstance(actual, specs.BoundedArray):
    raise ValueError(f'Expected BoundedArray for second spec {actual.name}, '
                     f'got {type(actual)}')
if not np.allclose(expected.minimum, actual.minimum):
raise ValueError(f'Minimum values for spec {expected.name} do not match')
if not np.allclose(expected.maximum, actual.maximum):
raise ValueError(f'Maximum values for spec {expected.name} do not match')
verify_specs_equal_unbounded(expected, actual)
def validate_observation(spec: ObservationSpec,
value: ObservationValue,
check_extra_keys: bool = True,
ignore_nan: Optional[bool] = None,
ignore_ranges: Optional[bool] = None,
msg: Optional[str] = None):
"""Validate an observation against an observation spec.
Args:
spec: The spec to validate against.
    value: The value to validate.
check_extra_keys: If True having extra observations will fail.
ignore_nan: If True, NaN values will not fail validation. If None, this is
determined by the size of `value`, so that large values are not checked.
ignore_ranges: If True, ignore minimum and maximum of BoundedArray. If None,
this is determined by the size of `value`, so that large values are not
checked.
msg: message to append to any failure message.
Raises:
ValueError: On a validation failure.
"""
if check_extra_keys:
extra_keys = set(value.keys()) - set(spec.keys())
if extra_keys:
raise ValueError(
'Extra keys in observation:\nSpec keys: {}\nvalue keys: {}\n'
'Extra keys: {}'.format(spec.keys(), value.keys(), extra_keys))
for spec_key, sub_spec in spec.items():
if spec_key in value: # Assumes missing keys are allowed.
validate(
sub_spec,
value[spec_key],
ignore_nan=ignore_nan,
ignore_ranges=ignore_ranges,
msg='{} for observation {}'.format(msg, spec_key))
# Profiling for .wrap('spec_utils.validate')
def validate(spec: specs.Array,
value: np.ndarray,
ignore_nan: Optional[bool] = None,
ignore_ranges: Optional[bool] = None,
msg: Optional[str] = None):
"""Validates that value matches the spec.
Args:
spec: The spec to validate against.
    value: The value to validate.
ignore_nan: If True, NaN values will not fail validation. If None, this is
determined by the shape of `value`, so that large arrays (e.g. images) are
not checked (for performance reasons).
ignore_ranges: If True, ignore minimum and maximum of BoundedArray. If None,
this is determined by the size of `value`, so that large values are not
checked.
msg: message to append to any failure message.
Raises:
ValueError: On a validation failure.
"""
if value is None:
return # ASSUME this is ok.
value = np.asarray(value)
if not np.issubdtype(value.dtype, np.number):
# The value is non-numeric, so skip the nan and range checks.
ignore_nan = True
ignore_ranges = True
elif np.prod(spec.shape) > 128:
# Check less, in this case.
if ignore_nan is None:
ignore_nan = True
if ignore_ranges is None:
ignore_ranges = True
else:
# Check more in this case, it's cheap.
if ignore_nan is None:
ignore_nan = False
if ignore_ranges is None:
ignore_ranges = False
if not ignore_nan:
if np.any(np.isnan(value)):
raise ValueError('NaN in value: {}, spec: {} ({})'.format(
value, spec, msg))
if not ignore_ranges or isinstance(spec, specs.StringArray):
# Perform full validation if user cares about range, or if a StringArray.
# Explanation: StringArray.validate has no range, but `validate` outputs an
# array with `object` dtype rather than `string_type`. Therefore it will
# fail the dtype check below even if it is a valid input.
spec.validate(value)
else:
if spec.shape != value.shape:
raise ValueError('shape mismatch {}. {} vs. {}'.format(
msg, spec, value.shape))
if spec.dtype != value.dtype:
raise ValueError('dtype mismatch {}. {} vs. {}'.format(
msg, spec, value.dtype))
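def _example_validate_usage():
  """Illustrative sketch (not part of the original module): `validate`.

  Small numeric arrays get full NaN and range checks by default; the spec and
  values below are arbitrary assumptions.
  """
  spec = specs.BoundedArray(
      shape=(2,), dtype=np.float32, minimum=[0.0, 0.0], maximum=[1.0, 1.0])
  validate(spec, np.asarray([0.5, 0.5], dtype=np.float32))  # Passes.
  try:
    validate(spec, np.asarray([0.5, np.nan], dtype=np.float32))
  except ValueError:
    pass  # NaN fails validation for small arrays unless ignore_nan=True.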
def assert_not_dtype(spec: specs.Array, dtype: Type[Any]):
"""Asserts that the spec is not of the given dtype.
Args:
spec: A spec to validate.
dtype: The dtype to check for.
"""
dtype = np.dtype(dtype)
maybe_spec_name = find_dtype(spec, dtype) # pytype: disable=wrong-arg-types # typed-numpy
if maybe_spec_name:
spec, name = maybe_spec_name
raise AssertionError('type {} found in {} ({})'.format(dtype, spec, name))
def find_dtype(spec: specs.Array,
dtype: Type[np.floating]) -> Optional[Tuple[specs.Array, str]]:
"""Finds if the given spec uses the give type.
Args:
spec: A spec to search.
dtype: The dtype to find.
Returns:
None if no match found, else (spec, spec_name) of the spec using dtype.
"""
dtype = np.dtype(dtype)
match = None # type: Optional[Tuple[specs.Array, str]]
if isinstance(spec, specs.Array):
    if spec.dtype == dtype:
match = (spec, '')
elif isinstance(spec, dict):
for name, subspec in spec.items():
if find_dtype(subspec, dtype):
match = (subspec, name)
else:
raise ValueError('Unknown spec type {}'.format(type(spec)))
return match
| dm_robotics-main | py/agentflow/spec_utils.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package building script."""
import setuptools
def _get_requirements(requirements_file): # pylint: disable=g-doc-args
"""Returns a list of dependencies for setup() from requirements.txt.
Currently a requirements.txt is being used to specify dependencies. In order
to avoid specifying it in two places, we're going to use that file as the
source of truth.
Lines starting with -r will be ignored. If the requirements are split across
multiple files, call this function multiple times instead and sum the results.
"""
def line_should_be_included(line):
return line and not line.startswith("-r")
with open(requirements_file) as f:
return [_parse_line(line) for line in f if line_should_be_included(line)]
def _parse_line(s):
"""Parses a line of a requirements.txt file."""
requirement, *_ = s.split("#")
return requirement.strip()
setuptools.setup(
name="dm_robotics-agentflow",
package_dir={"dm_robotics.agentflow": ""},
packages=[
"dm_robotics.agentflow",
"dm_robotics.agentflow.loggers",
"dm_robotics.agentflow.meta_options.control_flow",
"dm_robotics.agentflow.options",
"dm_robotics.agentflow.preprocessors",
"dm_robotics.agentflow.rendering",
"dm_robotics.agentflow.subtasks",
],
version="0.5.0",
license="Apache 2.0",
author="DeepMind",
description="Tools for single-embodiment, multiple-task, Reinforcement Learning",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url="https://github.com/deepmind/dm_robotics/tree/main/py/agentflow",
python_requires=">=3.7, <3.11",
setup_requires=["wheel >= 0.31.0"],
install_requires=(_get_requirements("requirements.txt") +
_get_requirements("requirements_external.txt")),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries",
"Topic :: Scientific/Engineering",
],
zip_safe=True,
)
| dm_robotics-main | py/agentflow/setup.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Tests SpyEnvironment."""
from absl.testing import absltest
import dm_env
from dm_robotics.agentflow import testing_functions
import numpy as np
def action(spec, value):
return np.full(shape=spec.shape, fill_value=value, dtype=spec.dtype)
class SpyEnvironmentTest(absltest.TestCase):
def testSomeSteps(self):
env = testing_functions.SpyEnvironment()
ts1 = env.reset()
action1 = action(env.action_spec(), 1.0)
ts2 = env.step(action1)
action2 = action(env.action_spec(), 2.0)
ts3 = env.step(action2)
# Check step_count.
self.assertEqual(env.get_step_count(ts1), 0)
self.assertEqual(env.get_step_count(ts2), 1)
self.assertEqual(env.get_step_count(ts3), 2)
# Check last_action.
np.testing.assert_array_almost_equal(env.get_last_action(ts2), action1)
np.testing.assert_array_almost_equal(env.get_last_action(ts3), action2)
# Check the step types.
self.assertIs(ts1.step_type, dm_env.StepType.FIRST)
self.assertIs(ts2.step_type, dm_env.StepType.MID)
self.assertIs(ts3.step_type, dm_env.StepType.MID)
def testReset(self):
env = testing_functions.SpyEnvironment(episode_length=3)
action1 = action(env.action_spec(), 1)
action2 = action(env.action_spec(), 2)
action3 = action(env.action_spec(), 3)
action4 = action(env.action_spec(), 4)
action5 = action(env.action_spec(), 5)
ts1 = env.step(action1) # Should reset, ignoring action.
ts2 = env.step(action2) # Step 1 of 3
ts3 = env.step(action3) # Step 2 of 3
ts4 = env.step(action4) # Step 3 of 3
ts5 = env.reset()
ts6 = env.step(action5)
# Check step types.
self.assertIs(ts1.step_type, dm_env.StepType.FIRST)
self.assertIs(ts2.step_type, dm_env.StepType.MID)
self.assertIs(ts3.step_type, dm_env.StepType.MID)
self.assertIs(ts4.step_type, dm_env.StepType.LAST)
self.assertIs(ts5.step_type, dm_env.StepType.FIRST)
self.assertIs(ts6.step_type, dm_env.StepType.MID)
def testImplicitResets(self):
env = testing_functions.SpyEnvironment(episode_length=2)
action1 = action(env.action_spec(), 1)
action2 = action(env.action_spec(), 2)
action3 = action(env.action_spec(), 3)
action4 = action(env.action_spec(), 4)
action5 = action(env.action_spec(), 5)
action6 = action(env.action_spec(), 6)
ts1 = env.step(action1) # Should reset, ignoring action.
ts2 = env.step(action2) # Step 1 of 2
ts3 = env.step(action3) # Step 2 of 2
ts4 = env.step(action4) # Should reset.
ts5 = env.step(action5) # Step 1 of 2
ts6 = env.step(action6) # Step 2 of 2
# Check step types.
self.assertIs(ts1.step_type, dm_env.StepType.FIRST)
self.assertIs(ts2.step_type, dm_env.StepType.MID)
self.assertIs(ts3.step_type, dm_env.StepType.LAST)
self.assertIs(ts4.step_type, dm_env.StepType.FIRST)
self.assertIs(ts5.step_type, dm_env.StepType.MID)
self.assertIs(ts6.step_type, dm_env.StepType.LAST)
# Check in-episode step count.
self.assertEqual(env.get_step_count(ts1), 0)
self.assertEqual(env.get_step_count(ts2), 1)
self.assertEqual(env.get_step_count(ts3), 2)
self.assertEqual(env.get_step_count(ts4), 0)
self.assertEqual(env.get_step_count(ts5), 1)
self.assertEqual(env.get_step_count(ts6), 2)
# Check global step count.
self.assertEqual(env.get_global_step_count(ts1), 0)
self.assertEqual(env.get_global_step_count(ts2), 1)
self.assertEqual(env.get_global_step_count(ts3), 2)
self.assertEqual(env.get_global_step_count(ts4), 3)
self.assertEqual(env.get_global_step_count(ts5), 4)
self.assertEqual(env.get_global_step_count(ts6), 5)
if __name__ == '__main__':
absltest.main()
| dm_robotics-main | py/agentflow/testing_functions_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ActionSpace implementations."""
import re
from typing import Generator, Iterable, List, Optional, Tuple, Sequence, Union
from absl import logging
from dm_env import specs
from dm_robotics.agentflow import core
from dm_robotics.agentflow import spec_utils
import numpy as np
# Internal profiling
class _Deslicer(core.ActionSpace):
"""Creates a value by projecting the given value into a larger one."""
def __init__(
self,
name: str,
spec: core.Spec,
output_shape: Tuple[int], # 1-d
output_dtype: np.dtype,
mask: Sequence[bool],
default_value: Optional[np.floating] = None):
super().__init__()
self._name = name
self._input_spec = spec
self._output_shape = output_shape
self._output_dtype = output_dtype
self._mask = mask
    if not np.any(mask):
      logging.warning(
          'Deslicer passed a mask with no valid indices to write to.')
self._default = np.nan if default_value is None else default_value
@property
def name(self) -> str:
return self._name
def spec(self) -> core.Spec:
return self._input_spec
# Profiling for .wrap('Deslicer.project')
def project(self, action: np.ndarray) -> np.ndarray:
output = np.full(
shape=self._output_shape,
fill_value=self._default,
dtype=self._output_dtype)
if np.any(self._mask):
output[self._mask] = action
return output
def prefix_slicer(
spec: core.Spec,
prefix: str,
default_value: Optional[float] = None) -> core.ActionSpace[core.Spec]:
"""An ActionSpace for actions starting with prefix.
The spec name is split on tab, and it's expected that the number of names
from this split matches the shape of the spec, I.e. that each component has
a name.
The returned ActionSpace will project from actions with the prefix to the
given spec, inserting default_value (or NaN, if missing).
I.e, given a spec with seven components that start with 'robot_0' and
some that do not, this function will return an ActionSpace of size seven
that when projected will have the same size as the input spec did, but with
NaNs for all the components that don't start with 'robot_0'.
Args:
spec: The (primitive) action spec to 'slice'.
prefix: A regular expression for components to select. Note that we use
regular expression comparison, so you can use exclusion patterns too.
    default_value: The default value used by the Deslicer.
Returns:
An ActionSpace.
Given a value that conforms to the (currently implicit) input spec,
return a value that conforms to the larger spec given to this function.
Raises:
ValueError: If a non-primitive spec is given or the names in the spec don't
split as expected.
"""
# Special case an empty input spec.
if np.prod(spec.shape) == 0:
return core.IdentityActionSpace(spec)
names = np.asarray(spec.name.split('\t'))
prefix_expr = re.compile(prefix)
indices: List[bool] = [
re.match(prefix_expr, name) is not None for name in names
]
if len(names) != spec.shape[0]:
raise ValueError('Expected {} names, got {}. Name: {}'.format(
spec.shape[0], len(names), names))
if isinstance(spec, specs.DiscreteArray):
raise ValueError('Support for DiscreteArray not implemented, yet.')
elif isinstance(spec, specs.BoundedArray):
input_spec = specs.BoundedArray(
shape=(np.count_nonzero(indices),),
dtype=spec.dtype,
minimum=spec.minimum[indices],
maximum=spec.maximum[indices],
name='\t'.join(names[indices]))
elif isinstance(spec, specs.Array):
input_spec = specs.Array(
shape=(np.count_nonzero(indices),),
dtype=spec.dtype,
name='\t'.join(names[indices]))
else:
raise ValueError('unknown spec type: {}'.format(type(spec)))
return _Deslicer(
name=prefix,
spec=input_spec,
output_shape=spec.shape,
output_dtype=spec.dtype,
mask=indices,
default_value=default_value)
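def _example_prefix_slicer_usage():
  """Illustrative sketch (not part of the original module): `prefix_slicer`.

  The component names below are arbitrary assumptions. Slicing on 'arm_'
  yields a two-component space whose `project` re-inserts NaN for the
  non-matching 'gripper' component.
  """
  spec = specs.BoundedArray(
      shape=(3,), dtype=np.float32, minimum=-np.ones(3), maximum=np.ones(3),
      name='arm_1\tarm_2\tgripper')
  arm_space = prefix_slicer(spec, 'arm_')
  # arm_space.spec().shape == (2,); project maps back to shape (3,).
  return arm_space.project(np.asarray([0.1, 0.2], dtype=np.float32))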
class CastActionSpace(core.ActionSpace):
"""Casts actions to the appropriate dtype for the provided spec."""
def __init__(self,
spec: core.Spec,
ignore_nan: Optional[bool] = None,
name: str = 'cast'):
"""Initializes SequentialActionSpace.
Note: ShrinkToFitActionSpace also casts, so this should only be used if
scaling is not desired.
Args:
spec: Specification for value to cast to.
ignore_nan: If True, NaN values will not fail validation. If None, this is
determined by the shape of `value`, so that large arrays (e.g. images)
are not checked (for performance reasons).
name: A name for action space.
"""
if np.issubdtype(spec.dtype, np.integer):
logging.warning('Casting to %s will fail for NaN', spec.dtype)
self._spec = spec
self._ignore_nan = ignore_nan
self._name = name
@property
def name(self) -> str:
return self._name
def spec(self) -> core.Spec:
return self._spec
def project(self, action: np.ndarray) -> np.ndarray:
cast_action = action.astype(self._spec.dtype)
spec_utils.validate(self._spec, cast_action, self._ignore_nan)
return cast_action
class ShrinkToFitActionSpace(core.ActionSpace[specs.BoundedArray]):
"""Action space that scales an action if any component falls out of bounds."""
def __init__(self,
spec: specs.BoundedArray,
ignore_nan: Optional[bool] = None,
name: str = 'shrink_to_fit'):
"""Action space that scales the value towards zero to fit within spec.
This action-space also casts the value to the dtype of the provided spec.
Args:
spec: Specification for value to scale and clip.
ignore_nan: If True, NaN values will not fail validation. If None, this is
determined by the shape of `value`, so that large arrays (e.g. images)
are not checked (for performance reasons).
name: A name for action space.
"""
self._spec = spec
self._ignore_nan = ignore_nan
self._name = name
@property
def name(self) -> str:
return self._name
def spec(self) -> specs.BoundedArray:
return self._spec
def project(self, action: np.ndarray) -> np.ndarray:
return spec_utils.shrink_to_fit(
value=action, spec=self._spec, ignore_nan=self._ignore_nan)
class FixedActionSpace(core.ActionSpace):
"""Like a partial function application, for an action space."""
def __init__(self, action_space: core.ActionSpace, fixed_value: np.ndarray):
self._action_space = action_space
self._fixed_value = fixed_value
space_shape = action_space.spec().shape
value_shape = fixed_value.shape
if space_shape != value_shape:
raise ValueError('Shape mismatch. Spec: {} ({}), Value: {} ({})'.format(
action_space, space_shape, fixed_value, value_shape))
@property
def name(self) -> str:
return 'Fixed'
def spec(self) -> core.Spec:
return specs.BoundedArray((0,), np.float32, minimum=[], maximum=[], name='')
def project(self, action: np.ndarray) -> np.ndarray:
return self._action_space.project(self._fixed_value)
class SequentialActionSpace(core.ActionSpace):
"""Apply a sequence of ActionSpaces iteratively.
This allows users to compose action transformations, or apply a transformation
to a subset of a larger action space, e.g. one sliced out by `prefix_slicer`.
"""
def __init__(self,
action_spaces: Sequence[core.ActionSpace[core.Spec]],
name: Optional[str] = None):
"""Initialize SequentialActionSpace.
Args:
action_spaces: A sequence of action spaces to apply in order.
name: Optional name. Defaults to 0th action space name.
"""
self._action_spaces = action_spaces
self._name = name or action_spaces[0].name
@property
def name(self) -> str:
return self._name
def spec(self) -> core.Spec:
return self._action_spaces[0].spec()
def project(self, action: np.ndarray) -> np.ndarray:
"""Projects the action iteratively through the sequence."""
for action_space in self._action_spaces:
action = action_space.project(action)
return action
class CompositeActionSpace(core.ActionSpace[specs.BoundedArray]):
"""Composite Action Space consisting of other action spaces.
Assumptions (which are verified):
1. The input action spaces all `project` to the same shape space. I.e. the
output of project for each space has the same shape.
2. The input action spaces are all one dimensional.
3. The input action spaces are all ActionSpace[BoundedArray] instances.
Behaviour:
The outputs of the composed action spaces are merged not by concatenation -
  they all output arrays of the same shape - but by using NaN as a sentinel
  for missing values. An example merging:
    input1: [1.0, 2.0, NaN, NaN, NaN]
    input2: [NaN, NaN, NaN, 4.0, 5.0]
    merged: [1.0, 2.0, NaN, 4.0, 5.0]
It is invalid if there is a position where both inputs have a non-NaN value.
"""
def __init__(self,
action_spaces: Iterable[core.ActionSpace[specs.BoundedArray]],
name: Optional[str] = None):
if not action_spaces:
logging.warning('CompositeActionSpace created with no action_spaces.')
sub_specs = [space.spec() for space in action_spaces]
for spec in sub_specs:
if not isinstance(spec, specs.BoundedArray):
raise ValueError('spec {} is not a BoundedArray, (type: {})'.format(
spec, type(spec)))
sizes = [spec.shape[0] for spec in sub_specs]
minimums = [spec.minimum for spec in sub_specs]
maximums = [spec.maximum for spec in sub_specs]
if sub_specs:
spec_dtype = np.find_common_type([spec.dtype for spec in sub_specs], [])
min_dtype = np.find_common_type([lim.dtype for lim in minimums], [])
max_dtype = np.find_common_type([lim.dtype for lim in maximums], [])
minimum = np.concatenate(minimums).astype(min_dtype)
maximum = np.concatenate(maximums).astype(max_dtype)
else:
# No input spaces; we have to default the data type.
spec_dtype = np.float32
minimum = np.asarray([], dtype=spec_dtype)
maximum = np.asarray([], dtype=spec_dtype)
self._component_action_spaces = action_spaces
self._composite_action_spec = specs.BoundedArray(
shape=(sum(sizes),),
dtype=spec_dtype,
minimum=minimum,
maximum=maximum,
name='\t'.join([spec.name for spec in sub_specs if spec.name]))
self._name = name or '_'.join(
[space.name for space in action_spaces if space.name])
@property
def name(self) -> str:
return self._name
def spec(self) -> specs.BoundedArray:
return self._composite_action_spec
# Profiling for .wrap('CompositeActionSpace.project')
def project(self, action: np.ndarray) -> np.ndarray:
if not self._component_action_spaces:
return np.asarray([], dtype=self.spec().dtype)
# Check input value has correct shape (and legal values).
spec_utils.validate(self._composite_action_spec, action, ignore_nan=True)
cur_action = None # type: np.ndarray
for action_space, action_component in zip(self._component_action_spaces,
self._action_components(action)):
projection = action_space.project(action_component)
if cur_action is None:
cur_action = np.full(
projection.shape, fill_value=np.nan, dtype=projection.dtype)
elif not np.all(cur_action.shape == projection.shape):
raise ValueError(f'Projected actions differ in shape! cur_action: '
f'{cur_action.shape}, projection: {projection.shape}')
cur_empty_indices = np.isnan(cur_action)
proj_empty_indices = np.isnan(projection)
assert np.all(
np.logical_or(proj_empty_indices, cur_empty_indices)
), 'The projection and current action empty indices do not align'
proj_valid_indices = np.logical_not(proj_empty_indices)
cur_action[proj_valid_indices] = projection[proj_valid_indices]
assert cur_action is not None, 'Program error, no action created!'
return cur_action
def _action_components(
self, action: np.ndarray) -> Generator[np.ndarray, None, None]:
start_index = 0
for action_space in self._component_action_spaces:
input_length = action_space.spec().shape[0]
end_index = start_index + input_length
assert end_index <= self._composite_action_spec.shape[0]
action_component = action[start_index:end_index]
start_index = end_index
yield action_component
assert start_index == self._composite_action_spec.shape[0]
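def _example_composite_action_space_usage():
  """Illustrative sketch (not part of the original module).

  Composes two prefix-sliced sub-spaces over an assumed named spec. The
  composite input is the concatenation of the sub-space inputs, and `project`
  merges the two NaN-padded outputs back into one action.
  """
  spec = specs.BoundedArray(
      shape=(4,), dtype=np.float32, minimum=-np.ones(4), maximum=np.ones(4),
      name='a_1\ta_2\tb_1\tb_2')
  composite = CompositeActionSpace(
      [prefix_slicer(spec, 'a_'), prefix_slicer(spec, 'b_')])
  value = np.asarray([0.1, 0.2, 0.3, 0.4], dtype=np.float32)
  return composite.project(value)  # Identity here: [0.1, 0.2, 0.3, 0.4].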
def constrained_action_spec(minimum: Union[float, Sequence[float]],
maximum: Union[float, Sequence[float]],
base: specs.BoundedArray) -> specs.BoundedArray:
"""Returns action spec bounds constrained by the given minimum and maximum.
Args:
minimum: The new minimum spec constraint.
maximum: The new maximum spec constraint.
base: The base action space spec.
"""
  minimum = np.asarray(minimum, dtype=base.dtype)
  if minimum.shape not in ((), base.minimum.shape):
    raise ValueError('minimum not compatible with base shape')
  minimum = np.maximum(base.minimum, minimum)
  maximum = np.asarray(maximum, dtype=base.dtype)
  if maximum.shape not in ((), base.maximum.shape):
    raise ValueError('maximum not compatible with base shape')
  maximum = np.minimum(base.maximum, maximum)
  # Check that the tightened bounds are still consistent (min <= max).
  if np.any(minimum > maximum):
    raise ValueError('minimum bound exceeds maximum bound')
return specs.BoundedArray(
shape=base.shape,
dtype=base.dtype,
minimum=minimum,
maximum=maximum,
name=base.name)
def constrained_action_space(
minimum: Union[float, Sequence[float]],
maximum: Union[float, Sequence[float]],
base: core.ActionSpace[specs.BoundedArray],
name: Optional[str] = None) -> core.ActionSpace[specs.BoundedArray]:
"""Returns an action space that is a constrained version of the base space."""
spec = constrained_action_spec(minimum, maximum, base.spec())
constrained_space = core.IdentityActionSpace(spec)
return SequentialActionSpace([constrained_space, base], name)
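def _example_constrained_action_space_usage():
  """Illustrative sketch (not part of the original module).

  Tightens an assumed identity base space from [-1, 1] to [-0.1, 0.1]; the
  returned space validates against the tighter bounds before delegating to
  the base space.
  """
  base_spec = specs.BoundedArray(
      shape=(2,), dtype=np.float32, minimum=-np.ones(2), maximum=np.ones(2))
  constrained = constrained_action_space(
      minimum=-0.1, maximum=0.1, base=core.IdentityActionSpace(base_spec))
  return constrained.project(np.asarray([0.05, -0.05], dtype=np.float32))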
| dm_robotics-main | py/agentflow/action_spaces.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for action_spaces."""
from typing import Text
from absl import flags
from absl.testing import absltest
from dm_robotics.agentflow import action_spaces
from dm_robotics.agentflow import testing_functions
import numpy as np
FLAGS = flags.FLAGS
class CompositeActionSpaceTest(absltest.TestCase):
def _create_spec(self, *names: Text):
name = "\t".join(names)
return testing_functions.random_array_spec(shape=(len(names),), name=name)
def test_single_action_space(self):
outer_spec = self._create_spec("a_1", "a_2", "b_1")
primitive = action_spaces.prefix_slicer(outer_spec, "a_")
composite = action_spaces.CompositeActionSpace([primitive])
self.assertEqual(primitive.spec(), composite.spec())
value = testing_functions.valid_value(primitive.spec())
np.testing.assert_array_almost_equal(
primitive.project(value), composite.project(value))
def test_adjacent_action_spaces_full(self):
# Testing two action spaces that project to values that are adjacent in
# the output value and that cover the entire output value
outer_spec = self._create_spec("a_1", "a_2", "b_1", "b_2")
primitive_1 = action_spaces.prefix_slicer(outer_spec, "a_")
primitive_2 = action_spaces.prefix_slicer(outer_spec, "b_")
composite = action_spaces.CompositeActionSpace([primitive_1, primitive_2])
self.assertEqual(outer_spec, composite.spec())
# The composite spec is the two sub specs in order, so project should be
# an identity - the output should be the input.
value = testing_functions.valid_value(composite.spec())
np.testing.assert_array_almost_equal(composite.project(value), value)
def test_adjacent_action_spaces_full_reverse(self):
# As above, but the primitive action spaces are in reverse order, so the
# composite spec should not match and should rearrange values in project.
outer_spec = self._create_spec("a_1", "a_2", "b_1", "b_2")
primitive_1 = action_spaces.prefix_slicer(outer_spec, "a_")
primitive_2 = action_spaces.prefix_slicer(outer_spec, "b_")
composite = action_spaces.CompositeActionSpace([primitive_2, primitive_1])
self.assertEqual(outer_spec.name, "\t".join(["a_1", "a_2", "b_1", "b_2"]))
self.assertEqual(composite.spec().name,
"\t".join(["b_1", "b_2", "a_1", "a_2"]))
input_value = testing_functions.valid_value(composite.spec())
expected_output_value = np.concatenate([input_value[2:], input_value[:2]])
np.testing.assert_array_almost_equal(
composite.project(input_value), expected_output_value)
def test_adjacent_action_spaces_partial(self):
# Testing two action spaces that project to values that are adjacent in
# the output value but do not cover the entire output value
outer_spec = self._create_spec("a_1", "a_2", "b_1", "b_2", "c_1")
primitive_1 = action_spaces.prefix_slicer(outer_spec, "a_")
primitive_2 = action_spaces.prefix_slicer(outer_spec, "b_")
composite = action_spaces.CompositeActionSpace([primitive_1, primitive_2])
input_value = testing_functions.valid_value(composite.spec())
expected_output_value = np.concatenate([input_value,
np.asarray([np.nan])
]).astype(input_value.dtype)
np.testing.assert_array_almost_equal(
composite.project(input_value), expected_output_value)
def test_separated_action_spaces(self):
# like test_adjacent_action_spaces_partial, but the gap is in the middle.
outer_spec = self._create_spec("a_1", "a_2", "c_1", "b_1", "b_2")
primitive_1 = action_spaces.prefix_slicer(outer_spec, "a_")
primitive_2 = action_spaces.prefix_slicer(outer_spec, "b_")
composite = action_spaces.CompositeActionSpace([primitive_1, primitive_2])
input_value = testing_functions.valid_value(composite.spec())
expected_output_value = np.concatenate(
[input_value[:2], [np.nan], input_value[2:]]).astype(input_value.dtype)
np.testing.assert_array_almost_equal(
composite.project(input_value), expected_output_value)
def test_composite_action_spaces(self):
# Compose composite action spaces.
name = "\t".join(["a1", "a2", "b1", "b2", "c1", "c2"] +
["d1", "d2", "e1", "e2", "f1", "f2"])
outer_spec = testing_functions.random_array_spec(shape=(12,), name=name)
# Make specs for [a, c, d, f], I.e. b and e are missing.
primitive_1 = action_spaces.prefix_slicer(outer_spec, "a")
primitive_2 = action_spaces.prefix_slicer(outer_spec, "c")
primitive_3 = action_spaces.prefix_slicer(outer_spec, "d")
primitive_4 = action_spaces.prefix_slicer(outer_spec, "f")
# Make specs for [a, c] and [d, f]
composite_1 = action_spaces.CompositeActionSpace([primitive_1, primitive_2])
composite_2 = action_spaces.CompositeActionSpace([primitive_3, primitive_4])
composite = action_spaces.CompositeActionSpace([composite_1, composite_2])
input_value = testing_functions.valid_value(composite.spec())
two_nans = [np.nan, np.nan]
expected_1 = [input_value[0:2], two_nans, input_value[2:4]]
expected_2 = [input_value[4:6], two_nans, input_value[6:8]]
expected_output_value = np.concatenate(expected_1 + expected_2)
expected_output_value = expected_output_value.astype(input_value.dtype)
np.testing.assert_array_almost_equal(
composite.project(input_value), expected_output_value)
def test_zero_action_spaces(self):
composite = action_spaces.CompositeActionSpace([])
self.assertEqual(composite.spec().shape, (0,))
composite.spec().validate(np.asarray([], dtype=np.float32))
def test_zero_sized_space(self):
# Testing two action spaces that project to values that are adjacent in
# the output value and that cover the entire output value
outer_spec = self._create_spec("a_1", "a_2", "b_1", "b_2")
primitive_1 = action_spaces.prefix_slicer(outer_spec, "a_")
primitive_2 = action_spaces.prefix_slicer(outer_spec, "EMPTY")
primitive_3 = action_spaces.prefix_slicer(outer_spec, "b_")
composite = action_spaces.CompositeActionSpace(
[primitive_1, primitive_2, primitive_3])
self.assertEqual(outer_spec, composite.spec())
self.assertEqual(outer_spec.name, composite.spec().name)
# The composite spec is the two sub specs in order, so project should be
# an identity - the output should be the input.
value = testing_functions.valid_value(composite.spec())
np.testing.assert_array_almost_equal(composite.project(value), value)
def test_with_fixed_space(self):
# Testing two action spaces that project to values that are adjacent in
# the output value and that cover the entire output value
outer_spec = self._create_spec("a_1", "a_2", "b_1", "b_2")
primitive_1 = action_spaces.prefix_slicer(outer_spec, "a_")
primitive_2 = action_spaces.prefix_slicer(outer_spec, "b_")
value_2 = testing_functions.valid_value(primitive_2.spec())
fixed_2 = action_spaces.FixedActionSpace(primitive_2, value_2)
composite = action_spaces.CompositeActionSpace([primitive_1, fixed_2])
self.assertEqual(primitive_1.spec(), composite.spec())
input_value = testing_functions.valid_value(composite.spec())
output_value = composite.project(input_value)
np.testing.assert_array_almost_equal(output_value[:2], input_value)
np.testing.assert_array_almost_equal(output_value[2:], value_2)
if __name__ == "__main__":
absltest.main()
| dm_robotics-main | py/agentflow/composite_action_space_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Tests for dm_robotics.agentflow.action_spaces."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_env import specs
from dm_robotics.agentflow import action_spaces
from dm_robotics.agentflow import core
from dm_robotics.agentflow import testing_functions
import numpy as np
class ActionSpacesTest(parameterized.TestCase):
def test_constrained_action_spec(self):
spec = specs.BoundedArray(
shape=(2,), dtype=float, minimum=[-50.0, 0.0], maximum=[50.0, 100.0])
space = core.IdentityActionSpace(spec)
constrained_spec = action_spaces.constrained_action_spec(
minimum=[-10.0, 20.0], maximum=[40.0, 90.0], base=spec)
constrained_space = core.IdentityActionSpace(constrained_spec)
good_base_input = np.asarray([0.0, 10.0])
np.testing.assert_almost_equal(
space.project(good_base_input), good_base_input)
# This action is within the new min/max bounds, should pass.
good_smaller_input = np.asarray([0.0, 25.0])
np.testing.assert_almost_equal(
constrained_space.project(good_smaller_input), good_smaller_input)
# The original action that passed the base space should fail in the smaller
# action space.
with self.assertRaises(ValueError):
constrained_space.project(good_base_input)
# Check handling of scalar min/max
spec = specs.BoundedArray(
shape=(3,), dtype=float, minimum=-50.0, maximum=50.0)
constrained_spec = action_spaces.constrained_action_spec(
minimum=-10.0, maximum=40.0, base=spec)
constrained_space = core.IdentityActionSpace(constrained_spec)
good_constrained_input = np.asarray([0.0] * 3)
np.testing.assert_almost_equal(
constrained_space.project(good_constrained_input),
good_constrained_input)
bad_constrained_input = np.asarray([90.0] * 3)
with self.assertRaises(ValueError):
constrained_space.project(bad_constrained_input)
def test_constrained_action_space(self):
spec = specs.BoundedArray(
shape=(2,), dtype=float, minimum=[-50.0, 0.0], maximum=[50.0, 100.0])
space = core.IdentityActionSpace(spec)
constrained_space = action_spaces.constrained_action_space(
minimum=[-10.0, 20.0], maximum=[40.0, 90.0], base=space)
good_base_input = np.asarray([0.0, 10.0])
np.testing.assert_almost_equal(
space.project(good_base_input), good_base_input)
# This action is within the new min/max bounds, should pass.
good_smaller_input = np.asarray([0.0, 25.0])
np.testing.assert_almost_equal(
constrained_space.project(good_smaller_input), good_smaller_input)
# The original action that passed the base space should fail in the smaller
# action space.
with self.assertRaises(ValueError):
constrained_space.project(good_base_input)
def test_simple_fixed_action_space(self):
base = specs.Array(shape=(2,), dtype=np.float32, name='a1\ta2')
base_space = action_spaces.prefix_slicer(base, 'a')
fixed_spec = action_spaces.FixedActionSpace(
base_space, np.asarray([1, 2], dtype=np.float32))
self.assertEqual(base_space.spec().shape, (2,))
self.assertEqual(fixed_spec.spec().shape, (0,))
np.testing.assert_almost_equal(
fixed_spec.project(np.asarray([], np.float32)),
np.asarray([1, 2], dtype=np.float32))
self.assertIsNotNone(fixed_spec.spec().name)
def test_exclusion_slicer(self):
base = specs.Array(shape=(4,), dtype=np.float32,
name='a1\ta2\texclude_action1\texclude_action2')
base_space = action_spaces.prefix_slicer(base,
'^(?!exclude)[[a-zA-Z0-9-_.]+$')
fixed_spec = action_spaces.FixedActionSpace(
base_space, np.asarray([1, 2], dtype=np.float32))
self.assertEqual(base_space.spec().shape, (2,))
self.assertEqual(fixed_spec.spec().shape, (0,))
np.testing.assert_almost_equal(
fixed_spec.project(np.asarray([], np.float32)),
np.asarray([1, 2, np.nan, np.nan], dtype=np.float32))
self.assertIsNotNone(fixed_spec.spec().name)
def test_shrink_to_fit_action_space(self):
# Corresponds to `spec_utils_test.test_primitive`.
spec = specs.BoundedArray(
shape=(3,),
dtype=float,
minimum=[0.0, 0.0, 0.0],
maximum=[20.0, 100.0, 20.0])
action_space = action_spaces.ShrinkToFitActionSpace(spec)
val1 = np.asarray([21.0, 5.0, 21.0]) # over-max, under-min, over-max
factor1 = 20.0 / 21.0
expected1 = np.asarray([20.0, 5.0 * factor1, 20.0])
testing_functions.assert_value(action_space.project(val1), expected1)
val2 = np.asarray([1.0, 200.0, 21.0]) # ok, over-max, over-max
expected2 = np.asarray([0.5, 100.0, 10.5])
testing_functions.assert_value(action_space.project(val2), expected2)
def test_identity_action_space_output(self):
spec = specs.BoundedArray(
shape=(2,), dtype=float, minimum=[-50.0, 0.0], maximum=[50.0, 100.0])
space = core.IdentityActionSpace(spec)
good_input = np.asarray([0.0, 10.0])
bad_input = np.asarray([0.0, 110.0])
np.testing.assert_almost_equal(space.project(good_input), good_input)
    with self.assertRaises(ValueError):
      space.project(bad_input)
def test_cast_action_space_output(self):
spec = specs.BoundedArray(
shape=(2,), dtype=np.float32, minimum=[-1.0, -2.0], maximum=[1.0, 2.0])
# Should pass validation if action has NaN and ignore_nan is True.
space = action_spaces.CastActionSpace(spec, ignore_nan=True)
_ = space.project(np.asarray([0.0, np.nan]))
# Should raise an exception if action has NaN and ignore_nan is False.
space = action_spaces.CastActionSpace(spec, ignore_nan=False)
with self.assertRaises(ValueError):
space.project(np.asarray([0.0, np.nan]))
# Should raise an exception if action has wrong shape.
with self.assertRaises(ValueError):
space.project(np.asarray([0.0, 0.0, 0.0]))
# Should raise an exception if action is out of bounds.
with self.assertRaises(ValueError):
space.project(np.asarray([0.0, 3.0]))
# Should cast a float64 to float32 and pass validation.
good_input = np.asarray([0.0, 1.0], dtype=np.float64)
expected_result = np.asarray([0.0, 1.0], dtype=np.float32)
actual_result = space.project(good_input)
np.testing.assert_array_almost_equal(expected_result, actual_result)
self.assertEqual(expected_result.dtype, actual_result.dtype)
@parameterized.parameters(
specs.Array(shape=(3,), dtype=np.float32, name='a11\ta12\ta2'),
specs.BoundedArray(shape=(3,), dtype=np.float32, name='a11\ta12\ta2',
minimum=[-1., -2., -3.], maximum=[1., 2., 3.]),
)
def test_sequential_action_space(self, base_spec):
base_space = action_spaces.prefix_slicer(base_spec, 'a')
subspace1 = action_spaces.prefix_slicer(base_space.spec(), 'a1')
subspace2 = action_spaces.prefix_slicer(subspace1.spec(), 'a12')
sequential_spec = action_spaces.SequentialActionSpace(
[subspace2, subspace1, base_space], 'Sequential space')
self.assertEqual(base_space.spec().shape, (3,))
self.assertEqual(sequential_spec.spec().shape, subspace2.spec().shape)
expected_result = np.asarray([np.nan, 3., np.nan], dtype=base_spec.dtype)
np.testing.assert_almost_equal(
sequential_spec.project(np.asarray([3.], np.float32)),
expected_result)
self.assertIsNotNone(sequential_spec.spec().name)
if __name__ == '__main__':
absltest.main()
| dm_robotics-main | py/agentflow/action_spaces_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Decorators for adding dynamic tuning of object properties.
```python
# Decorate with `register_class_properties` to allow registering tunable
# properties.
@register_class_properties
class Foo(object):
def __init__(self, ...):
# Call `register_dynamic_object` during init to tell registry that this
# object has tunables.
register_dynamic_object(self)
@property
  @register_property  # Label this property as a tunable getter.
def some_property(self):
return self._some_property
@some_property.setter
  @register_property  # Label this property as a tunable setter.
def some_property(self, val):
self._some_property = val
```
Follow this pattern for all objects you wish to expose for tuning.
----
To tune properties on all decorated classes add the following to your app main:
```python
gui = property_editor.PropertyEditor(poll_freq=1.)
gui.run()
```
"""
import inspect
from typing import Any, Dict, List, Text
import weakref
registered_properties = {} # type: Dict[type, Dict[Text, 'PropertyType']]
registered_objects = {} # type: Dict[type, 'DynamicObjects']
def overrides(interface):
"""Overrides decorator to annotate method overrides parent's."""
def overrider(method):
if not hasattr(interface, method.__name__):
raise Exception(
'method %s declared to be @override is not defined in %s' %
(method.__name__, interface.__name__))
return method
return overrider
class PropertyType(object):
"""Whether a property has a getter and setter."""
def __init__(self,
getter: bool = False,
setter: bool = False):
self.getter = getter
self.setter = setter
def __str__(self):
return self.__repr__()
def __repr__(self):
return 'PropertyType(setter={}, getter={})'.format(
self.setter, self.getter)
class DynamicObjects(object):
"""A container for a list of objects.
  DynamicObjects stores weak references to objects so that the cache reflects
  only the currently existing objects whose __init__ called
  `register_dynamic_object`.
"""
def __init__(self):
self._object_refs = []
def add_object(self, obj):
self._object_refs.append(weakref.ref(obj))
def get_objects(self) -> List[Any]:
all_objs = [obj_ref() for obj_ref in self._object_refs]
return [obj for obj in all_objs if obj is not None]
def __str__(self):
return self.__repr__()
def __repr__(self):
return 'DynamicObjects(objects={})'.format(self.get_objects())
def register_class_properties(leaf_cls):
"""Decorate a class with this in order to use @register_property.
This decorator will look for properties (setter or getter) on the provided
class and any of its parent classes that are decorated with
`register_property` and register them under this class.
Args:
leaf_cls: A class of some type.
Returns:
The class.
"""
classes_to_add = (leaf_cls,) + leaf_cls.__bases__
for cls in classes_to_add:
for name, method in cls.__dict__.items():
# Add property to registry as getter.
if (hasattr(method, 'fget') and
hasattr(method.fget, 'register_getter')) or hasattr(
method, 'register_getter'):
registered_properties.setdefault(leaf_cls, {})
registered_properties.get(leaf_cls).setdefault(name, PropertyType()) # pytype: disable=attribute-error
registered_properties.get(leaf_cls).get(name).getter = True # pytype: disable=attribute-error
# Add property to registry as setter.
if (hasattr(method, 'fset') and
hasattr(method.fset, 'register_setter')) or hasattr(
method, 'register_setter'):
registered_properties.setdefault(leaf_cls, {})
registered_properties.get(leaf_cls).setdefault(name, PropertyType()) # pytype: disable=attribute-error
registered_properties.get(leaf_cls).get(name).setter = True # pytype: disable=attribute-error
return leaf_cls
def register_property(func):
"""Adds a property to registered_properties. must appear after @property."""
if isinstance(func, property):
raise AssertionError('@register_property must be after @property')
argspec = inspect.getfullargspec(func) # pytype: disable=wrong-arg-types
if len(argspec.args) == 1:
func.register_getter = True
elif len(argspec.args) == 2:
func.register_setter = True
return func
def register_dynamic_object(obj):
"""Stores the provided object in the registry of dynamic objects.
This function should be called during __init__ on all objects that utilize
`register_property` to expose tunable properties.
Args:
obj: The `self` argument to the object to register.
"""
# Add obj reference to registered objects.
cls = obj.__class__
registered_objects.setdefault(cls, DynamicObjects())
registered_objs = registered_objects.get(cls)
assert registered_objs is not None
registered_objs.add_object(obj)
| dm_robotics-main | py/agentflow/decorators.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Core option implementations. See class comments for details."""
import abc
import copy
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple
from absl import logging
import dm_env
from dm_env import specs
from dm_robotics.agentflow import core
from dm_robotics.agentflow import spec_utils
from dm_robotics.agentflow.decorators import overrides
import numpy as np
OptionPterms = Iterable[Tuple[core.Option, float]]
def any_terminates(pterms: OptionPterms) -> float:
"""Get the probability of any option terminating from options P(terminate).
P(continue) for each option is calculated as `1.0 - pterm`.
The P(continue) values are multiplied together and then a P(terminate) is
returned as the remaining probability: `1.0 - P(continue)`
Args:
pterms: Iterable of (option, P(terminate)).
Returns:
Probability of any option terminating.
"""
# Calculate pcont from pterm.
pconts = [(opt, 1.0 - pterm) for opt, pterm in pterms]
# Log which options may cause termination (assumes binary pterms).
if logging.level_debug():
for option, pcont in pconts:
if pcont < 0.5:
logging.debug('Option %s P(cont): %f', option.name, pcont)
pcont_product = np.prod([pcont for _, pcont in pconts])
return 1 - pcont_product
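def _example_any_terminates_usage():
  """Illustrative sketch (not part of the original module): `any_terminates`.

  With two options whose pterms are both 0.5, P(continue) = 0.5 * 0.5 = 0.25,
  so the combined P(terminate) is 1 - 0.25 = 0.75. The `FixedOp`s below
  (defined later in this module) are placeholders used only as keys.
  """
  opt_a = FixedOp(np.zeros(1), name='a')
  opt_b = FixedOp(np.zeros(1), name='b')
  return any_terminates([(opt_a, 0.5), (opt_b, 0.5)])  # -> 0.75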
def all_terminate(pterms: OptionPterms) -> float:
"""Returns the probability of all options terminating.
Args:
pterms: Iterable of (option, P(terminate))
Returns:
The product of termination probabilities.
"""
return np.prod([pterm for _, pterm in pterms])
def options_terminate(*options: core.Option) -> Callable[[OptionPterms], float]:
"""Returns a callable that gives a termination probability.
The returned callable calculates the probability of all the given options
terminating.
This can be useful when creating a `ConcurrentOption` when you want more
control over the termination of that option. `any_terminates` and
`all_terminate` operate on *all* the options in the ConcurrentOption, this
function can be used to terminate when *specific* options terminate, E.g.
```
af.ConcurrentOption(
option_list=[move_left, move_right, hold_others],
termination=options_terminate(move_left, move_right))
```
In this case, the option will terminate when both move_left and move_right
want to terminate (their P(termination) values are combined), regardless of
`hold_others.pterm()`.
Args:
*options: The options to combine the P(termination) values of.
"""
def pterm(pterms: OptionPterms) -> float:
"""Returns product of input P(terminate) values."""
return np.prod([pterm for option, pterm in pterms if option in options])
return pterm
class FixedOp(core.Option):
"""A fixed action, that always succeeds and is valid everywhere.
The action it returns is fixed, whatever the input timestep is. Used when
building tasks that require an action placeholder (not in the tf-sense).
"""
def __init__(self,
action: np.ndarray,
num_steps: Optional[int] = 0,
name: Optional[str] = None):
"""Initialized FixedOp.
Args:
action: The action to return.
num_steps: The number of steps to run before requesting termination. If
None, pterm is always zero.
name: A name for this Option.
"""
super().__init__(name=name)
self._action = action
self._num_steps = num_steps
self._steps_remaining = num_steps or 0
@overrides(core.Option)
def step(self, timestep: dm_env.TimeStep) -> np.ndarray:
if timestep.first():
self._steps_remaining = self._num_steps or 0
self._steps_remaining -= 1
return self._action
  def pterm(self, timestep) -> float:
    if self._num_steps is None:
      return 0.0
    return float(self._steps_remaining <= 0)
def set_action(self, action: np.ndarray):
self._action = action
class RandomOption(core.Option):
"""An option that generates uniform random actions for a provided spec.
The pterm of this option is always zero.
"""
def __init__(self,
action_spec: specs.BoundedArray,
random_state: Optional[np.random.RandomState] = None,
name: Optional[str] = None):
"""Initializer.
Args:
action_spec: Expected output action specification.
random_state: Pseudo RNG state - optional.
name: A name for this Option.
"""
super().__init__(name=name)
self._action_spec = action_spec
self._random_state = random_state or np.random.RandomState()
@overrides(core.Option)
def step(self, timestep: dm_env.TimeStep) -> np.ndarray:
del timestep
return self._random_state.uniform(
low=self._action_spec.minimum,
high=self._action_spec.maximum,
size=self._action_spec.shape).astype(self._action_spec.dtype)
def pterm(self, timestep) -> float:
del timestep
return 0.
class ConcurrentOption(core.Option):
"""Container-option which steps multiple options in order on each step.
Use to implement concurrent behaviors or sequential processing pipelines,
e.g. injecting perceptual features and acting on them.
On each step() of the ConcurrentOption, all the options within the
ConcurrentOption are stepped once. Those options are stepped in the order
that they are passed to `__init__` so one can depend on the output of another.
Merging behaviour:
* Actions: The actions emitted by this option come from merging the output of
its constituent options. see `spec_utils.merge_primitives`
* Option arguments: This option creates an argument spec from the argument
specs of the options it's created from. On each step the arguments (if any)
for each sub-option are separated and passed to that option.
* Result: Termination reasons are 'escalated' so if any option returns a
termination type of FAILURE, the whole option returns this termination type.
The option's result values are combined in a list.
"""
def __init__(self,
options_list: Sequence[core.Option],
action_spec: specs.Array,
name: Optional[str] = None,
termination: Optional[Callable[[OptionPterms], float]] = None,
allow_nan_actions: bool = False):
"""ConcurrentOption constructor.
Args:
options_list: The options to run. They're stepped in this order.
action_spec: Expected output action specification.
name: Name of the option.
termination: Configures how the option terminates, `any_terminates` is the
default, which means that if any sub-option terminates this option will
terminate.
allow_nan_actions: Whether this option can emit actions with NaNs or not.
If this is False, the options within this option can still emit NaN
values, as long as there are no NaNs after those actions are combined
with `spec_utils.merge_primitives`.
Raises:
ValueError: If no options are given.
"""
if not options_list:
raise ValueError('options_list should have non-zero length')
super().__init__(name=name)
self._options_list = options_list
self._action_spec = action_spec
self._termination = termination or any_terminates
self._ignore_nans = allow_nan_actions
self._arg_spec = None # type: core.ArgSpec
self._child_args = {} # type: Dict[int, core.Arg]
@property
def options_list(self):
return self._options_list
def child_policies(self):
return self.options_list
@overrides(core.Option)
def arg_spec(self) -> core.ArgSpec:
"""Returns an argument specification for the option.
Returns:
The arg specs for each child/sub option, merged into one spec.
"""
if self._arg_spec:
return self._arg_spec
child_specs = []
for child in self._options_list:
child_spec = child.arg_spec()
if child_spec is not None:
if not isinstance(child_spec, specs.BoundedArray):
child_spec = specs.BoundedArray(
shape=child_spec.shape,
dtype=child_spec.dtype,
minimum=np.ones_like(child_spec.generate_value()) * -np.inf,
maximum=np.ones_like(child_spec.generate_value()) * np.inf)
child_specs.append(child_spec)
self._arg_spec = spec_utils.merge_specs(child_specs)
return self._arg_spec
def _get_child_arg(self, timestep: dm_env.TimeStep,
child_index: int) -> Optional[core.Arg]:
if child_index in self._child_args:
return self._child_args[child_index]
child_args = timestep.observation.get(self.arg_key, None)
if child_args is None:
return None
target_spec = self._options_list[child_index].arg_spec()
if target_spec is None:
return None
start_idx = 0
for i in range(child_index):
child_spec = self._options_list[i].arg_spec()
if child_spec is not None:
start_idx += child_spec.shape[0]
end_idx = start_idx + target_spec.shape[0]
self._child_args[child_index] = child_args[start_idx:end_idx]
return self._child_args[child_index]
def _get_child_timestep(self, timestep: dm_env.TimeStep,
child_index: int) -> dm_env.TimeStep:
child_arg = self._get_child_arg(timestep, child_index)
if child_arg is None:
return timestep
child = self._options_list[child_index]
child_observation = copy.copy(timestep.observation)
child_observation[child.arg_key] = child_arg
return timestep._replace(observation=child_observation)
@overrides(core.Policy)
def step(self, timestep: dm_env.TimeStep) -> np.ndarray:
actions = self._get_child_actions(timestep)
output_action = spec_utils.merge_primitives(actions)
spec_utils.validate(
self._action_spec, output_action, ignore_nan=self._ignore_nans)
return output_action
def _get_child_actions(self, timestep: dm_env.TimeStep) -> List[np.ndarray]:
actions = [] # type: List[np.ndarray]
for index, opt in enumerate(self._options_list):
actions.append(opt.step(self._get_child_timestep(timestep, index)))
return actions
@overrides(core.Option)
def on_selected(
self,
timestep: dm_env.TimeStep,
prev_option_result: Optional[core.OptionResult] = None) -> None:
for index, opt in enumerate(self._options_list):
opt_timestep = self._get_child_timestep(timestep, index)
opt.on_selected(opt_timestep, prev_option_result)
@overrides(core.Option)
def pterm(self, timestep: dm_env.TimeStep) -> float:
"""Calculate pterm from the termination condition."""
pterms = {} # type: Dict[core.Option, float]
for index, opt in enumerate(self._options_list):
opt_timestep = self._get_child_timestep(timestep, index)
pterms[opt] = opt.pterm(opt_timestep)
return self._termination(pterms.items())
@overrides(core.Option)
def result(self, timestep: dm_env.TimeStep) -> core.OptionResult:
"""Returns result and termination reason.
The returned termination_reason is the max of the termination_reason of the
options list. This model assumes that termination_reason is coded in order
of increasing priority, and takes advantage that `None` (the default when
not terminating) evaluates to min.
Args:
timestep: input timestep.
"""
termination_reason = None
result_data = []
for index, opt in enumerate(self._options_list):
opt_timestep = self._get_child_timestep(timestep, index)
result = opt.result(opt_timestep)
result_data.append(result.data)
if termination_reason is None:
termination_reason = result.termination_reason
else:
termination_reason = max(termination_reason, result.termination_reason)
return core.OptionResult(termination_reason, result_data)
@overrides(core.Option)
def render_frame(self, canvas) -> None:
for opt in self._options_list:
opt.render_frame(canvas)
class PolicyAdapter(core.Policy):
"""A policy that delegates `step` to a given object.
Used to up-class an arbitrary agent-like object to be usable as a `Policy`.
"""
def __init__(self, delegate: Any):
super().__init__()
self._delegate = delegate
def child_policies(self) -> Iterable[core.Policy]:
return [self._delegate]
def step(self, timestep: dm_env.TimeStep):
return self._delegate.step(timestep)
def render_frame(self, canvas) -> None:
# Pass-through `render_frame` call if available
if callable(getattr(self._delegate, 'render_frame', None)):
self._delegate.render_frame(canvas)
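# A minimal usage sketch for PolicyAdapter (illustrative; `my_agent` is an
# assumed stand-in for any object exposing `step(timestep)`):
#
#   policy = PolicyAdapter(my_agent)
#   action = policy.step(timestep)  # Forwards to my_agent.step(timestep).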
class OptionAdapter(core.Option):
"""An Option that delegates `step` to a given object.
Used to up-class an arbitrary agent-like object to be usable as an `Option`.
Note that this Option will never terminate.
"""
def __init__(self, delegate: Any):
super().__init__()
self._delegate = delegate
def child_policies(self) -> Iterable[core.Policy]:
return [self._delegate]
def step(self, timestep: dm_env.TimeStep):
return self._delegate.step(timestep)
def render_frame(self, canvas) -> None:
# Pass-through `render_frame` call if available
if callable(getattr(self._delegate, 'render_frame', None)):
self._delegate.render_frame(canvas)
class DelegateOption(core.Option, abc.ABC):
"""An Option that delegates all methods to a given option."""
def __init__(self, delegate: core.Option, name: Optional[str] = None):
super().__init__(name=name)
self._delegate = delegate # subclasses may overwrite, e.g. Cond.
    self._name = name  # Overrides the delegate's name; falls back if None.
@property
def name(self):
return self._name or self._delegate.name
def child_policies(self) -> Iterable[core.Policy]:
return [self._delegate]
@property
def key_prefix(self) -> str:
return self._delegate.key_prefix
@property
def arg_key(self) -> str:
return self._delegate.arg_key
def arg_spec(self) -> Optional[core.ArgSpec]:
return self._delegate.arg_spec()
def on_selected(self, timestep, prev_option_result=None):
self._delegate.on_selected(timestep, prev_option_result)
def step(self, timestep: dm_env.TimeStep) -> np.ndarray:
return self._delegate.step(timestep)
def pterm(self, timestep: dm_env.TimeStep) -> float:
return self._delegate.pterm(timestep)
def result(self, unused_timestep: dm_env.TimeStep) -> core.OptionResult:
return self._delegate.result(unused_timestep)
def render_frame(self, canvas):
self._delegate.render_frame(canvas)
@property
def delegate(self) -> core.Option:
return self._delegate
@delegate.setter
def delegate(self, delegate: core.Option):
self._delegate = delegate
def __str__(self):
return f'DelegateOption({str(self._delegate)})'
def __repr__(self):
return f'DelegateOption({repr(self._delegate)})'
class LambdaOption(DelegateOption):
"""An option which can wrap another option and invoke various callables.
The user can specify callables to be invoked at any of the following times:
1) When `on_selected` is called.
2) On every step()
3) On `pterm`, to override the termination signal from the delegate.
  The value returned by `on_selected_func` can be returned as the data in this
  option's `OptionResult` by setting `func_as_result=True`. In that case any
  result produced by the delegate is discarded, with a warning.
The action itself is delegated to the wrapped option, along with `pterm` and
`result` if not explicitly overridden by the appropriate callables.
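  Example (an illustrative sketch; `move_home` is an assumed delegate option):
  >>> option = LambdaOption(
  ...     delegate=move_home,
  ...     on_selected_func=lambda ts, res: 'selected-data',
  ...     func_as_result=True)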
"""
def __init__(self,
delegate: core.Option,
func_as_result: bool = False,
on_selected_func: Optional[Callable[
[dm_env.TimeStep, Optional[core.OptionResult]], Any]] = None,
on_step_func: Optional[Callable[[dm_env.TimeStep], Any]] = None,
pterm_func: Optional[Callable[[dm_env.TimeStep], float]] = None,
name: Optional[str] = None,
**kwargs) -> None:
"""Construct LambdaOption.
Args:
delegate: An option to delegate option behavior to.
func_as_result: If True, pack the output of `on_selected_func` in the
OptionResult.
on_selected_func: A callable to invoke when the option is selected.
on_step_func: A callable to invoke when the option is stepped.
pterm_func: Optional function which overrides the pterm of the delegate.
name: Name of the option.
**kwargs: Unused keyword arguments.
"""
super().__init__(delegate=delegate, name=name)
if on_selected_func is not None:
assert callable(on_selected_func)
if on_step_func is not None:
assert callable(on_step_func)
if pterm_func is not None:
assert callable(pterm_func)
self._on_selected_func = on_selected_func
self._on_step_func = on_step_func
self._pterm_func = pterm_func
self._func_as_result = func_as_result
self._func_output = None # type: Any
@overrides(core.Option)
def on_selected(
self,
timestep: dm_env.TimeStep,
prev_option_result: Optional[core.OptionResult] = None) -> None:
if self._on_selected_func is not None:
# Store result to return from `result`.
self._func_output = self._on_selected_func(timestep, prev_option_result)
return self._delegate.on_selected(timestep, prev_option_result)
def step(self, timestep: dm_env.TimeStep) -> np.ndarray:
if self._on_step_func is not None:
self._on_step_func(timestep)
return super().step(timestep)
@overrides(core.Option)
def result(self, timestep: dm_env.TimeStep) -> Optional[core.OptionResult]:
delegate_result = super().result(timestep)
if self._func_as_result:
if delegate_result is not None and delegate_result.data is not None:
logging.warning('Discarding delegate option result: %s',
delegate_result)
if isinstance(self._func_output, core.OptionResult):
        # This might happen in a refactoring, and is probably not the desired
        # behaviour, because the value would be wrapped in another
        # OptionResult.
logging.warning('Result is OptionResult - will re-wrap it.')
# Pack result into an OptionResult.
return core.OptionResult(
termination_reason=core.TerminationType.SUCCESS,
data=self._func_output)
else:
return delegate_result
@overrides(core.Option)
def pterm(self, timestep: dm_env.TimeStep) -> float:
if self._pterm_func is not None:
return self._pterm_func(timestep)
else:
return super().pterm(timestep)
class PadOption(DelegateOption):
"""An Option that applies an `ActionSpace` to another option.
This can be used to convert an action for part of the environment (e.g. the
gripper) into an action for the whole environment (e.g. arm and gripper).
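  Example (an illustrative sketch; `gripper_option` and `full_action_space`
  are assumed, the latter projecting a gripper action to the full spec):
  >>> padded = PadOption(gripper_option, action_space=full_action_space)
  >>> full_action = padded.step(timestep)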
"""
def __init__(self, delegate: core.Option, action_space: core.ActionSpace,
**kwargs):
super().__init__(delegate, **kwargs)
self._action_space = action_space
def step(self, timestep) -> np.ndarray:
action = self._delegate.step(timestep)
return self._action_space.project(action)
class ArgAdaptor(DelegateOption):
"""An option that adapts the argument for a wrapped option.
This is helpful when composing options with different arg_specs in a parent
that requires a consistent arg_spec for all children (see TensorflowMdpPolicy)
E.g.:
>>> base_spec = specs.Array(shape=(4,), dtype=np.float32)
>>> sub_spec = specs.Array(shape=(2,), dtype=np.float32)
  >>> op_with_spec = SomeOption()  # Expects sub_spec.
  >>> adapted_op = ArgAdaptor(  # Expects base_spec.
  ...     op_with_spec, base_spec, lambda arg: arg[:2])
  >>> adapted_op.step(timestep)  # op_with_spec will see the sliced arg.
"""
def __init__(self,
delegate: core.Option,
arg_spec: core.ArgSpec,
adaptor_func: Callable[[core.Arg], Optional[core.Arg]],
name: Optional[str] = None):
"""Initialize ArgAdaptor.
Args:
delegate: An option to delegate option behavior to.
arg_spec: An arg_spec for the context in which this option will run.
adaptor_func: A callable that takes an arg matching `arg_spec` and returns
an arg matching `delegate.arg_spec`.
name: Name of the option.
"""
super().__init__(delegate=delegate, name=name)
self._arg_spec = arg_spec
self._adaptor_func = adaptor_func
def arg_spec(self) -> Optional[core.ArgSpec]:
return self._arg_spec
def step(self, timestep: dm_env.TimeStep) -> np.ndarray:
"""Adapts the argument and steps the delegate with the modified argument."""
adapted_observation = copy.copy(timestep.observation)
child_spec = self.delegate.arg_spec()
if child_spec is None:
# Remove the arg.
if self.arg_key in timestep.observation:
adapted_observation.pop(self.arg_key)
else:
# Adapt to spec of delegate.
initial_arg = timestep.observation.get(self.arg_key, None)
adapted_arg = self._adaptor_func(initial_arg)
if adapted_arg is None:
raise ValueError(f'Delegate expects arg matching {child_spec} but '
'adaptor_func generated `None`.')
else:
spec_utils.validate(child_spec, adapted_arg)
adapted_observation[self.arg_key] = adapted_arg
timestep = timestep._replace(observation=adapted_observation)
return super().step(timestep)
| dm_robotics-main | py/agentflow/options/basic_options.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for basic_options."""
from typing import Callable, Text
from unittest import mock
from absl.testing import absltest
import dm_env
from dm_env import specs
from dm_robotics.agentflow import core
from dm_robotics.agentflow import spec_utils
from dm_robotics.agentflow import testing_functions
from dm_robotics.agentflow.options import basic_options
import numpy as np
class FixedOpTest(absltest.TestCase):
def test_action_returned(self):
expected_action = testing_functions.random_action()
timestep = mock.MagicMock()
num_steps = None
option = basic_options.FixedOp(expected_action, num_steps, 'test_fixed_op')
actual_action = option.step(timestep)
np.testing.assert_almost_equal(actual_action, expected_action)
self.assertEqual(option.pterm(timestep), 0.)
def test_termination(self):
random_action = testing_functions.random_action()
    timestep = mock.MagicMock()
    # Ensure the mock is not treated as a FIRST timestep, which would reset
    # the option's internal step counter on every step.
    timestep.first.return_value = False
    # If num_steps is None, the option should never terminate.
num_steps = None
option = basic_options.FixedOp(random_action, num_steps, 'test_fixed_op')
option.step(timestep)
self.assertEqual(option.pterm(timestep), 0.)
    # If num_steps is 0, it should request termination immediately, even
    # before the first step.
num_steps = 0
option = basic_options.FixedOp(random_action, num_steps, 'test_fixed_op')
self.assertEqual(option.pterm(timestep), 1.)
    # If num_steps = n, it should terminate after the nth step.
num_steps = 5
option = basic_options.FixedOp(random_action, num_steps, 'test_fixed_op')
for i in range(num_steps):
option.step(timestep)
      expected_pterm = 1. if i == num_steps - 1 else 0.
self.assertEqual(option.pterm(timestep), expected_pterm)
def test_set_action(self):
expected_action1 = testing_functions.random_action()
timestep = mock.MagicMock()
num_steps = None
option = basic_options.FixedOp(expected_action1, num_steps, 'test_fixed_op')
actual_action1 = option.step(timestep)
np.testing.assert_almost_equal(actual_action1, expected_action1)
expected_action2 = testing_functions.random_action()
option.set_action(expected_action2)
actual_action2 = option.step(timestep)
np.testing.assert_almost_equal(actual_action2, expected_action2)
class RandomOptionTest(absltest.TestCase):
def test_action_returned(self):
action_spec = testing_functions.random_array_spec()
timestep = mock.MagicMock()
option = basic_options.RandomOption(action_spec)
for _ in range(5):
output_action = option.step(timestep)
spec_utils.validate(action_spec, output_action)
self.assertEqual(option.pterm(timestep), 0.)
class LambdaOptionsTest(absltest.TestCase):
def test_delegation(self):
delegate = mock.MagicMock(spec=core.Option)
option = basic_options.LambdaOption(
on_selected_func=lambda timestep, prev_result: True,
func_as_result=True,
delegate=delegate)
timestep = mock.MagicMock()
previous_result = mock.MagicMock()
option.on_selected(timestep, previous_result)
delegate.on_selected.assert_called_with(timestep, previous_result)
delegate.pterm.return_value = 0.4
self.assertEqual(option.pterm(timestep), 0.4)
delegate.pterm.assert_called_with(timestep)
# For a LambdaOption if `func_as_result=True` we discard the delegate's
# result and replace it with the result of the function.
discarded_result = core.OptionResult(
termination_reason=core.TerminationType.SUCCESS, data='random data')
delegate.result.return_value = discarded_result
expected_result = core.OptionResult(
termination_reason=core.TerminationType.SUCCESS, data=True)
self.assertNotEqual(option.result(timestep), discarded_result)
self.assertEqual(option.result(timestep), expected_result)
delegate.result.assert_called_with(timestep)
def test_result(self):
delegate = mock.MagicMock(spec=core.Option)
lambda_result = 'lambda result'
option = basic_options.LambdaOption(
on_selected_func=lambda timestep, prev_result: lambda_result,
func_as_result=True,
delegate=delegate)
timestep = mock.MagicMock()
previous_result = mock.MagicMock()
delegate.result.return_value = core.OptionResult(
termination_reason=core.TerminationType.SUCCESS,
data='Delegate result should be ignored')
delegate.pterm.return_value = 1.0
option.on_selected(timestep, previous_result)
option.step(timestep)
pterm = option.pterm(timestep)
self.assertEqual(pterm, 1.0)
result = option.result(timestep)
self.assertEqual(result.data, lambda_result)
def test_callables_invoked(self):
delegate = mock.MagicMock(spec=core.Option)
on_selected_func = mock.MagicMock()
on_step_func = mock.MagicMock()
pterm_func = mock.MagicMock()
option = basic_options.LambdaOption(
on_selected_func=on_selected_func,
on_step_func=on_step_func,
pterm_func=pterm_func,
delegate=delegate)
timestep = mock.MagicMock()
previous_result = mock.MagicMock()
option.on_selected(timestep, previous_result)
on_selected_func.assert_called_with(timestep, previous_result)
option.step(timestep)
on_step_func.assert_called_with(timestep)
option.pterm(timestep)
pterm_func.assert_called_with(timestep)
class ConcurrentOptionTest(absltest.TestCase):
def assert_timestep(self, expected: dm_env.TimeStep, actual: dm_env.TimeStep):
self.assertIs(expected.step_type, actual.step_type)
np.testing.assert_almost_equal(expected.discount, actual.discount)
np.testing.assert_almost_equal(expected.reward, actual.reward)
testing_functions.assert_value(expected.observation, actual.observation)
def test_action_merging(self):
spec = specs.BoundedArray(
shape=(2,),
dtype=np.float32,
minimum=[0, 0],
maximum=[1, 1],
name='spec')
value_a = np.asarray([np.nan, 0.2], dtype=np.float32)
value_b = np.asarray([0.1, np.nan], dtype=np.float32)
option = basic_options.ConcurrentOption(
options_list=[(basic_options.FixedOp(action=value_a)),
(basic_options.FixedOp(action=value_b))],
action_spec=spec)
expected_action = np.asarray([0.1, 0.2], dtype=np.float32)
merged_action = option.step(_timestep_with_no_values())
testing_functions.assert_value(merged_action, expected_action)
def test_action_merging_with_empty_actions(self):
spec = specs.BoundedArray(
shape=(2,),
dtype=np.float32,
minimum=[0, 0],
maximum=[1, 1],
name='spec')
value_a = np.asarray([0.1, 0.2], dtype=np.float32)
value_b = np.asarray([np.nan, np.nan], dtype=np.float32)
option = basic_options.ConcurrentOption(
options_list=[(basic_options.FixedOp(action=value_a)),
(basic_options.FixedOp(action=value_b))],
action_spec=spec)
expected_action = np.asarray([0.1, 0.2], dtype=np.float32)
merged_action = option.step(_timestep_with_no_values())
testing_functions.assert_value(merged_action, expected_action)
def test_action_emitting_nans(self):
spec = specs.BoundedArray(
shape=(2,),
dtype=np.float32,
minimum=[0, 0],
maximum=[1, 1],
name='spec')
value_a = np.asarray([np.nan, 0.2], dtype=np.float32)
value_b = np.asarray([np.nan, np.nan], dtype=np.float32)
option = basic_options.ConcurrentOption(
options_list=[(basic_options.FixedOp(action=value_a)),
(basic_options.FixedOp(action=value_b))],
action_spec=spec,
allow_nan_actions=True)
expected_action = np.asarray([np.nan, 0.2], dtype=np.float32)
merged_action = option.step(_timestep_with_no_values())
testing_functions.assert_value(merged_action, expected_action)
def test_pterm(self):
spec_a, value_a = _rand_spec_and_value(shape=(1,), dtype=np.float32)
spec_b, value_b = _rand_spec_and_value(shape=(2,), dtype=np.float32)
overall_spec = testing_functions.composite_spec(spec_a, spec_b)
option = basic_options.ConcurrentOption(
options_list=[(testing_functions.SpyOp(value=value_a, pterm=0.2)),
(testing_functions.SpyOp(value=value_b, pterm=0.5))],
action_spec=overall_spec)
pterm = option.pterm(_timestep_with_no_values())
np.testing.assert_almost_equal(0.6, pterm)
def test_pterm_function_invocation(self):
spec_a, value_a = _rand_spec_and_value(shape=(1,), dtype=np.float32)
spec_b, value_b = _rand_spec_and_value(shape=(2,), dtype=np.float32)
overall_spec = testing_functions.composite_spec(spec_a, spec_b)
option1 = testing_functions.SpyOp(value=value_a, pterm=0.2)
option2 = testing_functions.SpyOp(value=value_b, pterm=0.5)
actual_pterms = []
def custom_termination_function(
pterms: basic_options.OptionPterms) -> float:
actual_pterms.extend(pterms)
return 0.9
option = basic_options.ConcurrentOption(
options_list=[option1, option2],
action_spec=overall_spec,
termination=custom_termination_function)
pterm = option.pterm(_timestep_with_no_values())
np.testing.assert_almost_equal(0.9, pterm)
expected_pterms = [(option1, 0.2), (option2, 0.5)]
actual_pterms_sorted = sorted(actual_pterms, key=lambda opt: opt[0].uid)
expected_pterms_sorted = sorted(expected_pterms, key=lambda opt: opt[0].uid)
self.assertEqual(actual_pterms_sorted, expected_pterms_sorted)
def test_any_terminates(self):
option = basic_options.FixedOp(np.random.random(size=(2,)))
self.assertEqual(1.0, basic_options.any_terminates([(option, 1.0)]))
self.assertEqual(
1.0, basic_options.any_terminates([(option, 0.0), (option, 1.0)]))
self.assertEqual(
0.0, basic_options.any_terminates([(option, 0.0), (option, 0.0)]))
np.testing.assert_almost_equal(
0.3, basic_options.any_terminates([(option, 0.0), (option, 0.3)]))
np.testing.assert_almost_equal(
0.64, basic_options.any_terminates([(option, 0.4), (option, 0.4)]))
def test_all_terminate(self):
option = basic_options.FixedOp(np.random.random(size=(2,)))
self.assertEqual(1.0, basic_options.all_terminate([(option, 1.0)]))
self.assertEqual(0.0, basic_options.all_terminate([(option, 0.0)]))
self.assertEqual(
0.25, basic_options.all_terminate([(option, 0.5), (option, 0.5)]))
def test_options_terminate(self):
option1 = basic_options.FixedOp(np.random.random(size=(2,)))
option2 = basic_options.FixedOp(np.random.random(size=(2,)))
option3 = basic_options.FixedOp(np.random.random(size=(2,)))
o1_terminates = basic_options.options_terminate(option1)
o1_o2_terminates = basic_options.options_terminate(option1, option2)
# o1_terminates should ignore other options
self.assertEqual(1.0, o1_terminates([(option1, 1.0)]))
self.assertEqual(0.3, o1_terminates([(option1, 0.3), (option2, 1.0)]))
# o1_o2_terminates should return the product of option1 and option2 pterms.
self.assertEqual(0.6, o1_o2_terminates([(option1, 0.6)]))
self.assertEqual(0.36, o1_o2_terminates([(option1, 0.6), (option2, 0.6)]))
self.assertEqual(
0.36, o1_o2_terminates([
(option1, 0.6),
(option2, 0.6),
(option3, 0.6),
]))
def test_result_overall_failure_if_one_fails(self):
# The result of a ConcurrentOption is a list of result values,
# There should be a single termination reason - which ever is the 'worst'
# termination reason. I.e. if one option failed the whole thing failed.
spec_1, value_1 = _rand_spec_and_value(shape=(1,), dtype=np.float32)
spec_2, value_2 = _rand_spec_and_value(shape=(2,), dtype=np.float32)
overall_spec = testing_functions.composite_spec(spec_1, spec_2)
result_1 = core.OptionResult(
termination_reason=core.TerminationType.SUCCESS, data='data_1')
result_2 = core.OptionResult(
termination_reason=core.TerminationType.FAILURE, data='data_2')
option_1 = testing_functions.SpyOp(value=value_1, result=result_1)
option_2 = testing_functions.SpyOp(value=value_2, result=result_2)
option = basic_options.ConcurrentOption(
options_list=[option_1, option_2], action_spec=overall_spec)
result = option.result(_timestep_with_no_values())
self.assertIs(result.termination_reason, core.TerminationType.FAILURE)
self.assertIsInstance(result.data, list)
self.assertListEqual(result.data, ['data_1', 'data_2'])
def test_result_successful(self):
spec_1, value_1 = _rand_spec_and_value(shape=(1,), dtype=np.float32)
spec_2, value_2 = _rand_spec_and_value(shape=(2,), dtype=np.float32)
overall_spec = testing_functions.composite_spec(spec_1, spec_2)
result_1 = core.OptionResult(
termination_reason=core.TerminationType.SUCCESS, data='data_1')
result_2 = core.OptionResult(
termination_reason=core.TerminationType.SUCCESS, data='data_2')
option_1 = testing_functions.SpyOp(value=value_1, result=result_1)
option_2 = testing_functions.SpyOp(value=value_2, result=result_2)
option = basic_options.ConcurrentOption(
options_list=[option_1, option_2], action_spec=overall_spec)
result = option.result(_timestep_with_no_values())
self.assertIs(result.termination_reason, core.TerminationType.SUCCESS)
self.assertIsInstance(result.data, list)
self.assertListEqual(result.data, ['data_1', 'data_2'])
def test_child_timesteps(self):
# Child options should have the observation in their timestep altered
# to include their arg, if it is present in the input observation.
# This test runs through the methods that are supposed to do this.
spec = specs.BoundedArray(
shape=(2,),
dtype=np.float32,
minimum=[0, 0],
maximum=[1, 1],
name='spec')
value_a = np.asarray([np.nan, 0.2], dtype=np.float32)
value_b = np.asarray([0.1, np.nan], dtype=np.float32)
arg_spec_1, arg_1 = _rand_spec_and_value(shape=(3,))
arg_spec_2, arg_2 = _rand_spec_and_value(shape=(4,))
option_1 = testing_functions.SpyOp(value=value_a, arg_spec=arg_spec_1)
option_2 = testing_functions.SpyOp(value=value_b, arg_spec=arg_spec_2)
option = basic_options.ConcurrentOption(
options_list=[option_1, option_2], action_spec=spec)
observation_with_args = {option.arg_key: np.concatenate([arg_1, arg_2])}
parent_timestep = dm_env.TimeStep(
step_type=np.random.choice(list(dm_env.StepType)),
reward=np.random.random(),
discount=np.random.random(),
observation=observation_with_args)
expected_timestep_1 = dm_env.TimeStep(
step_type=parent_timestep.step_type,
reward=parent_timestep.reward,
discount=parent_timestep.discount,
observation={
option.arg_key: np.concatenate([arg_1, arg_2]),
option_1.arg_key: arg_1
},
)
expected_timestep_2 = dm_env.TimeStep(
step_type=parent_timestep.step_type,
reward=parent_timestep.reward,
discount=parent_timestep.discount,
observation={
option.arg_key: np.concatenate([arg_1, arg_2]),
option_2.arg_key: arg_2
},
)
option.on_selected(parent_timestep)
self.assert_timestep(expected_timestep_1, option_1.timestep)
self.assert_timestep(expected_timestep_2, option_2.timestep)
option_1.clear_timesteps()
option.step(parent_timestep)
self.assert_timestep(expected_timestep_1, option_1.timestep)
self.assert_timestep(expected_timestep_2, option_2.timestep)
option_1.clear_timesteps()
option.pterm(parent_timestep)
self.assert_timestep(expected_timestep_1, option_1.timestep)
self.assert_timestep(expected_timestep_2, option_2.timestep)
option_1.clear_timesteps()
option.result(parent_timestep)
self.assert_timestep(expected_timestep_1, option_1.timestep)
self.assert_timestep(expected_timestep_2, option_2.timestep)
option_1.clear_timesteps()
def test_arg_spec(self):
spec_a, val_a = _rand_spec_and_value(shape=(1,), dtype=np.float32)
spec_b, val_b = _rand_spec_and_value(shape=(2,), dtype=np.float32)
overall_spec = testing_functions.composite_spec(spec_a, spec_b)
arg_spec_a = testing_functions.random_array_spec(
shape=(3,), dtype=np.float32)
arg_spec_b = testing_functions.random_array_spec(
shape=(4,), dtype=np.float32)
option_1 = testing_functions.SpyOp(value={'A': val_a}, arg_spec=arg_spec_a)
option_2 = testing_functions.SpyOp(value={'B': val_b}, arg_spec=arg_spec_b)
option = basic_options.ConcurrentOption(
options_list=[option_1, option_2], action_spec=overall_spec)
testing_functions.assert_spec(
        option.arg_spec(),
spec_utils.merge_specs([option_1.arg_spec(), option_2.arg_spec()]))
class DelegateOptionTest(absltest.TestCase):
def testDelegateOption(self):
base = basic_options.FixedOp(np.arange(2))
delegate1 = TrivialDelegateOption(base)
delegate2 = TrivialDelegateOption(base)
# uid is delegated, BUT the delegate is not the thing it delegates to
# and therefore it is not (and shouldn't be) considered equal.
self.assertNotEqual(delegate1.uid, base.uid)
self.assertNotEqual(delegate2.uid, base.uid)
# Check __eq__
self.assertIsNot(delegate1, delegate2)
self.assertNotEqual(delegate1, delegate2)
# Check __hash__
self.assertNotEqual(hash(delegate1), hash(delegate2))
class TrivialDelegateOption(basic_options.DelegateOption):
def step(self, timestep):
return np.arange(2)
class FakeActionSpace(core.ActionSpace[core.Spec]):
def __init__(self, func: Callable[[np.ndarray], np.ndarray]):
super().__init__()
self._func = func
@property
def name(self) -> Text:
return 'FakeActionSpace'
def spec(self) -> core.Spec:
raise NotImplementedError()
def project(self, action: np.ndarray) -> np.ndarray:
return self._func(action)
class PadOptionTest(absltest.TestCase):
def testRestructureOutput(self):
value_from_base = np.arange(2)
value_from_padded = np.arange(3)
def adjuster(value: np.ndarray) -> np.ndarray:
np.testing.assert_almost_equal(value, value_from_base)
return value_from_padded
base = basic_options.FixedOp(value_from_base)
padded = basic_options.PadOption(
base, action_space=FakeActionSpace(adjuster))
action = padded.step(timestep=_timestep_with_no_values())
np.testing.assert_almost_equal(action, value_from_padded)
def testArgSpec(self):
expected_arg_spec = specs.Array(
shape=(2, 2), dtype=np.float32, name='expected_arg_spec')
action_spec = specs.Array(shape=(3,), dtype=np.float32, name='action_spec')
class HasSpec(basic_options.FixedOp):
def arg_spec(self):
return expected_arg_spec
base = HasSpec(testing_functions.valid_value(action_spec))
padded = basic_options.PadOption(
base, action_space=core.IdentityActionSpace(action_spec))
self.assertEqual(padded.arg_spec(), expected_arg_spec)
class ArgAdaptorTest(absltest.TestCase):
def testArgToNone(self):
fixed_action = np.arange(2)
random_arg_spec, random_arg = _rand_spec_and_value((4,))
op_without_arg = mock.MagicMock(spec=basic_options.FixedOp)
op_without_arg.arg_spec.return_value = None
type(op_without_arg).arg_key = mock.PropertyMock(
return_value='op_without_arg_key')
op_without_arg.step.return_value = fixed_action
adaptor_func = mock.Mock()
adaptor_func.return_value = None
adapted_op_without_arg = basic_options.ArgAdaptor(
op_without_arg, random_arg_spec, adaptor_func=adaptor_func)
observation = {'op_without_arg_key': random_arg}
timestep = testing_functions.random_timestep(observation=observation)
adapted_op_without_arg.step(timestep)
timestep_without_arg = timestep._replace(observation={})
op_without_arg.step.assert_called_with(timestep_without_arg)
self.assertEqual(adaptor_func.call_count, 0)
def testArgToOtherArg(self):
fixed_action = np.arange(2)
parent_arg_spec, parent_arg = _rand_spec_and_value((4,))
adapted_arg_spec, adapted_arg = _rand_spec_and_value((2,))
op_with_arg = mock.MagicMock(spec=basic_options.FixedOp)
op_with_arg.arg_spec.return_value = adapted_arg_spec
type(op_with_arg).arg_key = mock.PropertyMock(
return_value='op_with_arg_key')
op_with_arg.step.return_value = fixed_action
adaptor_func = mock.Mock()
adaptor_func.return_value = adapted_arg
adapted_op_with_arg = basic_options.ArgAdaptor(
op_with_arg, parent_arg_spec, adaptor_func=adaptor_func)
parent_observation = {'op_with_arg_key': parent_arg}
timestep = testing_functions.random_timestep(observation=parent_observation)
adapted_op_with_arg.step(timestep)
timestep_with_replaced_arg = timestep._replace(
observation={'op_with_arg_key': adapted_arg})
op_with_arg.step.assert_called_with(timestep_with_replaced_arg)
adaptor_func.assert_called_with(parent_arg)
def _rand_spec_and_value(shape, dtype=None):
spec = testing_functions.random_array_spec(shape=shape, dtype=dtype)
return spec, testing_functions.valid_value(spec)
def _timestep_with_no_values():
return dm_env.TimeStep(
step_type=(np.random.choice(list(dm_env.StepType))),
reward=np.random.random(),
discount=np.random.random(),
observation={})
if __name__ == '__main__':
absltest.main()
| dm_robotics-main | py/agentflow/options/basic_options_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for timestep_preprocessor."""
import functools
from unittest import mock
from absl.testing import absltest
import dm_env
from dm_robotics.agentflow import spec_utils
from dm_robotics.agentflow import testing_functions
from dm_robotics.agentflow.preprocessors import timestep_preprocessor as tsp
import tree
ALWAYS = tsp.ValidationFrequency.ALWAYS
NEVER = tsp.ValidationFrequency.NEVER
ONCE = tsp.ValidationFrequency.ONCE
ONCE_PER_EPISODE = tsp.ValidationFrequency.ONCE_PER_EPISODE
FIRST = dm_env.StepType.FIRST
LAST = dm_env.StepType.LAST
MID = dm_env.StepType.MID
class CompositeTimestepPreprocessorTest(absltest.TestCase):
def test_empty_preprocessor_list_gives_noop_preprocessor(self):
# Check that the preprocessor doesn't change the spec
input_spec = testing_functions.random_timestep_spec()
preprocessor = tsp.CompositeTimestepPreprocessor()
output_spec = preprocessor.setup_io_spec(input_spec)
testing_functions.assert_spec(input_spec, output_spec)
# Check that the preprocessor does not modify the timestep
timestep = testing_functions.random_timestep(input_spec)
input_timestep = (
tsp.PreprocessorTimestep.from_environment_timestep(
timestep, pterm=0.1))
output_timestep = preprocessor.process(input_timestep)
testing_functions.assert_timestep(input_timestep.to_environment_timestep(),
output_timestep.to_environment_timestep())
@mock.patch.object(spec_utils, 'validate_observation', autospec=True)
def test_validation_frequency_controls_calls_to_spec_utils_validate(
self, validate_obs_mock):
input_spec = testing_functions.random_timestep_spec()
timestep = testing_functions.random_timestep(input_spec)
input_timestep = (
tsp.PreprocessorTimestep.from_environment_timestep(timestep, pterm=0.1))
with self.subTest('once_checks_only_once'):
processor = tsp.CompositeTimestepPreprocessor(validation_frequency=ONCE)
processor.setup_io_spec(input_spec)
processor.process(input_timestep._replace(step_type=FIRST))
validate_obs_mock.assert_called()
validate_obs_mock.reset_mock()
processor.process(input_timestep._replace(step_type=MID))
validate_obs_mock.assert_not_called()
processor.process(input_timestep._replace(step_type=LAST))
validate_obs_mock.assert_not_called()
processor.process(input_timestep._replace(step_type=FIRST))
validate_obs_mock.assert_not_called()
processor.process(input_timestep._replace(step_type=MID))
validate_obs_mock.assert_not_called()
with self.subTest('once_per_episode_only_checks_on_first_ts'):
validate_obs_mock.reset_mock()
processor = tsp.CompositeTimestepPreprocessor(
validation_frequency=ONCE_PER_EPISODE)
processor.setup_io_spec(input_spec)
processor.process(input_timestep._replace(step_type=FIRST))
validate_obs_mock.assert_called()
validate_obs_mock.reset_mock()
processor.process(input_timestep._replace(step_type=MID))
validate_obs_mock.assert_not_called()
processor.process(input_timestep._replace(step_type=LAST))
validate_obs_mock.assert_not_called()
processor.process(input_timestep._replace(step_type=FIRST))
validate_obs_mock.assert_called()
validate_obs_mock.reset_mock()
processor.process(input_timestep._replace(step_type=MID))
validate_obs_mock.assert_not_called()
with self.subTest('never_checks'):
validate_obs_mock.reset_mock()
processor = tsp.CompositeTimestepPreprocessor(validation_frequency=NEVER)
processor.setup_io_spec(input_spec)
processor.process(input_timestep._replace(step_type=FIRST))
validate_obs_mock.assert_not_called()
processor.process(input_timestep._replace(step_type=MID))
validate_obs_mock.assert_not_called()
processor.process(input_timestep._replace(step_type=LAST))
validate_obs_mock.assert_not_called()
processor.process(input_timestep._replace(step_type=FIRST))
validate_obs_mock.assert_not_called()
with self.subTest('always_checks'):
validate_obs_mock.reset_mock()
processor = tsp.CompositeTimestepPreprocessor(validation_frequency=ALWAYS)
processor.setup_io_spec(input_spec)
processor.process(input_timestep._replace(step_type=FIRST))
validate_obs_mock.assert_called()
validate_obs_mock.reset_mock()
processor.process(input_timestep._replace(step_type=MID))
validate_obs_mock.assert_called()
validate_obs_mock.reset_mock()
processor.process(input_timestep._replace(step_type=LAST))
validate_obs_mock.assert_called()
validate_obs_mock.reset_mock()
processor.process(input_timestep._replace(step_type=FIRST))
validate_obs_mock.assert_called()
validate_obs_mock.reset_mock()
def test_as_list_allows_tree_traversal(self):
# Tests that we can create a nested CompositeTimestepPreprocessor and use
# the `as_list` mechanism to visit all processors.
class DummyPreprocessor(tsp.TimestepPreprocessor):
"""Dummy processor which passes `np.prod(spec.shape) > 128` check."""
def _process_impl(
self,
timestep: tsp.PreprocessorTimestep) -> tsp.PreprocessorTimestep:
return timestep
def _output_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
return input_spec
leaf_processors = [
mock.MagicMock(wraps=DummyPreprocessor()),
mock.MagicMock(wraps=DummyPreprocessor()),
mock.MagicMock(wraps=DummyPreprocessor())
]
middle_preprocessor = tsp.CompositeTimestepPreprocessor(*leaf_processors)
middle_preprocessor = mock.MagicMock(wraps=middle_preprocessor)
top_preprocessor = tsp.CompositeTimestepPreprocessor(middle_preprocessor)
top_preprocessor = mock.MagicMock(wraps=top_preprocessor)
# Disable validation for entire processor.
def set_validation_frequency(proc, freq):
proc.set_validation_frequency(freq)
_ = tree.map_structure(
functools.partial(set_validation_frequency, freq=NEVER),
top_preprocessor.as_list())
# Verify all validation is disabled.
expected_validation_frequency_flattened = [NEVER] * 5
actual_validation_frequency_flattened = [
p.validation_frequency for p in tree.flatten(top_preprocessor.as_list())
]
self.assertSequenceEqual(actual_validation_frequency_flattened,
expected_validation_frequency_flattened)
# Verify the structure is preserved when traversing with `map_structure`
actual_processor_names = tree.map_structure(lambda p: p.name,
top_preprocessor.as_list())
expected_processor_names = [
'CompositeTimestepPreprocessor',
[[
'CompositeTimestepPreprocessor',
[['DummyPreprocessor'], ['DummyPreprocessor'],
['DummyPreprocessor']]
]]
]
self.assertSequenceEqual(actual_processor_names, expected_processor_names)
if __name__ == '__main__':
absltest.main()
| dm_robotics-main | py/agentflow/preprocessors/timestep_preprocessor_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Timestep preprocessors.
Preprocessors exist to transform observations, define termination conditions
and define reward functions.
"""
import abc
import enum
from typing import NamedTuple, Optional, Text, Union, Sequence
import dm_env
from dm_robotics.agentflow import core
from dm_robotics.agentflow import spec_utils
from dm_robotics.agentflow.decorators import overrides
import numpy as np
# Internal profiling
TimestepPreprocessorTree = Sequence[Union['TimestepPreprocessor',
Sequence['TimestepPreprocessorTree']]]
class PreprocessorTimestep(NamedTuple):
"""Timestep type for subtasks.
This timestep is equivalent to `dm_env.TimeStep`, but also has pterm and
OptionResult. This allows us to use Timestep preprocessors to handle
termination and exit-status.
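  Example round-trip (names here are illustrative):
  >>> pts = PreprocessorTimestep.from_environment_timestep(env_ts, pterm=0.)
  >>> env_ts_again = pts.to_environment_timestep()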
"""
step_type: dm_env.StepType
reward: Union[np.floating, np.ndarray]
discount: np.float32
observation: spec_utils.ObservationValue
pterm: float
result: Optional[core.OptionResult]
@classmethod
def from_environment_timestep(
cls,
environment_timestep: dm_env.TimeStep,
pterm: float = 0.,
result: Optional[core.OptionResult] = None) -> 'PreprocessorTimestep':
return cls(
step_type=environment_timestep.step_type,
reward=environment_timestep.reward,
discount=environment_timestep.discount,
observation=environment_timestep.observation,
pterm=pterm,
result=result)
def to_environment_timestep(self) -> dm_env.TimeStep:
return dm_env.TimeStep(
step_type=self.step_type,
reward=self.reward,
discount=self.discount,
observation=self.observation)
def first(self) -> bool:
return self.step_type == dm_env.StepType.FIRST
def mid(self) -> bool:
return self.step_type == dm_env.StepType.MID
def last(self) -> bool:
return self.step_type == dm_env.StepType.LAST
def replace(self, **kwargs) -> 'PreprocessorTimestep':
return self._replace(**kwargs)
@enum.unique
class ValidationFrequency(enum.Enum):
"""Determines how often a TimestepPreprocessor should validate specs.
Calling `spec_utils.validate()` can be expensive, so users should tune how
often they would like to check the timestep specs.
"""
# Check the specs once during the lifetime of the TimestepPreprocessor.
ONCE = 0
# Check the specs once per episode.
ONCE_PER_EPISODE = 1
# Never check the specs.
NEVER = 2
# Always check the specs each time process() is called.
ALWAYS = 3
class TimestepPreprocessor(abc.ABC):
"""Instances of this class update values in time steps.
They can change observations (add, remove or modify), discount, reward and
termination probability.
Implementations should reset any state when a timestep is presented to them
with a step_type of FIRST.
"""
def __init__(
self,
validation_frequency: ValidationFrequency = (
ValidationFrequency.ONCE_PER_EPISODE),
name: Optional[str] = None):
self._in_spec = None # type: spec_utils.TimeStepSpec
self._out_spec = None # type: spec_utils.TimeStepSpec
self._validation_freq = validation_frequency
self._name = name or self.__class__.__name__
self._validated_specs = False
def process(self, input_ts: PreprocessorTimestep) -> PreprocessorTimestep:
"""Process the timestep.
Args:
input_ts: Input timestep
Returns:
processed timestep
This should not be overridden in subclasses.
"""
output_ts = self._process_impl(input_ts)
if self._should_validate(input_ts):
# Make sure all the required keys are present and have the correct specs
# Ignore the extra keys in the input and output timesteps.
      self._validate(self._in_spec, input_ts, 'input timestep')
      self._validate(self._out_spec, output_ts, 'output timestep')
self._validated_specs = True
return output_ts
def setup_io_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
"""Setup the input and output specs.
Args:
input_spec: Input timestep spec
Returns:
Timestep spec of processed output.
This should not be overridden in subclasses.
"""
if self._in_spec or self._out_spec:
raise ValueError('Specs already setup')
self._in_spec = input_spec
self._out_spec = self._output_spec(input_spec)
return self._out_spec
def get_input_spec(self) -> spec_utils.TimeStepSpec:
"""Input spec getter."""
return self._in_spec
def get_output_spec(self) -> spec_utils.TimeStepSpec:
"""Output spec getter."""
return self._out_spec
def _should_validate(self, timestep: PreprocessorTimestep) -> bool:
"""Returns whether or not to validate the specs."""
if self._validation_freq == ValidationFrequency.ALWAYS:
return True
if (self._validation_freq == ValidationFrequency.ONCE_PER_EPISODE and
(timestep.first() or not self._validated_specs)):
return True
if (self._validation_freq == ValidationFrequency.ONCE and
not self._validated_specs):
return True
return False
def _validate(self, spec: spec_utils.TimeStepSpec,
timestep: PreprocessorTimestep, message: Text):
"""Validate the observation against the environment specs."""
failure_msg = '{} failed validation for {} preprocessor'.format(
message, type(self))
# We allow the timesteps from demonstrations to have extra keys compared to
# the environment.
# E.g we have collected demos with cameras but want to train a proprio agent
# only (i.e. the environment has no more cameras)
spec_utils.validate_observation(spec.observation_spec, timestep.observation,
check_extra_keys=False, msg=failure_msg)
spec_utils.validate(spec.reward_spec, timestep.reward, ignore_nan=True,
msg=failure_msg)
spec_utils.validate(spec.discount_spec, timestep.discount, msg=failure_msg)
@abc.abstractmethod
def _process_impl(self,
timestep: PreprocessorTimestep) -> PreprocessorTimestep:
raise NotImplementedError('This should be overridden.')
@abc.abstractmethod
def _output_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
raise NotImplementedError('This should be overridden.')
@property
def validation_frequency(self) -> ValidationFrequency:
return self._validation_freq
@property
def name(self) -> str:
return self._name
def set_validation_frequency(
self, validation_frequency: ValidationFrequency) -> None:
"""Sets the validation frequency of the preprocessor."""
self._validation_freq = validation_frequency
def as_list(self) -> TimestepPreprocessorTree:
"""Returns a list containing the processor and any child processors.
Child-classes implementing TimestepPreprocessor containers should implement
their own `as_list` method which includes the processor itself and all
children.
"""
return [self]
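# A minimal subclass sketch (illustrative only): concrete preprocessors
# override `_process_impl` and `_output_spec`; `process` and `setup_io_spec`
# must not be overridden.
#
#   class NoOpPreprocessor(TimestepPreprocessor):
#
#     def _process_impl(self, timestep):
#       return timestep  # Pass the timestep through unchanged.
#
#     def _output_spec(self, input_spec):
#       return input_spec  # The spec is unchanged too.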
class CompositeTimestepPreprocessor(TimestepPreprocessor, core.Renderable):
"""Apply an ordered list of timestep preprocessors."""
def __init__(
self,
*preprocessors: TimestepPreprocessor,
validation_frequency: ValidationFrequency = (
ValidationFrequency.ONCE_PER_EPISODE),
name: Optional[str] = None):
super().__init__(validation_frequency=validation_frequency, name=name)
self._timestep_preprocessors = list(preprocessors)
@overrides(TimestepPreprocessor)
# Profiling for .wrap('CompositeTimestepPreprocessor._process_impl')
def _process_impl(self,
timestep: PreprocessorTimestep) -> PreprocessorTimestep:
for timestep_preprocessor in self._timestep_preprocessors:
timestep = timestep_preprocessor.process(timestep)
return timestep
@overrides(TimestepPreprocessor)
def _output_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
out_spec = input_spec
for timestep_preprocessor in self._timestep_preprocessors:
out_spec = timestep_preprocessor.setup_io_spec(out_spec)
return out_spec
def add_preprocessor(self, preprocessor: TimestepPreprocessor):
if self._out_spec:
raise ValueError(
'Cannot append to an initialized CompositeTimestepPreprocessor.')
else:
self._timestep_preprocessors.append(preprocessor)
def render_frame(self, canvas) -> None:
"""Callback to allow preprocessors to draw on a canvas."""
for preprocessor in self._timestep_preprocessors:
if isinstance(preprocessor, core.Renderable):
preprocessor.render_frame(canvas)
def as_list(self) -> TimestepPreprocessorTree:
"""Recursively lists processor and any child processor lists.
This method allows traversal of complex nested processors using `tree`:
>>> tree.map_structure(
... lambda p: p.validation_frequency, processor.as_list())
Returns:
A list containing the processor and the result of `as_list` on any
child-processors.
"""
return [self, [proc.as_list() for proc in self._timestep_preprocessors]]
| dm_robotics-main | py/agentflow/preprocessors/timestep_preprocessor.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""A collection of timestep preprocessors that transform observations."""
import collections
from typing import Any, Callable, FrozenSet, Mapping, Optional, Sequence, Tuple
from absl import logging
import cv2
from dm_env import specs
from dm_robotics.agentflow import spec_utils
from dm_robotics.agentflow.decorators import overrides
from dm_robotics.agentflow.preprocessors import timestep_preprocessor as tsp
from dm_robotics.geometry import geometry
import numpy as np
# Internal profiling
class MisconfigurationError(Exception):
"""Error raised when the preprocessor is misconfigured."""
class CastPreprocessor(tsp.TimestepPreprocessor):
"""Preprocessor to cast observations, reward and discount."""
def __init__(
self,
dtype: type = np.float32, # pylint: disable=g-bare-generic
validation_frequency: tsp.ValidationFrequency = (
tsp.ValidationFrequency.ONCE_PER_EPISODE),
):
"""Initialize CastPreprocessor.
Args:
dtype: The target dtype to cast to.
validation_frequency: How often should we validate the obs specs.
"""
super().__init__(validation_frequency)
self._dtype = dtype
@overrides(tsp.TimestepPreprocessor)
# Profiling for .wrap('CastPreprocessor._process_impl')
def _process_impl(
self, timestep: tsp.PreprocessorTimestep) -> tsp.PreprocessorTimestep:
cast_obs = {
k: np.asarray(v).astype(self._dtype)
for k, v in timestep.observation.items()
}
return tsp.PreprocessorTimestep(
step_type=timestep.step_type,
reward=(self._dtype(timestep.reward) if np.isscalar(timestep.reward)
else timestep.reward.astype(self._dtype)),
discount=self._dtype(timestep.discount),
observation=cast_obs,
pterm=timestep.pterm,
result=timestep.result)
@overrides(tsp.TimestepPreprocessor)
def _output_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
obs_spec = {
k: v.replace(dtype=self._dtype)
for k, v in input_spec.observation_spec.items()
}
return spec_utils.TimeStepSpec(
observation_spec=obs_spec,
reward_spec=input_spec.reward_spec.replace(dtype=self._dtype),
discount_spec=input_spec.discount_spec.replace(dtype=self._dtype))
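# A minimal usage sketch for CastPreprocessor (illustrative; `input_spec` and
# `env_timestep` are assumed to exist):
#
#   cast = CastPreprocessor(dtype=np.float64)
#   output_spec = cast.setup_io_spec(input_spec)
#   output_ts = cast.process(
#       tsp.PreprocessorTimestep.from_environment_timestep(env_timestep))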
class DowncastFloatPreprocessor(tsp.TimestepPreprocessor):
"""Preprocessor to cast observations, reward and discount.
  This preprocessor downcasts all floating-point observations, rewards and
  discounts with more bits than the target dtype to the target dtype.
It does not change the dtype of non-floating point data (e.g.
uint8 in images).
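  Example (an illustrative sketch; `input_spec` is assumed to contain both
  float64 and uint8 entries):
  >>> preprocessor = DowncastFloatPreprocessor(np.float32)
  >>> out_spec = preprocessor.setup_io_spec(input_spec)
  >>> # float64 entries are now float32; the uint8 entries are unchanged.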
"""
def __init__(
self,
max_float_dtype: type, # pylint: disable=g-bare-generic
validation_frequency: tsp.ValidationFrequency = (
tsp.ValidationFrequency.ONCE_PER_EPISODE),
):
"""Initialize DowncastFloatPreprocessor.
Args:
max_float_dtype: The target dtype to cast floating point types with more
bits to, e.g. np.float32.
validation_frequency: How often should we validate the obs specs.
"""
super().__init__(validation_frequency)
if not np.issubdtype(max_float_dtype, np.floating):
raise ValueError('DowncastFloatPreprocessor only supports floating point '
f'dtypes, not {max_float_dtype}')
self._dtype = max_float_dtype
self._max_bits = np.finfo(self._dtype).bits
def _dtype_needs_downcast(self, dtype):
return (np.issubdtype(dtype, np.floating) and
np.finfo(dtype).bits > self._max_bits)
def _downcast_if_necessary(self, value):
if ((hasattr(value, 'dtype') and self._dtype_needs_downcast(value.dtype)) or
self._dtype_needs_downcast(type(value))):
return np.asarray(value).astype(self._dtype)
else:
return value
@overrides(tsp.TimestepPreprocessor)
# Profiling for .wrap('DowncastFloatPreprocessor._process_impl')
def _process_impl(
self, timestep: tsp.PreprocessorTimestep) -> tsp.PreprocessorTimestep:
cast_obs = {
k: self._downcast_if_necessary(v)
for k, v in timestep.observation.items()
}
return tsp.PreprocessorTimestep(
step_type=timestep.step_type,
reward=self._downcast_if_necessary(timestep.reward),
discount=self._downcast_if_necessary(timestep.discount),
observation=cast_obs,
pterm=timestep.pterm,
result=timestep.result)
@overrides(tsp.TimestepPreprocessor)
def _output_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
obs_spec = {}
for k, v in input_spec.observation_spec.items():
if self._dtype_needs_downcast(v.dtype):
obs_spec[k] = v.replace(dtype=self._dtype)
else:
obs_spec[k] = v
if self._dtype_needs_downcast(input_spec.reward_spec.dtype):
reward_spec_dtype = self._dtype
else:
reward_spec_dtype = input_spec.reward_spec.dtype
    if self._dtype_needs_downcast(input_spec.discount_spec.dtype):
      discount_spec_dtype = self._dtype
    else:
      discount_spec_dtype = input_spec.discount_spec.dtype
return spec_utils.TimeStepSpec(
observation_spec=obs_spec,
reward_spec=input_spec.reward_spec.replace(dtype=reward_spec_dtype),
discount_spec=input_spec.discount_spec.replace(
dtype=discount_spec_dtype))
class ObsRelativeToEpisodeStartPreprocessor(tsp.TimestepPreprocessor):
"""Offset specified observations to be relative to initial values."""
def __init__(
self,
target_obs: str,
validation_frequency: tsp.ValidationFrequency = (
tsp.ValidationFrequency.ONCE_PER_EPISODE),
):
super().__init__(validation_frequency)
self._target_obs = target_obs
self._initial_values = {}
@overrides(tsp.TimestepPreprocessor)
# Profiling for .wrap('ObsRelativeToEpisodeStartPreprocessor._process_impl')
def _process_impl(
self, timestep: tsp.PreprocessorTimestep) -> tsp.PreprocessorTimestep:
if timestep.first():
self._initial_values = {}
for k, v in timestep.observation.items():
if k in self._target_obs:
self._initial_values[k] = np.array(v)
corrected_obs = {}
for k, v in timestep.observation.items():
if k in self._initial_values:
corrected_obs[k] = v - self._initial_values[k]
else:
corrected_obs[k] = v
return timestep._replace(observation=corrected_obs)
@overrides(tsp.TimestepPreprocessor)
def _output_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
return input_spec
class PoseRelativeToEpisodeStart(tsp.TimestepPreprocessor):
"""Change pose observations to be relative to episode start."""
def __init__(
self,
pos_obs_name: str,
quat_obs_name: str,
validation_frequency: tsp.ValidationFrequency = (
tsp.ValidationFrequency.ONCE_PER_EPISODE),
):
"""PoseRelativeToEpisodeStart constructor.
Args:
pos_obs_name: Observation key of the pos observation.
quat_obs_name: Observation key of the quaternion observation.
validation_frequency: How often should we validate the obs specs.
"""
super().__init__(validation_frequency)
self._pos_obs_name = pos_obs_name
self._quat_obs_name = quat_obs_name
self._initial_pose = None # type: Optional[geometry.PoseStamped]
@overrides(tsp.TimestepPreprocessor)
# Profiling for .wrap('PoseRelativeToEpisodeStart._process_impl')
def _process_impl(
self, timestep: tsp.PreprocessorTimestep) -> tsp.PreprocessorTimestep:
pos = np.array(timestep.observation[self._pos_obs_name])
quat = np.array(timestep.observation[self._quat_obs_name])
if timestep.first():
self._initial_pose = geometry.PoseStamped(geometry.Pose(pos, quat))
corrected_obs = {}
cur_pose = geometry.PoseStamped(geometry.Pose(pos, quat))
rel_pose = geometry.frame_relative_pose(cur_pose, self._initial_pose)
for k, v in timestep.observation.items():
if k == self._pos_obs_name:
corrected_obs[k] = rel_pose.position.astype(pos.dtype)
elif k == self._quat_obs_name:
corrected_obs[k] = rel_pose.quaternion.astype(quat.dtype)
else:
corrected_obs[k] = v
return timestep._replace(observation=corrected_obs)
@overrides(tsp.TimestepPreprocessor)
def _output_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
if self._pos_obs_name not in input_spec.observation_spec:
raise ValueError(f'{self._pos_obs_name} not in timestep observations')
if self._quat_obs_name not in input_spec.observation_spec:
raise ValueError(f'{self._quat_obs_name} not in timestep observations')
return input_spec
class ObsOffsetAndScalingPreprocessor(tsp.TimestepPreprocessor):
"""Preprocessor to offset and scale specified observations."""
def __init__(
self,
obs_offsets: Mapping[str, np.floating],
obs_scales: Mapping[str, np.floating],
validation_frequency: tsp.ValidationFrequency = (
tsp.ValidationFrequency.ONCE_PER_EPISODE),
):
super().__init__(validation_frequency)
self._obs_offsets = obs_offsets
self._obs_scales = obs_scales
# Profiling for .wrap('ObsOffsetAndScalingPreprocessor._process_impl')
def _process_impl(
self, timestep: tsp.PreprocessorTimestep) -> tsp.PreprocessorTimestep:
corrected_obs = {}
for k, obs in timestep.observation.items():
apply_offset = k in self._obs_offsets
apply_scaling = k in self._obs_scales
if apply_offset:
obs -= obs.dtype.type(self._obs_offsets[k])
if apply_scaling:
obs /= obs.dtype.type(self._obs_scales[k])
corrected_obs[k] = obs
return timestep._replace(observation=corrected_obs)
def _output_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
return input_spec
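# Illustrative sketch (not part of the library API): the per-observation
# arithmetic applied by ObsOffsetAndScalingPreprocessor, with hypothetical
# values. The offset is subtracted first, then the scale divides, both cast
# to the observation's dtype.
def _example_offset_and_scaling():
  obs = np.array([10.0, 12.0], dtype=np.float32)
  offset = obs.dtype.type(10.0)
  scale = obs.dtype.type(2.0)
  return (obs - offset) / scale  # -> [0.0, 1.0]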
class RemoveObservations(tsp.TimestepPreprocessor):
"""Removes the specified fields from observations."""
def __init__(
self,
obs_to_strip: Sequence[str],
validation_frequency: tsp.ValidationFrequency = (
tsp.ValidationFrequency.ONCE_PER_EPISODE),
):
"""Initialize RemoveObs.
Args:
obs_to_strip: A list of strings corresponding to keys to remove from
timestep.observation.
validation_frequency: How often should we validate the obs specs.
"""
super().__init__(validation_frequency)
self._obs_to_strip = obs_to_strip
@overrides(tsp.TimestepPreprocessor)
# Profiling for .wrap('RemoveObservations._process_impl')
def _process_impl(
self, timestep: tsp.PreprocessorTimestep) -> tsp.PreprocessorTimestep:
retained_obs = {
k: v
for k, v in timestep.observation.items()
if k not in self._obs_to_strip
}
return timestep._replace(observation=retained_obs)
@overrides(tsp.TimestepPreprocessor)
def _output_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
obs_spec = {
k: v
for k, v in input_spec.observation_spec.items()
if k not in self._obs_to_strip
}
return input_spec.replace(observation_spec=obs_spec)
class RetainObservations(tsp.TimestepPreprocessor):
"""Leaves only the specified observations."""
def __init__(
self,
obs_to_leave: Sequence[str],
raise_on_missing=True,
validation_frequency: tsp.ValidationFrequency = (
tsp.ValidationFrequency.ONCE_PER_EPISODE),
):
"""Initialize RetainObservations.
Args:
obs_to_leave: A list of strings corresponding to keys to retain in
timestep.observation.
raise_on_missing: Whether to raise a MisconfigurationError if we are asked
to keep a non-existent observation.
validation_frequency: How often should we validate the obs specs.
"""
super().__init__(validation_frequency)
self._obs_to_leave: FrozenSet[str] = frozenset(obs_to_leave)
self._raise_on_missing = raise_on_missing
@overrides(tsp.TimestepPreprocessor)
# Profiling for .wrap('RetainObservations._process_impl')
def _process_impl(
self, timestep: tsp.PreprocessorTimestep) -> tsp.PreprocessorTimestep:
retained_obs = {
k: v for k, v in timestep.observation.items() if k in self._obs_to_leave
}
return timestep._replace(observation=retained_obs)
@overrides(tsp.TimestepPreprocessor)
def _output_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
obs_spec = {
k: v
for k, v in input_spec.observation_spec.items()
if k in self._obs_to_leave
}
not_in_spec = self._obs_to_leave - set(obs_spec)
if not_in_spec:
log_message = ('RetainObservations asked to retain observations that do '
'not exist in the incoming observation spec: '
f'{not_in_spec}')
if self._raise_on_missing:
raise MisconfigurationError(log_message)
else:
logging.warning(log_message)
return input_spec.replace(observation_spec=obs_spec)
class RenameObservations(tsp.TimestepPreprocessor):
"""Renames a set of observations."""
def __init__(
self,
obs_mapping: Mapping[str, str],
raise_on_missing: bool = True,
raise_on_overwrite: bool = True,
validation_frequency: tsp.ValidationFrequency = (
tsp.ValidationFrequency.ONCE_PER_EPISODE),
):
"""Initialize RenameObservations.
Args:
      obs_mapping: Mapping from old to new observation names.
raise_on_missing: Whether to raise a MisconfigurationError if we are asked
to rename a non-existent observation.
      raise_on_overwrite: Whether to raise a MisconfigurationError if we are
        asked to rename an observation such that it overwrites an existing
        observation.
validation_frequency: How often should we validate the obs specs.
Raises:
MisconfigurationError: If the mapping has duplicate names.
"""
super().__init__(validation_frequency)
self._raise_on_missing = raise_on_missing
self._raise_on_overwrite = raise_on_overwrite
self._obs_mapping = obs_mapping
# Check that there are no duplicates in the mapped names.
if len(set(obs_mapping.values())) != len(obs_mapping.values()):
log_message = (f'The new set of observation names {obs_mapping.values()}'
' has duplicate elements.')
raise MisconfigurationError(log_message)
@overrides(tsp.TimestepPreprocessor)
# Profiling for .wrap('RenameObservations._process_impl')
def _process_impl(
self, timestep: tsp.PreprocessorTimestep) -> tsp.PreprocessorTimestep:
observation = self._replace_obs(timestep.observation)
return timestep._replace(observation=observation)
@overrides(tsp.TimestepPreprocessor)
def _output_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
obs_spec = input_spec.observation_spec
self._check_valid_mapping(obs_spec)
obs_spec = self._replace_obs(obs_spec)
return input_spec.replace(observation_spec=obs_spec)
def _replace_obs(self, orig: Mapping[str, Any]) -> Mapping[str, Any]:
new_dict = {}
for obs_key in orig:
if obs_key in self._obs_mapping:
new_dict[self._obs_mapping[obs_key]] = orig[obs_key]
else:
new_dict[obs_key] = orig[obs_key]
return new_dict
def _check_valid_mapping(self, obs_spec):
"""Checks that the renaming of observations is valid."""
full_mapping = {key: key for key in obs_spec}
full_mapping.update(self._obs_mapping)
# Check that the renamed observations exist.
not_in_spec = set(full_mapping) - set(obs_spec)
if not_in_spec:
      log_message = ('RenameObservations asked to rename observations that '
                     'do not exist in the incoming observation spec: '
f'{not_in_spec}')
if self._raise_on_missing:
raise MisconfigurationError(log_message)
else:
logging.warning(log_message)
# Check that we do not overwrite existing observations.
c = collections.Counter(full_mapping.values())
overwritten_names = [key for key, count in c.items() if count > 1]
if overwritten_names:
log_message = ('RenameObservations asked to overwrite the following '
f'existing observations: {overwritten_names}')
if self._raise_on_overwrite:
raise MisconfigurationError(log_message)
else:
logging.warning(log_message)
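# Illustrative sketch (not part of the library API): how RenameObservations
# maps keys. The observation names here are hypothetical; unmapped keys pass
# through unchanged.
def _example_rename_observations():
  renamer = RenameObservations(obs_mapping={'old_name': 'new_name'})
  # _replace_obs is internal; it is called here only to show the key mapping.
  return renamer._replace_obs({'old_name': 1, 'other': 2})
  # -> {'new_name': 1, 'other': 2}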
class MergeObservations(tsp.TimestepPreprocessor):
"""Creates a single observation by merging several observations together."""
def __init__(
self,
obs_to_merge: Sequence[str],
new_obs: str,
raise_on_missing: bool = True,
raise_on_overwrite: bool = True,
validation_frequency: tsp.ValidationFrequency = (
tsp.ValidationFrequency.ONCE_PER_EPISODE),
):
"""Initialize MergeObservations.
Args:
obs_to_merge: Names of the observations to merge.
new_obs: Name of the merged observation.
raise_on_missing: Whether to raise a MisconfigurationError if we are asked
to merge a non-existent observation.
raise_on_overwrite: Whether to raise a MisconfigurationError if the
new_obs name overwrites an existing observation.
validation_frequency: How often should we validate the obs specs.
"""
super().__init__(validation_frequency)
self._obs_to_merge = tuple(obs_to_merge)
self._new_obs = new_obs
self._raise_on_missing = raise_on_missing
self._raise_on_overwrite = raise_on_overwrite
@overrides(tsp.TimestepPreprocessor)
# Profiling for .wrap('MergeObs._process_impl')
def _process_impl(
self, timestep: tsp.PreprocessorTimestep) -> tsp.PreprocessorTimestep:
obs = dict(timestep.observation)
# Create the merged observation.
merged_obs = np.concatenate([
timestep.observation[obs_key]
for obs_key in self._obs_to_merge
if obs_key in obs
])
# Remove the observations that have been merged.
for obs_key in self._obs_to_merge:
obs.pop(obs_key)
obs[self._new_obs] = merged_obs
return timestep._replace(observation=obs)
@overrides(tsp.TimestepPreprocessor)
def _output_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
obs_spec = dict(input_spec.observation_spec)
self._check_valid_merge(obs_spec)
# Create the merged observation.
model_array = np.concatenate([
input_spec.observation_spec[obs_key].generate_value()
for obs_key in self._obs_to_merge
if obs_key in obs_spec
])
# Remove the observations that have been merged.
for obs_key in self._obs_to_merge:
obs_spec.pop(obs_key)
obs_spec[self._new_obs] = specs.Array(
shape=model_array.shape, dtype=model_array.dtype, name=self._new_obs)
return input_spec.replace(observation_spec=obs_spec)
def _check_valid_merge(self, obs_spec):
"""Checks if the observation merging is valid."""
all_current_names = set(obs_spec.keys())
merged_names = set(self._obs_to_merge)
# Check that the merged observations exist.
not_in_spec = merged_names - all_current_names
if not_in_spec:
      log_message = ('MergeObservations asked to merge observations that do '
                     f'not exist in the incoming observation spec: '
                     f'{not_in_spec}')
if self._raise_on_missing:
raise MisconfigurationError(log_message)
else:
logging.warning(log_message)
# Check that the merged observation name doesn't overwrite an existing one.
available_names = all_current_names - merged_names
if self._new_obs in available_names:
log_message = ('MergeObservations asked to overwrite observation name: '
f'{self._new_obs}')
if self._raise_on_overwrite:
raise MisconfigurationError(log_message)
else:
logging.warning(log_message)
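# Illustrative sketch (not part of the library API): the merge semantics of
# MergeObservations, with hypothetical observations. The named arrays are
# concatenated, in the order given, into one flat observation.
def _example_merge_observations():
  obs = {'pos': np.zeros(3), 'vel': np.ones(2)}
  merged = np.concatenate([obs[k] for k in ('pos', 'vel')])
  return merged  # shape (5,); 'pos' and 'vel' would be removed from obs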
class StackObservations(tsp.TimestepPreprocessor):
"""A timestep preprocessor that stacks observations.
  This is useful for environments that are n-step Markov (like a robot that
takes a few cycles to reach the setpoints we command). On the initial
timestep, all elements of the stack are initialized with the value of the
first observation.
"""
def __init__(
self,
obs_to_stack: Sequence[str],
stack_depth: np.integer,
*,
add_leading_dim: bool = False,
override_obs: bool = True,
added_obs_prefix: str = 'stacked_',
validation_frequency: tsp.ValidationFrequency = (
tsp.ValidationFrequency.ONCE_PER_EPISODE),
):
"""StackObservations preprocessor constructor.
Args:
obs_to_stack: A list of observation to stack.
stack_depth: How deep to stack them. The stacked observations will be
concatenated and replace the original observation if `override_obs` is
set to True. Otherwise, extra observations with prefix
`added_obs_prefix` will be added.
add_leading_dim: If False, stacks the observations along the first
dimension. If True, stacks the observations along an extra leading
        dimension. E.g. (7,) stacked 3 times becomes (21,) if
        add_leading_dim=False, or (3, 7) if add_leading_dim=True; (4, 5)
        stacked 3 times becomes (12, 5) if add_leading_dim=False, or
        (3, 4, 5) if add_leading_dim=True.
      override_obs: If True, the stacked observations replace the original
        observations. Otherwise, the stacked observations are added alongside
        the existing ones, under their original names prefixed with
        `added_obs_prefix`.
added_obs_prefix: The prefix to be added to the original observation name.
validation_frequency: How often should we validate the obs specs.
"""
super().__init__(validation_frequency)
self._obs_to_stack: FrozenSet[str] = frozenset(obs_to_stack)
self._stack_depth = stack_depth # type: np.integer
self._add_leading_dim = add_leading_dim
self._stacks = {
name: collections.deque(maxlen=self._stack_depth)
for name in self._obs_to_stack
}
self._override_obs = override_obs
self._added_obs_prefix = added_obs_prefix
@overrides(tsp.TimestepPreprocessor)
# Profiling for .wrap('StackObservations._process_impl')
def _process_impl(
self, timestep: tsp.PreprocessorTimestep) -> tsp.PreprocessorTimestep:
if self._override_obs:
processed_obs = {
k: self._maybe_process(timestep, k, v)
for k, v in timestep.observation.items()
}
else:
stacked_obs = {
self._added_obs_prefix + str(k):
self._maybe_process(timestep, k, timestep.observation[k])
for k in self._obs_to_stack
}
processed_obs = {**timestep.observation, **stacked_obs}
return timestep._replace(observation=processed_obs)
def _maybe_process(self, timestep, key, val):
if key not in self._obs_to_stack:
return val
stack = self._stacks[key]
if timestep.first():
stack.clear()
stack.extend([val] * (self._stack_depth - 1))
stack.appendleft(val)
if self._add_leading_dim:
return np.array(stack)
else:
return np.concatenate(stack)
def _maybe_process_spec(self, key, spec):
if key not in self._obs_to_stack:
return spec
if self._add_leading_dim:
model_array = np.array([spec.generate_value()] * self._stack_depth)
else:
model_array = np.concatenate([spec.generate_value()] * self._stack_depth)
return specs.Array(
shape=model_array.shape, dtype=model_array.dtype, name=spec.name)
@overrides(tsp.TimestepPreprocessor)
def _output_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
if self._override_obs:
processed_obs_spec = {
k: self._maybe_process_spec(k, v)
for k, v in input_spec.observation_spec.items()
}
else:
stacked_obs_spec = {
self._added_obs_prefix + str(k):
self._maybe_process_spec(k, input_spec.observation_spec[k])
for k in self._obs_to_stack
}
processed_obs_spec = {**input_spec.observation_spec, **stacked_obs_spec}
    return input_spec.replace(observation_spec=processed_obs_spec)
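# Illustrative sketch (not part of the library API): the shape bookkeeping
# from the StackObservations docstring, checked with plain numpy for a (7,)
# observation stacked 3 times.
def _example_stack_observation_shapes():
  val = np.zeros(7)
  flat = np.concatenate([val] * 3)  # add_leading_dim=False -> shape (21,)
  lead = np.array([val] * 3)        # add_leading_dim=True  -> shape (3, 7)
  return flat.shape, lead.shape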
class FoldObservations(tsp.TimestepPreprocessor):
"""Performs a fold operation and transormation some observation."""
def __init__(
self,
output_obs_name: str,
obs_to_fold: str,
fold_fn: Callable[[np.ndarray, np.ndarray], np.ndarray],
output_fn: Callable[[np.ndarray], np.ndarray],
init_val: np.ndarray,
validation_frequency: tsp.ValidationFrequency = (
tsp.ValidationFrequency.ONCE_PER_EPISODE),
):
super().__init__(validation_frequency)
self._output_obs_name = output_obs_name
self._obs_to_fold = obs_to_fold
self._fold_fn = fold_fn
self._output_fn = output_fn
self._init_val = init_val
self._cur_val = init_val
@overrides(tsp.TimestepPreprocessor)
def _process_impl(
self, timestep: tsp.PreprocessorTimestep) -> tsp.PreprocessorTimestep:
if timestep.step_type.first():
self._cur_val = self._init_val
step_val = timestep.observation[self._obs_to_fold]
self._cur_val = self._fold_fn(self._cur_val, step_val)
processed_obs = {k: v for k, v in timestep.observation.items()}
output_val = self._output_fn(self._cur_val).astype(self._init_val.dtype)
processed_obs[self._output_obs_name] = output_val
return timestep._replace(observation=processed_obs)
@overrides(tsp.TimestepPreprocessor)
def _output_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
observation_spec = {k: v for k, v in input_spec.observation_spec.items()}
observation_spec[self._output_obs_name] = specs.Array(
shape=self._init_val.shape,
dtype=self._init_val.dtype,
name=self._output_obs_name)
return input_spec.replace(observation_spec=observation_spec)
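# Illustrative sketch (not part of the library API): a FoldObservations
# configuration that tracks the running maximum of a hypothetical scalar
# observation named 'force' and re-emits it as 'max_force'.
def _example_fold_observations():
  return FoldObservations(
      output_obs_name='max_force',
      obs_to_fold='force',
      fold_fn=np.maximum,
      output_fn=lambda x: x,
      init_val=np.zeros(()))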
class ImageCropper(object):
"""Helper class that crops an image."""
def __init__(
self,
crop_width_relative: float,
crop_height_relative: Optional[float] = None,
x_offset_relative: float = 0.0,
y_offset_relative: float = 0.0,
):
"""This initializes internal variables that are reused for every crop operation.
Args:
crop_width_relative: What fraction of the original width to crop to. For
example, for an image that is 100px wide, a value of 0.65 would crop a
region that is 65px wide. Cannot be zero.
crop_height_relative: Optional fraction of the original height to crop to.
Cannot be zero. If omitted, default to a square with the side length
implied by crop_width_relative.
x_offset_relative: X offset for the crop. 0 means the left edge of the
crop is aligned with the left edge of the source. 0.5 means the *center*
of the crop is aligned with the center of the source. 1.0 means the
*right* edge of the crop is aligned with the right edge of the source.
y_offset_relative: Behaves like x_offset_relative, but for the y axis.
"""
# Check parameters for limit violations (all the limits are [0,1])
def check_limit(value: float, name: str):
if value < 0.0 or value > 1.0:
raise ValueError('{} must be between 0 and 1, is {}'.format(
name, value))
check_limit(crop_width_relative, 'Crop width')
if crop_width_relative == 0.0:
raise ValueError('Crop width cannot be zero!')
if crop_height_relative is not None:
check_limit(crop_height_relative, 'Crop height')
if crop_height_relative == 0.0:
raise ValueError('Crop height cannot be zero!')
check_limit(x_offset_relative, 'X offset')
check_limit(y_offset_relative, 'Y offset')
self._x_offset_relative = x_offset_relative
self._y_offset_relative = y_offset_relative
self._crop_width_relative = crop_width_relative
self._crop_height_relative = crop_height_relative
self._cropped_width = None # type: Optional[int]
self._cropped_height = None # type: Optional[int]
self._x_offset = None # type: Optional[int]
self._y_offset = None # type: Optional[int]
self._last_input_width = None # type: Optional[int]
self._last_input_height = None # type: Optional[int]
def calculate_crop_params(self, input_width: int,
input_height: int) -> Tuple[int, int]:
"""Calculate the actual size of the crop in pixels.
Saves the width and height used to avoid unnecessary calculations.
Args:
input_width: Width of the image to be cropped, in pixels.
input_height: Height of the image to be cropped, in pixels.
Returns:
A tuple (output_width, output_height).
    Raises:
      ValueError: if only the crop width was set (in which case the crop
        height defaults to equal the width) and the resulting square is
        taller than the image.
"""
# Only do the math if input_width or input_height changed from the last time
# we were called.
if (input_width != self._last_input_width or
input_height != self._last_input_height):
self._cropped_width = max(
1, self._fraction_of_pixels(self._crop_width_relative, input_width))
self._cropped_height = (
self._cropped_width if self._crop_height_relative is None else max(
1, self._fraction_of_pixels(self._crop_height_relative,
input_height)))
if self._cropped_height > input_height:
raise ValueError(
'Crop height is {}, but input is only {} pixels high!'.format(
self._cropped_height, input_height))
      self._x_offset = self._fraction_of_pixels(
          self._x_offset_relative, input_width - self._cropped_width)
      self._y_offset = self._fraction_of_pixels(
          self._y_offset_relative, input_height - self._cropped_height)
      # Cache the input size so repeated calls with the same image can skip
      # the recomputation above.
      self._last_input_width = input_width
      self._last_input_height = input_height
# Return the results to use outside this class.
return (self._cropped_width, self._cropped_height)
def _fraction_of_pixels(self, fraction: float, total_pixels: int) -> int:
"""Calculate a number of pixels based on ratio and total_pixels.
This function exists to ensure that all conversions from relative sizes to
pixels use the same logic.
Args:
      fraction: Fraction of total_pixels to calculate, in [0.0, 1.0].
total_pixels: Total number of pixels in the relevant dimensions.
Returns:
      The requested fraction of the given pixel size, rounded to the nearest
      integer. I.e. calling this with fraction=1 always returns total_pixels,
      and calling with fraction=0 always returns 0.
    Raises:
      ValueError: if fraction is not in [0, 1].
      ValueError: if total_pixels is negative.
"""
if fraction < 0.0 or fraction > 1.0:
raise ValueError(
'Fraction must be between 0 and 1, is {}'.format(fraction))
if total_pixels < 0:
      raise ValueError(
          'Total number of pixels must be non-negative, got {}'.format(
              total_pixels))
return int(round(float(total_pixels) * fraction))
def crop(self, image: np.ndarray) -> np.ndarray:
"""Crop the given image."""
if len(image.shape) < 2:
raise ValueError('Cropper requires at least 2 dimensions, got '
'shape {}'.format(image.shape))
width = image.shape[1]
height = image.shape[0]
# This bails out early if we already know the parameters for this width and
# height.
self.calculate_crop_params(input_width=width, input_height=height)
return image[self._y_offset:self._y_offset + self._cropped_height,
self._x_offset:self._x_offset + self._cropped_width]
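# Illustrative sketch (not part of the library API): a centered half-size
# crop of a hypothetical 100x200 RGB image using ImageCropper.
def _example_image_cropper():
  cropper = ImageCropper(
      crop_width_relative=0.5,
      crop_height_relative=0.5,
      x_offset_relative=0.5,
      y_offset_relative=0.5)
  image = np.zeros((100, 200, 3), dtype=np.uint8)
  # 0.5 of the 200px width and 0.5 of the 100px height, centered, gives the
  # crop image[25:75, 50:150].
  return cropper.crop(image)  # shape (50, 100, 3)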
class CropImageObservation(tsp.TimestepPreprocessor):
"""Crops an image observation to the desired shape."""
def __init__(
self,
input_obs_name: str,
output_obs_name: str,
crop_width_relative: float,
crop_height_relative: Optional[float] = None,
x_offset_relative: float = 0.0,
y_offset_relative: float = 0.0,
validation_frequency: tsp.ValidationFrequency = (
tsp.ValidationFrequency.ONCE_PER_EPISODE),
):
"""Build a CropImageObservation preprocessor.
Args:
      input_obs_name: Name of the input observation. This must be at least a
        2D array (height, width, ...).
output_obs_name: Name of the output observation.
crop_width_relative: What fraction of the original width to crop to. For
example, for an image that is 100px wide, a value of 0.65 would crop a
region that is 65px wide. Cannot be zero.
crop_height_relative: Optional fraction of the original height to crop to.
Cannot be zero. If omitted, default to a square with the side length
implied by crop_width_relative.
x_offset_relative: X offset for the crop. 0 means the left edge of the
crop is aligned with the left edge of the source. 0.5 means the *center*
of the crop is aligned with the center of the source. 1.0 means the
*right* edge of the crop is aligned with the right edge of the source.
y_offset_relative: Behaves like x_offset_relative, but for the y axis.
validation_frequency: How often should we validate the obs specs.
"""
super().__init__(validation_frequency)
# Will raise a ValueError if any of the parameters are not OK.
self._cropper = ImageCropper(
crop_width_relative=crop_width_relative,
crop_height_relative=crop_height_relative,
x_offset_relative=x_offset_relative,
y_offset_relative=y_offset_relative)
self._input_obs_name = input_obs_name
self._output_obs_name = output_obs_name
def _process_image(self, image: np.ndarray):
return self._cropper.crop(image)
@overrides(tsp.TimestepPreprocessor)
def _process_impl(
self, timestep: tsp.PreprocessorTimestep) -> tsp.PreprocessorTimestep:
processed_obs = dict(timestep.observation)
processed_obs[self._output_obs_name] = self._process_image(
timestep.observation[self._input_obs_name])
return timestep._replace(observation=processed_obs)
@overrides(tsp.TimestepPreprocessor)
def _output_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
input_observation_spec = input_spec.observation_spec[self._input_obs_name]
shape = input_observation_spec.shape
    if len(shape) < 2:
      raise ValueError(
          'CropImageObservation preprocessor expects an image observation '
          'with at least 2 dimensions, got shape {}'.format(shape))
width = shape[1]
height = shape[0]
cropped_width, cropped_height = self._cropper.calculate_crop_params(
input_width=width, input_height=height)
observation_spec = dict(input_spec.observation_spec)
observation_spec[self._output_obs_name] = specs.Array(
shape=(cropped_height, cropped_width) + shape[2:],
dtype=input_observation_spec.dtype,
name=self._output_obs_name)
return input_spec.replace(observation_spec=observation_spec)
class CropSquareAndResize(CropImageObservation):
"""Crop a square from an image observation and resample it to the desired size in pixels."""
def __init__(
self,
input_obs_name: str,
output_obs_name: str,
crop_width_relative: float,
side_length_pixels: int,
x_offset_relative: float = 0.0,
y_offset_relative: float = 0.0,
interpolation=cv2.INTER_LINEAR,
validation_frequency: tsp.ValidationFrequency = (
tsp.ValidationFrequency.ONCE_PER_EPISODE),
):
"""Build a CropImageObservation preprocessor.
Args:
      input_obs_name: Name of the input observation. This must be at least a
        2D array (height, width, ...).
output_obs_name: Name of the output observation.
crop_width_relative: What fraction of the original width to crop to. For
example, for an image that is 100px wide, a value of 0.65 would crop a
region that is 65px wide. This defines both the width and height of the
crop, so if the image is wider than it is tall, there exist values that
can lead to invalid crops at runtime! Cannot be zero.
side_length_pixels: The crop will be resampled so that its side length
matches this.
x_offset_relative: What fraction of the original width to offset the crop
by. Defaults to 0.0.
y_offset_relative: What fraction of the original height to offset the crop
by. Defaults to 0.0.
interpolation: The interpolation method to use. Supported values are
cv2.INTER_LINEAR, cv2.INTER_NEAREST, cv2.INTER_AREA, cv2.INTER_CUBIC,
cv2.INTER_LANCZOS4
validation_frequency: How often should we validate the obs specs.
"""
super().__init__(
input_obs_name=input_obs_name,
output_obs_name=output_obs_name,
crop_width_relative=crop_width_relative,
crop_height_relative=None,
x_offset_relative=x_offset_relative,
y_offset_relative=y_offset_relative,
validation_frequency=validation_frequency,
)
if side_length_pixels <= 0:
raise ValueError(
'Side length must be > 0, got {}'.format(side_length_pixels))
self._side_length_pixels = side_length_pixels
self._interpolation = interpolation
def _process_image(self, image: np.ndarray):
crop = super()._process_image(image)
return cv2.resize(
crop, (self._side_length_pixels, self._side_length_pixels),
interpolation=self._interpolation)
@overrides(tsp.TimestepPreprocessor)
def _output_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
cropped_input_spec = super()._output_spec(input_spec)
input_observation_spec = cropped_input_spec.observation_spec[
self._input_obs_name]
shape = input_observation_spec.shape
observation_spec = dict(input_spec.observation_spec)
observation_spec[self._output_obs_name] = specs.Array(
shape=(self._side_length_pixels, self._side_length_pixels) + shape[2:],
dtype=input_observation_spec.dtype,
name=self._output_obs_name)
return input_spec.replace(observation_spec=observation_spec)
class ResizeImage(tsp.TimestepPreprocessor):
"""Resample an image observation to the desired size in pixels.
Resulting image is reshaped into a square if it is not already.
"""
def __init__(
self,
input_obs_name: str,
output_obs_name: str,
side_length_pixels: int,
interpolation=cv2.INTER_LINEAR,
validation_frequency: tsp.ValidationFrequency = (
tsp.ValidationFrequency.ONCE_PER_EPISODE),
):
"""Build a ResizeImage preprocessor.
Args:
      input_obs_name: Name of the input observation. The observation must be
        at least a 2D array (height, width, ...).
output_obs_name: Name of the output observation.
side_length_pixels: The image will be resampled so that its side length
matches this.
interpolation: The interpolation method to use. Supported values are
cv2.INTER_LINEAR, cv2.INTER_NEAREST, cv2.INTER_AREA, cv2.INTER_CUBIC,
cv2.INTER_LANCZOS4
validation_frequency: How often should we validate the obs specs.
"""
super().__init__(validation_frequency)
if side_length_pixels <= 0:
raise ValueError(
'Side length must be > 0, got {}'.format(side_length_pixels))
self._input_obs_name = input_obs_name
self._output_obs_name = output_obs_name
self._side_length_pixels = side_length_pixels
self._interpolation = interpolation
def _process_image(self, image: np.ndarray):
return cv2.resize(
image, (self._side_length_pixels, self._side_length_pixels),
interpolation=self._interpolation)
@overrides(tsp.TimestepPreprocessor)
def _process_impl(
self, timestep: tsp.PreprocessorTimestep) -> tsp.PreprocessorTimestep:
processed_obs = dict(timestep.observation)
processed_obs[self._output_obs_name] = self._process_image(
timestep.observation[self._input_obs_name])
return timestep._replace(observation=processed_obs)
@overrides(tsp.TimestepPreprocessor)
def _output_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
input_observation_spec = input_spec.observation_spec[self._input_obs_name]
shape = input_observation_spec.shape
observation_spec = dict(input_spec.observation_spec)
observation_spec[self._output_obs_name] = specs.Array(
shape=(self._side_length_pixels, self._side_length_pixels) + shape[2:],
dtype=input_observation_spec.dtype,
name=self._output_obs_name)
return input_spec.replace(observation_spec=observation_spec)
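# Illustrative sketch (not part of the library API): resampling a
# hypothetical camera observation to a 64x64 square with ResizeImage.
def _example_resize_image():
  return ResizeImage(
      input_obs_name='camera_rgb',
      output_obs_name='camera_rgb_64',
      side_length_pixels=64)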
class AddObservation(tsp.TimestepPreprocessor):
"""Preprocessor that adds an observation."""
def __init__(
self,
obs_name: str,
obs_callable: Callable[[tsp.PreprocessorTimestep], np.ndarray],
obs_spec: Optional[specs.Array] = None,
validation_frequency: tsp.ValidationFrequency = (
tsp.ValidationFrequency.ONCE_PER_EPISODE),
):
"""AddObservation constructor.
Args:
obs_name: Name of the observation to add.
      obs_callable: Callable generating the value of the observation to be
        added, given a timestep.
obs_spec: Specs for the output of `obs_callable`. If `None` is provided
the specs are inferred as a `dm_env.specs.Array` with shape and dtype
matching the output of `obs_callable` and name set to `obs_name`.
validation_frequency: How often should we validate the obs specs.
"""
super().__init__(validation_frequency)
self._obs_name = obs_name
self._obs_callable = obs_callable
self._obs_spec = obs_spec
@overrides(tsp.TimestepPreprocessor)
# Profiling for .wrap_scope('AddObservation._process_impl')
def _process_impl(
self, timestep: tsp.PreprocessorTimestep) -> tsp.PreprocessorTimestep:
processed_obs = dict(timestep.observation)
processed_obs[self._obs_name] = np.asarray(self._obs_callable(timestep))
return timestep._replace(observation=processed_obs)
@overrides(tsp.TimestepPreprocessor)
def _output_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
observation_spec = dict(input_spec.observation_spec)
if self._obs_name in observation_spec.keys():
raise ValueError(f'Observation {self._obs_name} already exists.')
dummy_input = tsp.PreprocessorTimestep.from_environment_timestep(
input_spec.minimum(), pterm=0.0)
try:
dummy_obs = np.asarray(self._obs_callable(dummy_input))
except Exception:
logging.exception('Failed to run the obs_callable to add observation %s.',
self._obs_name)
raise
if self._obs_spec is None:
self._obs_spec = specs.Array(
shape=dummy_obs.shape, dtype=dummy_obs.dtype, name=self._obs_name)
observation_spec[self._obs_name] = self._obs_spec
return input_spec.replace(observation_spec=observation_spec)
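# Illustrative sketch (not part of the library API): adding a constant
# observation with AddObservation. The observation name and value are
# hypothetical; the spec is inferred from the callable's output.
def _example_add_observation():
  return AddObservation(
      obs_name='task_id',
      obs_callable=lambda timestep: np.asarray([1.0], dtype=np.float32))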
| dm_robotics-main | py/agentflow/preprocessors/observation_transforms.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""A collection of timestep preprocessors that define rewards."""
from typing import Callable, Sequence, Text, Union, Optional
from dm_env import specs
from dm_robotics.agentflow import core
from dm_robotics.agentflow import spec_utils
from dm_robotics.agentflow.decorators import overrides
from dm_robotics.agentflow.preprocessors import timestep_preprocessor
import numpy as np
import tree
# Internal profiling
# All rewards should be either a single float or an array of floats.
RewardVal = Union[float, np.floating, np.ndarray]
# Callable for reward composition for `CombineRewards`.
# This callable receives the list of rewards generated from the list of
# `reward_preprocessors` passed to `CombineRewards` and returns a single reward.
RewardCombinationStrategy = Callable[[Sequence[RewardVal]], RewardVal]
class ThresholdReward(timestep_preprocessor.TimestepPreprocessor):
"""Returns a sparse reward if reward is above a threshold.
"""
def __init__(
self,
*,
threshold: float = 0.5,
hi: float = 1.0,
lo: float = 0.0,
validation_frequency: timestep_preprocessor.ValidationFrequency = (
timestep_preprocessor.ValidationFrequency.ONCE_PER_EPISODE),
name: Optional[str] = None,
):
"""Initializes ThresholdReward.
Args:
threshold: Reward threshold.
hi: Value to emit in reward field if incoming reward is greater than or
equal to `threshold`.
lo: Value to emit in reward field if incoming reward is below `threshold`.
validation_frequency: How often should we validate the obs specs.
name: A name for this preprocessor.
"""
super().__init__(validation_frequency=validation_frequency, name=name)
self._threshold = threshold
self._hi = hi
self._lo = lo
@overrides(timestep_preprocessor.TimestepPreprocessor)
def _process_impl(
self, timestep: timestep_preprocessor.PreprocessorTimestep
) -> timestep_preprocessor.PreprocessorTimestep:
reward = self._hi if timestep.reward >= self._threshold else self._lo
return timestep._replace(reward=reward)
@overrides(timestep_preprocessor.TimestepPreprocessor)
def _output_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
# Reward not computed from observation, so dtype should match input_spec.
self._hi = input_spec.reward_spec.dtype.type(self._hi)
self._lo = input_spec.reward_spec.dtype.type(self._lo)
return input_spec
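# Illustrative sketch (not part of the library API): the thresholding rule of
# ThresholdReward spelled out with plain floats (hypothetical values).
# Incoming rewards at or above `threshold` map to `hi`, otherwise to `lo`.
def _example_threshold_reward_rule():
  threshold, hi, lo = 0.5, 1.0, 0.0
  return [hi if r >= threshold else lo for r in (0.4, 0.5, 0.8)]
  # -> [0.0, 1.0, 1.0]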
class L2Reward(timestep_preprocessor.TimestepPreprocessor):
"""Returns a continuous reward based on the L2-distance between two keypoints.
  The keypoint positions are sourced from the observations.
"""
def __init__(
self,
obs0: Text,
obs1: Text,
*,
reward_scale: float = 1.0,
reward_offset: float = 1.0,
validation_frequency: timestep_preprocessor.ValidationFrequency = (
timestep_preprocessor.ValidationFrequency.ONCE_PER_EPISODE),
name: Optional[str] = None,
):
"""Initializes L2Reward.
Args:
obs0: The observation key for the first keypoint.
obs1: The observation key for the second keypoint.
reward_scale: Scalar multiplier.
reward_offset: Scalar offset.
validation_frequency: How often should we validate the obs specs.
name: A name for this preprocessor.
"""
super().__init__(validation_frequency=validation_frequency, name=name)
self._obs0 = obs0
self._obs1 = obs1
self._reward_scale = reward_scale
self._reward_offset = reward_offset
self._output_type = None # type: np.dtype
@overrides(timestep_preprocessor.TimestepPreprocessor)
def _process_impl(
self, timestep: timestep_preprocessor.PreprocessorTimestep
) -> timestep_preprocessor.PreprocessorTimestep:
try:
obs0_val = timestep.observation[self._obs0]
obs1_val = timestep.observation[self._obs1]
except KeyError as key_missing:
raise KeyError(
f'{self._obs0} or {self._obs1} not a valid observation name. Valid '
f'names are {list(timestep.observation.keys())}') from key_missing
dist = np.linalg.norm(obs0_val - obs1_val)
reward = self._output_type.type(-1 * dist * self._reward_scale +
self._reward_offset)
return timestep._replace(reward=reward)
@overrides(timestep_preprocessor.TimestepPreprocessor)
def _output_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
# Reward computed from observation, so dtype can change; Set accordingly.
type0 = input_spec.observation_spec[self._obs0]
type1 = input_spec.observation_spec[self._obs1]
self._output_type = np.promote_types(type0, type1)
return input_spec.replace(
reward_spec=input_spec.reward_spec.replace(
dtype=self._output_type.type))
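# Illustrative sketch (not part of the library API): the shaping formula used
# by L2Reward, with hypothetical keypoints 5 units apart:
# reward = -||obs0 - obs1|| * reward_scale + reward_offset.
def _example_l2_reward_formula():
  obs0 = np.array([0.0, 0.0, 0.0])
  obs1 = np.array([0.0, 3.0, 4.0])
  reward_scale, reward_offset = 1.0, 1.0
  return -np.linalg.norm(obs0 - obs1) * reward_scale + reward_offset  # -4.0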
class ThresholdedL2Reward(timestep_preprocessor.TimestepPreprocessor):
"""Returns a sparse reward if two keypoints are within a threshold distance.
  The keypoint positions are sourced from the observations.
"""
def __init__(
self,
obs0,
obs1,
*,
threshold,
reward: float = 1.0,
validation_frequency: timestep_preprocessor.ValidationFrequency = (
timestep_preprocessor.ValidationFrequency.ONCE_PER_EPISODE),
name: Optional[str] = None,
):
super().__init__(validation_frequency=validation_frequency, name=name)
self._obs0 = obs0
self._obs1 = obs1
self._threshold = threshold
self._reward = reward
self._zero_reward = 0.0
@overrides(timestep_preprocessor.TimestepPreprocessor)
def _process_impl(
self, timestep: timestep_preprocessor.PreprocessorTimestep
) -> timestep_preprocessor.PreprocessorTimestep:
try:
obs0_val = timestep.observation[self._obs0]
obs1_val = timestep.observation[self._obs1]
except KeyError as key_missing:
raise KeyError(
f'{self._obs0} or {self._obs1} not a valid observation name. Valid '
f'names are {list(timestep.observation.keys())}') from key_missing
dist = np.linalg.norm(obs0_val - obs1_val)
reward = self._reward if dist < self._threshold else self._zero_reward
return timestep._replace(reward=reward)
@overrides(timestep_preprocessor.TimestepPreprocessor)
def _output_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
# Verify required keys are in the spec.
for key in [self._obs0, self._obs1]:
if key not in input_spec.observation_spec:
raise KeyError('Expected "{}" key in observation not found. Existing '
'keys: {}'.format(
key, input_spec.observation_spec.keys()))
# Reward not computed from observation, so dtype should match input_spec.
self._reward = input_spec.reward_spec.dtype.type(self._reward)
self._zero_reward = input_spec.reward_spec.dtype.type(self._zero_reward)
return input_spec
def _cast_reward_to_type(reward: RewardVal, dtype: np.dtype) -> RewardVal:
if np.isscalar(reward):
return dtype.type(reward)
return reward.astype(dtype) # pytype: disable=attribute-error
class ComputeReward(timestep_preprocessor.TimestepPreprocessor):
"""Computes a reward from the observations and adds it to the timestep."""
def __init__(
self,
reward_function: Callable[[spec_utils.ObservationValue], RewardVal],
*,
output_spec_shape: Sequence[int] = (),
validation_frequency: timestep_preprocessor.ValidationFrequency = (
timestep_preprocessor.ValidationFrequency.ONCE_PER_EPISODE),
name: Optional[str] = None,
):
"""ComputeReward constructor.
Args:
reward_function: Function that takes the timestep observation as input
and returns a reward.
output_spec_shape: Shape of the output reward. Defaults to an empty shape
denoting a scalar reward.
validation_frequency: How often should we validate the obs specs.
name: A name for this preprocessor.
"""
super().__init__(validation_frequency=validation_frequency, name=name)
self._reward_function = reward_function
self._output_shape = output_spec_shape
@overrides(timestep_preprocessor.TimestepPreprocessor)
# Profiling for .wrap_scope('ComputeReward._process_impl')
def _process_impl(
self, timestep: timestep_preprocessor.PreprocessorTimestep
) -> timestep_preprocessor.PreprocessorTimestep:
reward = self._reward_function(timestep.observation)
# Cast (possibly nested) reward to expected dtype.
reward = tree.map_structure(
lambda r: _cast_reward_to_type(r, self._out_spec.reward_spec.dtype),
reward)
return timestep.replace(reward=reward)
def _output_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
return input_spec.replace(reward_spec=specs.Array(
shape=self._output_shape, dtype=input_spec.reward_spec.dtype))
class StagedWithActiveThreshold(RewardCombinationStrategy):
"""A RewardCombinationStrategy that stages a sequences of rewards.
It creates a reward for following a particular sequence of tasks in order,
given a reward value for each individual task.
  This works by scanning the terms backwards and returning the last reward at
  or above the provided threshold, plus the number of terms preceding it,
  normalized by the total number of terms.
With this strategy the agent starts working on a task as soon as it's above
the provided threshold. This was useful for the RGB stacking task in which it
wasn't clear how close a reward could get to 1.0 for many tasks, but it was
easy to see when a reward started to move and should therefore be switched to.
E.g. if the threshold is 0.9 and the reward sequence is [0.95, 0.92, 0.6] it
will output (1 + 0.92) / 3 = 0.64.
Note: Preceding terms are given 1.0, not their current reward value.
This assumes tasks are ordered such that success on task `i` implies all
previous tasks `<i` are also solved, and thus removes the need to tune earlier
rewards to remain above-threshold in all down-stream tasks.
Use this for tasks in which it is more natural to express a threshold on which
a task is active, vs. when it is solved. See `StagedWithSuccessThreshold` if
the converse is true.
  Rewards must be in [0, 1], otherwise they will be clipped.
"""
def __init__(
self,
threshold: float = 0.1,
):
"""Initialize Staged.
Args:
threshold: A threshold that a reward must exceed for that task to be
considered "active". All previous tasks are assumed solved.
"""
self._thresh = threshold
def __call__(self, rewards: Sequence[RewardVal]) -> RewardVal:
rewards = np.clip(rewards, 0, 1)
last_reward = 0.
num_stages = len(rewards)
for i, last_reward in enumerate(reversed(rewards)):
if last_reward >= self._thresh:
# Found a reward at/above the threshold, add number of preceding terms
# and normalize with the number of terms.
return (num_stages - (i + 1) + last_reward) / float(num_stages)
    # No stage was at or above the threshold: fall back to the first stage's
    # reward, normalized by the number of stages.
    return last_reward / num_stages
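# Illustrative sketch (not part of the library API): reproduces the docstring
# example for StagedWithActiveThreshold. Scanning backwards, 0.92 is the last
# stage at or above 0.9 and has one stage before it, so the combined reward
# is (1 + 0.92) / 3 ~= 0.64.
def _example_staged_with_active_threshold():
  staged = StagedWithActiveThreshold(threshold=0.9)
  return staged([0.95, 0.92, 0.6])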
class StagedWithSuccessThreshold(RewardCombinationStrategy):
"""A RewardCombinationStrategy that stages a sequences of rewards.
It creates a reward for following a particular sequence of tasks in order,
given a reward value for each individual task.
Unlike `StagedWithActiveThreshold`, which only gives reward for tasks above
threshold, this function gives (normalized) reward 1.0 for all solved tasks,
as well as the current shaped value for the first unsolved task.
E.g. if the threshold is 0.9 and the reward sequence is [0.95, 0.92, 0.6] it
will output (2 + 0.6) / 3 = 0.8666.
With this strategy the agent starts working on a task as soon as the PREVIOUS
task is above the provided threshold. Use this for tasks in which it is more
natural to express a threshold on which a task is solved, vs. when it is
active.
E.g. a sequence of object-rearrangement tasks may have arbitrary starting
reward due to their current positions, but the reward will always saturate
towards 1 when the task is solved. In this case it would be difficult to set
an "active" threshold without skipping stages.
  Rewards must be in [0, 1], otherwise they will be clipped.
"""
def __init__(
self,
threshold: float = 0.9,
*,
assume_cumulative_success: bool = True,
):
"""Initialize Staged.
Args:
threshold: A threshold that each reward must exceed for that task to be
considered "solved".
assume_cumulative_success: If True, assumes all tasks before the last task
above threshold are also solved and given reward 1.0, regardless of
        their current value. If False, only the first K contiguous tasks above
threshold are considered solved.
"""
self._thresh = threshold
self._assume_cumulative_success = assume_cumulative_success
def __call__(self, rewards: Sequence[RewardVal]) -> RewardVal:
rewards = np.clip(rewards, 0, 1)
num_stages = len(rewards)
tasks_above_threshold = np.asarray(rewards) > self._thresh
if self._assume_cumulative_success:
if np.any(tasks_above_threshold):
        # The index of the last task above threshold sets the solved count.
        solved_task_idxs = np.argwhere(tasks_above_threshold)
        num_tasks_solved = solved_task_idxs.max() + 1
else:
num_tasks_solved = 0
else:
num_tasks_solved = np.argmin(tasks_above_threshold) # first "False"
# The last task should never be considered "solved" because we add
# current_task_reward. If you want to apply a reward threshold to the last
# stage to make it sparse, do that before or after passing it to this
# function.
num_tasks_solved = min(num_tasks_solved, num_stages - 1)
current_task_reward = rewards[num_tasks_solved]
return (num_tasks_solved + current_task_reward) / float(num_stages)
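# Illustrative sketch (not part of the library API): reproduces the docstring
# example for StagedWithSuccessThreshold. The first two stages clear the 0.9
# threshold and count as solved, and the third contributes its shaped value,
# giving (2 + 0.6) / 3 ~= 0.867.
def _example_staged_with_success_threshold():
  staged = StagedWithSuccessThreshold(threshold=0.9)
  return staged([0.95, 0.92, 0.6])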
class CombineRewards(timestep_preprocessor.TimestepPreprocessor,
core.Renderable):
"""Preprocessor which steps multiple rewards in sequence and combines them."""
def __init__(
self,
reward_preprocessors: Sequence[
timestep_preprocessor.TimestepPreprocessor],
combination_strategy: RewardCombinationStrategy = np.max,
*,
output_spec_shape: Sequence[int] = (),
flatten_rewards: bool = True,
validation_frequency: timestep_preprocessor.ValidationFrequency = (
timestep_preprocessor.ValidationFrequency.ONCE_PER_EPISODE),
name: Optional[str] = None,
):
"""CombineRewards constructor.
Args:
reward_preprocessors: List of rewards preprocessor to be evaluated
sequentially.
combination_strategy: Callable that takes the list of rewards coming from
the `reward_preprocessors` and outputs a new reward. Defaults to
`np.max`, which means that it returns the maximum of all the rewards.
output_spec_shape: The shape of the output reward from
`combination_strategy`. Defaults to an empty shape (for scalar rewards).
flatten_rewards: If True, flattens any reward arrays coming from the
`reward_preprocessors` before feeding them to the
`combination_strategy`.
validation_frequency: How often should we validate the obs specs.
name: A name for this preprocessor.
Raises:
ValueError: If no reward_preprocessors are given.
"""
super().__init__(validation_frequency=validation_frequency, name=name)
if not reward_preprocessors:
raise ValueError('reward_preprocessors should have non-zero length')
self._reward_preprocessors = reward_preprocessors
self._combination_strategy = combination_strategy
self._flatten_rewards = flatten_rewards
self._output_shape = output_spec_shape
self._output_type = None # type: np.dtype
@overrides(timestep_preprocessor.TimestepPreprocessor)
# Profiling for .wrap_scope('CombineRewards._process_impl')
def _process_impl(
self, timestep: timestep_preprocessor.PreprocessorTimestep
) -> timestep_preprocessor.PreprocessorTimestep:
# If this processor hasn't been setup yet, infer the type from the input
# timestep, as opposed to the input_spec (should be equivalent). This
# typically shouldn't happen, but allows stand-alone use-cases in which the
# processor isn't run by a subtask or environment.
output_type = self._output_type or np.asarray(timestep.reward).dtype
rewards = []
for reward_preprocessor in self._reward_preprocessors:
timestep = reward_preprocessor.process(timestep)
if not np.isscalar(timestep.reward) and self._flatten_rewards:
rewards.extend(timestep.reward)
else:
rewards.append(timestep.reward)
reward = self._combination_strategy(rewards)
# Cast (possibly nested) reward to expected dtype.
reward = tree.map_structure(
lambda r: _cast_reward_to_type(r, output_type), reward)
return timestep.replace(reward=reward)
@overrides(timestep_preprocessor.TimestepPreprocessor)
def _output_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
for reward_preprocessor in self._reward_preprocessors:
input_spec = reward_preprocessor.setup_io_spec(input_spec)
self._output_type = input_spec.reward_spec.dtype
return input_spec.replace(reward_spec=specs.Array(
shape=self._output_shape, dtype=self._output_type))
def render_frame(self, canvas) -> None:
"""Callback to allow preprocessors to draw on a canvas."""
for preprocessor in self._reward_preprocessors:
if isinstance(preprocessor, core.Renderable):
preprocessor.render_frame(canvas)
def as_list(self) -> timestep_preprocessor.TimestepPreprocessorTree:
"""Recursively lists processor and any child processor lists."""
return [self, [proc.as_list() for proc in self._reward_preprocessors]]
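# Illustrative usage sketch (not part of the library API): combining a shaped
# distance reward with a sparse one. The observation names are hypothetical;
# in real use the owning subtask or environment calls `setup_io_spec` on the
# result before `process` is invoked.
def _example_combine_rewards():
  return CombineRewards(
      reward_preprocessors=[
          L2Reward(obs0='gripper_pos', obs1='target_pos'),
          ThresholdedL2Reward(
              obs0='gripper_pos', obs1='target_pos', threshold=0.05),
      ],
      combination_strategy=np.max)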
| dm_robotics-main | py/agentflow/preprocessors/rewards.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Tests for dm_robotics.agentflow.preprocessors.rewards."""
from typing import Sequence, Text, Union
from absl.testing import absltest
from absl.testing import parameterized
from dm_env import specs
from dm_robotics.agentflow import spec_utils
from dm_robotics.agentflow import testing_functions
from dm_robotics.agentflow.decorators import overrides
from dm_robotics.agentflow.preprocessors import rewards
from dm_robotics.agentflow.preprocessors import timestep_preprocessor
import numpy as np
def random_scalar_spec(name):
return specs.Array(
shape=(), dtype=np.random.choice([np.float32, np.float64]), name=name)
def create_timestep_spec(observation_spec):
return spec_utils.TimeStepSpec(
observation_spec,
reward_spec=random_scalar_spec('reward'),
discount_spec=random_scalar_spec('discount'))
def create_timestep(
input_spec: spec_utils.TimeStepSpec,
name: Text,
value: Sequence[float],
) -> timestep_preprocessor.PreprocessorTimestep:
dtype = input_spec.observation_spec[name].dtype
observation = testing_functions.valid_value(input_spec.observation_spec)
observation[name] = np.asarray(value, dtype=dtype)
timestep = testing_functions.random_timestep(
spec=input_spec, observation=observation)
return timestep_preprocessor.PreprocessorTimestep.from_environment_timestep(
timestep, pterm=0.0)
class RewardsTest(absltest.TestCase):
def test_threshold_reward(self):
input_spec = create_timestep_spec({})
threshold = 0.5
hi = 1.
lo = 0.
reward_preprocessor = rewards.ThresholdReward(
threshold=threshold, hi=hi, lo=lo)
output_spec = reward_preprocessor.setup_io_spec(input_spec)
# Assert discount specs are unchanged.
self.assertEqual(input_spec.reward_spec,
output_spec.reward_spec)
self.assertEqual(input_spec.discount_spec,
output_spec.discount_spec)
# Test logic.
to_target_dtype = input_spec.reward_spec.dtype.type
random_input = testing_functions.random_timestep(spec=input_spec)
# Greater than threshold -> hi.
random_input = random_input._replace(reward=to_target_dtype(0.8))
output = reward_preprocessor.process(random_input)
np.testing.assert_allclose(output.reward, to_target_dtype(hi))
# Equal to threshold -> hi.
random_input = random_input._replace(reward=to_target_dtype(0.5))
output = reward_preprocessor.process(random_input)
np.testing.assert_allclose(output.reward, to_target_dtype(hi))
# Less than threshold -> lo.
random_input = random_input._replace(reward=to_target_dtype(0.4))
output = reward_preprocessor.process(random_input)
np.testing.assert_allclose(output.reward, to_target_dtype(lo))
def test_l2_spec_updated_properly(self):
observation_spec = {
'obs0': testing_functions.random_array_spec(),
'obs1': testing_functions.random_array_spec()
}
input_spec = create_timestep_spec(observation_spec)
reward_preprocessor = rewards.L2Reward(
obs0='obs0', obs1='obs1', reward_scale=1.0, reward_offset=1.0)
output_spec = reward_preprocessor.setup_io_spec(input_spec)
# Assert discount specs are unchanged.
self.assertEqual(input_spec.discount_spec,
output_spec.discount_spec)
# Assert observation specs are unchanged.
self.assertEqual(input_spec.observation_spec,
output_spec.observation_spec)
# Assert reward specs match observation spec dtypes.
type0 = input_spec.observation_spec['obs0']
type1 = input_spec.observation_spec['obs1']
targ_type = np.promote_types(type0, type1)
self.assertEqual(output_spec.reward_spec.dtype, targ_type)
def test_thresholded_l2_spec_unchanged(self):
observation_spec = {
'obs0': testing_functions.random_array_spec(),
'obs1': testing_functions.random_array_spec()
}
input_spec = create_timestep_spec(observation_spec)
reward_preprocessor = rewards.ThresholdedL2Reward(
obs0='obs0', obs1='obs1', threshold=0.5, reward=1.0)
output_spec = reward_preprocessor.setup_io_spec(input_spec)
self.assertEqual(input_spec, output_spec)
def test_spec_validation_missing_observation(self):
observation_spec = {
'wrong_name':
specs.Array(shape=(2,), dtype=np.int32, name='bool as two'),
}
input_spec = create_timestep_spec(observation_spec)
reward_preprocessors = [
rewards.L2Reward(
obs0='obs0', obs1='obs1', reward_scale=1.0, reward_offset=1.0),
rewards.ThresholdedL2Reward(
obs0='obs0', obs1='obs1', threshold=0.5, reward=1.0)
]
for rp in reward_preprocessors:
try:
rp.setup_io_spec(input_spec)
self.fail('Exception expected due to missing observation')
except KeyError:
pass # expected
def test_l2_spec_numerics(self):
random_arr_spec = testing_functions.random_array_spec()
observation_spec = {
'obs0': random_arr_spec,
'obs1': random_arr_spec
}
input_spec = create_timestep_spec(observation_spec)
reward_preprocessor = rewards.L2Reward(
obs0='obs0', obs1='obs1', reward_scale=1.5, reward_offset=2.0)
output_spec = reward_preprocessor.setup_io_spec(input_spec)
timestep = testing_functions.valid_value(input_spec)
processed_timestep = reward_preprocessor.process(timestep)
dist = np.linalg.norm(timestep.observation['obs0'] -
timestep.observation['obs1'])
expected_reward = output_spec.reward_spec.dtype.type(-1 * dist * 1.5 + 2.0)
self.assertEqual(expected_reward,
processed_timestep.reward)
self.assertEqual(expected_reward.dtype,
processed_timestep.reward.dtype)
def test_thresholded_l2_spec_numerics(self):
random_arr_spec = testing_functions.random_array_spec()
observation_spec = {
'obs0': random_arr_spec,
'obs1': random_arr_spec
}
input_spec = create_timestep_spec(observation_spec)
reward_preprocessor = rewards.ThresholdedL2Reward(
obs0='obs0', obs1='obs1', threshold=0.5, reward=1.0)
output_spec = reward_preprocessor.setup_io_spec(input_spec)
timestep = testing_functions.valid_value(input_spec)
processed_timestep = reward_preprocessor.process(timestep)
self.assertEqual(output_spec.reward_spec.dtype,
processed_timestep.reward.dtype)
class ComputeRewardTest(absltest.TestCase):
def test_scalar_reward_computed_based_on_observation(self):
reward_fn = lambda obs: obs['obs'][0]
observation_spec = {
'obs': specs.Array(shape=(2,), dtype=np.float32)
}
input_spec = create_timestep_spec(observation_spec)
input_timestep = create_timestep(input_spec, 'obs', [2.0, 3.0])
reward_preprocessor = rewards.ComputeReward(reward_function=reward_fn)
reward_preprocessor.setup_io_spec(input_spec)
output_timestep = reward_preprocessor.process(input_timestep)
self.assertEqual(output_timestep.reward, 2.0)
def test_array_rewards_fail_without_correct_shape(self):
reward_fn = lambda obs: obs['obs']
observation_spec = {
'obs': specs.Array(shape=(2,), dtype=np.float32)
}
input_spec = create_timestep_spec(observation_spec)
input_timestep = create_timestep(input_spec, 'obs', [2.0, 3.0])
reward_preprocessor = rewards.ComputeReward(reward_function=reward_fn)
with self.assertRaises(ValueError):
reward_preprocessor.setup_io_spec(input_spec)
reward_preprocessor.process(input_timestep)
def test_array_rewards_succeeds_with_correct_shape(self):
reward_fn = lambda obs: obs['obs']
observation_spec = {
'obs': specs.Array(shape=(2,), dtype=np.float32)
}
input_spec = create_timestep_spec(observation_spec)
input_timestep = create_timestep(input_spec, 'obs', [2.0, 3.0])
reward_preprocessor = rewards.ComputeReward(
reward_function=reward_fn, output_spec_shape=(2,))
reward_preprocessor.setup_io_spec(input_spec)
output_timestep = reward_preprocessor.process(input_timestep)
np.testing.assert_allclose(output_timestep.reward, [2.0, 3.0])
class CombineRewardsTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self._reward_1 = _TestReward(reward_value=1.)
self._reward_10 = _TestReward(reward_value=10.)
observation_spec = {
'unused_obs': testing_functions.random_array_spec(
shape=(2,),
minimum=np.asarray([0, 0]),
maximum=np.asarray([10, 10]))
}
self._input_spec = create_timestep_spec(observation_spec)
self._input_timestep = create_timestep(
self._input_spec, 'unused_obs', [2.0, 3.0])
def test_default_combination(self):
# Default combination is maximum of all rewards.
combined_reward = rewards.CombineRewards(
reward_preprocessors=[self._reward_1, self._reward_10])
combined_reward.setup_io_spec(self._input_spec)
output_timestep = combined_reward.process(self._input_timestep)
self.assertEqual(output_timestep.reward, 10.0)
def test_max_combination(self):
combined_reward = rewards.CombineRewards(
reward_preprocessors=[self._reward_1, self._reward_10],
combination_strategy=np.max)
combined_reward.setup_io_spec(self._input_spec)
output_timestep = combined_reward.process(self._input_timestep)
self.assertEqual(output_timestep.reward, 10.0)
def test_min_combination(self):
combined_reward = rewards.CombineRewards(
reward_preprocessors=[self._reward_1, self._reward_10],
combination_strategy=np.min)
combined_reward.setup_io_spec(self._input_spec)
output_timestep = combined_reward.process(self._input_timestep)
self.assertEqual(output_timestep.reward, 1.0)
def test_sum_combination(self):
combined_reward = rewards.CombineRewards(
reward_preprocessors=[self._reward_1, self._reward_10],
combination_strategy=np.sum)
combined_reward.setup_io_spec(self._input_spec)
output_timestep = combined_reward.process(self._input_timestep)
self.assertEqual(output_timestep.reward, 11.0)
def test_sum_combination_with_list_input(self):
reward_array = _TestReward(reward_value=np.ones(3))
combined_reward = rewards.CombineRewards(
reward_preprocessors=[self._reward_1, reward_array],
combination_strategy=np.sum)
combined_reward.setup_io_spec(self._input_spec)
output_timestep = combined_reward.process(self._input_timestep)
self.assertEqual(output_timestep.reward, 4.0)
def test_output_list_of_rewards_fails_without_correct_shape(self):
# Must update the output shape when returning an array of rewards.
with self.assertRaises(ValueError):
combined_reward = rewards.CombineRewards(
reward_preprocessors=[self._reward_1, self._reward_10],
combination_strategy=np.stack)
combined_reward.setup_io_spec(self._input_spec)
combined_reward.process(self._input_timestep)
def test_output_list_of_rewards_succeeds_with_correct_shape(self):
combined_reward = rewards.CombineRewards(
reward_preprocessors=[self._reward_1, self._reward_10],
combination_strategy=np.stack, output_spec_shape=(2,))
combined_reward.setup_io_spec(self._input_spec)
output_timestep = combined_reward.process(self._input_timestep)
np.testing.assert_allclose(output_timestep.reward, [1., 10.])
def test_processing_unflattened_rewards(self):
zero_rewards = _TestReward(np.zeros(3))
one_rewards = _TestReward(np.ones(3))
combined_reward = rewards.CombineRewards(
reward_preprocessors=[zero_rewards, one_rewards],
combination_strategy=lambda rewards: np.mean(rewards, axis=0),
output_spec_shape=(3,), flatten_rewards=False)
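# With flatten_rewards=False the strategy receives two (3,) rewards, and
# the element-wise mean over axis 0 is [0.5, 0.5, 0.5].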
combined_reward.setup_io_spec(self._input_spec)
output_timestep = combined_reward.process(self._input_timestep)
np.testing.assert_allclose(output_timestep.reward, [0.5, 0.5, 0.5])
# Check to make sure the flattened version gives a different result.
# Reset things to help set up the specs.
input_spec = self._input_spec.replace() # makes a copy of the spec.
zero_rewards = _TestReward(np.zeros(3))
one_rewards = _TestReward(np.ones(3))
combined_reward = rewards.CombineRewards(
reward_preprocessors=[zero_rewards, one_rewards],
combination_strategy=lambda rewards: np.mean(rewards, axis=0),
output_spec_shape=(), flatten_rewards=True)
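# With flatten_rewards=True the two (3,) rewards are concatenated into a
# single (6,) vector whose mean is the scalar 0.5.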
combined_reward.setup_io_spec(input_spec)
output_timestep = combined_reward.process(self._input_timestep)
self.assertEqual(output_timestep.reward, 0.5)
def test_staged_active_monotonic(self):
"""More stages above threshold mean more reward, no matter the values."""
reward_100 = _TestReward(reward_value=100.)
reward_0 = _TestReward(reward_value=0.)
staged_combination = rewards.StagedWithActiveThreshold(threshold=1e-6)
combined_reward_1_above_thresh = rewards.CombineRewards(
reward_preprocessors=[reward_100, reward_0],
combination_strategy=staged_combination)
reward_100 = _TestReward(reward_value=100.)
reward_001 = _TestReward(reward_value=0.001)
staged_combination = rewards.StagedWithActiveThreshold(threshold=1e-6)
combined_reward_2_above_thresh = rewards.CombineRewards(
reward_preprocessors=[reward_100, reward_001],
combination_strategy=staged_combination)
combined_reward_1_above_thresh.setup_io_spec(self._input_spec)
combined_reward_2_above_thresh.setup_io_spec(self._input_spec)
output_timestep_1_above_thresh = combined_reward_1_above_thresh.process(
self._input_timestep)
output_timestep_2_above_thresh = combined_reward_2_above_thresh.process(
self._input_timestep)
self.assertLess(output_timestep_1_above_thresh.reward,
output_timestep_2_above_thresh.reward)
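# Note: the expected values in the cases below are consistent with the
# staged reward being (i + min(r_i, 1)) / num_stages, where i is the
# highest-indexed stage whose reward exceeds the threshold, e.g.
# (3 + 0.8) / 5 = 0.76. This is inferred from the test expectations, not
# quoted from the implementation.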
@parameterized.named_parameters(
# Count 1, value 1.
('_contiguous', (1., 1., 0.5, 0.8, 0.), 0.9, 0.4),
# Count 3 despite 0.5 among them; value 0.8.
('_count_below_thresh', (1., 1., 0.5, 0.8, 0.), 0.7, 0.76),
# Count 3; value 1, NOT 100.
('_clip_final', (1., 1., 0.5, 100, 0.), 0.7, 0.8),
# Count 3 despite 500 among them.
('_clip_mid', (1., 1., 500, 0.8, 0.), 0.7, 0.76),
)
def test_staged_active_clipping(self, term_rewards, threshold,
expected_reward):
"""Terms are clipped if too large."""
reward_preprocessors = [_TestReward(reward_value=r) for r in term_rewards]
staged_combination = rewards.StagedWithActiveThreshold(threshold=threshold)
combined_reward = rewards.CombineRewards(
reward_preprocessors=reward_preprocessors,
combination_strategy=staged_combination)
combined_reward.setup_io_spec(self._input_spec)
output_timestep = combined_reward.process(self._input_timestep)
self.assertAlmostEqual(expected_reward, output_timestep.reward)
@parameterized.named_parameters(
# Should not affect the last stage, even if above thresh.
('_singleton_above_thresh', (0.92,), 0.9, 0.92, True),
# Should not affect the last stage, even if below thresh.
('_singleton_below_thresh', (0.82,), 0.9, 0.82, True),
# First two tasks are solved so we work on third; `assume` flag irrelevant.
('_monotonic_cumulative', (0.92, 0.91, 0.1), 0.9, 0.7, True),
# First two tasks are solved so we work on third; `assume` flag irrelevant.
('_monotonic_not_cumulative', (0.92, 0.91, 0.1), 0.9, 0.7, False),
# Second task is solved so we assume first is too and work on third.
('_not_monotonic_cumulative', (0.6, 0.91, 0.1), 0.9, 0.7, True),
# Second task is solved but first isn't, so we work on that.
('_not_monotonic_not_cumulative', (0.6, 0.91, 0.1), 0.9, 0.2, False),
# Nothing is solved so we work on the first task.
('_none_solved', (0.6, 0.2, 0.6), 0.9, 0.2, False),
)
def test_staged_success(self, term_rewards, threshold, expected_reward,
assume_cumulative_success):
"""Terms are clipped if too large."""
reward_preprocessors = [_TestReward(reward_value=r) for r in term_rewards]
staged_combination = rewards.StagedWithSuccessThreshold(
threshold=threshold,
assume_cumulative_success=assume_cumulative_success)
combined_reward = rewards.CombineRewards(
reward_preprocessors=reward_preprocessors,
combination_strategy=staged_combination)
combined_reward.setup_io_spec(self._input_spec)
output_timestep = combined_reward.process(self._input_timestep)
self.assertAlmostEqual(expected_reward, output_timestep.reward)
class _TestReward(timestep_preprocessor.TimestepPreprocessor):
def __init__(self, reward_value: Union[float, int, np.ndarray]):
super().__init__()
self._reward_value = reward_value
@overrides(timestep_preprocessor.TimestepPreprocessor)
def _process_impl(
self, timestep: timestep_preprocessor.PreprocessorTimestep
) -> timestep_preprocessor.PreprocessorTimestep:
return timestep._replace(reward=self._reward_value)
@overrides(timestep_preprocessor.TimestepPreprocessor)
def _output_spec(
self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
if np.isscalar(self._reward_value):
self._reward_value = input_spec.reward_spec.dtype.type(self._reward_value)
else:
return input_spec.replace(reward_spec=specs.Array(
self._reward_value.shape, self._reward_value.dtype))
return input_spec
if __name__ == '__main__':
absltest.main()
| dm_robotics-main | py/agentflow/preprocessors/rewards_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Tests for observations_transforms."""
import copy
from typing import Mapping, Optional, Type
from absl.testing import absltest
from absl.testing import parameterized
import cv2
import dm_env
from dm_env import specs
from dm_robotics.agentflow import spec_utils
from dm_robotics.agentflow import testing_functions
from dm_robotics.agentflow.preprocessors import observation_transforms
from dm_robotics.agentflow.preprocessors import timestep_preprocessor
from dm_robotics.transformations import transformations as tr
import numpy as np
_DEFAULT_TYPE = np.float64
def scalar_array_spec(name: str, dtype: Type[np.floating] = _DEFAULT_TYPE):
return specs.Array(shape=(), dtype=dtype, name=name)
@parameterized.parameters(
(observation_transforms.CastPreprocessor, float, float, float),
(observation_transforms.CastPreprocessor, np.float32, float,
float),
(observation_transforms.CastPreprocessor, np.float64, float,
float),
(observation_transforms.CastPreprocessor, float, np.float32,
np.float32),
(observation_transforms.CastPreprocessor, np.float32, np.float32,
np.float32),
(observation_transforms.CastPreprocessor, np.float64, np.float32,
np.float32),
(observation_transforms.CastPreprocessor, float, np.float64,
np.float64),
(observation_transforms.CastPreprocessor, np.float32, np.float64,
np.float64),
(observation_transforms.CastPreprocessor, np.float64, np.float64,
np.float64),
(observation_transforms.DowncastFloatPreprocessor, float, float,
float),
(observation_transforms.DowncastFloatPreprocessor, np.float32,
float, np.float32),
(observation_transforms.DowncastFloatPreprocessor, np.float64,
float, float),
(observation_transforms.DowncastFloatPreprocessor, float,
np.float32, np.float32),
(observation_transforms.DowncastFloatPreprocessor, np.float32,
np.float32, np.float32),
(observation_transforms.DowncastFloatPreprocessor, np.float64,
np.float32, np.float32),
(observation_transforms.DowncastFloatPreprocessor, float,
np.float64, float),
(observation_transforms.DowncastFloatPreprocessor, np.float32,
np.float64, np.float32),
(observation_transforms.DowncastFloatPreprocessor, np.float64,
np.float64, np.float64),
(observation_transforms.DowncastFloatPreprocessor, np.float128,
np.float64, np.float64),
# Non-floating point types should not be interfered with.
(observation_transforms.DowncastFloatPreprocessor, np.int32,
np.float64, np.int32),
)
class CastAndDowncastPreprocessorTest(parameterized.TestCase):
def testCastPreprocessor_Array(
self, processor_type: timestep_preprocessor.TimestepPreprocessor,
src_type: Type[np.number], transform_type: Type[np.number],
expected_type: Type[np.number]):
# Arrange:
name = testing_functions.random_string(3)
processor = processor_type(transform_type)
input_observation_spec = {
name: specs.Array(shape=(2,), dtype=src_type, name=name),
}
expected_observation_spec = {
name: specs.Array(shape=(2,), dtype=expected_type, name=name),
}
input_reward_spec = scalar_array_spec(dtype=src_type,
name='reward')
expected_reward_spec = scalar_array_spec(dtype=expected_type,
name='reward')
input_discount_spec = scalar_array_spec(dtype=src_type,
name='discount')
expected_discount_spec = scalar_array_spec(dtype=expected_type,
name='discount')
input_timestep_spec = spec_utils.TimeStepSpec(
observation_spec=input_observation_spec,
reward_spec=input_reward_spec,
discount_spec=input_discount_spec)
input_timestep = timestep_preprocessor.PreprocessorTimestep(
step_type=np.random.choice(list(dm_env.StepType)),
reward=src_type(0.1),
discount=src_type(0.2),
observation={name: np.asarray([0.3, 0.4], dtype=src_type)},
pterm=0.1,
result=None)
# Act:
spec_utils.validate_timestep(input_timestep_spec, input_timestep)
output_timestep_spec = processor.setup_io_spec(input_timestep_spec)
# Assert:
expected_timestep = timestep_preprocessor.PreprocessorTimestep(
step_type=input_timestep.step_type,
reward=expected_type(0.1),
discount=expected_type(0.2),
observation={name: np.asarray([0.3, 0.4], dtype=expected_type)},
pterm=input_timestep.pterm,
result=None)
self.assertEqual(output_timestep_spec.observation_spec,
expected_observation_spec)
self.assertEqual(output_timestep_spec.reward_spec, expected_reward_spec)
self.assertEqual(output_timestep_spec.discount_spec, expected_discount_spec)
output_timestep = processor.process(input_timestep)
spec_utils.validate_timestep(output_timestep_spec, output_timestep)
np.testing.assert_almost_equal(output_timestep.observation[name],
expected_timestep.observation[name])
np.testing.assert_almost_equal(output_timestep.reward,
expected_timestep.reward)
np.testing.assert_almost_equal(output_timestep.discount,
expected_timestep.discount)
def testCastPreprocessor_BoundedArray(
self, processor_type: timestep_preprocessor.TimestepPreprocessor,
src_type: Type[np.number], transform_type: Type[np.number],
expected_type: Type[np.number]):
"""Same as previous test, but using BoundedArray specs."""
# Arrange:
name = testing_functions.random_string(3)
processor = processor_type(transform_type)
input_minimum = np.asarray([0.3, 0.4], dtype=src_type)
input_maximum = np.asarray([0.5, 0.6], dtype=src_type)
input_observation_spec = {
name:
specs.BoundedArray(
shape=(2,),
dtype=src_type,
minimum=input_minimum,
maximum=input_maximum,
name=name),
}
input_reward_spec = scalar_array_spec(name='reward', dtype=src_type)
input_discount_spec = scalar_array_spec(name='discount', dtype=src_type)
input_timestep_spec = spec_utils.TimeStepSpec(
observation_spec=input_observation_spec,
reward_spec=input_reward_spec,
discount_spec=input_discount_spec)
input_timestep = timestep_preprocessor.PreprocessorTimestep(
step_type=np.random.choice(list(dm_env.StepType)),
reward=src_type(0.1),
discount=src_type(0.2),
observation={name: np.asarray([0.4, 0.5], dtype=src_type)},
pterm=0.1,
result=None)
# Act:
spec_utils.validate_timestep(input_timestep_spec, input_timestep)
output_timestep_spec = processor.setup_io_spec(input_timestep_spec)
# Assert:
expected_minimum = np.asarray([0.3, 0.4], dtype=expected_type)
expected_maximum = np.asarray([0.5, 0.6], dtype=expected_type)
expected_output_observation_spec = {
name:
specs.BoundedArray(
shape=(2,),
dtype=expected_type,
minimum=expected_minimum,
maximum=expected_maximum,
name=name),
}
expected_output_reward_spec = scalar_array_spec(
name='reward', dtype=expected_type)
expected_output_discount_spec = scalar_array_spec(
name='discount', dtype=expected_type)
expected_output_timestep = timestep_preprocessor.PreprocessorTimestep(
step_type=input_timestep.step_type,
reward=expected_type(0.1),
discount=expected_type(0.2),
observation={name: np.asarray([0.4, 0.5], dtype=expected_type)},
pterm=input_timestep.pterm,
result=None)
self.assertEqual(
set(output_timestep_spec.observation_spec.keys()),
set(expected_output_observation_spec.keys()))
spec_utils.verify_specs_equal_bounded(
output_timestep_spec.observation_spec[name],
expected_output_observation_spec[name])
self.assertEqual(output_timestep_spec.reward_spec,
expected_output_reward_spec)
self.assertEqual(output_timestep_spec.discount_spec,
expected_output_discount_spec)
output_timestep = processor.process(input_timestep)
spec_utils.validate_timestep(output_timestep_spec, output_timestep)
np.testing.assert_almost_equal(output_timestep.observation[name],
expected_output_timestep.observation[name])
np.testing.assert_almost_equal(output_timestep.reward,
expected_output_timestep.reward)
np.testing.assert_almost_equal(output_timestep.discount,
expected_output_timestep.discount)
def testCastPreprocessor_RewardArray(
self, processor_type: timestep_preprocessor.TimestepPreprocessor,
src_type: Type[np.number], transform_type: Type[np.number],
expected_type: Type[np.number]):
# Arrange:
name = testing_functions.random_string(3)
processor = processor_type(transform_type)
input_observation_spec = {
name: specs.Array(shape=(2,), dtype=src_type, name=name),
}
expected_observation_spec = {
name: specs.Array(shape=(2,), dtype=expected_type, name=name),
}
input_reward_spec = specs.Array(shape=(3,), dtype=src_type,
name='reward')
expected_reward_spec = specs.Array(
shape=(3,), dtype=expected_type, name='reward')
input_discount_spec = scalar_array_spec(dtype=src_type,
name='discount')
expected_discount_spec = scalar_array_spec(dtype=expected_type,
name='discount')
input_timestep_spec = spec_utils.TimeStepSpec(
observation_spec=input_observation_spec,
reward_spec=input_reward_spec,
discount_spec=input_discount_spec)
# Some test data that matches the src_type.
if np.issubdtype(src_type, np.floating):
numbers = (0.1, 0.2, 0.3, 0.4, 0.1)
elif np.issubdtype(src_type, np.integer):
numbers = (1, 2, 3, 4, 5)
else:
raise ValueError(
'Only ints and floats are currently supported.')
input_timestep = timestep_preprocessor.PreprocessorTimestep(
step_type=np.random.choice(list(dm_env.StepType)),
reward=numbers[0] * np.ones(shape=(3,), dtype=src_type),
discount=src_type(numbers[1]),
observation={name: np.asarray(numbers[2:4], dtype=src_type)},
pterm=numbers[4],
result=None)
# Act:
spec_utils.validate_timestep(input_timestep_spec, input_timestep)
output_timestep_spec = processor.setup_io_spec(input_timestep_spec)
# Assert:
expected_timestep = timestep_preprocessor.PreprocessorTimestep(
step_type=input_timestep.step_type,
reward=numbers[0] * np.ones(shape=(3,), dtype=expected_type),
discount=expected_type(numbers[1]),
observation={name: np.asarray(numbers[2:4], dtype=expected_type)},
pterm=input_timestep.pterm,
result=None)
self.assertEqual(output_timestep_spec.observation_spec,
expected_observation_spec)
self.assertEqual(output_timestep_spec.reward_spec, expected_reward_spec)
self.assertEqual(output_timestep_spec.discount_spec, expected_discount_spec)
output_timestep = processor.process(input_timestep)
spec_utils.validate_timestep(output_timestep_spec, output_timestep)
np.testing.assert_almost_equal(output_timestep.observation[name],
expected_timestep.observation[name])
np.testing.assert_almost_equal(output_timestep.reward,
expected_timestep.reward)
np.testing.assert_almost_equal(output_timestep.discount,
expected_timestep.discount)
class RenameObservationsTest(absltest.TestCase):
def test_rename_observations(self):
preprocessor = observation_transforms.RenameObservations(
obs_mapping={'foo': 'pow', 'faw': 'biz'})
# Generate the input spec and input timestep
input_obs_spec = {
'foo': specs.Array(shape=(2,), dtype=np.float64, name='foo'),
'bar': specs.Array(shape=(2,), dtype=np.float64, name='bar'),
'faw': specs.Array(shape=(2,), dtype=np.float64, name='faw'),
}
input_spec = _build_unit_timestep_spec(observation_spec=input_obs_spec)
input_obs = {'foo': [1., 2.], 'bar': [3., 4.], 'faw': [5., 6.]}
input_timestep = dm_env.TimeStep(
step_type=dm_env.StepType.MID,
reward=_DEFAULT_TYPE(0.1),
discount=_DEFAULT_TYPE(0.8),
observation=input_obs)
# Setup expectations.
expected_output_spec = input_spec.replace(observation_spec={
'pow': specs.Array(shape=(2,), dtype=np.float64, name='pow'),
'bar': specs.Array(shape=(2,), dtype=np.float64, name='bar'),
'biz': specs.Array(shape=(2,), dtype=np.float64, name='biz'),
})
# Check the spec
output_spec = preprocessor.setup_io_spec(input_spec)
self.assertEqual(output_spec.observation_spec,
expected_output_spec.observation_spec)
# Check the timestep.
output_timestep = preprocessor.process(input_timestep)
spec_utils.validate_timestep(output_spec, output_timestep)
np.testing.assert_array_equal(output_timestep.observation['pow'], [1., 2.])
def test_failure_when_renaming_missing_observations(self):
preprocessor = observation_transforms.RenameObservations(
obs_mapping={'foo': 'pow', 'faw': 'biz'})
# Generate the input spec and input timestep
input_obs_spec = {
'foo': specs.Array(shape=(2,), dtype=np.float64, name='foo'),
}
input_spec = _build_unit_timestep_spec(observation_spec=input_obs_spec)
# Calculating the output spec should fail.
with self.assertRaises(observation_transforms.MisconfigurationError):
preprocessor.setup_io_spec(input_spec)
def test_failure_for_duplicate_rename_targets(self):
obs_mapping = {'foo': 'pow', 'bar': 'pow'}
# Initialization should fail.
with self.assertRaises(observation_transforms.MisconfigurationError):
observation_transforms.RenameObservations(obs_mapping)
def test_failure_for_conflicting_rename_targets(self):
# Create the spec and timestep.
preprocessor = observation_transforms.RenameObservations(
obs_mapping={'foo': 'pow', 'faw': 'bar'})
# Generate the input spec and input timestep
input_obs_spec = {
'foo': specs.Array(shape=(2,), dtype=np.float64, name='foo'),
'faw': specs.Array(shape=(2,), dtype=np.float64, name='faw'),
'bar': specs.Array(shape=(2,), dtype=np.float64, name='bar'),
}
input_spec = _build_unit_timestep_spec(observation_spec=input_obs_spec)
# Calculating the output spec should fail.
with self.assertRaises(observation_transforms.MisconfigurationError):
preprocessor.setup_io_spec(input_spec)
class MergeObservationsTest(absltest.TestCase):
def test_merge_observation(self):
preprocessor = observation_transforms.MergeObservations(
obs_to_merge=['foo', 'bar'], new_obs='baz')
# Generate the input spec and input timestep
input_obs_spec = {
'foo': specs.Array(shape=(2,), dtype=np.float64, name='foo'),
'bar': specs.Array(shape=(2,), dtype=np.float64, name='bar'),
'faw': specs.Array(shape=(2,), dtype=np.float64, name='faw'),
}
input_spec = _build_unit_timestep_spec(observation_spec=input_obs_spec)
input_obs = {'foo': [1., 2.], 'bar': [3., 4.], 'faw': [3., 4.]}
input_timestep = dm_env.TimeStep(
step_type=dm_env.StepType.MID,
reward=_DEFAULT_TYPE(0.1),
discount=_DEFAULT_TYPE(0.8),
observation=input_obs)
# Setup expectations.
expected_output_spec = input_spec.replace(observation_spec={
'baz': specs.Array(shape=(4,), dtype=np.float64, name='baz'),
'faw': specs.Array(shape=(2,), dtype=np.float64, name='faw')
})
# Check the spec
output_spec = preprocessor.setup_io_spec(input_spec)
self.assertEqual(output_spec.observation_spec,
expected_output_spec.observation_spec)
# Check the timestep.
output_timestep = preprocessor.process(input_timestep)
spec_utils.validate_timestep(output_spec, output_timestep)
np.testing.assert_array_equal(output_timestep.observation['baz'],
[1., 2., 3., 4.])
def test_failure_when_merging_missing_observation(self):
preprocessor = observation_transforms.MergeObservations(
obs_to_merge=['foo', 'bar'], new_obs='baz')
# Generate the input spec
input_obs_spec = {
'foo': specs.Array(shape=(2,), dtype=np.float64, name='foo')}
input_spec = _build_unit_timestep_spec(observation_spec=input_obs_spec)
# Calculating the output spec should fail.
with self.assertRaises(observation_transforms.MisconfigurationError):
preprocessor.setup_io_spec(input_spec)
def test_failure_for_conflicting_new_name(self):
preprocessor = observation_transforms.MergeObservations(
obs_to_merge=['foo', 'bar'], new_obs='faw')
# Generate the input spec and input timestep
input_obs_spec = {
'foo': specs.Array(shape=(2,), dtype=np.float64, name='foo'),
'bar': specs.Array(shape=(2,), dtype=np.float64, name='bar'),
'faw': specs.Array(shape=(2,), dtype=np.float64, name='faw'),
}
input_spec = _build_unit_timestep_spec(observation_spec=input_obs_spec)
# Calculating the output spec should fail.
with self.assertRaises(observation_transforms.MisconfigurationError):
preprocessor.setup_io_spec(input_spec)
class CropImageObservationTest(absltest.TestCase):
def setUp(self):
super().setUp()
self._input_obs_name = 'input_obs'
self._output_obs_name = 'output_obs'
# This has a shape of (4, 5, 3).
self._input_spec = testing_functions.random_array_spec(
shape=(4, 5, 3), dtype=float, name=self._input_obs_name)
self._input_observation_spec = {self._input_obs_name: self._input_spec}
self._input_obs_value = testing_functions.valid_value(self._input_spec)
self._input_timestep_spec = testing_functions.random_timestep_spec(
observation_spec=self._input_observation_spec)
self._input_timestep = testing_functions.random_timestep(
spec=self._input_timestep_spec,
observation={self._input_obs_name: self._input_obs_value})
spec_utils.validate_timestep(self._input_timestep_spec,
self._input_timestep)
def _get_expected_spec(self, value: np.ndarray):
return testing_functions.random_array_spec(
shape=value.shape, dtype=value.dtype, name=self._output_obs_name)
def testFullCrop(self):
"""Don't modify the input at all."""
processor = observation_transforms.CropImageObservation(
input_obs_name=self._input_obs_name,
output_obs_name=self._output_obs_name,
crop_width_relative=1.0,
crop_height_relative=1.0,
x_offset_relative=0.0,
y_offset_relative=0.0)
expected_value = self._input_obs_value
output_timestep_spec = processor.setup_io_spec(self._input_timestep_spec)
self.assertIn(self._output_obs_name, output_timestep_spec.observation_spec)
spec_utils.verify_specs_equal_unbounded(
self._input_spec.replace(name=self._output_obs_name),
output_timestep_spec.observation_spec[self._output_obs_name])
output_timestep = processor.process(self._input_timestep)
spec_utils.validate_timestep(output_timestep_spec, output_timestep)
np.testing.assert_almost_equal(
output_timestep.observation[self._output_obs_name], expected_value)
def testCropNoOffset(self):
"""Crop to a region that is in a corner of the original observation."""
processor = observation_transforms.CropImageObservation(
input_obs_name=self._input_obs_name,
output_obs_name=self._output_obs_name,
crop_width_relative=0.4,
crop_height_relative=0.75,
x_offset_relative=0.0,
y_offset_relative=0.0)
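# Crop size: 0.4 * 5 = 2 columns and 0.75 * 4 = 3 rows, anchored at the
# top-left corner of the input.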
expected_value = self._input_obs_value[:3, :2]
output_timestep_spec = processor.setup_io_spec(self._input_timestep_spec)
self.assertIn(self._output_obs_name, output_timestep_spec.observation_spec)
spec_utils.verify_specs_equal_unbounded(
self._get_expected_spec(expected_value),
output_timestep_spec.observation_spec[self._output_obs_name])
output_timestep = processor.process(self._input_timestep)
spec_utils.validate_timestep(output_timestep_spec, output_timestep)
np.testing.assert_almost_equal(
output_timestep.observation[self._output_obs_name], expected_value)
def testSquareCropNoOffset(self):
"""Crop to a region that is in a corner of the original observation.
Leaving out the height parameter should default to a square crop.
"""
processor = observation_transforms.CropImageObservation(
input_obs_name=self._input_obs_name,
output_obs_name=self._output_obs_name,
crop_width_relative=0.4,
x_offset_relative=0.0,
y_offset_relative=0.0)
expected_value = self._input_obs_value[:2, :2]
output_timestep_spec = processor.setup_io_spec(self._input_timestep_spec)
self.assertIn(self._output_obs_name, output_timestep_spec.observation_spec)
spec_utils.verify_specs_equal_unbounded(
self._get_expected_spec(expected_value),
output_timestep_spec.observation_spec[self._output_obs_name])
output_timestep = processor.process(self._input_timestep)
spec_utils.validate_timestep(output_timestep_spec, output_timestep)
np.testing.assert_almost_equal(
output_timestep.observation[self._output_obs_name], expected_value)
def testCropWithOffset(self):
"""Crop to the center of the observation."""
processor = observation_transforms.CropImageObservation(
input_obs_name=self._input_obs_name,
output_obs_name=self._output_obs_name,
crop_width_relative=0.6,
crop_height_relative=0.5,
x_offset_relative=0.5,
y_offset_relative=0.5)
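# Crop size: 0.5 * 4 = 2 rows by 0.6 * 5 = 3 columns; offsets of 0.5
# center the crop, i.e. rows 1:3 and columns 1:4 of the input.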
expected_value = self._input_obs_value[1:3, 1:4]
output_timestep_spec = processor.setup_io_spec(self._input_timestep_spec)
self.assertIn(self._output_obs_name, output_timestep_spec.observation_spec)
spec_utils.verify_specs_equal_unbounded(
self._get_expected_spec(expected_value),
output_timestep_spec.observation_spec[self._output_obs_name])
output_timestep = processor.process(self._input_timestep)
spec_utils.validate_timestep(output_timestep_spec, output_timestep)
np.testing.assert_almost_equal(
output_timestep.observation[self._output_obs_name], expected_value)
def testInvalidParams(self):
"""Ensure that invalid parameters cause Exceptions."""
# Zero width and height are invalid
with self.assertRaisesRegex(ValueError, 'zero'):
_ = observation_transforms.CropImageObservation(
input_obs_name=self._input_obs_name,
output_obs_name=self._output_obs_name,
crop_width_relative=0.,
crop_height_relative=0.,
x_offset_relative=0.,
y_offset_relative=0.)
# Negative width is invalid
with self.assertRaisesRegex(ValueError, 'width must be between'):
_ = observation_transforms.CropImageObservation(
input_obs_name=self._input_obs_name,
output_obs_name=self._output_obs_name,
crop_width_relative=-1.,
crop_height_relative=1.,
x_offset_relative=0.,
y_offset_relative=0.)
# Height > 1.0 is invalid
with self.assertRaisesRegex(ValueError, 'height must be between'):
_ = observation_transforms.CropImageObservation(
input_obs_name=self._input_obs_name,
output_obs_name=self._output_obs_name,
crop_width_relative=1.,
crop_height_relative=1.5,
x_offset_relative=0.,
y_offset_relative=0.)
# Offset > 1.0 is invalid
with self.assertRaisesRegex(ValueError, 'offset must be between'):
_ = observation_transforms.CropImageObservation(
input_obs_name=self._input_obs_name,
output_obs_name=self._output_obs_name,
crop_width_relative=0.6,
crop_height_relative=1.,
x_offset_relative=1.5,
y_offset_relative=0.)
class CropSquareAndResizeTest(absltest.TestCase):
def setUp(self):
super().setUp()
self._input_obs_name = 'input_obs'
self._output_obs_name = 'output_obs'
# This has a shape of (4, 5).
self._input_spec = testing_functions.random_array_spec(
shape=(4, 5), dtype=float, name=self._input_obs_name)
self._input_observation_spec = {self._input_obs_name: self._input_spec}
self._input_obs_value = testing_functions.valid_value(self._input_spec)
self._input_timestep_spec = testing_functions.random_timestep_spec(
observation_spec=self._input_observation_spec)
self._input_timestep = testing_functions.random_timestep(
spec=self._input_timestep_spec,
observation={self._input_obs_name: self._input_obs_value})
spec_utils.validate_timestep(self._input_timestep_spec,
self._input_timestep)
def _get_expected_spec(self, value: np.ndarray):
return testing_functions.random_array_spec(
shape=value.shape, dtype=value.dtype, name=self._output_obs_name)
def testCropNoOffset(self):
"""Crop to a region that is in a corner of the original observation."""
processor = observation_transforms.CropSquareAndResize(
input_obs_name=self._input_obs_name,
output_obs_name=self._output_obs_name,
crop_width_relative=0.8,
side_length_pixels=4,
x_offset_relative=0.0,
y_offset_relative=0.0)
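# Crop size: 0.8 * 5 = 4 px square; with side_length_pixels=4 no
# resizing is needed.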
expected_value = self._input_obs_value[:4, :4]
output_timestep_spec = processor.setup_io_spec(self._input_timestep_spec)
self.assertIn(self._output_obs_name, output_timestep_spec.observation_spec)
spec_utils.verify_specs_equal_unbounded(
self._get_expected_spec(expected_value),
output_timestep_spec.observation_spec[self._output_obs_name])
output_timestep = processor.process(self._input_timestep)
spec_utils.validate_timestep(output_timestep_spec, output_timestep)
np.testing.assert_almost_equal(
output_timestep.observation[self._output_obs_name], expected_value)
def testScaledCropNoOffset(self):
"""Crop to a region that is in a corner of the original observation."""
processor = observation_transforms.CropSquareAndResize(
input_obs_name=self._input_obs_name,
output_obs_name=self._output_obs_name,
crop_width_relative=0.8,
side_length_pixels=8,
x_offset_relative=0.0,
y_offset_relative=0.0,
interpolation=cv2.INTER_NEAREST)
# Nearest neighbor sampling should just duplicate the original pixels
expected_value = np.repeat(
np.repeat(self._input_obs_value[:4, :4], 2, axis=0), 2, axis=1)
output_timestep_spec = processor.setup_io_spec(self._input_timestep_spec)
self.assertIn(self._output_obs_name, output_timestep_spec.observation_spec)
spec_utils.verify_specs_equal_unbounded(
self._get_expected_spec(expected_value),
output_timestep_spec.observation_spec[self._output_obs_name])
output_timestep = processor.process(self._input_timestep)
spec_utils.validate_timestep(output_timestep_spec, output_timestep)
np.testing.assert_almost_equal(
output_timestep.observation[self._output_obs_name], expected_value)
class PoseRelativeTest(absltest.TestCase):
def _check_spec_float_unchanged(self, dtype):
preprocessor = observation_transforms.PoseRelativeToEpisodeStart(
pos_obs_name='pos', quat_obs_name='quat')
# Generate the input spec and input timestep
input_obs_spec = {
'pos': specs.Array(shape=(3,), dtype=dtype, name='pos'),
'quat': specs.Array(shape=(4,), dtype=dtype, name='quat'),
}
input_spec = testing_functions.random_timestep_spec(
observation_spec=input_obs_spec)
first_input_timestep = testing_functions.random_timestep(
spec=input_spec,
step_type=dm_env.StepType.FIRST)
# Setup expectations.
expected_output_spec = input_spec
# Check the spec
output_spec = preprocessor.setup_io_spec(input_spec)
self.assertEqual(output_spec.observation_spec,
expected_output_spec.observation_spec)
# Check the timestep.
output_timestep = preprocessor.process(first_input_timestep)
spec_utils.validate_timestep(output_spec, output_timestep)
def test_spec_float32_unchanged(self):
self._check_spec_float_unchanged(dtype=np.float32)
def test_spec_float64_unchanged(self):
self._check_spec_float_unchanged(dtype=np.float64)
def test_initial_observations(self):
preprocessor = observation_transforms.PoseRelativeToEpisodeStart(
pos_obs_name='pos', quat_obs_name='quat')
# Generate the input spec and input timestep
input_obs_spec = {
'pos': specs.Array(shape=(3,), dtype=np.float64, name='pos'),
'quat': specs.Array(shape=(4,), dtype=np.float64, name='quat'),
}
input_spec = testing_functions.random_timestep_spec(
observation_spec=input_obs_spec)
input_obs = {
'pos': [1.0, -1.5, 3.2],
'quat': tr.euler_to_quat([0.1, 0.2, 0.3])
}
first_input_timestep = testing_functions.random_timestep(
spec=input_spec, step_type=dm_env.StepType.FIRST, observation=input_obs)
# Setup expectations.
expected_output_spec = input_spec
# Check the spec
output_spec = preprocessor.setup_io_spec(input_spec)
self.assertEqual(output_spec.observation_spec,
expected_output_spec.observation_spec)
# Check the timestep.
output_timestep = preprocessor.process(first_input_timestep)
spec_utils.validate_timestep(output_spec, output_timestep)
output_pos = output_timestep.observation['pos']
np.testing.assert_array_almost_equal(output_pos, [0., 0., 0.])
output_euler = tr.quat_to_euler(output_timestep.observation['quat'])
np.testing.assert_array_almost_equal(output_euler, [0., 0., 0.])
def test_relative_observations(self):
preprocessor = observation_transforms.PoseRelativeToEpisodeStart(
pos_obs_name='pos', quat_obs_name='quat')
# Generate the input spec and input timestep
input_obs_spec = {
'pos': specs.Array(shape=(3,), dtype=np.float64, name='pos'),
'quat': specs.Array(shape=(4,), dtype=np.float64, name='quat'),
}
input_spec = testing_functions.random_timestep_spec(
observation_spec=input_obs_spec)
input_obs = {
'pos': np.array([1.0, -1.5, 3.2]),
'quat': tr.euler_to_quat([0.0, 0.0, 0.0])
}
first_input_timestep = testing_functions.random_timestep(
spec=input_spec,
step_type=dm_env.StepType.FIRST,
observation=input_obs)
preprocessor.setup_io_spec(input_spec)
preprocessor.process(first_input_timestep)
pos_offset = np.array([0.1, -0.2, -0.3])
input_obs = {
'pos': (input_obs['pos'] + pos_offset),
'quat': tr.euler_to_quat([0.2, 0.0, 0.0])
}
second_input_timestep = testing_functions.random_timestep(
spec=input_spec,
step_type=dm_env.StepType.MID,
observation=input_obs)
output_timestep = preprocessor.process(second_input_timestep)
output_pos = output_timestep.observation['pos']
np.testing.assert_array_almost_equal(output_pos, pos_offset)
output_euler = tr.quat_to_euler(output_timestep.observation['quat'])
np.testing.assert_array_almost_equal(output_euler, [0.2, 0., 0.])
class StackObservationsTest(parameterized.TestCase):
@parameterized.parameters(
(False, (4,), (12,)),
(True, (4,), (3, 4)),
(False, (1,), (3,)),
(True, (1,), (3, 1)),
(False, (4, 4), (12, 4)),
(True, (4, 4), (3, 4, 4)),
)
def test_stack_observations_spec(
self, add_leading_dim, input_shape, output_shape):
# Generate the input spec and input timestep.
input_obs_spec = {
'pos': specs.Array(shape=input_shape, dtype=np.float32, name='pos'),
}
input_spec = _build_unit_timestep_spec(
observation_spec=input_obs_spec)
# Generate the expected stacked output spec.
expected_output_obs_spec = {
'pos': specs.Array(shape=output_shape, dtype=np.float32, name='pos'),
}
expected_output_spec = _build_unit_timestep_spec(
observation_spec=expected_output_obs_spec)
preprocessor = observation_transforms.StackObservations(
obs_to_stack=['pos'],
stack_depth=3,
add_leading_dim=add_leading_dim)
output_spec = preprocessor.setup_io_spec(input_spec)
self.assertEqual(expected_output_spec, output_spec)
@parameterized.parameters(
(False, (4,), (12,)),
(True, (4,), (3, 4)),
(False, (1,), (3,)),
(True, (1,), (3, 1)),
(False, (4, 4), (12, 4)),
(True, (4, 4), (3, 4, 4)),
)
def test_stack_observations(self, add_leading_dim, input_shape, output_shape):
# Generate the input spec.
input_obs_spec = {
'pos': specs.Array(shape=input_shape, dtype=np.float32, name='pos'),
}
input_spec = _build_unit_timestep_spec(
observation_spec=input_obs_spec)
preprocessor = observation_transforms.StackObservations(
obs_to_stack=['pos'],
stack_depth=3,
add_leading_dim=add_leading_dim)
preprocessor.setup_io_spec(input_spec)
input_pos = np.random.random(input_shape).astype(np.float32)
if add_leading_dim:
expected_output_pos = np.stack([input_pos for _ in range(3)], axis=0)
else:
expected_output_pos = np.concatenate(
[input_pos for _ in range(3)], axis=0)
input_timestep = testing_functions.random_timestep(
spec=input_spec,
step_type=dm_env.StepType.FIRST,
observation={'pos': input_pos,})
output_timestep = preprocessor.process(input_timestep)
output_pos = output_timestep.observation['pos']
np.testing.assert_allclose(expected_output_pos, output_pos)
np.testing.assert_allclose(expected_output_pos.shape, output_shape)
@parameterized.parameters(
(False, (4,), (12,)),
(True, (4,), (3, 4)),
(False, (1,), (3,)),
(True, (1,), (3, 1)),
(False, (4, 4), (12, 4)),
(True, (4, 4), (3, 4, 4)),
)
def test_add_stack_observations_spec(
self, add_leading_dim, input_shape, output_shape):
# Generate the input spec and input timestep.
input_obs_spec = {
'pos': specs.Array(shape=input_shape, dtype=np.float32, name='pos'),
}
input_spec = _build_unit_timestep_spec(
observation_spec=input_obs_spec)
# Generate the expected stacked output spec.
expected_output_obs_spec = {
'pos': specs.Array(shape=input_shape, dtype=np.float32, name='pos'),
'stacked_pos': specs.Array(
shape=output_shape, dtype=np.float32, name='pos'),
}
expected_output_spec = _build_unit_timestep_spec(
observation_spec=expected_output_obs_spec)
preprocessor = observation_transforms.StackObservations(
obs_to_stack=['pos'],
stack_depth=3,
add_leading_dim=add_leading_dim,
override_obs=False)
output_spec = preprocessor.setup_io_spec(input_spec)
self.assertEqual(expected_output_spec, output_spec)
@parameterized.parameters(
(False, (4,), (12,)),
(True, (4,), (3, 4)),
(False, (1,), (3,)),
(True, (1,), (3, 1)),
(False, (4, 4), (12, 4)),
(True, (4, 4), (3, 4, 4)),
)
def test_add_stack_observations(self,
add_leading_dim, input_shape, output_shape):
# Generate the input spec.
input_obs_spec = {
'pos': specs.Array(shape=input_shape, dtype=np.float32, name='pos'),
}
input_spec = _build_unit_timestep_spec(
observation_spec=input_obs_spec)
preprocessor = observation_transforms.StackObservations(
obs_to_stack=['pos'],
stack_depth=3,
add_leading_dim=add_leading_dim,
override_obs=False)
preprocessor.setup_io_spec(input_spec)
input_pos = np.random.random(input_shape).astype(np.float32)
if add_leading_dim:
expected_output_pos = np.stack([input_pos for _ in range(3)], axis=0)
else:
expected_output_pos = np.concatenate(
[input_pos for _ in range(3)], axis=0)
input_timestep = testing_functions.random_timestep(
spec=input_spec,
step_type=dm_env.StepType.FIRST,
observation={'pos': input_pos,})
output_timestep = preprocessor.process(input_timestep)
output_stacked_pos = output_timestep.observation['stacked_pos']
output_pos = output_timestep.observation['pos']
np.testing.assert_allclose(expected_output_pos, output_stacked_pos)
np.testing.assert_allclose(input_pos, output_pos)
np.testing.assert_allclose(expected_output_pos.shape, output_shape)
class AddObservationTest(absltest.TestCase):
def test_no_overwriting(self):
preprocessor = observation_transforms.AddObservation(
obs_name='pos',
obs_callable=lambda _: [1., 1., 1.])
# Generate the input spec and input timestep.
input_obs_spec = {
'pos': specs.Array(shape=(3,), dtype=np.float32, name='pos'),
'quat': specs.Array(shape=(4,), dtype=np.float32, name='quat'),
}
input_spec = testing_functions.random_timestep_spec(
observation_spec=input_obs_spec)
error_msg = 'Observation pos already exists.'
with self.assertRaisesWithLiteralMatch(ValueError, error_msg):
preprocessor.setup_io_spec(input_spec)
def test_fail_to_run_obs_callable(self):
preprocessor = observation_transforms.AddObservation(
obs_name='new_obs',
obs_callable=lambda timestep: timestep.observation['not_exist'])
# Generate the input spec and input timestep.
input_obs_spec = {
'pos': specs.Array(shape=(3,), dtype=np.float32, name='pos'),
'quat': specs.Array(shape=(4,), dtype=np.float32, name='quat'),
}
input_spec = testing_functions.random_timestep_spec(
observation_spec=input_obs_spec)
# The obs_callable is trying to use an observation named `not_exist` not
# present.
with self.assertRaisesRegex(KeyError, 'not_exist'):
preprocessor.setup_io_spec(input_spec)
def test_add_obs_correctly(self):
preprocessor = observation_transforms.AddObservation(
obs_name='new_obs',
obs_callable=lambda _: np.asarray([1., 1., 1.], dtype=np.float32))
# Generate the input spec and input timestep.
input_obs_spec = {
'pos': specs.Array(shape=(3,), dtype=np.float32, name='pos'),
'quat': specs.Array(shape=(4,), dtype=np.float32, name='quat'),
}
input_spec = testing_functions.random_timestep_spec(
observation_spec=input_obs_spec)
input_obs = {
'pos': np.array([1.0, -1.5, 3.2], dtype=np.float32),
'quat': np.asarray(tr.euler_to_quat([0.1, 0.2, 0.3]), dtype=np.float32)
}
input_timestep = testing_functions.random_timestep(
spec=input_spec, step_type=dm_env.StepType.MID, observation=input_obs)
# Setup the expected output specs.
expected_observation_spec = input_obs_spec.copy()
expected_observation_spec['new_obs'] = (
specs.Array(shape=[3,], dtype=np.float32, name='new_obs'))
expected_output_spec = copy.deepcopy(input_spec)
# Check the specs.
output_spec = preprocessor.setup_io_spec(input_spec)
self.assertEqual(output_spec.observation_spec,
expected_observation_spec)
self.assertEqual(output_spec.reward_spec,
expected_output_spec.reward_spec)
self.assertEqual(output_spec.discount_spec,
expected_output_spec.discount_spec)
# Check the timestep.
output_timestep = preprocessor.process(input_timestep)
spec_utils.validate_timestep(output_spec, output_timestep)
output_new_obs = output_timestep.observation['new_obs']
np.testing.assert_array_almost_equal(output_new_obs, [1., 1., 1.])
def test_add_obs_correctly_with_provided_specs(self):
new_obs_spec = specs.BoundedArray(
shape=(3,), dtype=np.int32, minimum=-1, maximum=3, name='new_obs')
preprocessor = observation_transforms.AddObservation(
obs_name='new_obs',
obs_callable=lambda _: np.array([1, 1, 1], dtype=np.int32),
obs_spec=new_obs_spec)
# Generate the input spec and input timestep.
input_obs_spec = {
'pos': specs.Array(shape=(3,), dtype=np.float32, name='pos'),
'quat': specs.Array(shape=(4,), dtype=np.float32, name='quat'),
}
input_spec = testing_functions.random_timestep_spec(
observation_spec=input_obs_spec)
input_obs = {
'pos': np.array([1.0, -1.5, 3.2], dtype=np.float32),
'quat': np.asarray(tr.euler_to_quat([0.1, 0.2, 0.3]), dtype=np.float32)
}
input_timestep = testing_functions.random_timestep(
spec=input_spec, step_type=dm_env.StepType.MID, observation=input_obs)
# Setup the expected specs.
expected_observation_spec = dict(input_obs_spec)
expected_observation_spec['new_obs'] = new_obs_spec
expected_output_spec = copy.deepcopy(input_spec)
output_spec = preprocessor.setup_io_spec(input_spec)
self.assertEqual(output_spec.observation_spec,
expected_observation_spec)
self.assertEqual(output_spec.reward_spec,
expected_output_spec.reward_spec)
self.assertEqual(output_spec.discount_spec,
expected_output_spec.discount_spec)
# Check the timestep.
output_timestep = preprocessor.process(input_timestep)
spec_utils.validate_timestep(output_spec, output_timestep)
output_new_obs = output_timestep.observation['new_obs']
np.testing.assert_array_almost_equal(output_new_obs, [1., 1., 1.])
def _build_unit_timestep_spec(
observation_spec: Optional[Mapping[str, specs.Array]] = None,
reward_spec: Optional[specs.Array] = None,
discount_spec: Optional[specs.BoundedArray] = None):
if observation_spec is None:
name = 'foo'
observation_spec = {
name: specs.Array(shape=(2,), dtype=_DEFAULT_TYPE, name=name),
}
if reward_spec is None:
reward_spec = scalar_array_spec(name='reward')
if discount_spec is None:
discount_spec = scalar_array_spec(name='discount')
return spec_utils.TimeStepSpec(
observation_spec=observation_spec,
reward_spec=reward_spec,
discount_spec=discount_spec)
if __name__ == '__main__':
absltest.main()
| dm_robotics-main | py/agentflow/preprocessors/observation_transforms_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Tests for dm_robotics.agentflow.logging.utils."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_robotics.agentflow.loggers import utils
import numpy as np
class UtilsTest(parameterized.TestCase):
@parameterized.named_parameters(
('simple', [0., 1., 2.], [1., 1., 0.], 0.9),
('random', np.random.rand(3), np.random.rand(3), np.random.rand(1)),
)
def test_compute_return(self, rewards, discounts, additional_discount):
actual_return = utils.compute_return(
rewards,
np.asarray(discounts) * additional_discount)
expected_return = (
rewards[0] + rewards[1] * discounts[0] * additional_discount +
rewards[2] * discounts[0] * discounts[1] * additional_discount**2)
np.testing.assert_almost_equal(actual_return, expected_return)
if __name__ == '__main__':
absltest.main()
| dm_robotics-main | py/agentflow/loggers/utils_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""A rudimentary logging class that outputs data as strings."""
from typing import Any, Callable, Mapping, Optional
import numpy as np
class PrintLogger:
"""Serializes logging vales to strings and prints them."""
def __init__(
self,
print_fn: Callable[[str], None] = print,
serialize_fn: Optional[Callable[[Mapping[str, Any]], str]] = None,
):
"""Initializes the logger.
Args:
print_fn: function to call which acts like print.
serialize_fn: function to call which formats a values dict.
"""
self._print_fn = print_fn
self._serialize_fn = serialize_fn or _serialize
def write(self, values: Mapping[str, Any]):
self._print_fn(self._serialize_fn(values))
def _format_value(value: Any) -> str:
if isinstance(value, (float, np.number)):
return f'{value:0.3f}'
return str(value)
def _serialize(values: Mapping[str, Any]) -> str:
return ', '.join(
f'{k} = {_format_value(v)}' for k, v in sorted(values.items()))
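# Illustrative usage (not part of the module; the output format follows
# `_serialize` above):
#
#   logger = PrintLogger()
#   logger.write({'reward': 1.0, 'step': 3})
#   # prints: reward = 1.000, step = 3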
| dm_robotics-main | py/agentflow/loggers/print_logger.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Tests for dm_robotics.agentflow.logging.subtask_logger."""
import json
from typing import Any, List, Mapping
from absl.testing import absltest
import dm_env
from dm_robotics.agentflow import testing_functions
from dm_robotics.agentflow.loggers import subtask_logger
from dm_robotics.agentflow.loggers import utils
import numpy as np
class SubtaskLoggerTest(absltest.TestCase):
def setUp(self):
super().setUp()
self._parent_timestep_spec = testing_functions.random_timestep_spec(
reward_spec=testing_functions.random_reward_spec(dtype=np.float64),
discount_spec=testing_functions.random_discount_spec(dtype=np.float64))
self._agent_timestep_spec = testing_functions.random_timestep_spec(
reward_spec=testing_functions.random_reward_spec(dtype=np.float64),
discount_spec=testing_functions.random_discount_spec(dtype=np.float64))
def _step_through_sequence(self, observer: subtask_logger.SubTaskLogger,
rewards: List[float], discounts: List[float],
step_type: dm_env.StepType):
for reward, discount in zip(rewards, discounts):
dummy_parent_timestep = testing_functions.random_timestep(
self._parent_timestep_spec)
agent_timestep = testing_functions.random_timestep(
self._agent_timestep_spec,
step_type=step_type,
reward=reward,
discount=discount)
dummy_parent_action = testing_functions.random_action()
dummy_agent_action = testing_functions.random_action()
observer.step(dummy_parent_timestep, dummy_parent_action,
agent_timestep, dummy_agent_action)
def test_episode_return_logger(self):
additional_discount = 0.8
episode_len = 3
aggregator = subtask_logger.EpisodeReturnAggregator(additional_discount)
logger = FakeLogger()
observer = subtask_logger.SubTaskLogger(logger, aggregator)
rewards = np.hstack(([0], np.random.rand(episode_len - 1)))
discounts = np.hstack(([1], np.random.rand(episode_len - 1)))
# Initialize; Shouldn't call logger.write until a LAST step is received.
# First timestep has no reward or discount by convention.
self._step_through_sequence(observer, rewards[:1], discounts[:1],
dm_env.StepType.FIRST)
self.assertEmpty(logger.logs())
# Run episode up to last step.
self._step_through_sequence(observer, rewards[1:-1], discounts[1:-1],
dm_env.StepType.MID)
# Shouldn't call logger.write until a LAST step is received.
self.assertEmpty(logger.logs())
# Last-step observer, should call logger.
self._step_through_sequence(observer, rewards[-1:], discounts[-1:],
dm_env.StepType.LAST)
expected_return = utils.compute_return(rewards,
discounts * additional_discount)
self.assertTrue(logger.is_logged(episode_return=expected_return,
episode_length=episode_len))
class FakeLogger:
def __init__(self):
super().__init__()
self._log = []
def write(self, values: Mapping[str, Any]):
self._log.append(json.dumps(values, sort_keys=True))
def logs(self):
return list(self._log)
def is_logged(self, **kwargs):
expected_entry = json.dumps(kwargs, sort_keys=True)
return expected_entry in self._log
if __name__ == '__main__':
absltest.main()
| dm_robotics-main | py/agentflow/loggers/subtask_logger_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Logger type."""
from typing import Any, Mapping
import typing_extensions
@typing_extensions.runtime
class Logger(typing_extensions.Protocol):
def write(self, data: Mapping[str, Any]) -> None:
pass
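# Since `Logger` is a runtime-checkable protocol, any object with a matching
# `write` method conforms; a minimal hypothetical example:
#
#   class StdoutLogger:
#     def write(self, data: Mapping[str, Any]) -> None:
#       print(dict(data))
#
#   assert isinstance(StdoutLogger(), Logger)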
| dm_robotics-main | py/agentflow/loggers/types.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Utilities for subtask logging."""
from typing import Sequence
def compute_return(episode_rewards: Sequence[float],
episode_discounts: Sequence[float]) -> float:
"""Computes the return of an episode from a list of rewards and discounts."""
if len(episode_rewards) <= 0:
raise ValueError('Length of episode_rewards must be greater than zero.')
if len(episode_discounts) <= 0:
raise ValueError('Length of episode_discounts must be greater than zero.')
if len(episode_rewards) != len(episode_discounts):
raise ValueError('episode_rewards and episode_discounts must be same length'
f' but are {episode_rewards} and {episode_discounts}')
episode_return = episode_rewards[0]
total_discount = episode_discounts[0]
for reward, discount in zip(episode_rewards[1:],
episode_discounts[1:]):
episode_return += reward * total_discount
total_discount *= discount
return episode_return
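# Worked example (illustrative): for rewards [r0, r1, r2] and discounts
# [d0, d1, d2] the return is r0 + r1 * d0 + r2 * d0 * d1 (the final
# discount d2 is never applied), so:
#
#   compute_return([0., 1., 2.], [1., 0.5, 0.9])  # == 0 + 1*1 + 2*1*0.5 = 2.0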
| dm_robotics-main | py/agentflow/loggers/utils.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Module for logging subtasks.
"""
import abc
from typing import Any, List, Mapping, Optional
import dm_env
from dm_robotics.agentflow import subtask
from dm_robotics.agentflow.loggers import types
from dm_robotics.agentflow.loggers import utils
import numpy as np
class Aggregator(abc.ABC):
"""Base class for data-aggregators for SubTaskLogger.
An `Aggregator` handles the job of accumulating data to log from parent and
child timesteps & actions within a subtask.
"""
@abc.abstractmethod
def accumulate(self, parent_timestep: dm_env.TimeStep,
parent_action: np.ndarray, agent_timestep: dm_env.TimeStep,
agent_action: np.ndarray) -> Optional[Mapping[str, Any]]:
"""Step aggregator and optionally return a dict of information to log.
Args:
parent_timestep: The timestep passed to the SubTask by its parent.
parent_action: The action being returned to the parent. Typically an
extended or modified version of `agent_action`.
agent_timestep: The timestep this subtask passed to its agent. Typically a
reduced or modified version of `parent_timestep`.
agent_action: The action returned by the agent this step.
Returns:
A dictionary of information that can be passed to an acme logger. Can also
return None, which skips logging this step.
"""
pass
class EpisodeReturnAggregator(Aggregator):
"""An Aggregator that computes episode return and length when subtask ends."""
def __init__(self,
additional_discount: float = 1.,
return_name: str = 'episode_return',
length_name: str = 'episode_length'):
self._additional_discount = additional_discount
self._return_name = return_name
self._length_name = length_name
self._episode_rewards = [] # type: List[float]
self._episode_discounts = [] # type: List[float]
def accumulate(self, parent_timestep: dm_env.TimeStep,
parent_action: np.ndarray, agent_timestep: dm_env.TimeStep,
agent_action: np.ndarray) -> Optional[Mapping[str, Any]]:
if agent_timestep.first():
self._episode_rewards.clear()
self._episode_discounts.clear()
if agent_timestep.reward is None or agent_timestep.discount is None:
return # Some environments omit reward and discount on first step.
self._episode_rewards.append(agent_timestep.reward)
self._episode_discounts.append(agent_timestep.discount)
if agent_timestep.last():
return {
self._return_name: utils.compute_return(
self._episode_rewards,
np.array(self._episode_discounts) * self._additional_discount),
self._length_name: len(self._episode_rewards)
}
return
class SubTaskLogger(subtask.SubTaskObserver):
"""A subtask observer that logs agent performance to an Acme logger."""
def __init__(self, logger: types.Logger, aggregator: Aggregator):
"""Initialize SubTaskLogger."""
self._logger = logger
self._aggregator = aggregator
def step( # pytype: disable=signature-mismatch # overriding-parameter-type-checks
self, parent_timestep: dm_env.TimeStep, parent_action: np.ndarray,
agent_timestep: dm_env.TimeStep, agent_action: np.ndarray) -> None:
# Fetch current data to log.
data = self._aggregator.accumulate(parent_timestep, parent_action,
agent_timestep, agent_action)
# Log the given results.
if data is not None:
self._logger.write(data)
| dm_robotics-main | py/agentflow/loggers/subtask_logger.py |
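# --- Usage sketch (reviewer addition; not part of the original repository) ---
# A minimal, hedged example of wiring the classes above together, assuming the
# module is importable at the repo path shown in the separator line. The
# `PrintLogger` below is hypothetical, defined only for illustration; any
# object with a `write(data)` method satisfying `types.Logger` would do.
import dm_env
import numpy as np
from dm_robotics.agentflow.loggers.subtask_logger import (
    EpisodeReturnAggregator, SubTaskLogger)

class PrintLogger:
  """Hypothetical stand-in for an acme-style logger."""

  def write(self, data):
    print(data)  # e.g. {'episode_return': ..., 'episode_length': ...}

aggregator = EpisodeReturnAggregator(additional_discount=0.99)
observer = SubTaskLogger(logger=PrintLogger(), aggregator=aggregator)
# Drive the observer by hand; in practice a SubTask invokes it once per step.
action = np.zeros(2)
first = dm_env.TimeStep(dm_env.StepType.FIRST, None, None, {})
last = dm_env.TimeStep(dm_env.StepType.LAST, 1.0, 1.0, {})
observer.step(first, action, first, action)  # nothing logged yet
observer.step(last, action, last, action)    # writes return and length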
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_robotics.agentflow.meta_options.bt.sequence."""
from typing import List, Tuple
from unittest import mock
from absl.testing import absltest
import dm_env
from dm_robotics.agentflow import core
from dm_robotics.agentflow import testing_functions
from dm_robotics.agentflow.meta_options.control_flow import cond
from dm_robotics.agentflow.meta_options.control_flow import sequence
from dm_robotics.agentflow.options import basic_options
_SUCCESS_RESULT = core.OptionResult(core.TerminationType.SUCCESS)
_FAILURE_RESULT = core.OptionResult(core.TerminationType.FAILURE)
_FIXED_ACTION = [0.1, 0.2, -0.3, 0.05]
def _make_simple_option():
fixed_action = _FIXED_ACTION
option = mock.MagicMock(spec=basic_options.FixedOp)
option.step.return_value = fixed_action
option.pterm.return_value = 1.0
option.result.return_value = _SUCCESS_RESULT
return option
class SequenceTest(absltest.TestCase):
"""Test case for Sequence Option."""
def _make_agent(
self, terminate_on_option_failure
) -> Tuple[sequence.Sequence, List[core.Option]]:
# create options
option_list = []
for _ in range(3):
option_list.append(_make_simple_option())
agent = sequence.Sequence(
option_list=option_list,
terminate_on_option_failure=terminate_on_option_failure,
name='TestSequence')
return agent, option_list
def test_basic(self):
"""Test that sets up a basic sequence and runs a few steps."""
agent, option_list = self._make_agent(terminate_on_option_failure=False)
# Ensure we can directly step an option before it is selected.
first_timestep = testing_functions.random_timestep(
step_type=dm_env.StepType.FIRST)
mid_timestep = testing_functions.random_timestep(
step_type=dm_env.StepType.MID)
last_timestep = testing_functions.random_timestep(
step_type=dm_env.StepType.LAST)
agent.step(first_timestep) # marks first option for termination.
self.assertTrue(agent._terminate_option)
# Select option and verify no option has been touched yet.
agent.on_selected(first_timestep)
self.assertIsNone(agent._current_option)
self.assertIsNone(agent._previous_option)
# Step first option.
agent.step(first_timestep)
self.assertTrue(agent._terminate_option) # marked for termination.
self.assertIsNone(agent._previous_option) # haven't advanced yet.
# Assert sequence isn't terminal yet.
self.assertEqual(agent.pterm(mid_timestep), 0.0)
# Step through second option.
option_list[1].pterm.return_value = 0.0 # make non-terminal
agent.step(mid_timestep) # now switches to option1.
# Assert we haven't advanced yet.
self.assertIs(option_list[0], agent._previous_option)
# Step again and assert we still haven't advanced
agent.step(mid_timestep)
self.assertIs(option_list[0], agent._previous_option)
# Make option terminal and assert we advance.
    option_list[1].pterm.return_value = 1.0  # make terminal
agent.step(mid_timestep) # marks option1 for termination
self.assertTrue(agent._terminate_option)
# Assert sequence isn't terminal yet.
self.assertEqual(agent.pterm(mid_timestep), 0.0)
# Step through third option.
agent.step(mid_timestep) # transitions & steps option2.
self.assertEqual(agent.pterm(mid_timestep), 1.0) # immediately terminal.
self.assertTrue(agent._terminate_option) # wants to terminate option2.
# Assert we haven't transitioned yet.
self.assertIs(option_list[1], agent._previous_option)
self.assertIs(option_list[2], agent._current_option)
agent.step(last_timestep) # transitions to terminal state.
self.assertIs(option_list[2], agent._previous_option)
# Assert sequence is terminal.
self.assertEqual(agent.pterm(mid_timestep), 1.0)
def test_sequence_failure_on_option_failure(self):
"""Test that sets up a basic sequence and runs a few steps."""
agent, option_list = self._make_agent(terminate_on_option_failure=True)
# Select option and verify no option has been touched yet.
first_timestep = testing_functions.random_timestep(
step_type=dm_env.StepType.FIRST)
mid_timestep = testing_functions.random_timestep(
step_type=dm_env.StepType.MID)
last_timestep = testing_functions.random_timestep(
step_type=dm_env.StepType.LAST)
agent.on_selected(first_timestep)
self.assertIsNone(agent._current_option)
self.assertIsNone(agent._previous_option)
# Step first option (won't switch until next step).
agent.step(first_timestep)
# Assert sequence isn't terminal yet.
self.assertEqual(agent.pterm(first_timestep), 0.0)
# Step through second option.
option_list[1].pterm.return_value = 0.0 # make non-terminal
agent.step(mid_timestep)
# Assert we haven't advanced yet.
self.assertIs(option_list[0], agent._previous_option)
# Step again and assert we still haven't advanced
agent.step(mid_timestep)
self.assertIs(option_list[0], agent._previous_option)
# Make option terminal and FAIL and assert we advance.
    option_list[1].pterm.return_value = 1.0  # make terminal
option_list[1].result.return_value = _FAILURE_RESULT # make option fail.
agent.step(mid_timestep) # option1 marked for termination.
self.assertTrue(agent._terminate_option) # option1 terminal
# Assert we haven't advanced yet.
self.assertIs(option_list[0], agent._previous_option)
self.assertIs(option_list[1], agent._current_option)
# Assert sequence is terminal and failure because the option failed.
self.assertEqual(agent.pterm(mid_timestep), 1.0)
# Pass another MID timestep and assert we haven't advanced yet.
agent.step(mid_timestep) # stays stuck on option1 b/c sequence_terminal
self.assertIs(option_list[0], agent._previous_option)
self.assertIs(option_list[1], agent._current_option)
# Pass a LAST timestep and assert still haven't advanced (sequence_terminal)
agent.step(last_timestep)
self.assertIs(option_list[1], agent._previous_option)
self.assertIs(option_list[2], agent._current_option) # won't be selected.
self.assertEqual(
agent.result(last_timestep).termination_reason,
core.TerminationType.FAILURE)
def test_nested_with_cond(self):
"""Test that composes a nested set of Sequence and Cond options."""
# State used to track which options are executed.
counter = 0
true_counter = 0
truth_test_counter = 0
def increment_counter(unused_timestep, unused_result):
nonlocal counter
counter += 1
def increment_true_counter(unused_timestep, unused_result):
nonlocal true_counter
true_counter += 1
def counter_is_even(unused_timestep, unused_result):
nonlocal counter
nonlocal truth_test_counter
truth_test_counter += 1
return counter % 2 == 0
# options that update the tracking state - these are the instrumented
# options.
inc_counter_op = basic_options.LambdaOption(
delegate=_make_simple_option(),
on_selected_func=increment_counter)
inc_true_counter_op = basic_options.LambdaOption(
delegate=_make_simple_option(),
on_selected_func=increment_true_counter)
# inner increments counter unconditionally, and
# increments true_counter on the condition's True branch.
option_list = [inc_counter_op,
cond.Cond(counter_is_even,
inc_true_counter_op,
_make_simple_option())]
inner = sequence.Sequence(option_list=option_list, name='inner')
# outer executes inner a few times.
# We could use a loop here to test all meta_options.
outer = sequence.Sequence(option_list=[inner] * 10, name='outer')
timestep = testing_functions.random_timestep(
step_type=dm_env.StepType.FIRST)
outer.on_selected(timestep)
outer.step(timestep)
# outer.step -> inner.step -> inc_counter_op.step
    # We should have incremented the counter, but not checked the condition.
self.assertEqual(counter, 1)
self.assertEqual(truth_test_counter, 0)
self.assertEqual(true_counter, 0)
outer.step(testing_functions.random_timestep(step_type=dm_env.StepType.MID))
# outer.step -> inner.step -> cond.step ->
# counter_is_even (False) and the simple option
self.assertEqual(counter, 1) # on_selected, step(FIRST)
self.assertGreaterEqual(truth_test_counter, 1) # on_selected, step(FIRST)
truth_test_counter = 0 # Reset for future tests.
self.assertEqual(true_counter, 0)
outer.step(testing_functions.random_timestep(step_type=dm_env.StepType.MID))
# outer.step -> inner.step -> inc_counter_op.step
self.assertEqual(counter, 2)
self.assertGreaterEqual(truth_test_counter, 0)
self.assertEqual(true_counter, 0)
outer.step(testing_functions.random_timestep(step_type=dm_env.StepType.MID))
# outer.step -> inner.step -> cond.step ->
# counter_is_even (True) and the simple option
self.assertEqual(counter, 2)
self.assertGreaterEqual(truth_test_counter, 1) # on_selected, step(FIRST)
truth_test_counter = 0 # Reset for future tests.
self.assertEqual(true_counter, 1)
outer.step(testing_functions.random_timestep(step_type=dm_env.StepType.MID))
# outer.step -> inner.step -> inc_counter_op.step
self.assertEqual(counter, 3)
self.assertGreaterEqual(truth_test_counter, 0)
self.assertEqual(true_counter, 1)
def test_with_single_child(self):
"""Test that Sequence wrapping a single child can step and pterm."""
child_op = _make_simple_option()
sequence_op = sequence.Sequence([child_op])
# Verify pterm can be called before option is stepped.
    # Not guaranteed to match the child's pterm, because Sequence needs to be
    # FIRST-stepped before the child is activated.
timestep = testing_functions.random_timestep()
pterm = sequence_op.pterm(timestep)
self.assertEqual(pterm, 0.) # Should be zero before sequence is stepped.
# FIRST-step the sequence_op and verify the underlying op pterm gets through
first_timestep = testing_functions.random_timestep(
step_type=dm_env.StepType.FIRST)
sequence_op.step(first_timestep)
expected_pterm = child_op.pterm(first_timestep)
actual_pterm = sequence_op.pterm(first_timestep)
self.assertEqual(expected_pterm, actual_pterm)
if __name__ == '__main__':
absltest.main()
| dm_robotics-main | py/agentflow/meta_options/control_flow/sequence_test.py |
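# --- Illustration (reviewer addition; not part of the original repository) ---
# The tests above rely on Sequence's two-phase hand-off: an option is first
# *marked* for termination when its pterm is sampled, and only on the next
# step does it receive a LAST timestep and get replaced. A hedged sketch,
# reusing the mock pattern from the tests, makes that hand-off visible:
from unittest import mock
import dm_env
from dm_robotics.agentflow import core
from dm_robotics.agentflow import testing_functions
from dm_robotics.agentflow.meta_options.control_flow import sequence
from dm_robotics.agentflow.options import basic_options

def make_option(pterm):
  option = mock.MagicMock(spec=basic_options.FixedOp)
  option.step.return_value = [0.0]
  option.pterm.return_value = pterm
  option.result.return_value = core.OptionResult(core.TerminationType.SUCCESS)
  return option

first_op, second_op = make_option(pterm=1.0), make_option(pterm=0.0)
seq = sequence.Sequence(option_list=[first_op, second_op], name='Demo')
ts_first = testing_functions.random_timestep(step_type=dm_env.StepType.FIRST)
ts_mid = testing_functions.random_timestep(step_type=dm_env.StepType.MID)
seq.on_selected(ts_first)
seq.step(ts_first)  # Steps first_op; pterm == 1 marks it for termination.
seq.step(ts_mid)    # LAST-steps first_op, then FIRST-steps second_op.
assert first_op.step.call_count == 2 and second_op.step.call_count == 1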
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Module for an agent to run options in a sequence."""
import typing
from typing import Any, List, Optional, Text, Tuple
import dm_env
from dm_robotics.agentflow import core
from dm_robotics.agentflow import util
from dm_robotics.agentflow.decorators import overrides
import numpy as np
OptionTimestep = typing.NewType("OptionTimestep", Any)
class Step(typing.NamedTuple):
option: core.Option
action: np.ndarray
result: core.OptionResult
class Sequence(core.MetaOption):
"""An Option that executes options in a sequence.
Sequence runs each Option from a list in order until one fails, or the final
one succeeds.
"""
def __init__(self,
option_list: List[core.Option],
terminate_on_option_failure: bool = False,
allow_stepping_after_terminal: bool = True,
name: Optional[Text] = None):
"""Initialize Sequence.
Args:
option_list: A list of Options to run.
terminate_on_option_failure: If True exits with a `FAILURE` code when any
option fails. If False it proceeds to the next option.
allow_stepping_after_terminal: If True, allows this option to be stepped
even after it has requested termination and received a LAST timestep.
This is required when the option is driven by a run-loop that doesn't
respect the option life-cycle (e.g. a standard run loop).
name: A name for this Option.
"""
super().__init__(name=name)
self._option_list = option_list
self._terminate_on_option_failure = terminate_on_option_failure
self._allow_stepping_after_terminal = allow_stepping_after_terminal
self._initialize()
def _initialize(self):
self._option_idx = 0
    self._current_option = None  # type: Optional[core.Option]
    self._current_option_result = core.OptionResult.success_result()
    self._current_option_first_step = None  # type: Optional[bool]
    self._previous_option = None  # type: Optional[core.Option]
self._previous_option_result = core.OptionResult.success_result()
self._terminate_option = False
self._terminate_sequence = False
self._logged_termination = False
def _make_current_option_previous(self):
self._previous_option = self._current_option
self._previous_option_result = self._current_option_result
self._current_option = None
self._current_option_result = core.OptionResult.success_result()
self._option_idx += 1
def _select_option(self, timestep: dm_env.TimeStep):
if self._option_idx >= len(self._option_list):
# If no options available just return; step() will handle last timestep.
return
self._current_option = self._option_list[self._option_idx]
self._current_option_result = core.OptionResult.success_result()
self._current_option_first_step = True
def _step_current_option(
self, timestep: dm_env.TimeStep) -> Tuple[np.ndarray, bool, bool]:
if self._current_option is None:
raise ValueError("No current option")
option_timestep = OptionTimestep(timestep)
if self._current_option_first_step:
option_timestep = option_timestep._replace(
step_type=dm_env.StepType.FIRST)
self._current_option.on_selected(option_timestep,
self._previous_option_result)
self._current_option_first_step = False
action = self._current_option.step(option_timestep)
pterm = self._current_option.pterm(option_timestep)
terminate_option = pterm > np.random.uniform()
terminate_sequence = False
if terminate_option:
self._current_option_result = self._current_option.result(option_timestep)
terminate_early = (
self._terminate_on_option_failure and
self._current_option_result.termination_reason ==
core.TerminationType.FAILURE)
if terminate_early:
# If the option failed and the user asked, terminate the whole sequence.
terminate_sequence = True
return action, terminate_option, terminate_sequence
def _step_completed(self) -> None:
if not self._terminate_option and self._current_option_first_step:
self._current_option_first_step = False
def _last_step_previous_option(
self, timestep: dm_env.TimeStep) -> np.ndarray:
prev_option = self._previous_option
if prev_option is None:
raise ValueError("Expected previous option is None.")
timestep = OptionTimestep(timestep)
termination_timestep = timestep._replace(step_type=dm_env.StepType.LAST)
action = prev_option.step(termination_timestep)
return action
def arg_spec(self):
return # Sequence cannot take runtime arguments.
def on_selected(self, timestep, prev_option_result=None):
self._initialize()
def step(self, timestep: dm_env.TimeStep) -> np.ndarray:
if timestep.first():
self._initialize() # in case `on_selected` not called (top-level agent).
self._select_option(timestep)
on_last_option = self._option_idx == len(self._option_list) - 1
block_termination = ((on_last_option or self._terminate_sequence) and
not timestep.last())
if self._terminate_option and not block_termination:
# Advance to next option iff parent respected pterm and sent LAST timestep
util.log_termination_reason(self._current_option,
self._current_option_result)
self._make_current_option_previous()
self._select_option(timestep)
action = self._last_step_previous_option(timestep)
self._terminate_option = False
if timestep.last(): # Done.
return action
if self._current_option is None:
if self._allow_stepping_after_terminal:
# Agentflow options will nominally pass only a single LAST step when
# options become terminal, but the top-level run loop may continue to
# drive with MID timesteps (which become LAST if the option is >1 level
        # deep in the hierarchy). We allow this by LAST-stepping the previous
        # option, if the user prefers not to treat it as an error.
return self._last_step_previous_option(timestep)
raise ValueError(f"{str(self)} is terminal but was stepped. Is "
f"this agent in a non-agentflow run_loop?")
action, self._terminate_option, self._terminate_sequence = (
self._step_current_option(timestep))
self._step_completed()
return action
def pterm(self, timestep) -> float:
terminate_early = self._terminate_sequence
out_of_options = self._option_idx >= len(self._option_list)
on_last_option = self._option_idx == len(self._option_list) - 1
last_option_finished = False
if on_last_option:
if self._current_option is None:
        return 0.  # pterm must be queryable before the option is FIRST-stepped.
pterm = self._current_option.pterm(OptionTimestep(timestep))
last_option_finished = pterm > np.random.uniform()
if terminate_early or out_of_options or last_option_finished:
return 1.
return 0.
def result(self, unused_timestep) -> core.OptionResult:
return self._previous_option_result
def child_policies(self):
return self._option_list
@property
def terminate_on_option_failure(self) -> bool:
return self._terminate_on_option_failure
@overrides(core.Option)
def render_frame(self, canvas) -> None:
if self._current_option:
self._current_option.render_frame(canvas)
| dm_robotics-main | py/agentflow/meta_options/control_flow/sequence.py |
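# --- Worked example (reviewer addition; not part of the original repository) --
# Sequence samples option termination via `pterm > np.random.uniform()`, so a
# constant pterm of p ends the option on each step with probability p, and the
# option's duration is geometrically distributed with mean 1/p. A quick
# empirical check of that claim:
import numpy as np

def sampled_duration(p, rng):
  """Number of steps until `p > uniform()` first succeeds."""
  steps = 1
  while not p > rng.uniform():
    steps += 1
  return steps

rng = np.random.default_rng(seed=0)
p = 0.2
durations = [sampled_duration(p, rng) for _ in range(10_000)]
print(np.mean(durations))  # approximately 1 / p == 5.0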
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Control-flow module."""
from dm_robotics.agentflow.meta_options.control_flow.cond import Cond
from dm_robotics.agentflow.meta_options.control_flow.loop_ops import Repeat
from dm_robotics.agentflow.meta_options.control_flow.loop_ops import While
from dm_robotics.agentflow.meta_options.control_flow.sequence import Sequence
| dm_robotics-main | py/agentflow/meta_options/control_flow/__init__.py |
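# --- Composition sketch (reviewer addition; not part of the original repo) ---
# The re-exports above let nested control flow read declaratively. A hedged,
# construction-only sketch: the stub options and the `object_grasped`
# condition are hypothetical placeholders, mirroring the mock pattern used in
# the accompanying test files.
from unittest import mock
from dm_robotics.agentflow import core
from dm_robotics.agentflow.meta_options.control_flow import (
    Cond, Repeat, Sequence)

def stub_option():
  option = mock.MagicMock(spec=core.Option)
  option.step.return_value = [0.0]
  option.pterm.return_value = 1.0
  option.result.return_value = core.OptionResult(core.TerminationType.SUCCESS)
  return option

def object_grasped(timestep, result=None):  # hypothetical condition
  del timestep, result
  return False

# Try a reach-then-grasp sequence; lift instead once something is grasped.
pick_attempt = Sequence([stub_option(), stub_option()], name='PickAttempt')
pick = Repeat(3, Cond(object_grasped, stub_option(), pick_attempt))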
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Tests for dm_robotics.agentflow.meta_options.control_flow.cond."""
from unittest import mock
from absl.testing import absltest
import dm_env
from dm_robotics.agentflow import core
from dm_robotics.agentflow.meta_options.control_flow import loop_ops
import numpy as np
class WhileOptionTest(absltest.TestCase):
def _timestep(self, first=False, last=False, observation=None):
step_type = dm_env.StepType.MID
if first:
step_type = dm_env.StepType.FIRST
if last:
step_type = dm_env.StepType.LAST
if first and last:
raise ValueError('FIRST and LAST not allowed')
reward = 0
discount = np.random.random()
observation = observation or {}
return dm_env.TimeStep(step_type, reward, discount, observation)
def assert_mock_step_type(self, mock_step, step_type):
self.assertEqual(mock_step.call_args[0][0].step_type, step_type)
def assert_delegate_just_restarted(self, option):
self.assertEqual(option.step.call_args_list[-2][0][0].step_type,
dm_env.StepType.LAST)
self.assertEqual(option.step.call_args_list[-1][0][0].step_type,
dm_env.StepType.FIRST)
def test_while_terminates_if_false(self):
# Test whether While terminates if cond is False.
option = mock.MagicMock(spec=core.Option)
option.result.return_value = core.OptionResult(core.TerminationType.SUCCESS)
first_timestep = self._timestep(first=True)
mid_timestep = self._timestep()
last_timestep = self._timestep(last=True)
result_success = core.OptionResult(
termination_reason=core.TerminationType.SUCCESS,
data='discarded result')
cond = mock.MagicMock()
cond.return_value = True
option.pterm.return_value = 0.0
# If cond is true first step should go through.
while_option = loop_ops.While(cond, option, eval_every_step=True)
while_option.on_selected(first_timestep, result_success)
while_option.step(first_timestep)
option.step.assert_called_with(first_timestep)
self.assertEqual(while_option.pterm(first_timestep), 0.0)
# Regular step should go through.
while_option.step(mid_timestep)
option.step.assert_called_with(mid_timestep)
# If cond goes false should terminate and push last step to delegate.
cond.return_value = False
while_option.step(mid_timestep)
self.assertEqual(while_option.pterm(mid_timestep), 1.0)
self.assert_mock_step_type(option.step, dm_env.StepType.LAST)
cond.return_value = True
while_option.on_selected(first_timestep, result_success)
while_option.step(last_timestep)
option.step.assert_called_with(last_timestep)
def test_while_calls_cond_selectively(self):
    # Test that While evaluates cond only when expected.
option = mock.MagicMock(spec=core.Option)
first_timestep = self._timestep(first=True)
mid_timestep = self._timestep()
result_success = core.OptionResult(
termination_reason=core.TerminationType.SUCCESS,
data='discarded result')
cond = mock.MagicMock()
cond.return_value = True
option.pterm.return_value = 0.0
# on_selected shouldn't call the cond.
while_option_default = loop_ops.While(cond, option, eval_every_step=True)
while_option_lazy = loop_ops.While(cond, option, eval_every_step=False)
while_option_default.on_selected(first_timestep, result_success)
while_option_lazy.on_selected(first_timestep, result_success)
cond.assert_not_called()
# Should only call on step if eval_every_step=True
while_option_lazy.step(first_timestep)
cond.assert_not_called()
while_option_default.step(first_timestep)
cond.assert_called_with(first_timestep)
# step should always call the cond if delegate terminates.
cond.reset_mock()
cond.return_value = True
option.pterm.return_value = 1.0
while_option_lazy.step(mid_timestep)
cond.assert_called_with(mid_timestep)
cond.reset_mock()
cond.return_value = True
while_option_default.step(mid_timestep)
cond.assert_called_with(mid_timestep)
def test_while_restarts_delegate(self):
    # Test that While restarts the option if cond is True and the delegate
    # requests termination.
option = mock.MagicMock(spec=core.Option)
first_timestep = self._timestep(first=True)
mid_timestep = self._timestep()
result_success = core.OptionResult(
termination_reason=core.TerminationType.SUCCESS,
data='discarded result')
cond = mock.MagicMock()
cond.return_value = True
option.pterm.return_value = 0.0
option.result.return_value = core.OptionResult(core.TerminationType.SUCCESS)
# If cond is true first step should go through.
while_option = loop_ops.While(cond, option, eval_every_step=True)
while_option.on_selected(first_timestep, result_success)
while_option.step(first_timestep)
for _ in range(10):
while_option.step(mid_timestep)
option.step.assert_called_with(mid_timestep)
self.assertEqual(option.step.call_count, 11)
self.assertEqual(option.on_selected.call_count, 1)
# Terminate delegate and verify it's restarted properly.
option.pterm.return_value = 1.0
while_option.step(mid_timestep)
self.assertEqual(option.on_selected.call_count, 2)
self.assert_delegate_just_restarted(option)
# Verify that delegate sees last step if cond flips to false.
option.reset_mock()
option.pterm.return_value = 0.0
option.result.return_value = core.OptionResult(core.TerminationType.SUCCESS)
cond.return_value = False
while_option.step(mid_timestep)
self.assert_mock_step_type(option.step, dm_env.StepType.LAST)
def test_for(self):
option = mock.MagicMock(spec=core.Option)
first_timestep = self._timestep(first=True)
mid_timestep = self._timestep()
result_success = core.OptionResult(
termination_reason=core.TerminationType.SUCCESS,
data='discarded result')
cond = mock.MagicMock()
cond.return_value = True
option.pterm.return_value = 0.0
# Run first iteration of delegate.
for_option = loop_ops.Repeat(3, option)
for_option.on_selected(first_timestep, result_success)
for_option.step(first_timestep)
for _ in range(10):
for_option.step(mid_timestep)
self.assertEqual(for_option.delegate_episode_ctr, 0)
# Verify it resets and increments if delegate terminates.
option.pterm.return_value = 1.0
for_option.step(mid_timestep)
self.assert_delegate_just_restarted(option)
self.assertEqual(for_option.delegate_episode_ctr, 1)
# Verify it doesn't increment while stepping delegate on 2nd iteration.
option.pterm.return_value = 0.0
for _ in range(10):
for_option.step(mid_timestep)
self.assertEqual(for_option.delegate_episode_ctr, 1)
# Verify it resets and increments if delegate terminates.
option.pterm.return_value = 1.0
for_option.step(mid_timestep)
self.assert_delegate_just_restarted(option)
self.assertEqual(for_option.delegate_episode_ctr, 2)
# Verify we can run 3rd iteration.
option.pterm.return_value = 0.0
for _ in range(10):
for_option.step(mid_timestep)
self.assertEqual(for_option.delegate_episode_ctr, 2)
self.assertEqual(for_option.pterm(mid_timestep), 0.0)
# Verify that loop terminates when this iteration ends.
option.pterm.return_value = 1.0 # delegate asks for termination
for_option.step(mid_timestep) # delegate gets terminated
# Assert delegate is terminated without restarting.
self.assert_mock_step_type(option.step, dm_env.StepType.LAST)
# Assert loop requests termination.
self.assertEqual(for_option.pterm(mid_timestep), 1.0)
if __name__ == '__main__':
absltest.main()
| dm_robotics-main | py/agentflow/meta_options/control_flow/loop_ops_test.py |
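# --- Illustration (reviewer addition; not part of the original repository) ---
# The restart assertions above hinge on Repeat (and While) re-selecting the
# delegate within a single step: the delegate sees a LAST timestep closing its
# episode, immediately followed by a FIRST timestep. A hedged sketch using the
# same mock pattern as the tests:
from unittest import mock
import dm_env
from dm_robotics.agentflow import core
from dm_robotics.agentflow import testing_functions
from dm_robotics.agentflow.meta_options.control_flow import loop_ops

delegate = mock.MagicMock(spec=core.Option)
delegate.pterm.return_value = 0.0
delegate.result.return_value = core.OptionResult(core.TerminationType.SUCCESS)
repeat = loop_ops.Repeat(2, delegate)
ts_first = testing_functions.random_timestep(step_type=dm_env.StepType.FIRST)
ts_mid = testing_functions.random_timestep(step_type=dm_env.StepType.MID)
repeat.on_selected(ts_first, core.OptionResult(core.TerminationType.SUCCESS))
repeat.step(ts_first)              # Episode 1: delegate is FIRST-stepped.
repeat.step(ts_mid)                # Ordinary MID step.
delegate.pterm.return_value = 1.0  # Delegate now asks to terminate.
repeat.step(ts_mid)                # Delegate is LAST-stepped, then restarted.
last_two = [c[0][0].step_type for c in delegate.step.call_args_list[-2:]]
print(last_two)                    # [StepType.LAST, StepType.FIRST]
assert repeat.delegate_episode_ctr == 1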