Dataset columns: python_code (string, lengths 0–780k), repo_name (string, lengths 7–38), file_path (string, lengths 5–103).
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for arm_effector."""

from absl.testing import absltest
from dm_control import mjcf
from dm_robotics.moma.effectors import arm_effector
from dm_robotics.moma.models.robots.robot_arms import sawyer
import numpy as np


class ArmEffectorTest(absltest.TestCase):

  def setUp(self):
    super().setUp()
    self._arm = sawyer.Sawyer(with_pedestal=False)
    self._physics = mjcf.Physics.from_mjcf_model(self._arm.mjcf_model)

  def test_setting_control(self):
    effector = arm_effector.ArmEffector(
        arm=self._arm, action_range_override=None, robot_name='sawyer')
    joint_command = np.ones(7, dtype=np.float32) * 0.02
    effector.set_control(self._physics, joint_command)
    np.testing.assert_allclose(
        self._physics.bind(self._arm.actuators).ctrl, joint_command)

  def test_action_range_override_affects_action_spec(self):
    effector = arm_effector.ArmEffector(
        arm=self._arm, action_range_override=[(-0.1, 0.1)],
        robot_name='sawyer')
    action_spec = effector.action_spec(self._physics)
    np.testing.assert_allclose(action_spec.minimum, np.ones(7) * -0.1)
    np.testing.assert_allclose(action_spec.maximum, np.ones(7) * 0.1)


if __name__ == '__main__':
  absltest.main()
dm_robotics-main/py/moma/effectors/arm_effector_test.py
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Standard effector for arms in sim."""

from typing import List, Optional, Tuple

from dm_control import mjcf  # type: ignore
from dm_env import specs
from dm_robotics.moma import effector
from dm_robotics.moma.effectors import mujoco_actuation
from dm_robotics.moma.models.robots.robot_arms import robot_arm
import numpy as np


class ArmEffector(effector.Effector):
  """An effector interface for a robot arm."""

  def __init__(self,
               arm: robot_arm.RobotArm,
               action_range_override: Optional[List[Tuple[float, float]]],
               robot_name: str):
    """Constructor.

    Args:
      arm: The MoMa robot arm that we want to control.
      action_range_override: Optional argument to change the control range of
        the robot.
      robot_name: Name of the robot, used to identify the effector in case
        multiple arms are used.
    """
    self._arm = arm
    self._effector_prefix = f'{robot_name}_arm_joint'
    self._mujoco_effector = mujoco_actuation.MujocoEffector(
        self._arm.actuators,
        self._effector_prefix,
        action_range_override)

  def initialize_episode(self, physics, random_state) -> None:
    pass

  def action_spec(self, physics: mjcf.Physics) -> specs.BoundedArray:
    return self._mujoco_effector.action_spec(physics)

  def set_control(self, physics: mjcf.Physics, command: np.ndarray) -> None:
    self._mujoco_effector.set_control(physics, command)

  @property
  def prefix(self) -> str:
    return self._mujoco_effector.prefix
dm_robotics-main/py/moma/effectors/arm_effector.py
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Effectors that constrain actions to joint limits or Cartesian bounds."""

from typing import Callable, Optional, TypeVar, Generic

from dm_control import mjcf
from dm_env import specs
from dm_robotics.moma import effector
from dm_robotics.moma.models.robots.robot_arms import robot_arm
import numpy as np

# The state which we check in order to determine whether to constrain certain
# DOFs.
_State = np.ndarray

# The limits (upper or lower) of the state.
_StateLimits = np.ndarray

# The command which will be modified based on the state, if the state goes
# outside the bounds.
_Command = np.ndarray

# Callable that returns the state indices that are NOT valid, i.e. which DOFs
# are outside the limits.
_StateValidityChecker = Callable[[_State, _StateLimits, _Command], np.ndarray]

T = TypeVar('T', bound=effector.Effector)


class ConstrainedActionEffector(effector.Effector, Generic[T]):
  """Effector wrapper that limits certain DOFs based on their state.

  For instance, if you want to limit a joint torque command based on whether
  certain joints are close to their velocity limits, you may use this effector
  like so:

  ```
  my_safe_effector = ConstrainedActionEffector(
      delegate=my_raw_joint_torque_effector,
      min_limits=min_arm_joint_velocities,
      max_limits=max_arm_joint_velocities,
      state_getter=lambda physics: physics.bind(arm.joints).qvel)
  ```

  Any command DOFs whose corresponding state surpasses the provided limits will
  be set to 0.
  """

  def __init__(
      self,
      delegate: T,
      min_limits: np.ndarray,
      max_limits: np.ndarray,
      state_getter: Callable[[mjcf.Physics], np.ndarray],
      min_state_checker: Optional[_StateValidityChecker] = None,
      max_state_checker: Optional[_StateValidityChecker] = None):
    """Constructor for ConstrainedActionEffector.

    Args:
      delegate: Underlying effector which actually actuates the command.
      min_limits: The lower limits of the state of whatever is being actuated.
        If the state goes below this limit, the command gets set to 0. For
        instance, if the delegate is a joint velocity effector, and the state
        is the joint positions, if the 3rd joint position is below the 3rd
        limit, then the 3rd action will be set to 0.
      max_limits: The upper limits of the state of whatever is being actuated.
        See `min_limits` description for how these limits are used.
      state_getter: Callable that takes a physics object and returns the
        relevant "state" of the actuated entity. The limits will be applied to
        this state. When the state falls outside the bounds of the limits, the
        commanded action will be set to 0.
      min_state_checker: Optional callable that takes the state as returned by
        `state_getter`, the `min_limits`, and the input command to the
        effector, and determines which controllable DOFs are not valid.
        Returns a boolean np.ndarray mask that has the same shape as the input
        command. `True` DOFs in the mask are set to 0. If not provided, this
        defaults to a simple min bounds check.
      max_state_checker: Optional callable that takes the state as returned by
        `state_getter`, the `max_limits`, and the input command to the
        effector, and determines which controllable DOFs are not valid.
        Returns a boolean np.ndarray that has the same shape as the input
        command. `True` DOFs in the mask are set to 0. If not provided, this
        defaults to a simple max bounds check.
    """
    if min_limits.shape != max_limits.shape:
      raise ValueError('The min and max limits must have the same shape. '
                       f'Min: {min_limits.shape}, max: {max_limits.shape}.')
    self._delegate = delegate
    self._min_limits = min_limits
    self._max_limits = max_limits
    self._get_state = state_getter
    self._min_state_checker = (min_state_checker or
                               self._default_min_state_checker)
    self._max_state_checker = (max_state_checker or
                               self._default_max_state_checker)

  def after_compile(self, mjcf_model: mjcf.RootElement,
                    physics: mjcf.Physics) -> None:
    self._delegate.after_compile(mjcf_model, physics)

  def initialize_episode(self, physics, random_state) -> None:
    self._delegate.initialize_episode(physics, random_state)

  def action_spec(self, physics: mjcf.Physics) -> specs.BoundedArray:
    # Make sure that the delegate effector and limits are compatible.
    if self._delegate.action_spec(physics).shape != self._min_limits.shape:
      raise ValueError('The delegate effector action spec and the provided '
                       'limits have different shapes. Delegate action spec: '
                       f'{self._delegate.action_spec(physics)}. Limits shape: '
                       f'{self._min_limits.shape}')
    return self._delegate.action_spec(physics)

  def set_control(self, physics: mjcf.Physics, command: np.ndarray) -> None:
    constrained_action = self._get_constrained_action(physics, command)
    self._delegate.set_control(physics, constrained_action)

  def _get_constrained_action(
      self, physics: mjcf.Physics, command: np.ndarray) -> np.ndarray:
    # Limit any DOFs whose state falls outside the provided limits.
    constrained_action = command[:]
    state = self._get_state(physics)
    constrained_action[
        self._min_state_checker(state, self._min_limits, command)] = 0.
    constrained_action[
        self._max_state_checker(state, self._max_limits, command)] = 0.
    return constrained_action

  @property
  def delegate(self) -> T:
    return self._delegate

  @property
  def prefix(self) -> str:
    return self._delegate.prefix

  def _default_min_state_checker(
      self, state: np.ndarray, limits: np.ndarray, command: np.ndarray
  ) -> np.ndarray:
    """Returns a bool mask for `command` for which DOFs are invalid."""
    return (state < limits) & (command < 0.)

  def _default_max_state_checker(
      self, state: np.ndarray, limits: np.ndarray, command: np.ndarray
  ) -> np.ndarray:
    """Returns a bool mask for `command` for which DOFs are invalid."""
    return (state > limits) & (command > 0.)


class LimitJointPositions(ConstrainedActionEffector):
  """Limits joint actions to stay within a safe joint position range.

  The current implementation assumes all joints are controllable.

  NOTE: Do NOT use this effector with a joint position effector. This is meant
  to be used with joint velocity, torque, etc. effectors.
  """

  def __init__(self, joint_effector: effector.Effector,
               min_joint_limits: np.ndarray,
               max_joint_limits: np.ndarray,
               arm: robot_arm.RobotArm):
    if len(min_joint_limits) != len(arm.joints):
      raise ValueError('The joint limits must match the number of joints. '
                       f'Length of joint limits: {len(min_joint_limits)}. '
                       f'Number of joints: {len(arm.joints)}')
    super().__init__(
        delegate=joint_effector,
        min_limits=min_joint_limits,
        max_limits=max_joint_limits,
        state_getter=lambda physics: physics.bind(arm.joints).qpos)
dm_robotics-main/py/moma/effectors/constrained_actions_effectors.py
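The ConstrainedActionEffector docstring above already sketches the joint-torque case. The snippet below is a minimal sketch of the LimitJointPositions wrapper from the same file applied to a Sawyer arm; the limit values are placeholders and the use of ArmEffector as the wrapped (joint-space) effector is an illustrative assumption, not a recommendation from the library.

import numpy as np
from dm_control import mjcf
from dm_robotics.moma.effectors import arm_effector
from dm_robotics.moma.effectors import constrained_actions_effectors
from dm_robotics.moma.models.robots.robot_arms import sawyer

arm = sawyer.Sawyer(with_pedestal=False)
physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model)

# Wrapped effector; assumed here to issue joint-space commands for the 7 DOFs.
joint_effector = arm_effector.ArmEffector(
    arm=arm, action_range_override=None, robot_name='sawyer')

# Hypothetical position limits (radians); a real setup would use the arm's
# actual joint ranges.
min_limits = np.full(7, -2.0)
max_limits = np.full(7, 2.0)

safe_effector = constrained_actions_effectors.LimitJointPositions(
    joint_effector=joint_effector,
    min_joint_limits=min_limits,
    max_joint_limits=max_limits,
    arm=arm)

# Any DOF whose position is already past a limit (and whose command would push
# it further out) has its command zeroed before reaching the wrapped effector.
safe_effector.set_control(physics, np.ones(7, dtype=np.float32) * 0.1)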
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for min_max_effector."""

from absl.testing import absltest
from absl.testing import parameterized
from dm_robotics.moma.effectors import min_max_effector
from dm_robotics.moma.effectors import test_utils
import numpy as np


class MinMaxEffectorTest(parameterized.TestCase):

  def test_min_max_effector_sends_correct_command(self):
    spy_effector = test_utils.SpyEffector(dofs=3)
    min_action = np.array([-0.9, -0.5, -0.2])
    max_action = np.array([0.2, 0.5, 0.8])
    test_effector = min_max_effector.MinMaxEffector(
        base_effector=spy_effector,
        min_action=min_action,
        max_action=max_action)

    # Ensure that the effector correctly transforms the input command.
    sent_command = np.array([-0.8, 0., 0.3])
    expected_command = np.array([-0.9, -0.5, 0.8])
    test_effector.set_control(None, sent_command)
    np.testing.assert_allclose(expected_command, spy_effector.previous_action)

  def test_default_spec_min_max_effector_sends_correct_command(self):
    spy_effector = test_utils.SpyEffector(dofs=3)
    test_effector = min_max_effector.MinMaxEffector(base_effector=spy_effector)

    # Ensure that the effector correctly transforms the input command.
    sent_command = np.array([-0.3, 0., 0.6])
    expected_command = np.array([-1., -1., 1.])
    test_effector.set_control(None, sent_command)
    np.testing.assert_allclose(expected_command, spy_effector.previous_action)

  @parameterized.named_parameters(
      ('min_action_wrong', np.array([1., 2.]), np.array([4., 5., 6.])),
      ('max_action_wrong', np.array([1., 2., 3.]), np.array([4., 5.])),
  )
  def test_raises_if_wrong_shaped_action_is_passed(
      self, min_action, max_action):
    spy_effector = test_utils.SpyEffector(dofs=3)
    test_effector = min_max_effector.MinMaxEffector(
        base_effector=spy_effector,
        min_action=min_action,
        max_action=max_action)

    # Assert the effector raises an error.
    with self.assertRaises(ValueError):
      test_effector.action_spec(None)


if __name__ == '__main__':
  absltest.main()
dm_robotics-main/py/moma/effectors/min_max_effector_test.py
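Based on the expectations in the tests above, MinMaxEffector appears to snap each command component to one of the two extremes: values at or below zero (the midpoint of the default [-1, 1] range) go to min_action, and positive values go to max_action. The NumPy sketch below illustrates that thresholding as an assumption about the behaviour; it is not the library's actual implementation.

import numpy as np

def min_max_command(command: np.ndarray,
                    min_action: np.ndarray,
                    max_action: np.ndarray,
                    midpoint: float = 0.0) -> np.ndarray:
  """Snaps each command entry to min_action or max_action (assumed behaviour)."""
  return np.where(command > midpoint, max_action, min_action)

# Reproduces the expectation in the first test case above.
print(min_max_command(np.array([-0.8, 0., 0.3]),
                      np.array([-0.9, -0.5, -0.2]),
                      np.array([0.2, 0.5, 0.8])))  # -> [-0.9 -0.5  0.8]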
# Copyright 2020 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A module for sampling prop and robot pose distributions.""" import abc from typing import Callable, Optional, Sequence, Tuple, Union from dm_robotics.geometry import geometry from dm_robotics.transformations import transformations as tr import numpy as np SamplePoseFn = Callable[[np.random.RandomState, Optional[geometry.Physics]], Tuple[np.ndarray, np.ndarray]] class Distribution(abc.ABC): """A basic interface for probability distributions.""" @abc.abstractmethod def sample(self, random_state: np.random.RandomState) -> np.ndarray: """Returns a sample from the distribution.""" pass @abc.abstractmethod def mean(self) -> np.ndarray: """Returns the mean of the distribution.""" pass class PoseDistribution(abc.ABC): """An interface for pose distributions.""" @abc.abstractmethod def sample_pose( self, random_state: np.random.RandomState, physics: Optional[geometry.Physics] = None ) -> Tuple[np.ndarray, np.ndarray]: """Returns a (pos, quat) pose tuple sampled from some distribution. Args: random_state: Numpy random state for sampling. physics: Required if the frame for the distribution has a Grounding. """ @abc.abstractmethod def mean_pose( self, physics: Optional[geometry.Physics] = None ) -> Tuple[np.ndarray, np.ndarray]: """Returns the mean (pos, quat) pose tuple of the distribution. Args: physics: Required if the frame for the distribution has a Grounding. parent. """ class PoseStampedDistribution(PoseDistribution): """A PoseDistribution allowing parameterization relative to other frames.""" def __init__(self, pose_dist: 'PoseDistribution', frame: geometry.Frame): super().__init__() self._pose_dist = pose_dist self._frame = frame @property def pose_dist(self): return self._pose_dist @property def frame(self): return self._frame def sample_pose( self, random_state: np.random.RandomState, physics: Optional[geometry.Physics] = None ) -> Tuple[np.ndarray, np.ndarray]: """Returns a (pos, quat) pose tuple sampled from some distribution. Args: random_state: Numpy random state for sampling. physics: Required if the frame for the distribution has a Grounding. 
""" sampled_local_pose = geometry.Pose( *self._pose_dist.sample_pose(random_state)) sampled_world_pose = geometry.PoseStamped( pose=sampled_local_pose, frame=self._frame).get_world_pose(physics) return sampled_world_pose.position, sampled_world_pose.quaternion def mean_pose( self, physics: Optional[geometry.Physics] = None ) -> Tuple[np.ndarray, np.ndarray]: """Returns the mean (pos, quat) pose tuple of the distribution.""" mean_local_pose = geometry.Pose(*self._pose_dist.mean_pose()) mean_world_pose = geometry.PoseStamped( pose=mean_local_pose, frame=self._frame).get_world_pose(physics) return mean_world_pose.position, mean_world_pose.quaternion class CompositePoseDistribution(PoseDistribution): """A PoseDistribution composed of a pose and a quaternion distribution.""" def __init__(self, pos_dist: Distribution, quat_dist: Distribution): super().__init__() self._pos_dist = pos_dist self._quat_dist = quat_dist def sample_pose( self, random_state: np.random.RandomState, physics: Optional[geometry.Physics] = None ) -> Tuple[np.ndarray, np.ndarray]: del physics return (self._pos_dist.sample(random_state), self._quat_dist.sample(random_state)) def mean_pose( self, physics: Optional[geometry.Physics] = None ) -> Tuple[np.ndarray, np.ndarray]: del physics return (self._pos_dist.mean(), self._quat_dist.mean()) def truncated_normal_pose_distribution(mean_pose: Union[Sequence[float], geometry.Pose], pos_sd: Sequence[float], rot_sd: Sequence[float], pos_clip_sd: float = 2., rot_clip_sd: float = 2.): """Convenience-method for generating a TruncatedNormal PoseDistribution. Args: mean_pose: (list or Pose) A mean-pose represented as a 6D list composed of 3D pose and 3D euler angle, or a `Pose`. pos_sd: (3d array) Standard deviation of the position (in meters), relative to `mean_pose`. rot_sd: (3d array) Standard deviation represented as axis-angle, relative to `mean_pose`. pos_clip_sd: (float) Scalar threshold on position standard-deviation. rot_clip_sd: (float) Scalar threshold on standard-deviation. Returns: A CompositePoseDistribution with the provided parameters. """ if isinstance(mean_pose, list) or isinstance(mean_pose, np.ndarray): mean_pose = geometry.Pose.from_poseuler(mean_pose) elif not isinstance(mean_pose, geometry.Pose): raise ValueError('Invalid mean_pose argument ({}). Expected a list or ' 'numpy array, or a `Pose`'.format(mean_pose)) pos_dist = TruncatedNormal(mean_pose.position, pos_sd, pos_clip_sd) quat_dist = TruncatedNormalQuaternion(mean_pose.quaternion, rot_sd, rot_clip_sd) return CompositePoseDistribution(pos_dist, quat_dist) class ConstantPoseDistribution(PoseDistribution): """A distribution with only a single pose with probability 1.""" def __init__(self, pose): """Constructor. Args: pose: a 6D list composed of 3D pose and 3D euler angle. """ super().__init__() self._pos = pose[:3] self._quat = tr.euler_to_quat(pose[3:], ordering='XYZ') def sample_pose( self, random_state: np.random.RandomState, physics: Optional[geometry.Physics] = None ) -> Tuple[np.ndarray, np.ndarray]: del random_state del physics return (self._pos, self._quat) def mean_pose( self, physics: Optional[geometry.Physics] = None ) -> Tuple[np.ndarray, np.ndarray]: del physics return (self._pos, self._quat) class LambdaPoseDistribution(PoseDistribution): """A distribution the samples using given lambdas.""" def __init__(self, sample_pose_fn: SamplePoseFn, mean_pose_fn: Callable[[Optional[geometry.Physics]], Tuple[np.ndarray, np.ndarray]]): """Constructor. 
Args: sample_pose_fn: a callable for obtaining a sample pose. mean_pose_fn: a callable for obtaining the mean of sampled poses. """ super().__init__() self._sample_pose_fn = sample_pose_fn self._mean_pose_fn = mean_pose_fn def sample_pose( self, random_state: np.random.RandomState, physics: Optional[geometry.Physics] = None ) -> Tuple[np.ndarray, np.ndarray]: return self._sample_pose_fn(random_state, physics) def mean_pose( self, physics: Optional[geometry.Physics] = None ) -> Tuple[np.ndarray, np.ndarray]: return self._mean_pose_fn(physics) class WeightedDiscretePoseDistribution(PoseDistribution): """A distribution of a fixed number of poses each with a relative probability.""" def __init__(self, weighted_poses: Sequence[Tuple[float, np.ndarray]]): """Constructor. Args: weighted_poses: a list of tuples of (probability, pose). The probability is relative (i.e. does not need to be normalized), and the pose 6D array composed of 3D pose and 3D euler angle. """ super().__init__() self._poses = [pose for _, pose in weighted_poses] self._weights = np.array([weight for weight, _ in weighted_poses]) self._weights /= np.sum(self._weights) def sample_pose( self, random_state: np.random.RandomState, physics: Optional[geometry.Physics] = None ) -> Tuple[np.ndarray, np.ndarray]: del physics chosen = random_state.choice(self._poses, p=self._weights) pos = chosen[:3] quat = tr.euler_to_quat(chosen[3:], ordering='XYZ') return pos, quat def mean_pose( self, physics: Optional[geometry.Physics] = None ) -> Tuple[np.ndarray, np.ndarray]: del physics # Note: this returns the mode, not the mean. ml_pose_idx = np.argmax(self._weights) ml_pose = self._poses[ml_pose_idx] pos = ml_pose[:3] quat = tr.euler_to_quat(ml_pose[3:], ordering='XYZ') return pos, quat class UniformPoseDistribution(PoseDistribution): """Distribution of uniformly distributed poses in a given range.""" def __init__(self, min_pose_bounds: Sequence[float], max_pose_bounds: Sequence[float]): """Constructor. Args: min_pose_bounds: a 6D list composed of 3D pose and 3D euler angle. max_pose_bounds: a 6D list composed of 3D pose and 3D euler angle. """ super().__init__() self._min_pose_bounds = np.array(min_pose_bounds) self._max_pose_bounds = np.array(max_pose_bounds) def sample_pose( self, random_state: np.random.RandomState, physics: Optional[geometry.Physics] = None ) -> Tuple[np.ndarray, np.ndarray]: del physics pose = random_state.uniform(self._min_pose_bounds, self._max_pose_bounds) pos = pose[:3] quat = tr.euler_to_quat(pose[3:], ordering='XYZ') return pos, quat def mean_pose( self, physics: Optional[geometry.Physics] = None ) -> Tuple[np.ndarray, np.ndarray]: del physics min_pose = geometry.Pose.from_poseuler(self._min_pose_bounds) max_pose = geometry.Pose.from_poseuler(self._max_pose_bounds) mean_pos = min_pose.position + (max_pose.position - min_pose.position) / 2 mean_quat = tr.quat_slerp(min_pose.quaternion, max_pose.quaternion, 0.5) return mean_pos, mean_quat def _points_to_pose( to_pos: np.ndarray, from_pos: np.ndarray, y_hint: Optional[np.ndarray] = None, extra_quat: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]: """Computes pose at `from_pos` s.t. z-axis is pointing towards `to_pos`.""" if y_hint is None: y_hint = np.array([0., 1., 0.]) else: y_hint = y_hint / np.linalg.norm(y_hint) view_dir = to_pos - from_pos view_dir /= np.linalg.norm(view_dir) # Build right-handed coordinate system with z-axis towards target, and x-axis # orthogonal to y-axis hint. 
z = view_dir x = np.cross(y_hint, z) y = np.cross(z, x) rmat = np.stack([x, y, z], axis=1) rmat = rmat / np.linalg.norm(rmat, axis=0) quat = tr.axisangle_to_quat(tr.rmat_to_axisangle(rmat)) if extra_quat is not None: quat = tr.quat_mul(quat, extra_quat) return from_pos, quat class LookAtPoseDistribution(PoseDistribution): """Distribution looking from a view-point to a target-point.""" def __init__(self, look_at: Distribution, look_from: Distribution, y_hint: Optional[Union[np.ndarray, Callable[[], np.ndarray]]] = None, extra_quat: Optional[np.ndarray] = None): """Initialize LookAtPoseDistribution. This distribution returns poses centered at `look_from` and with the +z-axis pointing towards `look_at`. It is parameterized by two distributions over points, and accepts a user-provided constraint for the remaining degree-of- freedom around the z-axis. Args: look_at: A `Distribution` over the 3D point to look at. look_from: A `Distribution` over the 3D point to look from. y_hint: Optional array or callable returning 3-vector to cross with the looking direction to produce the x-axis of the sampled pose. This is required because the full pose is under-constrained given only `from` and `to` points, so rather than baking in a solution we expose this to the user as a "hint". This can be anything, but the motivating examples are: 1) Maintaining a fixed pose as the object moves -- pass the y-axis of object's current pose. 2) Minimizing the difference w.r.t. the current (wrist-mounted) camera pose -- pass the y-axis of TCP's current pose. Failure to do either of these will result in a "rolling" behavior along the z-axis as the object or robot moves. extra_quat: Optional quaternion [w, i, j, k] to apply as a final rotation after solving for the viewing direction. If omitted, the z-axis will point towards `look_at`, and the x-axis will be orthogonal to `y_hint`. """ super().__init__() self._look_at = look_at self._look_from = look_from self._extra_quat = extra_quat if y_hint is None: self._y_hint = np.array([0., 1., 0.]) elif isinstance(y_hint, np.ndarray): self._y_hint = y_hint / np.linalg.norm(y_hint) else: self._y_hint = y_hint def sample_pose( self, random_state: np.random.RandomState, physics: Optional[geometry.Physics] = None ) -> Tuple[np.ndarray, np.ndarray]: del physics look_at = self._look_at.sample(random_state) look_from = self._look_from.sample(random_state) y_hint = self._y_hint() if callable(self._y_hint) else self._y_hint return _points_to_pose(look_at, look_from, y_hint, self._extra_quat) def mean_pose( self, physics: Optional[geometry.Physics] = None ) -> Tuple[np.ndarray, np.ndarray]: del physics look_at = self._look_at.mean() look_from = self._look_from.mean() return _points_to_pose(look_at, look_from, self._y_hint, self._extra_quat) class DomePoseDistribution(PoseDistribution): """Distribution within a dome (half sphere with a thickness). Dome sits on the x-y plane and the probe is initialized looking down. Radius and angles are uniformly sampled, hence points are not uniform in the volume. """ def __init__(self, center, r_min, r_max, theta_max): """Constructor. Args: center: 3D list for the position of the dome center. r_min: Minimum radius. r_max: Maximum radius. 
theta_max: Maximum polar angle """ super().__init__() self._center = center self._r_min = r_min self._r_max = r_max self._theta_max = theta_max def sample_pose( self, random_state: np.random.RandomState, physics: Optional[geometry.Physics] = None ) -> Tuple[np.ndarray, np.ndarray]: del physics r = random_state.uniform(self._r_min, self._r_max) theta = random_state.uniform(0., self._theta_max) phi = random_state.uniform(-np.pi, np.pi) x = self._center[0] + r * np.sin(theta) * np.cos(phi) y = self._center[1] + r * np.sin(theta) * np.sin(phi) z = self._center[2] + r * np.cos(theta) pos = np.asarray([x, y, z]) quat = tr.euler_to_quat([0, np.pi, 0.], ordering='XYZ') return pos, quat def mean_pose( self, physics: Optional[geometry.Physics] = None ) -> Tuple[np.ndarray, np.ndarray]: raise NotImplementedError def _sample_with_limits(random_state: np.random.RandomState, sd: np.ndarray, clip_sd: float, max_steps: int = 500) -> np.ndarray: """Rejection-samples from a zero-mean truncated-normal distribution. Same as normal distribution except that values exceeding minimum or maximum limits are resampled. See also tf.truncated_normal. Args: random_state: Numpy `RandomState` object. sd: A list or array of standard deviations. Must be greater or equal to zero. clip_sd: (float) Scalar threshold on standard-deviation. Values larger than this will be re-sampled. max_steps: (int) Maximum number of times to resample Returns: An array filled with random truncated normal values. Raises: ValueError: If invalid sd provided. RuntimeError: If max_steps exceeded before a valid sample is obtained. """ if np.any(sd < 0): raise ValueError('Invalid sd {}'.format(sd)) samp = random_state.normal(scale=sd) i = 0 while i < max_steps: bad_idxs = np.logical_or(samp < -(sd * clip_sd), samp > (sd * clip_sd)) if np.any(bad_idxs): samp[bad_idxs] = random_state.normal(scale=sd[bad_idxs]) i += 1 else: break if np.any(bad_idxs): raise ValueError('Failed to sample within limits {} (clip_sd: {})'.format( samp, clip_sd)) return samp class UniformDistribution(Distribution): """Generic Uniform Distribution wrapping `numpy.random.uniform`.""" def __init__(self, low: Union[float, Sequence[float]] = 0., high: Union[float, Sequence[float]] = 1.): """Constructor. Args: low: Lower boundary of the output interval. All values generated will be greater than or equal to low. The default value is 0. high: Upper boundary of the output interval. All values generated will be less than or equal to high. The default value is 1.0. """ super().__init__() self._low = np.array(low) self._high = np.array(high) def sample(self, random_state: np.random.RandomState) -> np.ndarray: return random_state.uniform(self._low, self._high) def mean(self) -> np.ndarray: return self._low + (self._high - self._low) / 2. class TruncatedNormal(Distribution): """Generic Truncated Normal Distribution.""" def __init__(self, mean, sd, clip_sd=2.): """Constructor. Args: mean: (array-like) Mean. sd: (array-like) Standard deviation. clip_sd: (float) Scalar threshold on standard-deviation. Values larger than this will be re-sampled. """ super().__init__() self._mean = np.array(mean, dtype=np.float32) self._sd = np.array(sd, dtype=np.float32) self._clip_sd = clip_sd def sample(self, random_state: np.random.RandomState) -> np.ndarray: return self._mean + _sample_with_limits(random_state, self._sd, self._clip_sd) def mean(self) -> np.ndarray: return self._mean class TruncatedNormalQuaternion(TruncatedNormal): """Truncated Normal Distribution over Quaternions. 
The deviation of this distribution is parameterized by axis-angle to allow control of each cartesian DOF independently. E.g. The following will generate a distribution that only varies about the x-axis with maximum deviation of 2-radians: >>> TruncatedNormalQuaternion([1., 0., 0., 0.], [1., 0., 0.], 2.) And the following will generate a distribution that varies over the y & z axes of the frame relative to the rotation described by an axis-angle: >>> TruncatedNormalQuaternion([0.2, 0.3, 0.], [0., 0.5, 0.5], 2.) """ def __init__(self, mean, sd, clip_sd=2.): """Constructor. Args: mean: (3d or 4d array) Mean quaternion repesented either as a 4-dim array [w, i, j, k] or a 3-dim axis-angle (with angle encoded in length). sd: (3d array) Standard deviation represented as axis-angle. clip_sd: (float) Scalar threshold on standard-deviation. Values larger than this will be re-sampled. """ super().__init__(mean, sd, clip_sd) if len(mean) == 3: self._mean = tr.axisangle_to_quat(mean) def sample(self, random_state: np.random.RandomState) -> np.ndarray: axisangle = _sample_with_limits(random_state, self._sd, self._clip_sd) offset_quat = tr.axisangle_to_quat(axisangle) return tr.quat_mul(self._mean, offset_quat)
dm_robotics-main/py/geometry/pose_distribution.py
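A short sketch of sampling from the pose distributions defined in the file above. The bounds and points are made-up values; only constructors and methods that appear in the file itself (UniformPoseDistribution, UniformDistribution, LookAtPoseDistribution, sample_pose) are used.

import numpy as np
from dm_robotics.geometry import pose_distribution

random_state = np.random.RandomState(0)

# Uniform pose over a small box of positions and a small range of euler angles.
uniform_dist = pose_distribution.UniformPoseDistribution(
    min_pose_bounds=[-0.1, -0.1, 0.0, 0., 0., -np.pi / 8],
    max_pose_bounds=[0.1, 0.1, 0.2, 0., 0., np.pi / 8])
pos, quat = uniform_dist.sample_pose(random_state)

# Pose looking from a sampled view-point towards a sampled target-point.
look_at_dist = pose_distribution.LookAtPoseDistribution(
    look_at=pose_distribution.UniformDistribution([-0.05] * 3, [0.05] * 3),
    look_from=pose_distribution.UniformDistribution([0.3, 0.3, 0.4],
                                                    [0.4, 0.4, 0.5]))
view_pos, view_quat = look_at_dist.sample_pose(random_state)
print(pos, quat, view_pos, view_quat)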
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Physics implemented through MuJoCo."""

from typing import Callable

from dm_control import composer
from dm_control import mjcf
from dm_control.mujoco.wrapper import mjbindings
import dm_env
from dm_robotics.geometry import geometry
import numpy as np

mjlib = mjbindings.mjlib


def from_env(env: composer.Environment) -> geometry.Physics:
  return _MujocoPhysics(lambda: env.physics)


def from_getter(getter: Callable[[], mjcf.Physics]) -> geometry.Physics:
  return _MujocoPhysics(getter)


def wrap(physics: mjcf.Physics) -> geometry.Physics:
  return _MujocoPhysics(lambda: physics)


class _MujocoPhysics(geometry.Physics):
  """Exposes `mjcf.Physics` as a `geometry.Physics`.

  Supports body, geom, and site elements.
  """

  def __init__(self, physics_getter: Callable[[], mjcf.Physics]):
    super().__init__()
    self._physics_getter = physics_getter

  def sync_before_step(self, timestep: dm_env.TimeStep):
    """No-op for compatibility with Physics. Assumes synced elsewhere."""
    pass

  def world_pose(self,
                 frame: geometry.Grounding,
                 get_pos: bool = True,
                 get_rot: bool = True) -> geometry.Pose:
    """Return world pose of the provided frame.

    Args:
      frame: A frame identifier.
      get_pos: If False, drop position entries.
      get_rot: If False, drop rotation entries.

    Returns:
      A `geometry.Pose` containing the requested pose.
    """
    if not isinstance(frame, mjcf.Element):
      raise ValueError('bad frame: {}, expected mjcf.Element'.format(frame))
    physics = self._physics_getter()
    hmat_world_element = np.eye(4)
    mjlib.mj_kinematics(physics.model.ptr, physics.data.ptr)

    if get_rot:
      xmat = None
      if frame.tag == 'geom':
        xmat = physics.named.data.geom_xmat[frame.full_identifier]
      elif frame.tag == 'site':
        xmat = physics.named.data.site_xmat[frame.full_identifier]
      elif frame.tag == 'camera':
        xmat = physics.named.data.cam_xmat[frame.full_identifier]
      elif frame.tag == 'body':
        xmat = physics.named.data.xmat[frame.full_identifier]
      else:
        raise ValueError(f'Frame {frame} is a {frame.tag} not a geom/site/'
                         'camera/body')
      hmat_world_element[0:3, 0:3] = xmat.reshape(3, 3)

    if get_pos:
      xpos = None
      if frame.tag == 'geom':
        xpos = physics.named.data.geom_xpos[frame.full_identifier]
      elif frame.tag == 'site':
        xpos = physics.named.data.site_xpos[frame.full_identifier]
      elif frame.tag == 'camera':
        xpos = physics.named.data.cam_xpos[frame.full_identifier]
      elif frame.tag == 'body':
        xpos = physics.named.data.xpos[frame.full_identifier]
      else:
        raise ValueError(f'Frame {frame} is a {frame.tag} not a geom/site/'
                         'camera/body')
      hmat_world_element[0:3, 3] = xpos

    return geometry.Pose.from_hmat(hmat_world_element)
dm_robotics-main/py/geometry/mujoco_physics.py
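A minimal sketch of wrapping an mjcf.Physics with the module above and querying a world pose. The throwaway MJCF model (a single static body carrying a site and a small sphere geom) is an illustrative assumption; any body, geom, site, or camera element would work with world_pose.

from dm_control import mjcf
from dm_robotics.geometry import mujoco_physics

# Build a tiny model: one body carrying a site, offset from the origin.
root = mjcf.RootElement(model='example')
body = root.worldbody.add('body', name='box_body', pos=[0.1, 0.2, 0.3])
site = body.add('site', name='box_site', pos=[0., 0., 0.05])
body.add('geom', type='sphere', size=[0.02])

physics = mjcf.Physics.from_mjcf_model(root)
geometry_physics = mujoco_physics.wrap(physics)

# world_pose accepts body/geom/site/camera mjcf elements.
pose = geometry_physics.world_pose(site)
print(pose.position, pose.quaternion)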
# Copyright 2020 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for PoseDistribution implementations.""" from unittest import mock from absl.testing import absltest from absl.testing import parameterized from dm_robotics.geometry import pose_distribution from dm_robotics.transformations import transformations as tr import numpy as np class PoseDistributionTest(parameterized.TestCase): def testLookAtPoseDistribution(self): target_dist = mock.MagicMock(spec=pose_distribution.UniformDistribution) source_dist = mock.MagicMock(spec=pose_distribution.UniformDistribution) target_dist.sample.return_value = np.array([0.1, 0.5, 0.3]) source_dist.sample.return_value = np.array([0.4, 0.2, 0.6]) target_dist.mean.return_value = np.array([0.1, 0.2, 0.3]) * -1 source_dist.mean.return_value = np.array([0.3, 0.2, 0.1]) * -1 xnormal = np.array([0, 1, 0]) look_at_dist = pose_distribution.LookAtPoseDistribution( target_dist, source_dist, xnormal) # Test `sample_pose` actual_pos, actual_quat = look_at_dist.sample_pose(None) expected_pos, expected_quat = pose_distribution._points_to_pose( target_dist.sample(), source_dist.sample(), xnormal) np.testing.assert_allclose(actual_pos, expected_pos) self.assertTrue( np.allclose(actual_quat, expected_quat) or np.allclose(actual_quat, expected_quat * -1)) # Test `mean_pose` actual_pos, actual_quat = look_at_dist.mean_pose(None) expected_pos, expected_quat = pose_distribution._points_to_pose( target_dist.mean(), source_dist.mean(), xnormal) np.testing.assert_allclose(actual_pos, expected_pos) self.assertTrue( np.allclose(actual_quat, expected_quat) or np.allclose(actual_quat, expected_quat * -1)) def testTruncatedNormalPoseDistribution(self): """Test normal pose with limits.""" random_state = np.random.RandomState(1) def _check_limits(mean_poseuler, pos_sd, rot_sd, pos_clip_sd, rot_clip_sd): pose_dist = pose_distribution.truncated_normal_pose_distribution( mean_poseuler, pos_sd, rot_sd, pos_clip_sd, rot_clip_sd) pos, quat = pose_dist.sample_pose(random_state) # Check that position and axis-angle don't exceed clip_sd # Obtain the orientation relative to the mean mean_quat = tr.euler_to_quat(mean_poseuler[3:]) quat_mean_inv = tr.quat_conj(mean_quat) quat_samp = tr.quat_mul(quat_mean_inv, quat) # Convert to axisangle and compare to threshold. axisangle_samp = tr.quat_to_axisangle(quat_samp) self.assertTrue( np.all(np.logical_or(pos > -pos_clip_sd, pos < pos_clip_sd))) self.assertTrue( np.all( np.logical_or(axisangle_samp > -rot_clip_sd, axisangle_samp < rot_clip_sd))) # Check special cases _check_limits( mean_poseuler=np.array([0.1, 0.2, 0.3, 0, 0, 0]), pos_sd=np.array([0.3, 0.2, 0.1]), rot_sd=np.array([0.3, 0.0, 0.0]), pos_clip_sd=2., rot_clip_sd=1.) 
# Check a bunch of random inputs for _ in range(100): mean_poseuler = random_state.uniform([-1, -2, -3, -np.pi, -np.pi, -np.pi], [1, 2, 3, np.pi, np.pi, np.pi]) pos_sd = random_state.uniform([0, 0, 0], [1, 2, 3]) rot_sd = random_state.uniform([0, 0, 0], [1, 2, 3]) pos_clip_sd = random_state.uniform(0, 10) rot_clip_sd = random_state.uniform(0, 10) _check_limits(mean_poseuler, pos_sd, rot_sd, pos_clip_sd, rot_clip_sd) # Check that pos and axis only vary along non-zero sd dims pos_sd = np.array([0.0, 0.2, 0.0]) rot_sd = np.array([0.1, 0.0, 0.3]) pose_dist = pose_distribution.truncated_normal_pose_distribution( mean_pose=np.array([0., 0., 0., 0., 0., 0.]), pos_sd=pos_sd, rot_sd=rot_sd, pos_clip_sd=2., rot_clip_sd=1.) pos, quat = pose_dist.sample_pose(random_state) axisangle_samp = tr.quat_to_axisangle(quat) self.assertTrue(np.all(np.nonzero(pos)[0] == np.nonzero(pos_sd)[0])) self.assertTrue( np.all(np.nonzero(axisangle_samp)[0] == np.nonzero(rot_sd)[0])) if __name__ == '__main__': absltest.main()
dm_robotics-main/py/geometry/pose_distribution_test.py
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
dm_robotics-main/py/geometry/__init__.py
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""A module for sampling joint angles distributions."""

import abc
from typing import Optional

from dm_robotics.geometry import geometry
import numpy as np


class JointAnglesDistribution(abc.ABC):
  """An interface for joint angles distributions."""

  @abc.abstractmethod
  def sample_angles(self,
                    random_state,
                    physics: Optional[geometry.Physics] = None) -> np.ndarray:
    """Returns angles sampled from some distribution."""
    pass


class ConstantPanTiltDistribution(JointAnglesDistribution):
  """A distribution with only a single angle with probability 1."""

  def __init__(self, joint_angles):
    super().__init__()
    self._joint_angles = joint_angles

  def sample_angles(self,
                    random_state,
                    physics: Optional[geometry.Physics] = None) -> np.ndarray:
    return np.array(self._joint_angles, dtype=np.float32)


class NormalOffsetJointAnglesDistribution(JointAnglesDistribution):
  """Distribution for angles distributed normally around a mean."""

  def __init__(self, mean_angles, angles_sd, clip_sd=3.0):
    super().__init__()
    self._mean_angles = mean_angles
    self._angles_sd = angles_sd
    self._clip_sd = clip_sd

  def sample_angles(self,
                    random_state,
                    physics: Optional[geometry.Physics] = None) -> np.ndarray:
    offset = random_state.normal(scale=self._angles_sd)
    clip_range = self._angles_sd * self._clip_sd
    return self._mean_angles + np.clip(offset, -clip_range, clip_range)


class UniformJointAnglesDistribution(JointAnglesDistribution):
  """Uniform random distribution for joint angles."""

  def __init__(self, min_angles, max_angles):
    super().__init__()
    self._min_angles = min_angles
    self._max_angles = max_angles

  def sample_angles(self,
                    random_state,
                    physics: Optional[geometry.Physics] = None) -> np.ndarray:
    joint_angles = random_state.uniform(
        low=self._min_angles, high=self._max_angles)
    return np.array(joint_angles, dtype=np.float32)
dm_robotics-main/py/geometry/joint_angles_distribution.py
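A short sketch of drawing arm configurations from the distributions defined in the file above; the seven-joint bounds and standard deviations are placeholder values, not limits for any particular robot.

import numpy as np
from dm_robotics.geometry import joint_angles_distribution

random_state = np.random.RandomState(42)

# Uniform sampling between per-joint bounds (placeholder values).
uniform_angles = joint_angles_distribution.UniformJointAnglesDistribution(
    min_angles=np.full(7, -1.0), max_angles=np.full(7, 1.0))
print(uniform_angles.sample_angles(random_state))

# Normally-distributed offsets around a nominal configuration, clipped at
# 3 standard deviations by default.
noisy_angles = joint_angles_distribution.NormalOffsetJointAnglesDistribution(
    mean_angles=np.zeros(7), angles_sd=np.full(7, 0.05))
print(noisy_angles.sample_angles(random_state))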
# Copyright 2020 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and """Tests for geometry.""" import itertools import operator from absl.testing import absltest from absl.testing import parameterized from dm_robotics.geometry import geometry from dm_robotics.transformations import transformations as tr import numpy as np _N_RANDOM = 100 random_state = np.random.RandomState(1) class GeometryTest(parameterized.TestCase): def test_pos_mul_random(self): for _ in range(_N_RANDOM): pose1 = geometry.Pose.from_poseuler( random_state.uniform(size=6), ordering='XYZ') pose2 = geometry.Pose.from_poseuler( random_state.uniform(size=6), ordering='XYZ') hmat_world_pose1 = pose1.hmat hmat_pose1_pose2 = pose2.hmat hmat_world_pose2_true = hmat_world_pose1.dot(hmat_pose1_pose2) hmat_world_pose2_test = pose1.mul(pose2).hmat np.testing.assert_array_almost_equal(hmat_world_pose2_true, hmat_world_pose2_test) def test_pose_inv_random(self): for _ in range(_N_RANDOM): pose = geometry.Pose.from_poseuler( random_state.uniform(size=6), ordering='XYZ') np.testing.assert_array_almost_equal( tr.hmat_inv(pose.hmat), pose.inv().hmat) np.testing.assert_array_almost_equal( np.linalg.inv(pose.hmat), pose.inv().hmat) def test_pose_stamped_to_frame_simple(self): pose1 = geometry.PoseStamped( pose=geometry.Pose.from_poseuler([1, 2, 3, 0.1, 0.2, 0.3], ordering='XYZ'), frame=None) pose2 = geometry.PoseStamped( pose=geometry.Pose.from_poseuler([-0.5, -1, 2, -0.3, 0.2, -0.1], ordering='XYZ'), frame=None) hmat_world_pose1 = pose1.get_world_pose().hmat hmat_world_pose2 = pose2.get_world_pose().hmat np.testing.assert_array_almost_equal(hmat_world_pose1, pose1.pose.hmat) np.testing.assert_array_almost_equal(hmat_world_pose2, pose2.pose.hmat) pose2 = pose2.to_frame(pose1) hmat_pose1_pose2 = tr.hmat_inv(hmat_world_pose1).dot(hmat_world_pose2) np.testing.assert_array_almost_equal(pose2.pose.hmat, hmat_pose1_pose2) def test_pose_stamped_to_world_simple(self): pose1 = geometry.PoseStamped( pose=geometry.Pose.from_poseuler([1, 2, 3, 0.1, 0.2, 0.3], ordering='XYZ'), frame=None) pose2 = geometry.PoseStamped( pose=geometry.Pose.from_poseuler([-0.5, -1, 2, -0.3, 0.2, -0.1], ordering='XYZ'), frame=pose1) hmat_world_pose1 = pose1.pose.hmat hmat_pose1_pose2 = pose2.pose.hmat hmat_world_pose2_true = hmat_world_pose1.dot(hmat_pose1_pose2) pose2 = pose2.to_world() self.assertIsNone(pose2.frame) hmat_world_pose2_test = pose2.pose.hmat np.testing.assert_array_almost_equal(hmat_world_pose2_true, hmat_world_pose2_test) def test_world_pose_with_hybrid_frame(self): pose1 = geometry.Pose.from_poseuler( [0.1, 0.2, 0.3, np.pi, np.pi / 2, -np.pi]) pose2 = geometry.Pose.from_poseuler([0.1, 0.2, 0.3, 0, np.pi, np.pi / 2]) non_hybrid = geometry.PoseStamped( pose=pose1, frame=geometry.PoseStamped(pose=pose2)) identity_quaternion = [1, 0, 0, 0] hybrid = geometry.PoseStamped( pose=pose1, frame=geometry.HybridPoseStamped( pose=geometry.Pose.from_poseuler( np.hstack((pose2.position, identity_quaternion))), quaternion_override=geometry.PoseStamped(pose2, None))) non_hybrid_world_pose = 
non_hybrid.get_world_pose() hybrid_world_pose = hybrid.get_world_pose() self.assertEqual(non_hybrid_world_pose, hybrid_world_pose) @parameterized.named_parameters( ('Vec6', geometry.Vec6), ('Twist', geometry.Twist), ('Wrench', geometry.Wrench), ('Accel', geometry.Accel), ) def test_default_construction_of_base_types(self, base_cls): value = base_cls() np.testing.assert_array_almost_equal(np.zeros(6), value.data) @parameterized.named_parameters( ('VectorStamped', geometry.VectorStamped), ('TwistStamped', geometry.TwistStamped), ('WrenchStamped', geometry.WrenchStamped), ('AccelStamped', geometry.AccelStamped), ) def test_default_construction_of_stamped_tyes(self, stamped_cls): value = stamped_cls(None, None) np.testing.assert_array_almost_equal(np.zeros(6), value.data.data) BASE_TYPE_SPECS = (('Accel', geometry.Accel, { 'full': 6, 'linear': 3, 'angular': 3 }), ('Twist', geometry.Twist, { 'full': 6, 'linear': 3, 'angular': 3 }), ('Wrench', geometry.Wrench, { 'full': 6, 'force': 3, 'torque': 3 })) class BaseImmutabiliyTest(parameterized.TestCase): """Test of immutability for Accel, Twist, Wrench, Vector and Pose.""" def _copy_properties(self, value, property_map): value_map = {} for name in property_map: value_map[name] = np.copy(getattr(value, name)) return value_map def assertPropertiesEqual(self, expected, actual): self.assertEqual(list(expected.keys()), list(actual.keys())) for key in expected.keys(): try: np.testing.assert_array_almost_equal(expected[key], actual[key]) except AssertionError as failure: failure.args += (key) raise @parameterized.named_parameters(*BASE_TYPE_SPECS) def test_construction(self, geometry_type, property_map): # We should not be able to modify the object through its constructor param. # Test with a numpy array (must be copied). input_array = np.asarray(list(range(6))) value = geometry_type(input_array) initial_values = self._copy_properties(value, property_map) input_array[:] = list(range(10, 16, 1)) current_values = self._copy_properties(value, property_map) self.assertPropertiesEqual(initial_values, current_values) # Test with a list (a new numpy array will be created). input_list = list(range(6)) value = geometry_type(input_list) initial_values = self._copy_properties(value, property_map) input_list[:] = list(range(10, 16, 1)) current_values = self._copy_properties(value, property_map) self.assertPropertiesEqual(initial_values, current_values) @parameterized.named_parameters(*BASE_TYPE_SPECS) def test_property_cannot_set(self, geometry_type, property_map): value = geometry_type(list(range(6))) for property_name, property_size in property_map.items(): with self.assertRaises(AttributeError): setattr(value, property_name, list(range(property_size))) @parameterized.named_parameters(*BASE_TYPE_SPECS) def test_property_cannot_setitem(self, geometry_type, property_map): value = geometry_type(list(range(6))) for property_name, property_size in property_map.items(): with self.assertRaises( ValueError, msg='{}.{}[:] allowed'.format(geometry_type, property_name)) as expected: property_value = getattr(value, property_name) property_value[:] = list(range(property_size)) self.assertIn('read-only', str(expected.exception)) def test_pose_construction(self): # Test that the arguments we give to the Pose constructor do not permit # modifications to the Pose instance value. property_map = {'position': 3, 'quaternion': 4} # Test with the input being a numpy array. 
position = np.asarray(list(range(3))) quaternion = np.asarray(list(range(4))) pose = geometry.Pose(position, quaternion) initial_values = self._copy_properties(pose, property_map) position[:] = list(range(10, 13, 1)) quaternion[:] = list(range(10, 14, 1)) current_values = self._copy_properties(pose, property_map) self.assertPropertiesEqual(initial_values, current_values) # Test with the input being a list. position = list(range(3)) quaternion = list(range(4)) pose = geometry.Pose(position, quaternion) initial_values = self._copy_properties(pose, property_map) position[:] = list(range(10, 13, 1)) quaternion[:] = list(range(10, 14, 1)) current_values = self._copy_properties(pose, property_map) self.assertPropertiesEqual(initial_values, current_values) def test_pose_property_cannot_set(self): pose = geometry.Pose(list(range(3)), list(range(4))) with self.assertRaises(AttributeError) as expected: pose.position = list(range(10, 13, 1)) self.assertIn('can\'t set attribute', str(expected.exception)) with self.assertRaises(AttributeError) as expected: pose.quaternion = list(range(10, 14, 1)) self.assertIn('can\'t set attribute', str(expected.exception)) def test_pose_property_cannot_setitem(self): pose = geometry.Pose(list(range(3)), list(range(4))) with self.assertRaises(ValueError) as expected: pose.position[:] = list(range(10, 13, 1)) self.assertIn('read-only', str(expected.exception)) with self.assertRaises(ValueError) as expected: pose.quaternion[:] = list(range(10, 14, 1)) self.assertIn('read-only', str(expected.exception)) def test_pose_with_position(self): # Check that the pose from with_position has a new position. first_pose = geometry.Pose(list(range(3)), list(range(4))) second_pose = first_pose.with_position(list(range(10, 13, 1))) self.assertTrue(np.array_equal(first_pose.position, list(range(3)))) self.assertTrue( np.array_equal(second_pose.position, list(range(10, 13, 1)))) self.assertTrue( np.array_equal(first_pose.quaternion, second_pose.quaternion)) def test_pose_with_quaternion(self): # Check that the pose from with_quaternion has a new quaternion. first_pose = geometry.Pose(list(range(3)), list(range(4))) second_pose = first_pose.with_quaternion(list(range(10, 14, 1))) self.assertTrue(np.array_equal(first_pose.quaternion, list(range(4)))) self.assertTrue( np.array_equal(second_pose.quaternion, list(range(10, 14, 1)))) self.assertTrue(np.array_equal(first_pose.position, second_pose.position)) def test_vec6_construction(self): # Test copy of numpy object. 
input_array = np.asarray(list(range(6))) vec = geometry.Vec6(input_array) input_array[0] = 1 self.assertSequenceEqual(list(vec.data), list(range(6))) input_list = list(range(6)) vec = geometry.Vec6(input_list) input_list[0] = 1 self.assertSequenceEqual(list(vec.data), list(range(6))) def test_vec6_cannot_setitem(self): vec = geometry.Vec6(list(range(6))) # Test we can __getitem__ for i in range(6): self.assertEqual(vec[i], i) # but not __setitem__ with self.assertRaises(TypeError) as expected: vec[0] = 1 self.assertIn('does not support item assignment', str(expected.exception)) def test_vec6_cannot_setitem_on_full(self): vec = geometry.Vec6(list(range(6))) # Test we can __getitem__ for i in range(6): self.assertEqual(float(vec.data[i]), float(i)) # but not __setitem__ with self.assertRaises(ValueError) as expected: vec.data[0] = 1.0 self.assertIn('read-only', str(expected.exception)) @parameterized.named_parameters( ('Accel', geometry.Accel(list(range(6)))), ('Twist', geometry.Twist(list(range(6)))), ('Wrench', geometry.Wrench(list(range(6)))), ('Pose', geometry.Pose(list(range(3)), list(range(4)))), ('Vec6', geometry.Vec6(list(range(6))))) def test_no_dict(self, obj): # __dict__ allows us to add arbitrary attributes to objects, which we don't # want for immutable types. self.assertFalse(hasattr(obj, '__dict__')) # The immutability of the stamped types is assured by: # 1: Knowing that all attributes of these types are themselves immutable, # 2: Public properties being read-only, returning immutable objects. def test_accel_stamped_immutable(self): accel_stamped = geometry.AccelStamped(list(range(6)), None) with self.assertRaises(AttributeError): accel_stamped.accel = geometry.Accel(list(range(1, 7))) with self.assertRaises(AttributeError): accel_stamped.frame = 'Not allowed' def test_pose_stamped_immutable(self): pose_stamped = geometry.PoseStamped(geometry.Pose(), None) with self.assertRaises(AttributeError): pose_stamped.pose = geometry.Pose(list(range(3)), list(range(4))) with self.assertRaises(AttributeError): pose_stamped.frame = 'Not allowed' def test_twist_stamped_immutable(self): twist = geometry.Twist(list(range(6))) twist_stamped = geometry.TwistStamped(twist, None) with self.assertRaises(AttributeError): twist_stamped.twist = geometry.Twist(list(range(6))) with self.assertRaises(AttributeError): twist_stamped.frame = 'Not allowed' def test_vector_stamped_immutable(self): vector = geometry.Vec6(list(range(6))) vector_stamped = geometry.VectorStamped(vector, None) with self.assertRaises(AttributeError): vector_stamped.vector = geometry.Vec6(list(range(6))) with self.assertRaises(AttributeError): vector_stamped.frame = 'Not allowed' def test_wrench_stamped_immutable(self): wrench = geometry.Wrench(list(range(6))) wrench_stamped = geometry.WrenchStamped(wrench, None) with self.assertRaises(AttributeError): wrench_stamped.wrench = geometry.Wrench(list(range(6))) with self.assertRaises(AttributeError): wrench_stamped.frame = 'Not allowed' class VecSubtype(geometry.Vec6): pass class FancyGains(geometry.Vec6): def __mul__(self, other): return other.__rmul__(self) def __add__(self, other): return other.__radd__(self) class GeometryArithmeticTest(parameterized.TestCase): wrench_1_6 = geometry.Wrench(list(range(1, 7))) twist_1_6 = geometry.Twist(list(range(1, 7))) accel_1_6 = geometry.Accel(list(range(1, 7))) vec_10_60 = geometry.Vec6(list(range(10, 70, 10))) def test_vec6_scalar_addition_and_subtraction(self): target = geometry.Vec6(list(range(0, 6))) result = target + 1 
self.assertSequenceEqual(list(target.data), list(range(0, 6))) self.assertSequenceEqual(list(result.data), list(range(1, 7))) result -= 1 self.assertSequenceEqual(list(target.data), list(range(0, 6))) self.assertSequenceEqual(list(result.data), list(range(0, 6))) result = target - 1 self.assertSequenceEqual(list(target.data), list(range(0, 6))) self.assertSequenceEqual(list(result.data), list(range(-1, 5))) result += 1 self.assertSequenceEqual(list(target.data), list(range(0, 6))) self.assertSequenceEqual(list(result.data), list(range(0, 6))) def test_vec6_scalar_multiplication_and_true_division(self): target = geometry.Vec6(np.arange(0.0, 6.0, 1.0)) result = target * 2 np.testing.assert_array_almost_equal(target.data, np.arange(0.0, 6.0, 1.0)) np.testing.assert_array_almost_equal(result.data, np.arange(0.0, 12.0, 2.0)) result /= 2 np.testing.assert_array_almost_equal(target.data, np.arange(0.0, 6.0, 1.0)) np.testing.assert_array_almost_equal(result.data, np.arange(0.0, 6.0, 1.0)) result = target / 2 np.testing.assert_array_almost_equal(target.data, np.arange(0.0, 6.0, 1.0)) np.testing.assert_array_almost_equal(result.data, np.arange(0.0, 3.0, 0.5)) result *= 2 np.testing.assert_array_almost_equal(result.data, np.arange(0.0, 6.0, 1.0)) np.testing.assert_array_almost_equal(target.data, np.arange(0.0, 6.0, 1.0)) def test_vec6_vector_addition_and_subtraction(self): target = geometry.Vec6(np.arange(0.0, 6.0, 1.0)) result = target + np.arange(0.0, 6.0, 1.0) np.testing.assert_array_almost_equal(target.data, np.arange(0.0, 6.0)) np.testing.assert_array_almost_equal(result.data, np.arange(0.0, 12.0, 2.0)) result -= np.arange(0.0, 6.0, 1.0) np.testing.assert_array_almost_equal(result.data, np.arange(0.0, 6.0)) np.testing.assert_array_almost_equal(target.data, np.arange(0.0, 6.0)) result = target - np.arange(0.0, 6.0, 1.0) np.testing.assert_array_almost_equal(target.data, np.arange(0.0, 6.0)) np.testing.assert_array_almost_equal(result.data, np.zeros(6)) result += np.arange(0.0, 6.0, 1.0) np.testing.assert_array_almost_equal(result.data, np.arange(0.0, 6.0)) np.testing.assert_array_almost_equal(target.data, np.arange(0.0, 6.0)) def test_vec6_vector_multiplication_and_true_division(self): target = geometry.Vec6(np.arange(0.0, 6.0, 1.0)) result = target * np.arange(0.0, 6.0, 1.0) np.testing.assert_array_almost_equal(target.data, np.arange(0.0, 6.0, 1.0)) np.testing.assert_array_almost_equal(result.data, np.asarray([0, 1, 4, 9, 16, 25])) result /= np.arange(1.0, 7.0, 1.0) np.testing.assert_array_almost_equal(target.data, np.arange(0.0, 6.0, 1.0)) np.testing.assert_array_almost_equal( result.data, np.asarray([0, 1 / 2, 4 / 3, 9 / 4, 16 / 5, 25 / 6])) result = target / np.arange(1.0, 7.0, 1.0) np.testing.assert_array_almost_equal(target.data, np.arange(0.0, 6.0, 1.0)) np.testing.assert_array_almost_equal( result.data, np.asarray([0, 1 / 2, 2 / 3, 3 / 4, 4 / 5, 5 / 6])) result *= np.arange(1.0, 7.0, 1.0) np.testing.assert_array_almost_equal(result.data, np.arange(0.0, 6.0, 1.0)) np.testing.assert_array_almost_equal(target.data, np.arange(0.0, 6.0, 1.0)) def test_vec6_broadcast_vector_addition_and_subtraction(self): target = geometry.Vec6(np.arange(0.0, 6.0, 1.0)) result = target + np.asarray([2]) np.testing.assert_array_almost_equal(target.data, np.arange(0.0, 6.0)) np.testing.assert_array_almost_equal(result.data, np.arange(2.0, 8.0)) result -= np.asarray([2]) np.testing.assert_array_almost_equal(result.data, np.arange(0.0, 6.0)) np.testing.assert_array_almost_equal(target.data, np.arange(0.0, 6.0)) 
result = target - np.asarray([2]) np.testing.assert_array_almost_equal(target.data, np.arange(0.0, 6.0)) np.testing.assert_array_almost_equal(result.data, np.arange(-2.0, 4.0)) result += np.asarray([2]) np.testing.assert_array_almost_equal(result.data, np.arange(0.0, 6.0)) np.testing.assert_array_almost_equal(target.data, np.arange(0.0, 6.0)) def test_vec6_broadcast_vector_multiplication_and_true_division(self): target = geometry.Vec6(np.arange(0.0, 6.0, 1.0)) result = target * np.asarray([2]) np.testing.assert_array_almost_equal(target.data, np.arange(0.0, 6.0, 1.0)) np.testing.assert_array_almost_equal(result.data, np.arange(0.0, 12.0, 2.0)) result /= np.asarray([2]) np.testing.assert_array_almost_equal(target.data, np.arange(0.0, 6.0, 1.0)) np.testing.assert_array_almost_equal(result.data, np.arange(0.0, 6.0, 1.0)) result = target / np.asarray([2]) np.testing.assert_array_almost_equal(target.data, np.arange(0.0, 6.0, 1.0)) np.testing.assert_array_almost_equal(result.data, np.arange(0.0, 3.0, 0.5)) result *= np.asarray([2]) np.testing.assert_array_almost_equal(result.data, np.arange(0.0, 6.0, 1.0)) np.testing.assert_array_almost_equal(target.data, np.arange(0.0, 6.0, 1.0)) @parameterized.parameters( itertools.product( [wrench_1_6, twist_1_6, accel_1_6], # Improve formatting. [vec_10_60], # Improve formatting. [operator.mul, operator.add])) def test_commutative_operators(self, geometry_obj, vec, operation): # Test Vec6 OP some-geometry-type and some-geometry-type OP Vec6 # This operation should return a some-geometry-type instance. vec_op_obj = operation(vec, geometry_obj) obj_op_vec = operation(geometry_obj, vec) # Assert commutativity. self.assertEqual( vec_op_obj, obj_op_vec, msg=(f'{vec} {operation} {geometry_obj} not commutative')) self.assertIsInstance(vec_op_obj, type(geometry_obj)) self.assertIsInstance(obj_op_vec, type(geometry_obj)) # Assert correctness. expected = type(geometry_obj)(operation(geometry_obj.data, vec.data)) self.assertEqual(obj_op_vec, expected) @parameterized.parameters( itertools.product( [wrench_1_6, twist_1_6, accel_1_6], # Formatting. [vec_10_60], # Formatting. [operator.truediv, operator.sub])) def test_non_commutative_operators(self, geometry_obj, vec, operation): obj_op_vec = operation(geometry_obj, vec) self.assertIsInstance(obj_op_vec, type(geometry_obj)) expected = type(geometry_obj)(operation(geometry_obj.data, vec.data)) self.assertEqual(obj_op_vec, expected) @parameterized.parameters([wrench_1_6, twist_1_6, accel_1_6]) def test_operators_with_vec6_subtype(self, geometry_obj): # Users may want their own subtypes of Vec6. # This tests that a basic version can be used for arithmetic. 
vec_sub_10_60 = VecSubtype(list(range(10, 70, 10))) obj_add = geometry_obj + vec_sub_10_60 obj_sub = geometry_obj - vec_sub_10_60 obj_mul = geometry_obj * vec_sub_10_60 obj_div = geometry_obj / vec_sub_10_60 self.assertIsInstance(obj_add, type(geometry_obj)) self.assertIsInstance(obj_sub, type(geometry_obj)) self.assertIsInstance(obj_mul, type(geometry_obj)) self.assertIsInstance(obj_div, type(geometry_obj)) expected_add = type(geometry_obj)(geometry_obj.data + vec_sub_10_60.data) expected_sub = type(geometry_obj)(geometry_obj.data - vec_sub_10_60.data) expected_mul = type(geometry_obj)(geometry_obj.data * vec_sub_10_60.data) expected_div = type(geometry_obj)(geometry_obj.data / vec_sub_10_60.data) self.assertEqual(obj_add, expected_add) self.assertEqual(obj_sub, expected_sub) self.assertEqual(obj_mul, expected_mul) self.assertEqual(obj_div, expected_div) @parameterized.parameters([wrench_1_6, twist_1_6, accel_1_6]) def test_operators_with_fancy_vec6_subtype(self, geometry_obj): # Users may want their own subtypes of Vec6. # This tests that a better version can improve on mul and add. # returning the geometry type from those operations. fancy_vec = FancyGains(list(range(10, 70, 10))) obj_add_vec = geometry_obj + fancy_vec obj_mul_vec = geometry_obj * fancy_vec vec_add_obj = fancy_vec + geometry_obj vec_mul_obj = fancy_vec * geometry_obj self.assertIsInstance(obj_add_vec, type(geometry_obj)) self.assertIsInstance(obj_mul_vec, type(geometry_obj)) self.assertIsInstance(vec_add_obj, type(geometry_obj)) self.assertIsInstance(vec_mul_obj, type(geometry_obj)) expected_add = type(geometry_obj)(geometry_obj.data + fancy_vec.data) expected_mul = type(geometry_obj)(geometry_obj.data * fancy_vec.data) self.assertEqual(obj_add_vec, expected_add) self.assertEqual(vec_add_obj, expected_add) self.assertEqual(obj_mul_vec, expected_mul) self.assertEqual(vec_mul_obj, expected_mul) @parameterized.parameters( itertools.product( [wrench_1_6, twist_1_6, accel_1_6], # Formatting. [wrench_1_6, twist_1_6, accel_1_6], # Formatting. [operator.mul, operator.add, operator.truediv, operator.sub])) def test_invalid_operations(self, lhs, rhs, operation): # This tests that you can't apply arithmetic operations to two instances # of a geometric type. What is a Twist added to an Accel? 
with self.assertRaises(TypeError): operation(lhs, rhs) class HashEqualsTest(parameterized.TestCase): @parameterized.parameters( [geometry.Accel, geometry.Twist, geometry.Wrench, geometry.Vec6]) def test_vec6_types(self, geometry_type): data = np.random.random(6) value1 = geometry_type(data) value2 = geometry_type(data) self.assertEqual(value1, value2) self.assertEqual(hash(value1), hash(value2)) data2 = data + np.arange(6) value3 = geometry_type(data2) self.assertNotEqual(value1, value3) def test_pose(self): position = np.random.random(3) quaternion = np.random.random(4) pose1 = geometry.Pose(position.copy(), quaternion.copy(), name='name') pose2 = geometry.Pose(position.copy(), quaternion.copy(), name='name') self.assertEqual(pose1, pose2) self.assertEqual(hash(pose1), hash(pose2)) @parameterized.parameters([(geometry.Accel, geometry.AccelStamped), (geometry.Twist, geometry.TwistStamped), (geometry.Wrench, geometry.WrenchStamped), (geometry.Vec6, geometry.VectorStamped)]) def test_stamped(self, base_type, stamped_type): base1 = base_type(np.random.random(6)) pose1 = geometry.Pose.from_poseuler(base1) pose_stamped_1 = geometry.PoseStamped(pose1, frame=None) pose2 = geometry.Pose.from_poseuler(base1) pose_stamped_2 = geometry.PoseStamped(pose2, frame=None) stamped1 = stamped_type(base1, pose_stamped_1) stamped2 = stamped_type(base1, pose_stamped_2) self.assertEqual(stamped1, stamped2) self.assertEqual(hash(stamped1), hash(stamped2)) class BatchedGeometryTest(parameterized.TestCase): BATCH = 5 IDENTITY_QUAT_BATCH = np.tile((1, 0, 0, 0), (BATCH, 1)) IDENTITY_POS_BATCH = np.zeros((BATCH, 3)) def test_batched_pose_mul(self): pose1 = geometry.Pose(position=self.IDENTITY_POS_BATCH, quaternion=self.IDENTITY_QUAT_BATCH) pose2 = geometry.Pose(position=np.ones((self.BATCH, 3)), quaternion=np.tile((0, 1, 0, 0), (self.BATCH, 1))) world_pose2_test = pose1.mul(pose2) for i in range(self.BATCH): np.testing.assert_array_almost_equal( world_pose2_test.position[i], pose2.position[i]) np.testing.assert_array_almost_equal( world_pose2_test.quaternion[i], pose2.quaternion[i]) def test_pose_inv_random(self): pose = geometry.Pose( position=np.zeros((5, 3)), quaternion=np.tile((1, 0, 0, 0), (5, 1))) inv = pose.inv() identity = pose.mul(inv) np.testing.assert_array_almost_equal( identity.position, self.IDENTITY_POS_BATCH) np.testing.assert_array_almost_equal( identity.quaternion, self.IDENTITY_QUAT_BATCH) if __name__ == '__main__': absltest.main()
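# --- Illustrative usage sketch (editor addition, not part of the original test file). ---
# The tests above pin down the arithmetic rules for the geometry types: Vec6
# instances combine piece-wise with scalars, numpy arrays and with
# Accel/Twist/Wrench, mixing two distinct geometry types raises TypeError, and
# the backing arrays are read-only. A minimal sketch restating those rules;
# the function name `_demo_vec6_arithmetic` is hypothetical and exists only
# for illustration.
from dm_robotics.geometry import geometry as _geometry


def _demo_vec6_arithmetic():
  gains = _geometry.Vec6([10.0] * 6)
  twist = _geometry.Twist([0.0, 0.0, 0.1, 0.0, 0.0, 0.0])

  # Piece-wise scaling by a Vec6 preserves the geometry type.
  scaled = twist * gains
  assert isinstance(scaled, _geometry.Twist)

  # Mixing two geometry types is rejected.
  try:
    _ = twist + _geometry.Accel([1.0] * 6)
  except TypeError:
    pass

  # The underlying data is immutable (numpy raises on read-only arrays).
  try:
    twist.data[0] = 1.0
  except ValueError:
    pass
  return scaled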
dm_robotics-main
py/geometry/geometry_test.py
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for observation_physics."""

from absl.testing import absltest
from dm_robotics.geometry import geometry
from dm_robotics.geometry import observation_physics
import numpy as np


class ObservationPhysicsTest(absltest.TestCase):

  def test_happy_path(self):
    physics = observation_physics.ObservationPhysics(
        geometry.Pose.from_poseuler)
    raw_data = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
    physics.set_observation({'pose1': np.asarray(raw_data)})
    self.assertEqual(
        physics.world_pose('pose1'), geometry.Pose.from_poseuler(raw_data))

  def test_missing(self):
    physics = observation_physics.ObservationPhysics(
        geometry.Pose.from_poseuler)
    raw_data = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
    physics.set_observation({'pose1': np.asarray(raw_data)})
    with self.assertRaises(ValueError):
      physics.world_pose('pose2')


if __name__ == '__main__':
  absltest.main()
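# --- Illustrative sketch (editor addition, not part of the original test file). ---
# A minimal sketch of the get_pos/get_rot flags on world_pose, which the tests
# above do not exercise: get_pos=False zeroes the position, get_rot=False
# resets the rotation to the identity quaternion. The function name is
# hypothetical; it reuses the imports from this test module.


def _demo_partial_pose_lookup():
  physics = observation_physics.ObservationPhysics(
      geometry.Pose.from_poseuler)
  physics.set_observation(
      {'pose1': np.asarray([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])})

  rot_only = physics.world_pose('pose1', get_pos=False)
  pos_only = physics.world_pose('pose1', get_rot=False)

  np.testing.assert_allclose(rot_only.position, [0.0, 0.0, 0.0])
  np.testing.assert_allclose(pos_only.quaternion, [1.0, 0.0, 0.0, 0.0])
  return rot_only, pos_only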
dm_robotics-main
py/geometry/observation_physics_test.py
# Copyright 2020 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Package building script.""" import setuptools def _get_requirements(requirements_file): # pylint: disable=g-doc-args """Returns a list of dependencies for setup() from requirements.txt. Currently a requirements.txt is being used to specify dependencies. In order to avoid specifying it in two places, we're going to use that file as the source of truth. Lines starting with -r will be ignored. If the requirements are split across multiple files, call this function multiple times instead and sum the results. """ def line_should_be_included(line): return line and not line.startswith("-r") with open(requirements_file) as f: return [_parse_line(line) for line in f if line_should_be_included(line)] def _parse_line(s): """Parses a line of a requirements.txt file.""" requirement, *_ = s.split("#") return requirement.strip() setuptools.setup( name="dm_robotics-geometry", package_dir={"dm_robotics.geometry": ""}, packages=["dm_robotics.geometry"], version="0.5.0", license="Apache 2.0", author="DeepMind", description="This library provides primitives for dealing with scene and robot geometry", long_description=open("README.md").read(), long_description_content_type="text/markdown", url="https://github.com/deepmind/dm_robotics/tree/main/py/geometry", python_requires=">=3.7, <3.11", setup_requires=["wheel >= 0.31.0"], install_requires=(_get_requirements("requirements.txt") + _get_requirements("requirements_external.txt")), classifiers=[ "Development Status :: 5 - Production/Stable", "Programming Language :: Python :: 3", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Topic :: Software Development :: Libraries", "Topic :: Scientific/Engineering", "Topic :: Scientific/Engineering :: Mathematics", ], zip_safe=True, )
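# --- Illustrative sketch (editor addition, not part of the original setup script). ---
# _get_requirements keeps every line that is non-empty and does not start with
# "-r", and _parse_line drops trailing "# ..." comments. A standalone sketch
# of that behaviour on an in-memory file; the requirement strings below are
# made up for illustration.
import io


def _demo_requirements_parsing():
  fake_requirements = io.StringIO(
      "numpy >= 1.16  # example pin, not the real constraint\n"
      "-r other_requirements.txt\n"
      "absl-py\n")
  kept = []
  for line in fake_requirements.read().splitlines():
    if line and not line.startswith("-r"):
      requirement, *_ = line.split("#")
      kept.append(requirement.strip())
  return kept  # ['numpy >= 1.16', 'absl-py']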
dm_robotics-main
py/geometry/setup.py
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Physics implementation that gets Poses from observation values."""

from typing import Any, Callable, Dict, Mapping, Text

from dm_robotics.geometry import geometry
import numpy as np

_IDENTITY_QUATERNION = np.array([1, 0, 0, 0])


class ObservationPhysics(geometry.Physics):
  """A `geometry.Physics` backed by environment observations."""

  def __init__(self,
               observation_to_pose: Callable[[np.ndarray], geometry.Pose]):
    """Initializes ObservationPhysics.

    Args:
      observation_to_pose: A function to convert an observation to a Pose.
    """
    super().__init__()
    self._obs = None  # type: Dict[Text, Any]
    self._parser = observation_to_pose

  def set_observation(self, observation: Mapping[str, np.ndarray]):
    """Sets the dict as the current observation."""
    self._obs = observation  # pytype: disable=annotation-type-mismatch

  def world_pose(self,
                 frame: geometry.Grounding,
                 get_pos: bool = True,
                 get_rot: bool = True) -> geometry.Pose:
    """Return world pose of the provided frame.

    Args:
      frame: A frame identifier.
      get_pos: If False, zero out position entries.
      get_rot: If False, make the rotation an identity quaternion.

    Returns:
      A `geometry.Pose` containing the requested pose.
    """
    if not isinstance(frame, str):
      raise ValueError(f"bad frame: {frame}, expected identifier")
    raw_value = self._obs.get(frame, None)
    if raw_value is None:
      raise ValueError(f"{frame} not found in {self._obs.keys()}")
    full_pose = self._parser(raw_value)
    if not get_pos:
      return full_pose.with_position([0, 0, 0])
    elif not get_rot:
      return full_pose.with_quaternion(_IDENTITY_QUATERNION)
    return full_pose
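# --- Illustrative usage sketch (editor addition, not part of the module). ---
# ObservationPhysics lets the frame utilities in `geometry` resolve Grounding
# identifiers (plain observation keys) against the latest observation dict.
# A minimal sketch, assuming poseuler-encoded observations under the
# hypothetical keys 'gripper_pose' and 'object_pose'.


def _demo_relative_pose_from_observations():
  physics = ObservationPhysics(geometry.Pose.from_poseuler)
  physics.set_observation({
      'gripper_pose': np.array([0.1, 0.0, 0.3, 0.0, 0.0, 0.0]),
      'object_pose': np.array([0.4, 0.0, 0.1, 0.0, 0.0, 1.57]),
  })
  # Pose of the object expressed in the gripper frame; both arguments are
  # Groundings, so the physics instance is needed to resolve them.
  return geometry.frame_relative_pose(
      'object_pose', 'gripper_pose', physics=physics)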
dm_robotics-main
py/geometry/observation_physics.py
# Copyright 2020 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Classes for representing frames, twists, wrenches and accelerations. This mimics the ROS geometry_msgs interface, but using arbitrary frame identifiers coupled with a physics that can interpret those frames. The primary use case is to decouple the parameterization of coordinate frames and related quantities in user-code from an underlying world model, which can be a physics engine, a kinematics library like KDL, a real-robot frame system, e.g. ROS tf. Pose, Twist, Wrench, Accel and Vec6 instances are immutable, so too are their Stamped counterparts. All geometry types can be compared and are hashable. Arithmetic: This applies to `Accel`, `Twist`, `Wrench`, and `Vec6`. The arithmetic operators, + - * /, perform piece-wise operations, they are intended to work with Vec6 instances. Accel, Wrench, and Twist cannot have arithmetic operations applied to instances of each-other. For example: ```python twist = Twist() wrench = Wrench() accel = Accel() # Arithmetic does not apply between twist, wrench and accel: twist + twist # raises TypeError wrench / accel # raises TypeError # Instead, it works with Vec6 instances: vec6 = Vec6() twist + vec6 # returns a Twist vec6 * wrench # returns a Wrench ``` """ import abc from typing import Any, Optional, Sequence, Text, Union from dm_robotics.transformations import transformations as tr import numpy as np Grounding = Any # The world pose of a Grounding is given by a Physics. Frame = Union[Grounding, "PoseStamped"] _IDENTITY_QUATERNION = np.array([1, 0, 0, 0], dtype=np.float64) _ZERO_POSITION = np.zeros(3, dtype=np.float64) _DEFAULT = "default_constant_string" class Physics(abc.ABC): """Interface for 'Physics' as needed by this library. Unlike control.Physics, we only require the ability to get the world pose of scene elements using some identifier (aka Grounding). """ @abc.abstractmethod def world_pose(self, frame: Grounding, get_pos: bool = True, get_rot: bool = True) -> "Pose": """Return world pose of the provided frame. Args: frame: A frame identifier. get_pos: If False, zero out position entries. get_rot: If False, make the rotation an identity quaternion. Returns: A `geometry.Pose` containing the requested pose. """ raise NotImplementedError def frame_world_pose(frame: Optional[Frame], physics: Optional[Physics] = None) -> "Pose": """Traverses the pose hierarchy to compute the world pose. Args: frame: A frame identifier. Can be a Grounding, a `PoseStamped`, or None. None is interpreted as world frame. physics: Required if the frame of the root pose is a Grounding, in which case we need a physics to get its world pose. If the root pose is None, we assume the highest-level frame is world. Returns: A `Pose` containing the world pose Raises: ValueError: If `frame` is a Grounding but no Physics was provided. 
""" if frame is None: return Pose() elif isinstance(frame, PoseStamped): return frame.get_world_pose(physics) else: if physics is None: raise ValueError( "A `geometry.Physics` object is required to compute frame poses") return physics.world_pose(frame) def frame_relative_pose(frame1: Optional[Frame], frame2: Optional[Frame], physics: Optional[Physics] = None) -> "Pose": """Computes the pose of `frame1` with respect to `frame2`. Args: frame1: A frame. Can be an Grounding, a `PoseStamped`, or None. frame2: A frame. Can be an Grounding, a `PoseStamped`, or None. physics: Required if the frame of the root pose is a Grounding, in which case we need a physics to get its world pose. If the root pose is None, we assume the highest-level frame is world. Returns: A `Pose` containing the pose of frame1 w.r.t. `frame2` """ pose_world_frame1 = frame_world_pose(frame1, physics=physics) pose_world_frame2 = frame_world_pose(frame2, physics=physics) return pose_world_frame2.inv().mul(pose_world_frame1) class Pose: """A class for representing a pose. Internally this class uses position and quaternion, but exposes a matrix interface. Equivalent to ROS geometry_msgs/Pose, except for the quaternion order. Here, [w, x, y, z] is used, whereas geometry_msgs/Quaternion uses [x, y, z, w]. Instances of this class are immutable. Instances can be copied with copy.copy(). """ __slots__ = ("_position", "_quaternion", "_name") def __init__(self, position=None, quaternion=None, name=""): if position is None: self._position = _ZERO_POSITION else: self._position = np.asarray(position, dtype=np.float64) if self._position is position: # Copy only if required. self._position = np.copy(self._position) if quaternion is None: self._quaternion = _IDENTITY_QUATERNION else: self._quaternion = np.asarray(quaternion, dtype=np.float64) if self._quaternion is quaternion: # Copy only if required. self._quaternion = np.copy(self._quaternion) if name is None: raise ValueError("Name should be a string not None") self._name = name # Prevent mutation through read-only properties. self._position.flags.writeable = False self._quaternion.flags.writeable = False def __repr__(self) -> Text: if self.name: name = f"name={self.name}, " else: name = "" return f"Pose({name}position={self.position}, quaternion={self.quaternion})" def __eq__(self, other): if isinstance(other, Pose): return np.allclose(self.position, other.position) and ( np.allclose(self.quaternion, other.quaternion) or np.allclose(self.quaternion, -1 * other.quaternion)) and (self.name == other.name) else: return NotImplemented def __hash__(self): return hash( tuple([self._name] + self._position.tolist() + self._quaternion.tolist())) def mul(self, other: "Pose", name: Text = "") -> "Pose": """Multiplies other pose by this pose. Args: other: The other Pose to multiply by. name: An optional name to set in the resulting Pose. Returns: Resulting pose. 
""" new_pos = self.position + tr.quat_rotate(self.quaternion, other.position) new_quat = tr.quat_mul(self.quaternion, other.quaternion) return Pose(new_pos, new_quat, name=name) def inv(self) -> "Pose": inv_quat = tr.quat_inv(self.quaternion) return Pose( position=tr.quat_rotate(inv_quat, -1 * self.position), quaternion=inv_quat) @property def hmat(self) -> np.ndarray: hmat = tr.quat_to_mat(self.quaternion) hmat[0:3, 3] = self.position return hmat @classmethod def from_hmat(cls, hmat: Union[np.ndarray, Sequence[float]]) -> "Pose": position = hmat[0:3, 3] quaternion = tr.mat_to_quat(hmat) return cls(position, quaternion) @classmethod def from_poseuler(cls, poseuler: Union[np.ndarray, Sequence[float]], ordering: Text = "XYZ") -> "Pose": position = poseuler[0:3] quaternion = tr.euler_to_quat(poseuler[3:6], ordering=ordering) return cls(position, quaternion) def to_poseuler(self, ordering="XYZ"): return np.hstack( [self.position, tr.quat_to_euler(self.quaternion, ordering=ordering)]) def to_posquat(self): return np.hstack([self.position, self.quaternion]) @property def position(self): return self._position @property def quaternion(self): return self._quaternion @property def name(self): return self._name def replace(self, position=_DEFAULT, quaternion=_DEFAULT, name=_DEFAULT): if position is _DEFAULT: position = self.position if quaternion is _DEFAULT: quaternion = self.quaternion if name is _DEFAULT: name = self.name return Pose(position=position, quaternion=quaternion, name=name) def with_quaternion(self, quaternion): return self.replace(quaternion=quaternion) def with_position(self, position): return self.replace(position=position) class PoseStamped(object): """A class for representing a pose relative to a parent frame. The purpose of this class is to simplify the process of computing relative transformations between scene elements. Every `PoseStamped` has a parent frame, which can be `None`, a `Grounding`, or another `PoseStamped`. The semantics of these possible parents are as follows: None: The pose is interpreted as a world pose. Grounding: The pose is interpreted in the frame of the Grounding, and a `Physics` object is used to resolve its world pose. PoseStamped: The pose is interpreted as a child of another `PoseStamped`, and the world pose is resolved recursively until a `Grounding` or `None` is found. Every `PoseStamped` can therefore be grounded in a common `world` frame, and by extension, any relative pose can be computed. Equivalent to ROS geometry_msgs/PoseStamped, supporting an arbitrary `Grounding` or another `PoseStamped` (or None) instead of a just string frame_id for the frame identifier. PoseStamped is immutable. """ __slots__ = ("_pose", "_frame", "_name") base_type = Pose def __init__(self, pose: Union[Pose, np.ndarray, None], frame: Optional[Frame] = None, name: Text = ""): """Initialize PoseStamped. Args: pose: A `Pose` object, if None is give a default Pose is used. frame: A frame identifier. Can be an Grounding, a `PoseStamped`, or None. If None, users should assume world frame. name: Optional name of the frame. 
Raises: ValueError: if `frame` or `name` arguments is invalid """ if isinstance(pose, np.ndarray): pose = Pose.from_hmat(pose) elif pose is None: pose = Pose() if isinstance(frame, Pose): raise ValueError( "A Pose is not a frame, did you mean `PoseStamped(pose, None)`?") if name is None: raise ValueError("Name should be a string not None") self._pose = pose self._frame = frame # type: Union[Grounding, PoseStamped, None] self._name = name def __repr__(self) -> Text: if self.name: name_str = "name:{}, ".format(self.name) else: name_str = "" return "{}({}pose={}, frame={})".format(self.__class__.__name__, name_str, self.pose, self.frame) def __eq__(self, other): if isinstance(other, PoseStamped): return (self.pose == other.pose) and (self.frame == other.frame) and ( self.name == other.name) else: return NotImplemented def __hash__(self): return hash((self._pose, self._name)) def to_world(self, physics: Optional[Physics] = None) -> "PoseStamped": """Returns this pose in the world frame - flattens the frame hierarchy. Args: physics: Required if the frame of the root pose is a Grounding, in which case we need a physics to get its world pose. If the root pose is None, we assume the highest-level frame is world. Returns: A new PoseStamped, in the world frame. """ return PoseStamped(pose=self.get_world_pose(physics), frame=None) def to_frame(self, frame: Optional[Frame], physics: Optional[Physics] = None) -> "PoseStamped": """Returns this pose in the given frame. Args: frame: A frame identifier. Can be an Grounding, a `PoseStamped`, or None. If None, world frame is assumed. physics: Required if the frame of the root pose is a Grounding, in which case we need a physics to get its world pose. If the root pose is None, we assume the highest-level frame is world. Returns: A new PoseStamped, in the given frame. """ return PoseStamped( pose=self.get_relative_pose(frame, physics=physics), frame=frame) def get_relative_pose(self, other: Optional[Frame], physics: Optional[Physics] = None) -> Pose: """Computes the pose of this frame with respect to `other`. Args: other: A frame. Can be an Grounding, a `PoseStamped`, or None. physics: Required if the frame of the root pose is a Grounding, in which case we need a physics to look up its world pose. If the root pose is None, we assume the highest-level frame is world. Returns: A `Pose` containing the pose of self in frame `other` """ return frame_relative_pose(self, other, physics=physics) def get_world_pose(self, physics: Optional[Physics] = None) -> Pose: """Recursively computes the world pose given the current frame. Args: physics: Required if the frame of the root pose is a reference that only a physics instance can resolve, If the root pose is None, we assume the highest-level frame is world. Returns: A `Pose` containing the world pose Raises: ValueError: If the root frame is a Grounding and no physics was provided. """ # recurse back through poses until we have the transform wrt a grounding frame = self.frame pose = self.pose if self.pose is not None else Pose() if isinstance(frame, PoseStamped): pose_world_element = frame.get_world_pose(physics) elif frame is None: # if no grounding at root, assume pose contains a world-frame transform pose_world_element = Pose() else: # Get the world pose of the frame from physics. if physics is None: raise ValueError( "A Physics object is required for frames with a grounding") else: pose_world_element = physics.world_pose(frame) return pose_world_element.mul(pose) @property def data(self) -> Pose: """Returns pose. 
Provides a common data accessor across stamped types.""" return self._pose @property def pose(self) -> Pose: return self._pose @property def frame(self): return self._frame @property def name(self): return self._name def replace(self, pose=_DEFAULT, frame=_DEFAULT, name=_DEFAULT): if pose is _DEFAULT: pose = self.pose if frame is _DEFAULT: frame = self.frame if name is _DEFAULT: name = self.name return PoseStamped(pose=pose, frame=frame, name=name) def with_pose(self, pose): return self.replace(pose=pose) def with_frame(self, frame): return self.replace(frame=frame) class HybridPoseStamped(PoseStamped): """A PoseStamped with a dynamically-overridable position or orientation. HybridPoseStamped is a convenience class to represent a PoseStamped whose world position or orientation can be overridden using user-provided values. This is useful when we want to define a frame which has a fixed position or orientation relative to a dynamic frame, i.e. one with a grounding as a parent. For example, we often want to define a coordinate frame for joystick actions as having the position of some gripper-site, but the orientation of the robot or world frame. In this case we can't simply compute a fixed Pose to transform the gripper's PoseStamped to the desired frame at init-time because of the lazy-evaluation of Grounding frames. """ def __init__( self, pose: Union[Pose, np.ndarray, None], frame: Optional[Frame] = None, name: Text = "", position_override: Optional[Frame] = None, quaternion_override: Optional[Frame] = None, ): """Initialize PoseStamped. The override parameters are mutually exclusive, only one may be supplied. Args: pose: A `Pose` object. frame: A frame identifier. Can be a Grounding, a `PoseStamped`, or None. If None, users should assume world frame. name: Frame name. position_override: A position override for the final world-pose. the frame's world-pose is evaluated and the position is used to override the position of `frame`. quaternion_override: A quaternion override for the final world-pose. The frame's world-pose is evaluated and the quaternion is used to override the rotation of `frame`. Raises: ValueError: If both position_override and quaternion_override are given. """ super().__init__(pose, frame, name) if position_override is not None and quaternion_override is not None: raise ValueError("Attempting to create a HybridPoseStamped with " "multiple position / quaternion overrides. " "Just create a child frame.") self._position_override = position_override self._quaternion_override = quaternion_override def __eq__(self, other): if isinstance(other, HybridPoseStamped): return (super().__eq__(other) and (self.position_override == other.position_override) and (self.quaternion_override == other.quaternion_override)) else: return NotImplemented def __hash__(self): return hash((HybridPoseStamped, super().__hash__())) def __repr__(self) -> Text: return ("{}(pose={}, frame={}, position_override={}, " "quaternion_override={})".format(self.__class__.__name__, self.pose, self.frame, self.position_override, self.quaternion_override)) @property def position_override(self): return self._position_override @property def quaternion_override(self): return self._quaternion_override def get_world_pose(self, physics: Optional[Physics] = None) -> Pose: """Recursively computes the world pose given the current frame. For HybridPoseStamped the Pose members override the final transform rather than post-multiplying it. 
Args: physics: Required if the frame of the root pose is a Grounding, in which case we need a physics to look up its world pose. If the root pose is None, we assume the highest-level frame is world. Returns: A `Pose` containing the world pose Raises: ValueError: If a Grounding is the root frame and no Physics was provided. """ world_pose = super().get_world_pose(physics) if self._position_override is not None: position_override = frame_world_pose(self._position_override, physics).position return world_pose.with_position(position_override) elif self._quaternion_override is not None: quaternion_override = frame_world_pose(self._quaternion_override, physics).quaternion return world_pose.with_quaternion(quaternion_override) else: return world_pose def replace(self, pose=_DEFAULT, frame=_DEFAULT, name=_DEFAULT, position_override=_DEFAULT, quaternion_override=_DEFAULT): if pose is _DEFAULT: pose = self.pose if frame is _DEFAULT: frame = self.frame if name is _DEFAULT: name = self.name if position_override is _DEFAULT: position_override = self.position_override if quaternion_override is _DEFAULT: quaternion_override = self.quaternion_override return HybridPoseStamped( pose=pose, frame=frame, name=name, position_override=position_override, quaternion_override=quaternion_override) class Vec6(object): """A helper base-class with operators for 6-vector types. Immutable. """ __slots__ = ("_data",) def __init__(self, vec=None): if vec is None: self._data = np.zeros(6) else: self._data = np.asarray(vec) assert self._data.shape == (6,) # Defensive copy only if required. if self._data is vec: self._data = np.copy(self._data) self._data.flags.writeable = False def __getitem__(self, idx: int): return self._data[idx] def __repr__(self): return "{}({})".format(self.__class__.__name__, repr(self._data)) def __add__(self, other): rhs = other.data if isinstance(other, Vec6) else other return type(self)(self.data.__add__(rhs)) def __radd__(self, other_rhs): rhs = other_rhs.data if isinstance(other_rhs, Vec6) else other_rhs return type(self)(self.data.__add__(rhs)) def __sub__(self, other): rhs = other.data if isinstance(other, Vec6) else other return type(self)(self.data.__sub__(rhs)) def __mul__(self, other): rhs = other.data if isinstance(other, Vec6) else other return type(self)(self.data.__mul__(rhs)) def __rmul__(self, other_rhs): rhs = other_rhs.data if isinstance(other_rhs, Vec6) else other_rhs return type(self)(self.data.__mul__(rhs)) def __truediv__(self, other): rhs = other.data if isinstance(other, Vec6) else other return type(self)(self.data.__truediv__(rhs)) def __eq__(self, other): if isinstance(other, Vec6): return np.allclose(self.data, other.data) else: return NotImplemented def __hash__(self): return hash(tuple([type(self)] + self._data.tolist())) def _with_data(self, data_slice, value): """Creates a new object with `_data[data_slice]` set to `value`.""" new_data = np.copy(self._data) new_data[data_slice] = value return type(self)(new_data) @property def data(self): return self._data def with_data(self, value): return self._with_data(slice(0, 6), value) class VectorStamped(object): """A generic class for representing vectors relative to a Frame. This class can be helpful for defining frame-dependent quantities which aren't specifically position or force related quantities, e.g. a control gain. Frame transformations for generic vectors only perform rotation, and omit the cross-product terms in velocity_transform and force_transform. I.e. they only support changing the view on the vector. 
* Currently only supports 6-vectors. VectorStamped is immutable. """ __slots__ = ("_vector", "_frame") base_type = Vec6 def __init__(self, vector: Optional[Union[Sequence[float], np.ndarray, Vec6]], frame: Optional[Frame]): """Initialize VectorStamped. Args: vector: A `Vec6` or simply a 6-dim numpy array or list. If None, a default `Vec6` is used, with all components being zero. frame: A frame identifier. Can be an Grounding, a `PoseStamped`, or None. If None, users should assume world frame. """ if isinstance(vector, self.base_type): self._vector = vector else: self._vector = self.base_type(vector) self._frame = frame def __repr__(self): return "VectorStamped(vector={}, frame={})".format(self.vector, self.frame) def __eq__(self, other): if isinstance(other, VectorStamped): return self.data == other.data and self.frame == other.frame else: return NotImplemented def __hash__(self): return hash((VectorStamped, self.vector)) def to_frame(self, frame: Optional[Frame], physics: Optional[Physics] = None) -> "VectorStamped": """Sets the frame and updates the vector accordingly. This function will not change the implied world vector, but it will result in the vector being expressed with respect to a new frame. Args: frame: A frame identifier. Can be a Grounding, a PoseStamped, or None. If None, users should assume world frame. physics: Required if the frame of the root pose is a Grounding, in which case we need a physics to look up its world pose. If the root pose is None, we assume the highest-level frame is world. Returns: A new VectorStamped with the given frame. """ return VectorStamped( vector=self.get_relative_vector(frame, physics=physics), frame=frame) def to_world(self, physics: Optional[Physics] = None) -> "VectorStamped": """Converts vector to the world frame - flattens the frame hierarchy. Args: physics: Required if the frame of the root pose is a Grounding, in which case we need a physics to look up its world pose. If the root pose is None, we assume the highest-level frame is world. Returns: A new VectorStamped in the world frame. """ return VectorStamped(vector=self.get_world_vector(physics), frame=None) def get_relative_vector(self, frame: Optional[Frame], physics: Optional[Physics] = None) -> Vec6: """Returns this vector in frame `frame`. Args: frame: A frame identifier. Can be a Grounding, a PoseStamped, or None. If None, users should assume world frame. physics: Required if the frame of the root pose is a Grounding, in which case we need a physics to look up its world pose. If the root pose is None, we assume the highest-level frame is world. """ pose_frame_self = frame_relative_pose(self.frame, frame, physics=physics) vector_frame = tr.rotate_vec6(pose_frame_self.hmat, self.vector.data) return self.base_type(vector_frame) def get_world_vector(self, physics: Optional[Physics] = None) -> Vec6: """Computes equivalent vector in the world frame. Args: physics: Required if the frame of the root pose is a Grounding, in which case we need a physics to look up its world pose. If the root pose is None, we assume the highest-level frame is world. Returns: A `Vec6` containing this vector in the world frame. Raises: ValueError: If a Grounding is the root frame no Physics was provided. """ pose_world_frame = frame_world_pose(self.frame, physics) vector_world = tr.rotate_vec6(pose_world_frame.hmat, self.vector.data) return self.base_type(vector_world) @property def data(self): """Returns vector. 
Provides a common data accessor across stamped types.""" return self._vector @property def vector(self): return self._vector @property def frame(self): return self._frame def with_vector(self, vector): return VectorStamped(vector=vector, frame=self.frame) def with_frame(self, frame): return VectorStamped(vector=self.vector, frame=frame) class Twist(Vec6): """A class for representing a cartesian velocity. Equivalent to ROS geometry_msgs/Twist, except represented as a single numpy 6-dim array as [linear, angular] This class is immutable. """ __slots__ = ("_data",) def _linear_slice(self): return slice(0, 3) def _angular_slice(self): return slice(3, 6) def __add__(self, other): if isinstance(other, (Accel, Twist, Wrench)): raise TypeError("Cannot add these types, consider Vec6") else: return super().__add__(other) def __sub__(self, other): if isinstance(other, (Accel, Twist, Wrench)): raise TypeError("Cannot subtract these types, consider Vec6") else: return super().__sub__(other) def __mul__(self, other): if isinstance(other, (Accel, Twist, Wrench)): raise TypeError("Cannot multiply these types, consider Vec6") else: return super().__mul__(other) def __truediv__(self, other): if isinstance(other, (Accel, Twist, Wrench)): raise TypeError("Cannot divide these types, consider Vec6") else: return super().__truediv__(other) def __radd__(self, other_rhs): # pylint: disable=useless-super-delegation return super().__radd__(other_rhs) def __rmul__(self, other_rhs): # pylint: disable=useless-super-delegation return super().__rmul__(other_rhs) @property def linear(self): """Cartesian linear velocity.""" return self._data[self._linear_slice()] def with_linear(self, value): return self._with_data(self._linear_slice(), value) @property def angular(self): """Cartesian angular velocity.""" return self._data[self._angular_slice()] def with_angular(self, value): return self._with_data(self._angular_slice(), value) @property def full(self): return self.data def with_full(self, value): return self.with_data(value) def __repr__(self): return "Twist(linear={}, angular={})".format(self.linear, self.angular) class TwistStamped(object): """A class for representing a twist relative to a Frame. The purpose of this class is to simplify the process of converting cartesian velocities between frames. For example it is often necessary to define a desired twist with respect to an interest point on a grasped object, and then convert it to a wrist or pinch-site for manipulation control. Equivalent to ROS geometry_msgs/TwistStamped, but supports a Grounding or a `PoseStamped` (or None) instead of a just string frame_id for the frame identifier. TwistStamped is immutable. """ __slots__ = ("_twist", "_frame") base_type = Twist def __init__(self, twist: Optional[Union[Twist, np.ndarray, Sequence[float]]], frame: Optional[Frame]): """Initialize TwistStamped. Args: twist: A `Twist` or simply a 6-dim numpy array or list, If None, a default `Twist` is used, with all components being zero. frame: A frame identifier. Can be a Grounding, a PoseStamped, or None. If None, users should assume world frame. 
""" if isinstance(twist, self.base_type): self._twist = twist else: self._twist = self.base_type(twist) self._frame = frame def __repr__(self): return "TwistStamped(twist={}, frame={})".format(self.twist, self.frame) def __eq__(self, other): if isinstance(other, TwistStamped): return self.data == other.data and self.frame == other.frame else: return NotImplemented def __hash__(self): return hash((TwistStamped, self._twist)) def to_frame(self, frame: Optional[Frame], physics: Optional[Physics] = None) -> "TwistStamped": """Returns a new TwistStamped with the given frame and updated twist. This function will not change the implied world twist, but it will result in the twist being expressed with respect to a new frame. Args: frame: A frame identifier. Can be a Grounding, a PoseStamped, or None. If None, users should assume world frame. physics: Required if the frame of the root pose is a Grounding, in which case we need a physics to look up its world pose. If the root pose is None, we assume the highest-level frame is world. Returns: A new `TwistStamped`, with the given frame. """ return TwistStamped( twist=self.get_relative_twist(frame, physics=physics), frame=frame) def to_world(self, physics: Optional[Physics] = None) -> "TwistStamped": """Converts twist to the world frame - flattens the frame hierarchy. Args: physics: Required if the frame of the root pose is a Grounding, in which case we need a physics to look up its world pose. If the root pose is None, we assume the highest-level frame is world. Returns: A new `TwistStamped`, in the world frame. """ return TwistStamped(twist=self.get_world_twist(physics), frame=None) def get_relative_twist(self, frame: Optional[Frame], physics: Optional[Physics] = None) -> Twist: """Returns this twist in frame `frame`. Args: frame: A frame identifier. Can be a Grounding, a PoseStamped, or None. If None, users should assume world frame. physics: Required if the frame of the root pose is a Grounding, in which case we need a physics to look up its world pose. If the root pose is None, we assume the highest-level frame is world. """ pose_frame_self = frame_relative_pose(self.frame, frame, physics=physics) twist_frame = tr.velocity_transform(pose_frame_self.hmat, self.twist.full) return self.base_type(twist_frame) def get_world_twist(self, physics: Optional[Physics] = None, rot_only: bool = False) -> Twist: """Computes equivalent twist in the world frame. Note that by default this is NOT simply the twist of this frame rotated to the world frame (unless rot_only is True). Rather, it is the instantaneous velocity of the world origin when rigidly attached to this twist's frame. Args: physics: Required if the frame of the root pose is a Grounding, in which case we need a physics to look up its world pose. If the root pose is None, we assume the highest-level frame is world. rot_only: (optional) If True, drops the translation to the world origin. Use this as a shortcut to obtaining the twist of this frame as viewed in world coords, without creating a new frame and explicitly calling `to_frame`. Returns: A new `Twist` representing this twist at the world frame origin. Raises: ValueError: If a Grounding is the root frame no Physics was provided. """ pose_world_frame = frame_world_pose(self.frame, physics) if rot_only: pose_world_frame = pose_world_frame.with_position(_ZERO_POSITION) twist_world = tr.velocity_transform(pose_world_frame.hmat, self.twist.full) return self.base_type(twist_world) @property def data(self) -> Twist: """Returns twist. 
Provides a common data accessor across stamped types.""" return self._twist @property def twist(self) -> Twist: return self._twist @property def frame(self): return self._frame def with_twist(self, twist): return TwistStamped(twist=twist, frame=self.frame) def with_frame(self, frame): return TwistStamped(twist=self.twist, frame=frame) class Wrench(Vec6): """A class for representing a cartesian wrench. Equivalent to ROS geometry_msgs/Wrench, except represented as a single numpy 6-dim array as [force, torque] This class is immutable. """ __slots__ = ("_data",) def _force_slice(self): return slice(0, 3) def _torque_slice(self): return slice(3, 6) def __add__(self, other): if isinstance(other, (Accel, Twist, Wrench)): raise TypeError("Cannot add these types, consider Vec6") else: return super().__add__(other) def __sub__(self, other): if isinstance(other, (Accel, Twist, Wrench)): raise TypeError("Cannot add these types, consider Vec6") else: return super().__sub__(other) def __mul__(self, other): if isinstance(other, (Accel, Twist, Wrench)): raise TypeError("Cannot add these types, consider Vec6") else: return super().__mul__(other) def __truediv__(self, other): if isinstance(other, (Accel, Twist, Wrench)): raise TypeError("Cannot add these types, consider Vec6") else: return super().__truediv__(other) def __radd__(self, other_rhs): # pylint: disable=useless-super-delegation return super().__radd__(other_rhs) def __rmul__(self, other_rhs): # pylint: disable=useless-super-delegation return super().__rmul__(other_rhs) @property def force(self): return self._data[self._force_slice()] def with_force(self, value): return self._with_data(self._force_slice(), value) @property def torque(self): return self._data[self._torque_slice()] def with_torque(self, value): return self._with_data(self._torque_slice(), value) @property def full(self): return self.data def with_full(self, value): return self.with_data(value) def __repr__(self): return "Wrench(force={}, torque={})".format(self.force, self.torque) class WrenchStamped(object): """A class for representing a wrench relative to a Frame. The purpose of this class is to simplify the process of converting cartesian wrenches between frames. For example it is often necessary to define a desired wrench with respect to an interest point on a grasped object, and then convert it to a wrist or pinch-site for manipulation control. Equivalent to ROS geometry_msgs/WrenchStamped, but supports a Grounding or a `PoseStamped` (or None) instead of a just string frame_id for the frame identifier. WrenchStamped is immutable. """ __slots__ = ("_wrench", "_frame") base_type = Wrench def __init__(self, wrench: Optional[Union[Wrench, np.ndarray, Sequence[float]]], frame: Optional[Frame]): """Initialize WrenchStamped. Args: wrench: A `Wrench` or simply a 6-dim numpy array or list. If None, a default `Wrench` is used, with all components being zero. frame: A frame identifier. Can be a Grounding, a PoseStamped, or None. If None, users should assume world frame. """ if isinstance(wrench, self.base_type): self._wrench = wrench else: self._wrench = self.base_type(wrench) self._frame = frame def __eq__(self, other): if isinstance(other, WrenchStamped): return self.data == other.data and self.frame == other.frame else: return NotImplemented def __hash__(self): return hash((WrenchStamped, self._wrench)) def to_frame(self, frame: Optional[Frame], physics: Optional[Physics] = None) -> "WrenchStamped": """Sets the frame and updates the wrench accordingly. 
This function will not change the implied world wrench, but it will result in the wrench being expressed with respect to a new frame. Args: frame: A frame identifier. Can be a Grounding, a PoseStamped, or None. If None, users should assume world frame. physics: Required if the frame of the root pose is a Grounding, in which case we need a physics to look up its world pose. If the root pose is None, we assume the highest-level frame is world. Returns: A `WrenchStamped`, in the given frame. """ return WrenchStamped( wrench=self.get_relative_wrench(frame, physics=physics), frame=frame) def to_world(self, physics: Optional[Physics] = None) -> "WrenchStamped": """Converts wrench to the target frame - flattens the frame hierarchy. Args: physics: Required if the frame of the root pose is a Grounding, in which case we need a physics to look up its world pose. If the root pose is None, we assume the highest-level frame is world. Returns: A `WrenchStamped`, in the world frame. """ return WrenchStamped(wrench=self.get_world_wrench(physics), frame=None) def get_relative_wrench(self, frame: Optional[Frame], physics: Optional[Physics] = None) -> Wrench: """Returns this wrench in frame `frame`. Args: frame: A frame identifier. Can be a Grounding, a PoseStamped, or None. If None, users should assume world frame. physics: Required if the frame of the root pose is a Grounding, in which case we need a physics to look up its world pose. If the root pose is None, we assume the highest-level frame is world. """ pose_frame_self = frame_relative_pose(self.frame, frame, physics=physics) wrench_frame = tr.force_transform(pose_frame_self.hmat, self.wrench.full) return self.base_type(wrench_frame) def get_world_wrench(self, physics: Optional[Physics] = None, rot_only: bool = False) -> Wrench: """Computes equivalent wrench in the world frame. Note that by default this is NOT simply the wrench of this frame rotated to the world frame (unless rot_only is True). Rather, it is the instantaneous wrench of a point rigidly attached to this wrench's frame that is currently at the world origin. Args: physics: Required if the frame of the root pose is a Grounding, in which case we need a physics to look up its world pose. If the root pose is None, we assume the highest-level frame is world. rot_only: (optional) If True, drops the translation to the world origin. Use this as a shortcut to obtaining the wrench of this frame as viewed in world coords, without creating a new frame and explicitly calling `to_frame`. Returns: A new `Wrench` representing this wrench at the world frame origin. Raises: ValueError: If a Grounding is the root frame no Physics was provided. """ pose_world_frame = frame_world_pose(self.frame, physics) if rot_only: pose_world_frame = pose_world_frame.with_position(_ZERO_POSITION) wrench_world = tr.force_transform(pose_world_frame.hmat, self.wrench.full) return self.base_type(wrench_world) @property def data(self): """Returns wrench. Provides a common data accessor across stamped types.""" return self._wrench @property def wrench(self): return self._wrench @property def frame(self): return self._frame def with_wrench(self, wrench): return WrenchStamped(wrench=wrench, frame=self.frame) def with_frame(self, frame): return WrenchStamped(wrench=self.wrench, frame=frame) def __repr__(self): return "WrenchStamped(wrench={}, frame={})".format(self.wrench, self.frame) class Accel(Vec6): """A class for representing a cartesian acceleration. 
Equivalent to ROS geometry_msgs/Accel, except represented as a single numpy 6-dim array as [linear, angular]. This class is immutable. """ __slots__ = ("_data",) def _linear_slice(self): return slice(0, 3) def _angular_slice(self): return slice(3, 6) def __add__(self, other): if isinstance(other, (Accel, Twist, Wrench)): raise TypeError("Cannot add these types, consider Vec6") else: return super().__add__(other) def __sub__(self, other): if isinstance(other, (Accel, Twist, Wrench)): raise TypeError("Cannot add these types, consider Vec6") else: return super().__sub__(other) def __mul__(self, other): if isinstance(other, (Accel, Twist, Wrench)): raise TypeError("Cannot add these types, consider Vec6") else: return super().__mul__(other) def __truediv__(self, other): if isinstance(other, (Accel, Twist, Wrench)): raise TypeError("Cannot add these types, consider Vec6") else: return super().__truediv__(other) def __radd__(self, other_rhs): # pylint: disable=useless-super-delegation return super().__radd__(other_rhs) def __rmul__(self, other_rhs): # pylint: disable=useless-super-delegation return super().__rmul__(other_rhs) @property def linear(self): """Cartesian linear acceleration.""" return self._data[self._linear_slice()] def with_linear(self, value): return self._with_data(self._linear_slice(), value) @property def angular(self): """Cartesian angular acceleration.""" return self._data[self._angular_slice()] def with_angular(self, value): return self._with_data(self._angular_slice(), value) @property def full(self): return self.data def with_full(self, value): return self.with_data(value) class AccelStamped(object): """A class for representing an acceleration relative to a Frame. The purpose of this class is to simplify the process of converting cartesian accelerations between frames. For example it is often necessary to define a desired accel with respect to an interest point on a grasped object, and then convert it to a wrist or pinch-site for manipulation control. Equivalent to ROS geometry_msgs/AccelStamped, but supports a Grounding or a `PoseStamped` (or None) instead of a just string frame_id for the frame identifier. AccelStamped is immutable. """ __slots__ = ("_accel", "_frame") base_type = Accel def __init__(self, accel: Optional[Union[Accel, np.ndarray, Sequence[float]]], frame: Optional[Frame]): """Initialize AccelStamped. Args: accel: A `Accel` or simply a 6-dim numpy array or list. If None, a default `Accel` is used, with all components being zero. frame: A frame identifier. Can be a Grounding, a PoseStamped, or None. If None, users should assume world frame. """ if isinstance(accel, self.base_type): self._accel = accel else: self._accel = self.base_type(accel) self._frame = frame def __eq__(self, other): if isinstance(other, AccelStamped): return self.data == other.data and self.frame == other.frame else: return NotImplemented def __hash__(self): return hash((AccelStamped, self._accel)) def to_frame(self, frame: Optional[Frame], physics: Optional[Physics] = None) -> "AccelStamped": """Sets the frame and updates the accel accordingly. This function will not change the implied world accel, but it will result in the accel being expressed with respect to a new frame. Args: frame: A frame identifier. Can be a Grounding, a PoseStamped, or None. If None, users should assume world frame. physics: Required if the frame of the root pose is a Grounding, in which case we need a physics to look up its world pose. If the root pose is None, we assume the highest-level frame is world. 
Returns: An AccelStamped in the given frame. """ return AccelStamped( accel=self.get_relative_accel(frame, physics=physics), frame=frame) def to_world(self, physics: Optional[Physics] = None) -> "AccelStamped": """Converts accel to the world frame - flattens the frame hierarchy. Args: physics: Required if the frame of the root pose is a Grounding, in which case we need a physics to look up its world pose. If the root pose is None, we assume the highest-level frame is world. Returns: An AccelStamped in the world frame. """ return AccelStamped(accel=self.get_world_accel(physics), frame=None) def get_relative_accel(self, frame: Optional[Frame], physics: Optional[Physics] = None) -> Accel: """Converts accel to the target frame. Args: frame: A frame identifier. Can be a Grounding, a PoseStamped, or None. If None, users should assume world frame. physics: Required if the frame of the root pose is a Grounding, in which case we need a physics to look up its world pose. If the root pose is None, we assume the highest-level frame is world. Returns: accel_frame: A `Accel` containing the accel in frame `frame` """ pose_frame_self = frame_relative_pose(self.frame, frame, physics=physics) accel_frame = tr.force_transform(pose_frame_self.hmat, self.accel.full) return self.base_type(accel_frame) def get_world_accel(self, physics: Optional[Physics] = None, rot_only: bool = False) -> Accel: """Computes equivalent acceleration in the world frame. Note that by default this is NOT simply the accel of this frame rotated to the world frame (unless rot_only is True). Rather, it is the instantaneous accel of a point rigidly attached to this accel's frame that is currently at the world origin. Args: physics: Required if the frame of the root pose is a Grounding, in which case we need a physics to look up its world pose. If the root pose is None, we assume the highest-level frame is world. rot_only: (optional) If True, drops the translation to the world origin. Use this as a shortcut to obtaining the accel of this frame as viewed in world coords, without creating a new frame and explicitly calling `to_frame`. Returns: A new `Accel` representing this acceleration at the world frame origin. Raises: ValueError: If a Grounding is the root frame no Physics was provided. """ pose_world_frame = frame_world_pose(self.frame, physics) if rot_only: pose_world_frame = pose_world_frame.with_position(_ZERO_POSITION) accel_world = tr.force_transform(pose_world_frame.hmat, self.accel.full) return self.base_type(accel_world) @property def data(self): """Returns accel. Provides a common data accessor across stamped types.""" return self._accel @property def accel(self): return self._accel @property def frame(self): return self._frame def with_accel(self, accel): return AccelStamped(accel=accel, frame=self.frame) def with_frame(self, frame): return AccelStamped(accel=self.accel, frame=frame) def __repr__(self): return "AccelStamped(accel={}, frame={})".format(self.accel, self.frame)
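# --- Illustrative usage sketch (editor addition, not part of the module). ---
# How the stamped types are typically composed: a frame hierarchy is built out
# of PoseStamped objects, and twists are re-expressed between frames. The
# frame names below are hypothetical; no Physics is required because the root
# frame is None (world).


def _demo_frame_hierarchy():
  # A robot base 10 cm above the world origin, and a tool frame 5 cm in front
  # of the base, rotated 90 degrees about z.
  base = PoseStamped(Pose(position=[0.0, 0.0, 0.1]), frame=None, name="base")
  tool = PoseStamped(
      Pose.from_poseuler([0.05, 0.0, 0.0, 0.0, 0.0, np.pi / 2]),
      frame=base,
      name="tool")

  # World pose of the tool, resolved recursively through the hierarchy.
  tool_world = tool.get_world_pose()

  # A linear velocity defined in the tool frame, viewed in world coordinates.
  twist_tool = TwistStamped(
      Twist([0.1, 0.0, 0.0, 0.0, 0.0, 0.0]), frame=tool)
  twist_world = twist_tool.get_world_twist(rot_only=True)
  return tool_world, twist_world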
dm_robotics-main
py/geometry/geometry.py
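A minimal usage sketch for the `AccelStamped` class above. It assumes the module is importable as `dm_robotics.geometry.geometry`, that `Accel` accepts a 6-element sequence (as the `AccelStamped` docstring implies), and that `Pose`/`PoseStamped` take `position`/`quaternion` and `pose`/`frame` arguments; treat those constructor names as illustrative rather than authoritative.

from dm_robotics.geometry import geometry  # assumed import path

# A linear acceleration of 1 m/s^2 along +z, zero angular acceleration.
accel = geometry.Accel([0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

# A frame 0.5 m along +x from world with identity rotation; frame=None means
# the parent frame is the world frame.
frame = geometry.PoseStamped(
    pose=geometry.Pose(position=[0.5, 0.0, 0.0],
                       quaternion=[1.0, 0.0, 0.0, 0.0]),
    frame=None)

stamped = geometry.AccelStamped(accel=accel, frame=frame)

# Re-express the acceleration in the world frame. No physics object is needed
# because the frame hierarchy bottoms out at None (world).
world = stamped.to_world()
print(world.accel.linear, world.accel.angular)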
# Copyright 2020 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Build script for the python controller bindings.""" import os import subprocess import sys from setuptools import Extension from setuptools import setup from setuptools.command.build_ext import build_ext PRE_DOWNLOADED_SOURCE_DIRS = { "abseil-cpp": "/deps/abseil-cpp_215105818dfde3174fe799600bb0f3cae233d0bf", "osqp-cpp": "/deps/osqp-cpp_8cd904e2b49c24dd41d11f8c6e0adb113dd5e26c", "osqp": "/deps/osqp_f9fc23d3436e4b17dd2cb95f70cfa1f37d122c24", "pybind11": "/deps/pybind11_914c06fb252b6cc3727d0eedab6736e88a3fcb01", "googletest": "/deps/googletest_e2239ee6043f73722e7aa812a459f54a28552929" } class CMakeExtension(Extension): """Extension to record the directory to run cmake on.""" def __init__(self, name, sourcedir, cmake): Extension.__init__(self, name, sources=[]) self.sourcedir = os.path.abspath(sourcedir) self.cmake = cmake class CMakeBuild(build_ext): """Runs cmake.""" def build_extension(self, ext): use_preinstalled_libs = ( os.environ.get("DM_ROBOTICS_USE_PREINSTALLED_LIBRARIES", None) is not None) output_directory = os.path.abspath( os.path.dirname(self.get_ext_fullpath(ext.name))) # required for auto-detection of auxiliary "native" libs if not output_directory.endswith(os.path.sep): output_directory += os.path.sep build_type = "Debug" if self.debug else "Release" cmake_args = [ "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={}".format(output_directory), "-DPYTHON_EXECUTABLE={}".format(sys.executable), "-DDMR_PYTHON_VERSION={}.{}".format(sys.version_info.major, sys.version_info.minor), "-DCMAKE_BUILD_TYPE={}".format(build_type), "-DDM_ROBOTICS_BUILD_TESTS=OFF", "-DDM_ROBOTICS_BUILD_WHEEL=True", "--log-level=VERBOSE", ] if use_preinstalled_libs: cmake_args.append("-DFETCHCONTENT_FULLY_DISCONNECTED:BOOL=TRUE") cmake_args.append("-DDM_ROBOTICS_USE_SYSTEM_Eigen3:BOOL=TRUE") for libname, dirname in PRE_DOWNLOADED_SOURCE_DIRS.items(): if os.path.isdir(dirname): cmake_args.append( f"-DFETCHCONTENT_SOURCE_DIR_{libname.upper()}:STRING={dirname}") build_args = [] if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ: build_args += ["-j4"] print(f"Cmake build args: {cmake_args}") if not os.path.exists(self.build_temp): os.makedirs(self.build_temp) # Generate build files: subprocess.check_call( [ext.cmake] + cmake_args + ["-S", ext.sourcedir], cwd=self.build_temp) # Build. subprocess.check_call( [ext.cmake, "--build", "."] + build_args, cwd=self.build_temp) def _get_requirements(requirements_file): # pylint: disable=g-doc-args """Returns a list of dependencies for setup() from requirements.txt. Currently a requirements.txt is being used to specify dependencies. In order to avoid specifying it in two places, we're going to use that file as the source of truth. Lines starting with -r will be ignored. If the requirements are split across multiple files, call this function multiple times instead and sum the results. 
""" def line_should_be_included(line): return line and not line.startswith("-r") with open(requirements_file) as f: return [_parse_line(line) for line in f if line_should_be_included(line)] def _parse_line(s): """Parses a line of a requirements.txt file.""" requirement, *_ = s.split("#") return requirement.strip() setup( name="dm_robotics-controllers", package_dir={"dm_robotics.controllers": ""}, packages=["dm_robotics.controllers"], version="0.5.0", license="Apache 2.0", author="DeepMind", description="Python bindings for dm_robotics/cpp/controllers", long_description=open("controllers_py/README.md").read(), long_description_content_type="text/markdown", url="https://github.com/deepmind/dm_robotics/tree/main/cpp/controllers_py", python_requires=">=3.7, <3.11", setup_requires=["wheel >= 0.31.0"], classifiers=[ "Development Status :: 5 - Production/Stable", "Programming Language :: Python :: 3", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Topic :: Software Development :: Libraries", "Topic :: Scientific/Engineering", ], ext_modules=[ CMakeExtension( "dm_robotics.controllers.cartesian_6d_to_joint_velocity_mapper", sourcedir="", cmake=os.environ.get("CMAKE_EXE", "cmake")) ], cmdclass={"build_ext": CMakeBuild}, zip_safe=False, )
dm_robotics-main
cpp/setup.py
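A small self-contained sketch of the requirements-file parsing described by `_get_requirements` and `_parse_line` above; the requirement lines here are made up for illustration.

def _parse_line(s):
  """Strips an inline comment and surrounding whitespace from one line."""
  requirement, *_ = s.split("#")
  return requirement.strip()

lines = [
    "numpy >= 1.16  # needed at build time",
    "-r more_requirements.txt",  # lines starting with -r are skipped
    "absl-py",
]
print([_parse_line(l) for l in lines if l and not l.startswith("-r")])
# -> ['numpy >= 1.16', 'absl-py']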
# Copyright 2020 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
dm_robotics-main
cpp/controllers_py/__init__.py
# Copyright 2020 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Example usage for cartesian_6d_to_joint_velocity_mapper module.""" import threading import time from typing import Sequence from absl import app from dm_control import mujoco from dm_control.mujoco.wrapper.mjbindings import enums from dm_control.suite import humanoid from dm_control.viewer import gui from dm_robotics.controllers import cartesian_6d_to_joint_velocity_mapper import numpy as np mjlib = mujoco.wrapper.mjbindings.mjlib _WINDOW_HEIGHT = 480 _WINDOW_WIDTH = 640 def _create_mapper_params( physics: mujoco.Physics ) -> cartesian_6d_to_joint_velocity_mapper.Parameters: """Creates the parameters for the Cartesian 6D to joint velocity mapper.""" # Set params for controlling the left hand, and enabling active collision # avoidance between the left arm and the floor. params = cartesian_6d_to_joint_velocity_mapper.Parameters() params.model = physics.model params.joint_ids = [19, 20, 21] params.object_type = enums.mjtObj.mjOBJ_SITE params.object_name = "left_hand" params.integration_timestep = 0.005 # 5ms params.enable_joint_position_limits = False params.joint_position_limit_velocity_scale = 0.95 params.minimum_distance_from_joint_position_limit = 0.01 # ~0.5deg. params.enable_joint_velocity_limits = True params.joint_velocity_magnitude_limits = [0.5, 0.5, 0.5] params.enable_joint_acceleration_limits = False params.remove_joint_acceleration_limits_if_in_conflict = True params.joint_acceleration_magnitude_limits = [1.0, 1.0, 1.0] params.enable_collision_avoidance = True params.collision_avoidance_normal_velocity_scale = 0.01 params.minimum_distance_from_collisions = 0.005 params.collision_detection_distance = 10.0 params.collision_pairs = [(["left_upper_arm", "left_lower_arm", "left_hand"], ["floor"])] print(params.cartesian_velocity_task_weighting_matrix) params.cartesian_velocity_task_weighting_matrix = [ # 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] # print(params.cartesian_velocity_task_weighting_matrix) params.check_solution_validity = True params.solution_tolerance = 1e-3 params.regularization_weight = 1e-3 params.enable_nullspace_control = True params.return_error_on_nullspace_failure = False params.nullspace_projection_slack = 1e-7 return params def _control_humanoid(physics: mujoco.Physics, physics_lock: threading.Lock): """Controls the humanoid's left arm to move towards the floor.""" params = _create_mapper_params(physics) mapper = cartesian_6d_to_joint_velocity_mapper.Mapper(params) # Move the left arm down towards the floor. nullspace_bias = [-1.0, 0.0, 1.0] target_velocity = [0.0, 0.0, -1.0, 0.0, 0.0, 0.0] while True: with physics_lock: # Compute joint velocities. solution = mapper.compute_joint_velocities(physics.data, target_velocity, nullspace_bias) # Set joint velocities. Note that `solution` is already sorted in # ascending order of `params.joint_ids`. 
physics.data.qvel[:] = [0.0] * physics.model.nv for joint_id, velocity in zip(sorted(params.joint_ids), solution): dof_adr = physics.model.jnt_dofadr[joint_id] physics.data.qvel[dof_adr] = velocity # Integrate, run MuJoCo kinematics, and render. mjlib.mj_integratePos(physics.model.ptr, physics.data.qpos, physics.data.qvel, params.integration_timestep.total_seconds()) mjlib.mj_fwdPosition(physics.model.ptr, physics.data.ptr) time.sleep(params.integration_timestep.total_seconds()) def _render_image(physics: mujoco.Physics, physics_lock: threading.Lock) -> np.ndarray: """Returns a view of the scene as a numpy array.""" with physics_lock: return physics.render(height=_WINDOW_HEIGHT, width=_WINDOW_WIDTH) def main(argv: Sequence[str]): del argv physics = humanoid.Physics.from_xml_string(*humanoid.get_model_and_assets()) physics_lock = threading.Lock() # Place the humanoid in a position where the left hand can collide with the # floor if it moves down. physics.data.qpos[2] = 0.3 mjlib.mj_fwdPosition(physics.model.ptr, physics.data.ptr) # Start control thread to compute velocities and integrate. control_thread = threading.Thread( target=lambda: _control_humanoid(physics, physics_lock)) control_thread.start() # Show the rendered image. Note how the left arm avoids collisions with the # floor. while True: window = gui.RenderWindow( width=_WINDOW_WIDTH, height=_WINDOW_HEIGHT, title="Cartesian6dVelocityToJointVelocityMapperExample") window.update(lambda: _render_image(physics, physics_lock)) if __name__ == "__main__": app.run(main)
dm_robotics-main
cpp/controllers_py/cartesian_6d_to_joint_velocity_mapper_example.py
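A condensed sketch of the core mapper calls from the example above, without the viewer or control thread. The joint ids and site name are those used in the example and are specific to the humanoid model; the target velocity is arbitrary.

from dm_control.mujoco.wrapper.mjbindings import enums
from dm_control.suite import humanoid
from dm_robotics.controllers import cartesian_6d_to_joint_velocity_mapper

physics = humanoid.Physics.from_xml_string(*humanoid.get_model_and_assets())

params = cartesian_6d_to_joint_velocity_mapper.Parameters()
params.model = physics.model
params.joint_ids = [19, 20, 21]                # Left-arm joints.
params.object_type = enums.mjtObj.mjOBJ_SITE
params.object_name = "left_hand"
params.integration_timestep = 0.005            # Seconds.

mapper = cartesian_6d_to_joint_velocity_mapper.Mapper(params)

# Drive the left hand straight down; no nullspace bias.
target_velocity = [0.0, 0.0, -0.1, 0.0, 0.0, 0.0]
joint_velocities = mapper.compute_joint_velocities(
    physics.data, target_velocity, None)
print(joint_velocities)  # One velocity per entry in params.joint_ids.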
# Copyright 2020 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for cartesian_6d_to_joint_velocity_mapper PyBind11 module.""" from absl.testing import absltest from absl.testing import parameterized from dm_control import mujoco from dm_control.suite import humanoid from dm_robotics.controllers import cartesian_6d_to_joint_velocity_mapper import numpy as np _MjGeom = mujoco.wrapper.mjbindings.enums.mjtObj.mjOBJ_GEOM _MjBody = mujoco.wrapper.mjbindings.enums.mjtObj.mjOBJ_BODY _MjSite = mujoco.wrapper.mjbindings.enums.mjtObj.mjOBJ_SITE mjlib = mujoco.wrapper.mjbindings.mjlib def _set_joint_velocities(physics, joint_ids, joint_velocities): """Sets the joint velocities in physics for a subset of joints.""" for i in range(0, physics.model.nv): physics.data.qvel[i] = 0.0 for joint_id, velocity in zip(sorted(joint_ids), joint_velocities): dof_adr = physics.model.jnt_dofadr[joint_id] physics.data.qvel[dof_adr] = velocity def _compute_object_jacobian(physics, object_name, object_type): """Computes an object's Jacobian.""" mjlib.mj_fwdPosition(physics.model.ptr, physics.data.ptr) mjlib.mj_fwdVelocity(physics.model.ptr, physics.data.ptr) jacobian = np.empty((6, physics.model.nv), dtype=physics.data.qpos.dtype) if object_type == _MjGeom or object_type == "geom": mjlib.mj_jacGeom(physics.model.ptr, physics.data.ptr, jacobian[:3], jacobian[3:], physics.model.name2id(object_name, _MjGeom)) elif object_type == _MjSite or object_type == "site": mjlib.mj_jacSite(physics.model.ptr, physics.data.ptr, jacobian[:3], jacobian[3:], physics.model.name2id(object_name, _MjSite)) elif object_type == _MjBody or object_type == "body": mjlib.mj_jacBody(physics.model.ptr, physics.data.ptr, jacobian[:3], jacobian[3:], physics.model.name2id(object_name, _MjBody)) else: raise ValueError("Invalid object type.") return jacobian def _compute_object_jacobian_for_joints(physics, object_name, object_type, joint_ids): """Computes an object's Jacobian for a subset of joints.""" jacobian = _compute_object_jacobian(physics, object_name, object_type) dof_ids = [physics.model.jnt_dofadr[joint_id] for joint_id in joint_ids] return jacobian[:, dof_ids] # This is necessary because MuJoCo's object_velocity is computationally # different than J*qvel. Since the QP uses the Jacobian, the solution should be # compared to J*qvel to ensure it matches the expected tolerances. 
def _compute_object_velocity_with_jacobian(physics, object_name, object_type): """Computes an object velocity by evaluating J*qvel.""" qvel = np.array(physics.data.qvel, dtype=physics.data.qpos.dtype) return _compute_object_jacobian(physics, object_name, object_type).dot(qvel) class Cartesian6DToJointVelocityMapperTest(absltest.TestCase): def test_parameters_attributes(self): params = cartesian_6d_to_joint_velocity_mapper.Parameters() attributes = sorted( [attr for attr in dir(params) if not attr.startswith("_")]) expected_attributes = sorted([ "model", "joint_ids", "object_type", "object_name", "integration_timestep", "enable_joint_position_limits", "joint_position_limit_velocity_scale", "minimum_distance_from_joint_position_limit", "enable_joint_velocity_limits", "joint_velocity_magnitude_limits", "enable_joint_acceleration_limits", "remove_joint_acceleration_limits_if_in_conflict", "joint_acceleration_magnitude_limits", "enable_collision_avoidance", "collision_avoidance_normal_velocity_scale", "minimum_distance_from_collisions", "collision_detection_distance", "collision_pairs", "cartesian_velocity_task_weighting_matrix", "check_solution_validity", "max_cartesian_velocity_control_iterations", "regularization_weight", "solution_tolerance", "enable_nullspace_control", "return_error_on_nullspace_failure", "clamp_nullspace_bias_to_feasible_space", "max_nullspace_control_iterations", "nullspace_projection_slack", "use_adaptive_step_size", "log_nullspace_failure_warnings", "log_collision_warnings", ]) self.assertEqual(expected_attributes, attributes) @parameterized.named_parameters( ("use_adaptive_step_size", True), ("do_not_use_adaptive_step_size", False), ) class Cartesian6DToJointVelocityMapperParameterizedTest(absltest.TestCase): def test_mapper_attributes(self, use_adaptive_step_size): physics = humanoid.Physics.from_xml_string(*humanoid.get_model_and_assets()) params = cartesian_6d_to_joint_velocity_mapper.Parameters() params.model = physics.model params.joint_ids = [19, 20, 21] params.object_type = _MjGeom params.object_name = "left_hand" params.integration_timestep = 1.0 params.enable_nullspace_control = True params.use_adaptive_step_size = use_adaptive_step_size mapper = cartesian_6d_to_joint_velocity_mapper.Mapper(params) self.assertTrue(hasattr(mapper, "compute_joint_velocities")) def test_solution_without_nullspace_realizes_target(self, use_adaptive_step_size): physics = humanoid.Physics.from_xml_string(*humanoid.get_model_and_assets()) params = cartesian_6d_to_joint_velocity_mapper.Parameters() params.model = physics.model params.joint_ids = [16, 17, 18] params.object_type = _MjGeom params.object_name = "right_hand" params.integration_timestep = 1.0 params.solution_tolerance = 1.0e-15 params.max_cartesian_velocity_control_iterations = 10000 params.regularization_weight = 0.0 params.use_adaptive_step_size = use_adaptive_step_size mapper = cartesian_6d_to_joint_velocity_mapper.Mapper(params) # Set target to a realizable velocity and solve. target_velocity = [ 0.0450566, 0.0199436, 0.0199436, 0, 0.0071797, -0.0071797 ] solution = mapper.compute_joint_velocities(physics.data, target_velocity, None) _set_joint_velocities(physics, params.joint_ids, solution) # Realized Cartesian velocity must be within the specified tolerance of the # target velocity. realized_velocity = _compute_object_velocity_with_jacobian( physics, params.object_name, params.object_type) # Ensure the realized Cartesian velocity is within tolerance of the target # velocity. 
# Note that for an unconstrained stack-of-tasks problem with one task that # is realizable, the `absolute_tolerance` represents how far from optimality # the solution is, measured by: # e_dual = W ||J^T J qvel - (xdot_target^T J)^T|| # e_dual = W ||J^T xdot_target - J^T xdot_realized|| jacobian = _compute_object_jacobian_for_joints(physics, params.object_name, params.object_type, params.joint_ids) e_dual = np.linalg.norm( np.transpose(jacobian).dot(realized_velocity - np.array(target_velocity)), ord=np.inf) self.assertLess(e_dual, params.solution_tolerance) def test_solution_with_nullspace_realizes_target(self, use_adaptive_step_size): physics = humanoid.Physics.from_xml_string(*humanoid.get_model_and_assets()) target_velocity = [1.0, 0.0, 0.0, 0.0, 0.0, 0.0] nullspace_bias = [-1.0, 0.0, 1.0, -1.0, 0.0, 1.0, -1.0, 0.0, 1.0] # Shared parameters for optimization problem with and without nullspace # hierarchy. params = cartesian_6d_to_joint_velocity_mapper.Parameters() params.model = physics.model params.joint_ids = [1, 2, 3, 4, 5, 6, 7, 8, 9] params.object_type = _MjBody params.object_name = "right_foot" params.integration_timestep = 1.0 params.solution_tolerance = 1.0e-6 params.max_cartesian_velocity_control_iterations = 10000 params.regularization_weight = 1.0e-3 params.use_adaptive_step_size = use_adaptive_step_size # Compute solution without nullspace. no_nullspace_mapper = cartesian_6d_to_joint_velocity_mapper.Mapper(params) no_nullspace_solution = no_nullspace_mapper.compute_joint_velocities( physics.data, target_velocity) _set_joint_velocities(physics, params.joint_ids, no_nullspace_solution) no_nullspace_cartesian_vel = _compute_object_velocity_with_jacobian( physics, params.object_name, params.object_type) # Reuse the same parameters but add nullspace projection, and compute the # solution to the optimization problem with nullspace. params.enable_nullspace_control = True params.return_error_on_nullspace_failure = True params.nullspace_projection_slack = 1.0e-7 nullspace_mapper = cartesian_6d_to_joint_velocity_mapper.Mapper(params) nullspace_solution = nullspace_mapper.compute_joint_velocities( physics.data, target_velocity, nullspace_bias) _set_joint_velocities(physics, params.joint_ids, nullspace_solution) nullspace_cartesian_vel = _compute_object_velocity_with_jacobian( physics, params.object_name, params.object_type) # The nullspace solution should be different than the no-nullspace solution. # For this problem, we computed the Euclidean distance of both solutions to # be around ~0.85; this is expected since there's 10 DoF and only 6 DoF are # being used for Cartesian control. Test that the solutions differ by at # least 0.8, and that the nullspace solution is closer to the nullspace # target. solution_diff = np.linalg.norm( np.array(nullspace_solution) - np.array(no_nullspace_solution)) no_nullspace_sol_bias_error = np.linalg.norm( np.array(nullspace_bias) - np.array(no_nullspace_solution)) nullspace_sol_bias_error = np.linalg.norm( np.array(nullspace_bias) - np.array(nullspace_solution)) self.assertGreater(solution_diff, 0.8) self.assertLess(nullspace_sol_bias_error, no_nullspace_sol_bias_error) # The nullspace solution should respect the nullspace inequality constraint: # xdot_first - slack <= xdot <= xdot_first + slack, # with a tolerance of kSolution tolerance (and thus increasing the slack). # This means that it must be within solution_tolerance + slack from the # Cartesian velocity achieved without nullspace. 
self.assertLess( np.linalg.norm( np.array(nullspace_cartesian_vel) - np.array(no_nullspace_cartesian_vel), ord=np.inf), params.solution_tolerance + params.nullspace_projection_slack) def test_solution_with_all_constraints_and_nullspace_not_in_collision( self, use_adaptive_step_size): physics = humanoid.Physics.from_xml_string(*humanoid.get_model_and_assets()) # Increase collision detection margin for all geoms. for i in range(0, physics.model.ngeom): physics.model.geom_margin[i] = 0.01 # Place the humanoid in a position where the left hand can collide with the # floor if it moves down. physics.data.qpos[2] = 0.3 mjlib.mj_fwdPosition(physics.model.ptr, physics.data.ptr) # Set params with collision avoidance and full list of constraints. params = cartesian_6d_to_joint_velocity_mapper.Parameters() params.model = physics.model params.joint_ids = [19, 20, 21] params.object_type = _MjSite params.object_name = "left_hand" params.integration_timestep = 0.005 # 5ms params.enable_joint_position_limits = True params.joint_position_limit_velocity_scale = 0.95 params.minimum_distance_from_joint_position_limit = 0.01 # ~0.5deg. params.enable_joint_velocity_limits = True params.joint_velocity_magnitude_limits = [0.5, 0.5, 0.5] params.enable_joint_acceleration_limits = True params.remove_joint_acceleration_limits_if_in_conflict = True params.joint_acceleration_magnitude_limits = [1.0, 1.0, 1.0] params.enable_collision_avoidance = True params.collision_avoidance_normal_velocity_scale = 0.01 params.minimum_distance_from_collisions = 0.005 params.collision_detection_distance = 10.0 params.collision_pairs = [ (["left_upper_arm", "left_lower_arm", "left_hand"], ["floor"]) ] params.check_solution_validity = True params.solution_tolerance = 1e-6 params.max_cartesian_velocity_control_iterations = 10000 params.regularization_weight = 1e-3 params.enable_nullspace_control = True params.return_error_on_nullspace_failure = False params.nullspace_projection_slack = 1e-7 params.use_adaptive_step_size = use_adaptive_step_size mapper = cartesian_6d_to_joint_velocity_mapper.Mapper(params) # Approximate the distance of the left hand and floor geoms by the # difference in Z components minus the radius. lhand_radius = physics.named.model.geom_size["left_hand"][0] lhand_floor_dist = ( physics.named.data.geom_xpos["left_hand"][2] - physics.named.data.geom_xpos["floor"][2] - lhand_radius) nullspace_bias = [-1.0, 0.0, 1.0] target_velocity = [0.0, 0.0, -1.0, 0.0, 0.0, 0.0] # Compute velocities and integrate, for 5000 steps. for _ in range(0, 5000): # Compute joint velocities. solution = mapper.compute_joint_velocities(physics.data, target_velocity, nullspace_bias) # Set joint velocities, integrate, and run MuJoCo kinematics. _set_joint_velocities(physics, params.joint_ids, solution) mjlib.mj_integratePos(physics.model.ptr, physics.data.qpos, physics.data.qvel, params.integration_timestep.total_seconds()) mjlib.mj_fwdPosition(physics.model.ptr, physics.data.ptr) # Compute the new distance between the floor and the left hand. # We expect the left hand site to get closer to the floor and settle at # around <0.006. new_lhand_floor_dist = ( physics.named.data.geom_xpos["left_hand"][2] - physics.named.data.geom_xpos["floor"][2] - lhand_radius) self.assertLess(new_lhand_floor_dist, max(0.006, lhand_floor_dist)) lhand_floor_dist = new_lhand_floor_dist # Ensure there is no contact between any left arm geom and the floor. 
for contact in physics.data.contact: geom1_name = physics.model.id2name(contact.geom1, _MjGeom) geom2_name = physics.model.id2name(contact.geom2, _MjGeom) if contact.dist < params.minimum_distance_from_collisions: is_any_left_hand = ( geom1_name == "left_hand" or geom2_name == "left_hand") is_any_left_upperarm = ( geom1_name == "left_upper_arm" or geom2_name == "left_upper_arm") is_any_left_lowerarm = ( geom1_name == "left_lower_arm" or geom2_name == "left_lower_arm") is_any_left_arm = ( is_any_left_hand or is_any_left_upperarm or is_any_left_lowerarm) is_any_floor = (geom1_name == "floor" or geom2_name == "floor") self.assertFalse(is_any_left_arm and is_any_floor) def test_invalid_parameters_throws(self, use_adaptive_step_size): physics = humanoid.Physics.from_xml_string(*humanoid.get_model_and_assets()) params = cartesian_6d_to_joint_velocity_mapper.Parameters() params.model = physics.model params.joint_ids = [19, 20, 21] params.object_type = _MjGeom params.object_name = "invalid_geom_name" params.integration_timestep = 1.0 params.enable_nullspace_control = True params.use_adaptive_step_size = use_adaptive_step_size with self.assertRaises(Exception): _ = cartesian_6d_to_joint_velocity_mapper.Mapper(params) if __name__ == "__main__": absltest.main()
dm_robotics-main
cpp/controllers_py/cartesian_6d_to_joint_velocity_mapper_test.py
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Install script for setuptools.""" import importlib.util import os from setuptools import Command from setuptools import find_packages from setuptools import setup from setuptools.command.build_ext import build_ext from setuptools.command.build_py import build_py _ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) _GOOGLE_COMMON_PROTOS_ROOT_DIR = os.path.join( _ROOT_DIR, 'third_party/api-common-protos' ) # Tuple of proto message definitions to build Python bindings for. Paths must # be relative to root directory. _DM_ENV_RPC_PROTOS = ( 'dm_env_rpc/v1/dm_env_rpc.proto', 'dm_env_rpc/v1/extensions/properties.proto', ) class _GenerateProtoFiles(Command): """Command to generate protobuf bindings for dm_env_rpc.proto.""" descriptions = 'Generates Python protobuf bindings for dm_env_rpc.proto.' user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): # Import grpc_tools and importlib_resources here, after setuptools has # installed setup_requires dependencies. from grpc_tools import protoc # pylint: disable=g-import-not-at-top import importlib_resources # pylint: disable=g-import-not-at-top if not os.path.exists( os.path.join(_GOOGLE_COMMON_PROTOS_ROOT_DIR, 'google/rpc/status.proto') ): raise RuntimeError( 'Cannot find third_party/api-common-protos. ' 'Please run `git submodule init && git submodule update` to install ' 'the api-common-protos submodule.' 
) with importlib_resources.as_file( importlib_resources.files('grpc_tools') / '_proto' ) as grpc_protos_include: for proto_path in _DM_ENV_RPC_PROTOS: proto_args = [ 'grpc_tools.protoc', '--proto_path={}'.format(_GOOGLE_COMMON_PROTOS_ROOT_DIR), '--proto_path={}'.format(grpc_protos_include), '--proto_path={}'.format(_ROOT_DIR), '--python_out={}'.format(_ROOT_DIR), '--grpc_python_out={}'.format(_ROOT_DIR), os.path.join(_ROOT_DIR, proto_path), ] if protoc.main(proto_args) != 0: raise RuntimeError('ERROR: {}'.format(proto_args)) class _BuildExt(build_ext): """Generate protobuf bindings in build_ext stage.""" def run(self): self.run_command('generate_protos') build_ext.run(self) class _BuildPy(build_py): """Generate protobuf bindings in build_py stage.""" def run(self): self.run_command('generate_protos') build_py.run(self) def _load_version(): """Load dm_env_rpc version.""" spec = importlib.util.spec_from_file_location( '_version', 'dm_env_rpc/_version.py' ) version_module = importlib.util.module_from_spec(spec) spec.loader.exec_module(version_module) return version_module.__version__ setup( name='dm-env-rpc', version=_load_version(), description='A networking protocol for agent-environment communication.', author='DeepMind', license='Apache License, Version 2.0', keywords='reinforcement-learning python machine learning', packages=find_packages(exclude=['examples']), install_requires=[ 'dm-env>=1.2', 'immutabledict', 'googleapis-common-protos', 'grpcio', 'numpy', 'protobuf>=3.8', 'typing_extensions;python_version<"3.8"', ], tests_require=[ 'absl-py', 'asynctest', 'nose', ], python_requires='>=3.7', setup_requires=['grpcio-tools', 'importlib_resources'], extras_require={ 'examples': ['pygame'], }, cmdclass={ 'build_ext': _BuildExt, 'build_py': _BuildPy, 'generate_protos': _GenerateProtoFiles, }, test_suite='nose.collector', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Operating System :: POSIX :: Linux', 'Operating System :: Microsoft :: Windows', 'Operating System :: MacOS :: MacOS X', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Topic :: Scientific/Engineering :: Artificial Intelligence', ], )
dm_env_rpc-master
setup.py
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Package version for dm_env_rpc. Kept in separate file so it can be used during installation. """ __version__ = '1.1.5' # https://www.python.org/dev/peps/pep-0440/
dm_env_rpc-master
dm_env_rpc/_version.py
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """A networking protocol for agent-environment communication.""" from dm_env_rpc._version import __version__
dm_env_rpc-master
dm_env_rpc/__init__.py
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Utilities for interfacing dm_env and dm_env_rpc.""" from typing import Dict from dm_env import specs import numpy as np from dm_env_rpc.v1 import dm_env_rpc_pb2 from dm_env_rpc.v1 import spec_manager as dm_env_rpc_spec_manager from dm_env_rpc.v1 import tensor_spec_utils from dm_env_rpc.v1 import tensor_utils def tensor_spec_to_dm_env_spec( tensor_spec: dm_env_rpc_pb2.TensorSpec) -> specs.Array: """Returns a dm_env spec given a dm_env_rpc TensorSpec. Args: tensor_spec: A dm_env_rpc TensorSpec protobuf. Returns: Either a DiscreteArray, BoundedArray, StringArray or Array, depending on the content of the TensorSpec. """ np_type = tensor_utils.data_type_to_np_type(tensor_spec.dtype) if tensor_spec.HasField('min') or tensor_spec.HasField('max'): bounds = tensor_spec_utils.bounds(tensor_spec) if (not tensor_spec.shape and np.issubdtype(np_type, np.integer) and bounds.min == 0 and tensor_spec.HasField('max')): return specs.DiscreteArray( num_values=bounds.max + 1, dtype=np_type, name=tensor_spec.name) else: return specs.BoundedArray( shape=tensor_spec.shape, dtype=np_type, name=tensor_spec.name, minimum=bounds.min, maximum=bounds.max) else: if tensor_spec.dtype == dm_env_rpc_pb2.DataType.STRING: return specs.StringArray(shape=tensor_spec.shape, name=tensor_spec.name) else: return specs.Array( shape=tensor_spec.shape, dtype=np_type, name=tensor_spec.name) def dm_env_spec_to_tensor_spec(spec: specs.Array) -> dm_env_rpc_pb2.TensorSpec: """Returns a dm_env_rpc TensorSpec from the provided dm_env spec type.""" dtype = np.str_ if isinstance(spec, specs.StringArray) else spec.dtype tensor_spec = dm_env_rpc_pb2.TensorSpec( name=spec.name, shape=spec.shape, dtype=tensor_utils.np_type_to_data_type(dtype)) if isinstance(spec, specs.DiscreteArray): tensor_spec_utils.set_bounds( tensor_spec, minimum=0, maximum=spec.num_values - 1) elif isinstance(spec, specs.BoundedArray): tensor_spec_utils.set_bounds(tensor_spec, spec.minimum, spec.maximum) return tensor_spec def dm_env_spec( spec_manager: dm_env_rpc_spec_manager.SpecManager ) -> Dict[str, specs.Array]: """Returns a dm_env spec for the given `spec_manager`. Args: spec_manager: An instance of SpecManager. Returns: A dict mapping names to either a dm_env Array, BoundedArray, DiscreteArray or StringArray spec for each named TensorSpec in `spec_manager`. """ return { name: tensor_spec_to_dm_env_spec(spec_manager.name_to_spec(name)) for name in spec_manager.names() }
dm_env_rpc-master
dm_env_rpc/v1/dm_env_utils.py
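A short usage sketch for `tensor_spec_to_dm_env_spec` and `dm_env_spec_to_tensor_spec` above; the spec names and shapes are arbitrary.

import numpy as np
from dm_env import specs
from dm_env_rpc.v1 import dm_env_rpc_pb2
from dm_env_rpc.v1 import dm_env_utils

# dm_env_rpc TensorSpec -> dm_env spec.
tensor_spec = dm_env_rpc_pb2.TensorSpec(
    name='reward', shape=[], dtype=dm_env_rpc_pb2.DataType.FLOAT)
print(dm_env_utils.tensor_spec_to_dm_env_spec(tensor_spec))
# -> a scalar specs.Array with dtype float32 named 'reward'.

# dm_env spec -> dm_env_rpc TensorSpec; the bounds become min/max fields.
bounded = specs.BoundedArray(
    shape=(3,), dtype=np.int32, minimum=0, maximum=10, name='action')
print(dm_env_utils.dm_env_spec_to_tensor_spec(bounded))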
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Helper functions used to process dm_env_rpc request / response messages. """ from typing import NamedTuple, Union import immutabledict from google.protobuf import any_pb2 from dm_env_rpc.v1 import dm_env_rpc_pb2 from dm_env_rpc.v1 import error _MESSAGE_TYPE_TO_FIELD = immutabledict.immutabledict({ field.message_type.name: field.name for field in dm_env_rpc_pb2.EnvironmentRequest.DESCRIPTOR.fields }) DmEnvRpcRequest = Union[dm_env_rpc_pb2.CreateWorldRequest, dm_env_rpc_pb2.JoinWorldRequest, dm_env_rpc_pb2.StepRequest, dm_env_rpc_pb2.ResetRequest, dm_env_rpc_pb2.ResetWorldRequest, dm_env_rpc_pb2.LeaveWorldRequest, dm_env_rpc_pb2.DestroyWorldRequest, any_pb2.Any, # Extension message. ] DmEnvRpcResponse = Union[dm_env_rpc_pb2.CreateWorldResponse, dm_env_rpc_pb2.JoinWorldResponse, dm_env_rpc_pb2.StepResponse, dm_env_rpc_pb2.ResetResponse, dm_env_rpc_pb2.ResetWorldResponse, dm_env_rpc_pb2.LeaveWorldResponse, dm_env_rpc_pb2.DestroyWorldResponse, any_pb2.Any, # Extension message. ] class EnvironmentRequestAndFieldName(NamedTuple): """EnvironmentRequest and field name used when packing.""" environment_request: dm_env_rpc_pb2.EnvironmentRequest field_name: str def pack_environment_request( request: DmEnvRpcRequest) -> EnvironmentRequestAndFieldName: """Constructs an EnvironmentRequest containing a request message. Args: request: An instance of a dm_env_rpc Request type, such as CreateWorldRequest. Returns: A tuple of (environment_request, field_name) where: environment_request: dm_env_rpc.v1.EnvironmentRequest containing the input request message. field_name: Name of the environment request field holding the input request message. """ field_name = _MESSAGE_TYPE_TO_FIELD[type(request).__name__] environment_request = dm_env_rpc_pb2.EnvironmentRequest() getattr(environment_request, field_name).CopyFrom(request) return EnvironmentRequestAndFieldName(environment_request, field_name) def unpack_environment_response( environment_response: dm_env_rpc_pb2.EnvironmentResponse, expected_field_name: str) -> DmEnvRpcResponse: """Extracts the response message contained within an EnvironmentResponse. Args: environment_response: An instance of dm_env_rpc.v1.EnvironmentResponse. expected_field_name: Name of the environment response field expected to be holding the dm_env_rpc response message. Returns: dm_env_rpc response message wrapped in the input environment response. Raises: DmEnvRpcError: The dm_env_rpc EnvironmentResponse contains an error. ValueError: The dm_env_rpc response message contained in the EnvironmentResponse is held in a different field from the one expected. 
""" response_field_name = environment_response.WhichOneof('payload') if response_field_name == 'error': raise error.DmEnvRpcError(environment_response.error) elif response_field_name == expected_field_name: return getattr(environment_response, expected_field_name) else: raise ValueError('Unexpected response message! expected: ' f'{expected_field_name}, actual: {response_field_name}')
dm_env_rpc-master
dm_env_rpc/v1/message_utils.py
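A brief sketch of the request/response round trip handled by the helpers above; the server side that would actually produce the EnvironmentResponse is faked here.

from dm_env_rpc.v1 import dm_env_rpc_pb2
from dm_env_rpc.v1 import message_utils
from dm_env_rpc.v1 import tensor_utils

request = dm_env_rpc_pb2.CreateWorldRequest(
    settings={'seed': tensor_utils.pack_tensor(123)})

# Wrap the typed request in an EnvironmentRequest and remember which oneof
# field it was packed into.
environment_request, field_name = message_utils.pack_environment_request(
    request)
assert field_name == 'create_world'

# Pretend this came back from the server. Unpacking checks that the payload
# matches the expected field and raises DmEnvRpcError if the response holds
# an error instead.
environment_response = dm_env_rpc_pb2.EnvironmentResponse(
    create_world=dm_env_rpc_pb2.CreateWorldResponse(world_name='my_world'))
response = message_utils.unpack_environment_response(
    environment_response, field_name)
print(response.world_name)  # my_world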
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Manager class to manage the dm_env_rpc UID system.""" from typing import Any, Collection, Mapping, MutableMapping import numpy as np from dm_env_rpc.v1 import dm_env_rpc_pb2 from dm_env_rpc.v1 import tensor_utils def _assert_shapes_match(tensor: dm_env_rpc_pb2.Tensor, dm_env_rpc_spec: dm_env_rpc_pb2.TensorSpec): """Raises ValueError if shape of tensor and spec don't match.""" tensor_shape = np.asarray(tensor.shape) spec_shape = np.asarray(dm_env_rpc_spec.shape) # Check all elements are equal, or the spec element is -1 (variable length). if tensor_shape.size != spec_shape.size or not np.all( (tensor_shape == spec_shape) | (spec_shape < 0)): raise ValueError( 'Received dm_env_rpc tensor {} with shape {} but spec has shape {}.' .format(dm_env_rpc_spec.name, tensor_shape, spec_shape)) class SpecManager(object): """Manages transitions between Python dicts and dm_env_rpc UIDs. To make sending and receiving actions and observations easier for dm_env_rpc, this helps manage the transition between UID-keyed dicts mapping to dm_env_rpc tensors and string-keyed dicts mapping to scalars, lists, or NumPy arrays. """ def __init__(self, specs: Mapping[int, dm_env_rpc_pb2.TensorSpec]): """Builds the SpecManager from the given dm_env_rpc specs. Args: specs: A dict mapping UIDs to dm_env_rpc TensorSpecs, similar to what is stored in `actions` and `observations` in ActionObservationSpecs. """ for spec in specs.values(): if np.count_nonzero(np.asarray(spec.shape) < 0) > 1: raise ValueError( f'"{spec.name}" shape has > 1 variable length dimension. 
' f'Spec:\n{spec}') self._name_to_uid = {spec.name: uid for uid, spec in specs.items()} self._uid_to_name = {uid: spec.name for uid, spec in specs.items()} if len(self._name_to_uid) != len(self._uid_to_name): raise ValueError('There are duplicate names in the tensor specs.') self._specs_by_uid = specs self._specs_by_name = {spec.name: spec for spec in specs.values()} @property def specs_by_uid(self) -> Mapping[int, dm_env_rpc_pb2.TensorSpec]: return self._specs_by_uid @property def specs_by_name(self) -> Mapping[str, dm_env_rpc_pb2.TensorSpec]: return self._specs_by_name def name_to_uid(self, name: str) -> int: """Returns the UID for the given name.""" return self._name_to_uid[name] def uid_to_name(self, uid: int) -> str: """Returns the name for the given UID.""" return self._uid_to_name[uid] def name_to_spec(self, name: str) -> dm_env_rpc_pb2.TensorSpec: """Returns the dm_env_rpc TensorSpec named `name`.""" return self._specs_by_name[name] def uid_to_spec(self, uid: int) -> dm_env_rpc_pb2.TensorSpec: """Returns the dm_env_rpc TensorSpec for the given UID.""" return self._specs_by_uid[uid] def names(self) -> Collection[str]: """Returns the spec names in no particular order.""" return self._name_to_uid.keys() def uids(self) -> Collection[int]: """Returns the spec UIDs in no particular order.""" return self._uid_to_name.keys() def unpack( self, dm_env_rpc_tensors: Mapping[int, dm_env_rpc_pb2.Tensor] ) -> MutableMapping[str, Any]: """Unpacks a dm_env_rpc uid-to-tensor map to a name-keyed Python dict. Args: dm_env_rpc_tensors: A dict mapping UIDs to dm_env_rpc tensor protos. Returns: A dict mapping names to scalars and arrays. """ unpacked = {} for uid, tensor in dm_env_rpc_tensors.items(): name = self._uid_to_name[uid] dm_env_rpc_spec = self.name_to_spec(name) _assert_shapes_match(tensor, dm_env_rpc_spec) tensor_dtype = tensor_utils.get_tensor_type(tensor) spec_dtype = tensor_utils.data_type_to_np_type(dm_env_rpc_spec.dtype) if tensor_dtype != spec_dtype: raise ValueError( 'Received dm_env_rpc tensor {} with dtype {} but spec has dtype {}.' .format(name, tensor_dtype, spec_dtype)) tensor_unpacked = tensor_utils.unpack_tensor(tensor) unpacked[name] = tensor_unpacked return unpacked def pack( self, tensors: Mapping[str, Any]) -> MutableMapping[int, dm_env_rpc_pb2.Tensor]: """Packs a name-keyed Python dict to a dm_env_rpc uid-to-tensor map. Args: tensors: A dict mapping string names to scalars and arrays. Returns: A dict mapping UIDs to dm_env_rpc tensor protos. """ packed = {} for name, value in tensors.items(): dm_env_rpc_spec = self.name_to_spec(name) tensor = tensor_utils.pack_tensor(value, dtype=dm_env_rpc_spec.dtype) _assert_shapes_match(tensor, dm_env_rpc_spec) packed[self.name_to_uid(name)] = tensor return packed
dm_env_rpc-master
dm_env_rpc/v1/spec_manager.py
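A small usage sketch for `SpecManager` above, with a made-up two-entry spec dict mapping UIDs to TensorSpecs.

import numpy as np
from dm_env_rpc.v1 import dm_env_rpc_pb2
from dm_env_rpc.v1 import spec_manager

specs_by_uid = {
    1: dm_env_rpc_pb2.TensorSpec(
        name='position', shape=[3], dtype=dm_env_rpc_pb2.DataType.FLOAT),
    2: dm_env_rpc_pb2.TensorSpec(
        name='score', shape=[], dtype=dm_env_rpc_pb2.DataType.INT32),
}
manager = spec_manager.SpecManager(specs_by_uid)

# Name-keyed Python values -> UID-keyed dm_env_rpc tensors...
packed = manager.pack({'position': np.zeros(3, np.float32), 'score': 7})

# ...and back to a name-keyed dict of scalars and arrays.
print(manager.unpack(packed))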
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Micro-benchmark for tensor_utils.pack_tensor.""" import abc import timeit from absl import app from absl import flags import numpy as np from dm_env_rpc.v1 import tensor_utils flags.DEFINE_integer('repeats', 10000, 'Number of times each benchmark will run.') FLAGS = flags.FLAGS class _AbstractBenchmark(metaclass=abc.ABCMeta): """Base class for benchmarks using timeit.""" def run(self): time = timeit.timeit(self.statement, setup=self.setup, number=FLAGS.repeats) print(f'{self.name} -- overall: {time:0.2f} s, ' f'per call: {time/FLAGS.repeats:0.1e} s') def setup(self): pass @abc.abstractmethod def statement(self): pass @abc.abstractproperty def name(self): pass class _PackBenchmark(_AbstractBenchmark): """Benchmark for packing a numpy array to a Tensor proto.""" def __init__(self, dtype, shape): self._name = f'pack {np.dtype(dtype).name}' self._dtype = dtype self._shape = shape @property def name(self): return self._name def setup(self): # Use non-zero values in case there's something special about zero arrays. self._unpacked = np.arange( np.prod(self._shape), dtype=self._dtype).reshape(self._shape) def statement(self): self._unpacked.flat[0] += 1 # prevent caching of the result tensor_utils.pack_tensor(self._unpacked, self._dtype) class _UnpackBenchmark(_AbstractBenchmark): """Benchmark for unpacking a Tensor proto to a numpy array.""" def __init__(self, dtype, shape): self._name = f'unpack {np.dtype(dtype).name}' self._shape = shape self._dtype = dtype @property def name(self): return self._name def setup(self): # Use non-zero values in case there's something special about zero arrays. tensor = np.arange( np.prod(self._shape), dtype=self._dtype).reshape(self._shape) self._packed = tensor_utils.pack_tensor(tensor, self._dtype) def statement(self): tensor_utils.unpack_tensor(self._packed) def main(argv): if len(argv) > 1: raise app.UsageError('Too many command-line arguments.') # Pick `shape` such that number of bytes is consistent between benchmarks. benchmarks = ( _PackBenchmark(dtype=np.uint8, shape=(128, 128, 3)), _PackBenchmark(dtype=np.int32, shape=(64, 64, 3)), _PackBenchmark(dtype=np.int64, shape=(32, 64, 3)), _PackBenchmark(dtype=np.uint32, shape=(64, 64, 3)), _PackBenchmark(dtype=np.uint64, shape=(32, 64, 3)), _PackBenchmark(dtype=np.float32, shape=(64, 64, 3)), _PackBenchmark(dtype=np.float64, shape=(32, 64, 3)), _UnpackBenchmark(dtype=np.uint8, shape=(128, 128, 3)), _UnpackBenchmark(dtype=np.int32, shape=(64, 64, 3)), _UnpackBenchmark(dtype=np.int64, shape=(32, 64, 3)), _UnpackBenchmark(dtype=np.uint32, shape=(64, 64, 3)), _UnpackBenchmark(dtype=np.uint64, shape=(32, 64, 3)), _UnpackBenchmark(dtype=np.float32, shape=(64, 64, 3)), _UnpackBenchmark(dtype=np.float64, shape=(32, 64, 3)), ) for benchmark in benchmarks: benchmark.run() if __name__ == '__main__': app.run(main)
dm_env_rpc-master
dm_env_rpc/v1/tensor_utils_benchmark.py
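Following the pattern above, a new benchmark is just another `_AbstractBenchmark` subclass. This hypothetical round-trip variant, intended to live inside the benchmark module above (it reuses its `np` and `tensor_utils` imports), times a pack immediately followed by an unpack.

class _RoundTripBenchmark(_AbstractBenchmark):
  """Benchmark for packing and then unpacking a numpy array."""

  def __init__(self, dtype, shape):
    self._name = f'round-trip {np.dtype(dtype).name}'
    self._dtype = dtype
    self._shape = shape

  @property
  def name(self):
    return self._name

  def setup(self):
    # Use non-zero values, mirroring the other benchmarks.
    self._unpacked = np.arange(
        np.prod(self._shape), dtype=self._dtype).reshape(self._shape)

  def statement(self):
    packed = tensor_utils.pack_tensor(self._unpacked, self._dtype)
    tensor_utils.unpack_tensor(packed)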
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for dm_env_rpc/dm_env utilities.""" import typing from absl.testing import absltest from absl.testing import parameterized from dm_env import specs import numpy as np from google.protobuf import text_format from dm_env_rpc.v1 import dm_env_rpc_pb2 from dm_env_rpc.v1 import dm_env_utils from dm_env_rpc.v1 import spec_manager class TensorSpecToDmEnvSpecTests(absltest.TestCase): def test_no_bounds_gives_arrayspec(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32 tensor_spec.shape[:] = [3] tensor_spec.name = 'foo' actual = dm_env_utils.tensor_spec_to_dm_env_spec(tensor_spec) self.assertEqual(specs.Array(shape=[3], dtype=np.uint32), actual) self.assertEqual('foo', actual.name) def test_string_give_string_array(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.STRING tensor_spec.shape[:] = [1, 2, 3] tensor_spec.name = 'string_spec' actual = dm_env_utils.tensor_spec_to_dm_env_spec(tensor_spec) self.assertEqual(specs.StringArray(shape=[1, 2, 3]), actual) self.assertEqual('string_spec', actual.name) def test_scalar_with_0_n_bounds_gives_discrete_array(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32 tensor_spec.name = 'foo' max_value = 9 tensor_spec.min.uint32s.array[:] = [0] tensor_spec.max.uint32s.array[:] = [max_value] actual = typing.cast(specs.DiscreteArray, dm_env_utils.tensor_spec_to_dm_env_spec(tensor_spec)) expected = specs.DiscreteArray( num_values=max_value + 1, dtype=np.uint32, name='foo') self.assertEqual(expected, actual) self.assertEqual(0, actual.minimum) self.assertEqual(max_value, actual.maximum) self.assertEqual('foo', actual.name) def test_scalar_with_1_n_bounds_gives_bounded_array(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32 tensor_spec.name = 'foo' tensor_spec.min.uint32s.array[:] = [1] tensor_spec.max.uint32s.array[:] = [10] actual = dm_env_utils.tensor_spec_to_dm_env_spec(tensor_spec) expected = specs.BoundedArray( shape=(), dtype=np.uint32, minimum=1, maximum=10, name='foo') self.assertEqual(expected, actual) self.assertEqual('foo', actual.name) def test_scalar_with_0_min_and_no_max_bounds_gives_bounded_array(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32 tensor_spec.name = 'foo' tensor_spec.min.uint32s.array[:] = [0] actual = dm_env_utils.tensor_spec_to_dm_env_spec(tensor_spec) expected = specs.BoundedArray( shape=(), dtype=np.uint32, minimum=0, maximum=2**32 - 1, name='foo') self.assertEqual(expected, actual) self.assertEqual('foo', actual.name) def test_only_min_bounds(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32 tensor_spec.shape[:] = [3] tensor_spec.name = 'foo' 
tensor_spec.min.uint32s.array[:] = [1] actual = dm_env_utils.tensor_spec_to_dm_env_spec(tensor_spec) expected = specs.BoundedArray( shape=[3], dtype=np.uint32, minimum=1, maximum=2**32 - 1) self.assertEqual(expected, actual) self.assertEqual('foo', actual.name) def test_only_max_bounds(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32 tensor_spec.shape[:] = [3] tensor_spec.name = 'foo' tensor_spec.max.uint32s.array[:] = [10] actual = dm_env_utils.tensor_spec_to_dm_env_spec(tensor_spec) expected = specs.BoundedArray( shape=[3], dtype=np.uint32, minimum=0, maximum=10) self.assertEqual(expected, actual) self.assertEqual('foo', actual.name) def test_both_bounds(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32 tensor_spec.shape[:] = [3] tensor_spec.name = 'foo' tensor_spec.min.uint32s.array[:] = [1] tensor_spec.max.uint32s.array[:] = [10] actual = dm_env_utils.tensor_spec_to_dm_env_spec(tensor_spec) expected = specs.BoundedArray( shape=[3], dtype=np.uint32, minimum=1, maximum=10) self.assertEqual(expected, actual) self.assertEqual('foo', actual.name) def test_bounds_oneof_not_set_gives_dtype_bounds(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32 tensor_spec.shape[:] = [3] tensor_spec.name = 'foo' # Just to force the message to get created. tensor_spec.min.floats.array[:] = [3.0] tensor_spec.min.ClearField('floats') actual = dm_env_utils.tensor_spec_to_dm_env_spec(tensor_spec) expected = specs.BoundedArray( shape=[3], dtype=np.uint32, minimum=0, maximum=2**32 - 1) self.assertEqual(expected, actual) self.assertEqual('foo', actual.name) def test_bounds_wrong_type_gives_error(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32 tensor_spec.shape[:] = [3] tensor_spec.name = 'foo' tensor_spec.min.floats.array[:] = [1.9] with self.assertRaisesRegex(ValueError, 'uint32'): dm_env_utils.tensor_spec_to_dm_env_spec(tensor_spec) def test_bounds_on_string_gives_error(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.STRING tensor_spec.shape[:] = [2] tensor_spec.name = 'named' tensor_spec.min.floats.array[:] = [1.9] tensor_spec.max.floats.array[:] = [10.0] with self.assertRaisesRegex(ValueError, 'string'): dm_env_utils.tensor_spec_to_dm_env_spec(tensor_spec) class DmEnvSpecToTensorSpecTests(parameterized.TestCase): @parameterized.parameters( (specs.Array([1, 2], np.float32, 'foo'), """name: "foo" shape: 1 shape: 2 dtype: FLOAT"""), (specs.DiscreteArray(5, int, 'bar'), r"""name: "bar" dtype: INT64 min { int64s { array: 0 } } max { int64s { array: 4 } }"""), (specs.BoundedArray( (), np.int32, -1, 5, 'baz'), r"""name: "baz" dtype: INT32 min { int32s { array: -1 } } max { int32s { array: 5 } }"""), (specs.BoundedArray((1, 2), np.uint8, 0, 127, 'zog'), r"""name: "zog" shape: 1 shape: 2 dtype: UINT8 min { uint8s { array: "\000" } } max { uint8s { array: "\177" } }"""), (specs.StringArray(shape=(5, 5), name='fux'), r"""name: "fux" shape: 5 shape: 5 dtype: STRING"""), ) def test_dm_env_spec(self, value, expected): tensor_spec = dm_env_utils.dm_env_spec_to_tensor_spec(value) expected = text_format.Parse(expected, dm_env_rpc_pb2.TensorSpec()) self.assertEqual(expected, tensor_spec) class DmEnvSpecTests(absltest.TestCase): def test_spec(self): dm_env_rpc_specs = { 54: dm_env_rpc_pb2.TensorSpec( name='fuzz', shape=[3], dtype=dm_env_rpc_pb2.DataType.FLOAT), 55: 
dm_env_rpc_pb2.TensorSpec( name='foo', shape=[2], dtype=dm_env_rpc_pb2.DataType.INT32), } manager = spec_manager.SpecManager(dm_env_rpc_specs) expected = { 'foo': specs.Array(shape=[2], dtype=np.int32), 'fuzz': specs.Array(shape=[3], dtype=np.float32) } self.assertDictEqual(expected, dm_env_utils.dm_env_spec(manager)) def test_empty_spec(self): self.assertDictEqual({}, dm_env_utils.dm_env_spec(spec_manager.SpecManager({}))) def test_spec_generate_and_validate_scalars(self): dm_env_rpc_specs = [] for name, dtype in dm_env_rpc_pb2.DataType.items(): if dtype != dm_env_rpc_pb2.DataType.INVALID_DATA_TYPE: dm_env_rpc_specs.append( dm_env_rpc_pb2.TensorSpec(name=name, shape=(), dtype=dtype)) for dm_env_rpc_spec in dm_env_rpc_specs: spec = dm_env_utils.tensor_spec_to_dm_env_spec(dm_env_rpc_spec) value = spec.generate_value() spec.validate(value) def test_spec_generate_and_validate_tensors(self): example_shape = (10, 10, 3) dm_env_rpc_specs = [] for name, dtype in dm_env_rpc_pb2.DataType.items(): if dtype != dm_env_rpc_pb2.DataType.INVALID_DATA_TYPE: dm_env_rpc_specs.append( dm_env_rpc_pb2.TensorSpec( name=name, shape=example_shape, dtype=dtype)) for dm_env_rpc_spec in dm_env_rpc_specs: spec = dm_env_utils.tensor_spec_to_dm_env_spec(dm_env_rpc_spec) value = spec.generate_value() spec.validate(value) if __name__ == '__main__': absltest.main()
dm_env_rpc-master
dm_env_rpc/v1/dm_env_utils_test.py
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Python utilities for flattening and unflattening key-value mappings.""" from typing import Any, Dict, Mapping def flatten_dict( input_dict: Mapping[str, Any], separator: str, *, strict: bool = True, ) -> Dict[str, Any]: """Flattens mappings by joining sub-keys using the provided separator. Only non-empty, mapping types will be flattened. All other types are deemed leaf values. Args: input_dict: Mapping of key-value pairs to flatten. separator: Delimiter used to concatenate keys. strict: Whether to permit keys that already contain the separator. Setting this to False will cause unflattening to be ambiguous. Returns: Flattened dictionary of key-value pairs. Raises: ValueError: If the `input_dict` has a key that contains the separator string, and strict is set to False. """ result: Dict[str, Any] = {} for key, value in input_dict.items(): if strict and separator in key: raise ValueError( f"Can not safely flatten dictionary: key '{key}' already contains " f"the separator '{separator}'!" ) if isinstance(value, Mapping) and len(value): result.update({ f'{key}{separator}{sub_key}': sub_value for sub_key, sub_value in flatten_dict( value, separator, strict=strict).items() }) else: result[key] = value return result def unflatten_dict(input_dict: Mapping[str, Any], separator: str) -> Dict[str, Any]: """Unflatten dictionary using split keys to determine the structure. For each key, split based on the provided separator and create nested dictionary entry for each sub-key. Args: input_dict: Mapping of key-value pairs to un-flatten. separator: Delimiter used to split keys. Returns: Unflattened dictionary. Raises: ValueError: If a key, or it's constituent sub-keys already has a value. For instance, unflattening `{"foo": True, "foo.bar": "baz"}` will result in "foo" being set to both a dict and a Bool. """ result: Dict[str, Any] = {} for key, value in input_dict.items(): sub_keys = key.split(separator) sub_tree = result for sub_key in sub_keys[:-1]: sub_tree = sub_tree.setdefault(sub_key, {}) if not isinstance(sub_tree, Mapping): raise ValueError(f"Sub-tree '{sub_key}' has already been assigned a " f"leaf value {sub_tree}") if sub_keys[-1] in sub_tree: raise ValueError(f'Duplicate key {key}') sub_tree[sub_keys[-1]] = value return result
dm_env_rpc-master
dm_env_rpc/v1/dm_env_flatten_utils.py
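A minimal sketch of how `flatten_dict` and `unflatten_dict` compose, using the '.' separator as in the tests:

from dm_env_rpc.v1 import dm_env_flatten_utils

nested = {'agent': {'position': [0.0, 1.0], 'health': 100}, 'step': 3}

flat = dm_env_flatten_utils.flatten_dict(nested, '.')
# {'agent.position': [0.0, 1.0], 'agent.health': 100, 'step': 3}

restored = dm_env_flatten_utils.unflatten_dict(flat, '.')
assert restored == nested

# With strict=True (the default), keys that already contain the separator
# are rejected rather than silently producing an ambiguous flattening.
try:
  dm_env_flatten_utils.flatten_dict({'agent.health': 1}, '.')
except ValueError:
  pass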
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for dm_env_rpc/message_utils.""" from absl.testing import absltest from google.rpc import status_pb2 from dm_env_rpc.v1 import dm_env_rpc_pb2 from dm_env_rpc.v1 import error from dm_env_rpc.v1 import message_utils from dm_env_rpc.v1 import tensor_utils _CREATE_WORLD_REQUEST = dm_env_rpc_pb2.CreateWorldRequest( settings={'foo': tensor_utils.pack_tensor('bar')}) _CREATE_WORLD_RESPONSE = dm_env_rpc_pb2.CreateWorldResponse(world_name='qux') _CREATE_WORLD_ENVIRONMENT_RESPONSE = dm_env_rpc_pb2.EnvironmentResponse( create_world=_CREATE_WORLD_RESPONSE) class MessageUtilsTests(absltest.TestCase): def test_pack_create_world_request(self): environment_request, field_name = message_utils.pack_environment_request( _CREATE_WORLD_REQUEST) self.assertEqual(field_name, 'create_world') self.assertEqual(environment_request.WhichOneof('payload'), 'create_world') self.assertEqual(environment_request.create_world, _CREATE_WORLD_REQUEST) def test_unpack_create_world_response(self): response = message_utils.unpack_environment_response( _CREATE_WORLD_ENVIRONMENT_RESPONSE, 'create_world') self.assertEqual(response, _CREATE_WORLD_RESPONSE) def test_unpack_error_response(self): with self.assertRaisesRegex(error.DmEnvRpcError, 'A test error.'): message_utils.unpack_environment_response( dm_env_rpc_pb2.EnvironmentResponse( error=status_pb2.Status(message='A test error.')), 'create_world') def test_unpack_incorrect_response(self): with self.assertRaisesWithLiteralMatch( ValueError, 'Unexpected response message! expected: create_world, actual: ' 'leave_world'): message_utils.unpack_environment_response( dm_env_rpc_pb2.EnvironmentResponse( leave_world=dm_env_rpc_pb2.LeaveWorldResponse()), 'create_world') if __name__ == '__main__': absltest.main()
dm_env_rpc-master
dm_env_rpc/v1/message_utils_test.py
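The helpers exercised above wrap a typed request in an `EnvironmentRequest` envelope and unwrap the matching response; roughly:

from dm_env_rpc.v1 import dm_env_rpc_pb2
from dm_env_rpc.v1 import message_utils
from dm_env_rpc.v1 import tensor_utils

request = dm_env_rpc_pb2.CreateWorldRequest(
    settings={'seed': tensor_utils.pack_tensor(123)})

# Wrap the typed request in the EnvironmentRequest envelope.
env_request, field_name = message_utils.pack_environment_request(request)
assert field_name == 'create_world'

# A server would reply with an EnvironmentResponse; unwrapping checks that the
# payload matches the request type and raises DmEnvRpcError for error payloads.
env_response = dm_env_rpc_pb2.EnvironmentResponse(
    create_world=dm_env_rpc_pb2.CreateWorldResponse(world_name='my_world'))
response = message_utils.unpack_environment_response(env_response, field_name)
assert response.world_name == 'my_world'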
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Helper Python utilities for managing dm_env_rpc TensorSpecs.""" import dataclasses from typing import Generic, TypeVar, Union import numpy as np from dm_env_rpc.v1 import dm_env_rpc_pb2 from dm_env_rpc.v1 import tensor_utils _BOUNDS_CANNOT_BE_SAFELY_CAST_TO_DTYPE = ( 'TensorSpec "{name}"\'s bounds [{minimum}, {maximum}] contain value(s) ' 'that cannot be safely cast to dtype {dtype}.') T = TypeVar('T') @dataclasses.dataclass class Bounds(Generic[T]): min: T max: T def _can_cast(array_or_scalar, np_dtype: np.dtype) -> bool: for value in np.asarray(array_or_scalar).flat: if not np.can_cast(value, np_dtype, casting='safe'): return False return True def np_range_info(np_type: ...) -> Union[np.finfo, np.iinfo]: """Returns type info for `np_type`, which includes min and max attributes.""" np_type = np.dtype(np_type) if issubclass(np_type.type, np.floating): return np.finfo(np_type) elif issubclass(np_type.type, np.integer): return np.iinfo(np_type) else: raise ValueError('{} does not have range info.'.format(np_type)) def _get_value(min_max_value, shape, default): """Helper function that returns the min/max bounds for a Value message. Args: min_max_value: Value protobuf message to get value from. shape: Optional dimensions to unpack payload data to. default: Value to use if min_max_value is not set. Returns: A scalar if `shape` is empty or None, or an unpacked NumPy array of either the unpacked value or provided default. """ which = min_max_value.WhichOneof('payload') value = which and getattr(min_max_value, which) if value is None: min_max = default else: unpacked = tensor_utils.unpack_proto(min_max_value) min_max = tensor_utils.reshape_array( unpacked, shape) if len(unpacked) > 1 else unpacked[0] if (shape is not None and np.any(np.array(shape) < 0) and np.asarray(min_max).size > 1): raise ValueError( "TensorSpec's with variable length shapes can only have scalar ranges. " 'Shape: {}, value: {}'.format(shape, min_max)) return min_max def bounds(tensor_spec: dm_env_rpc_pb2.TensorSpec) -> Bounds: """Gets the inclusive bounds of `tensor_spec`. Args: tensor_spec: An instance of a dm_env_rpc TensorSpec proto. Returns: A named tuple (`min`, `max`) of inclusive bounds. Raises: ValueError: `tensor_spec` does not have a numeric dtype, or the type of its `min` or `max` does not match its dtype, or the the bounds are invalid in some way. """ np_type = tensor_utils.data_type_to_np_type(tensor_spec.dtype) tensor_spec_type = dm_env_rpc_pb2.DataType.Name(tensor_spec.dtype).lower() if not issubclass(np_type.type, np.number): raise ValueError('TensorSpec "{}" has non-numeric type {}.' .format(tensor_spec.name, tensor_spec_type)) # Check min payload type matches the tensor type. 
min_which = tensor_spec.min.WhichOneof('payload') if min_which and not min_which.startswith(tensor_spec_type): raise ValueError('TensorSpec "{}" has dtype {} but min type {}.'.format( tensor_spec.name, tensor_spec_type, min_which)) # Check max payload type matches the tensor type. max_which = tensor_spec.max.WhichOneof('payload') if max_which and not max_which.startswith(tensor_spec_type): raise ValueError('TensorSpec "{}" has dtype {} but max type {}.'.format( tensor_spec.name, tensor_spec_type, max_which)) dtype_bounds = np_range_info(np_type) min_bound = _get_value(tensor_spec.min, tensor_spec.shape, dtype_bounds.min) max_bound = _get_value(tensor_spec.max, tensor_spec.shape, dtype_bounds.max) if not _can_cast(min_bound, np_type) or not _can_cast(max_bound, np_type): raise ValueError( _BOUNDS_CANNOT_BE_SAFELY_CAST_TO_DTYPE.format( name=tensor_spec.name, minimum=min_bound, maximum=max_bound, dtype=tensor_spec_type)) if np.any(max_bound < min_bound): raise ValueError('TensorSpec "{}" has min {} larger than max {}.'.format( tensor_spec.name, min_bound, max_bound)) return Bounds(np_type.type(min_bound), np_type.type(max_bound)) def set_bounds(tensor_spec: dm_env_rpc_pb2.TensorSpec, minimum, maximum): """Modifies `tensor_spec` to have its inclusive bounds set. Packs `minimum` in to `tensor_spec.min` and `maximum` in to `tensor_spec.max`. Args: tensor_spec: An instance of a dm_env_rpc TensorSpec proto. It should already have its `name`, `dtype` and `shape` attributes set. minimum: The minimum value that elements in the described tensor can obtain. A scalar, iterable of scalars, or None. If None, `min` will be cleared on `tensor_spec`. maximum: The maximum value that elements in the described tensor can obtain. A scalar, iterable of scalars, or None. If None, `max` will be cleared on `tensor_spec`. """ np_type = tensor_utils.data_type_to_np_type(tensor_spec.dtype) if not issubclass(np_type.type, np.number): raise ValueError(f'TensorSpec has non-numeric type "{np_type}".') has_min = minimum is not None has_max = maximum is not None if ((has_min and not _can_cast(minimum, np_type)) or (has_max and not _can_cast(maximum, np_type))): raise ValueError( _BOUNDS_CANNOT_BE_SAFELY_CAST_TO_DTYPE.format( name=tensor_spec.name, minimum=minimum, maximum=maximum, dtype=dm_env_rpc_pb2.DataType.Name(tensor_spec.dtype))) if has_min: minimum = np.asarray(minimum, dtype=np_type) if minimum.size != 1 and minimum.shape != tuple(tensor_spec.shape): raise ValueError( f'minimum has shape {minimum.shape}, which is incompatible with ' f"tensor_spec {tensor_spec.name}'s shape {tensor_spec.shape}.") if has_max: maximum = np.asarray(maximum, dtype=np_type) if maximum.size != 1 and maximum.shape != tuple(tensor_spec.shape): raise ValueError( f'maximum has shape {maximum.shape}, which is incompatible with ' f"tensor_spec {tensor_spec.name}'s shape {tensor_spec.shape}.") if (has_min and has_max and np.any(maximum < minimum)): raise ValueError('TensorSpec "{}" has min {} larger than max {}.'.format( tensor_spec.name, minimum, maximum)) packer = tensor_utils.get_packer(np_type) if has_min: packer.pack(tensor_spec.min, minimum) else: tensor_spec.ClearField('min') if has_max: packer.pack(tensor_spec.max, maximum) else: tensor_spec.ClearField('max')
dm_env_rpc-master
dm_env_rpc/v1/tensor_spec_utils.py
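A short sketch of the bounds helpers defined above: `set_bounds` packs min/max into the spec, and `bounds` reads them back, falling back to the dtype's full representable range when unset.

from dm_env_rpc.v1 import dm_env_rpc_pb2
from dm_env_rpc.v1 import tensor_spec_utils

spec = dm_env_rpc_pb2.TensorSpec(
    name='reward', shape=[], dtype=dm_env_rpc_pb2.DataType.FLOAT)

# Write inclusive bounds into the spec...
tensor_spec_utils.set_bounds(spec, minimum=-1.0, maximum=1.0)

# ...and read them back as a Bounds(min, max) dataclass.
b = tensor_spec_utils.bounds(spec)
assert b.min == -1.0 and b.max == 1.0

# With no bounds set, the dtype's representable range is used.
unbounded = dm_env_rpc_pb2.TensorSpec(
    name='count', dtype=dm_env_rpc_pb2.DataType.UINT32)
assert tensor_spec_utils.bounds(unbounded).max == 2**32 - 1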
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Provides custom Pythonic errors for dm_env_rpc error messages."""

from google.rpc import status_pb2


class DmEnvRpcError(Exception):
  """A dm_env_rpc custom exception.

  Wraps a google.rpc.Status message as a Python Exception class.
  """

  def __init__(self, status_proto: status_pb2.Status):
    super().__init__()
    self._status_proto = status_proto

  @property
  def code(self) -> int:
    return self._status_proto.code

  @property
  def message(self) -> str:
    return self._status_proto.message

  def __str__(self):
    return str(self._status_proto)

  def __repr__(self):
    return f'DmEnvRpcError(code={self.code}, message={self.message})'

  def __reduce__(self):
    return (DmEnvRpcError, (self._status_proto,))
dm_env_rpc-master
dm_env_rpc/v1/error.py
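A sketch of how the exception above is typically constructed and inspected, assuming the standard `google.rpc` status and code protos are available:

from google.rpc import code_pb2
from google.rpc import status_pb2

from dm_env_rpc.v1 import error

status = status_pb2.Status(
    code=code_pb2.INVALID_ARGUMENT, message='World "foo" does not exist.')

try:
  raise error.DmEnvRpcError(status)
except error.DmEnvRpcError as e:
  # The code and message of the wrapped google.rpc.Status are exposed as
  # read-only properties.
  assert e.code == code_pb2.INVALID_ARGUMENT
  assert 'does not exist' in e.message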
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for dm_env_rpc helper functions.""" from absl.testing import absltest from absl.testing import parameterized import numpy as np from dm_env_rpc.v1 import dm_env_rpc_pb2 from dm_env_rpc.v1 import tensor_spec_utils class NpRangeInfoTests(absltest.TestCase): def test_floating(self): expected_min = np.finfo(np.float32).min actual_min = tensor_spec_utils.np_range_info(np.float32).min self.assertEqual(expected_min, actual_min) def test_integer(self): actual_min = tensor_spec_utils.np_range_info(np.uint32).min self.assertEqual(0, actual_min) def test_string_gives_error(self): with self.assertRaisesRegex(ValueError, 'does not have range info'): _ = tensor_spec_utils.np_range_info(np.str_).min class BoundsTests(parameterized.TestCase): def test_unbounded_unsigned(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32 bounds = tensor_spec_utils.bounds(tensor_spec) self.assertEqual(tensor_spec_utils.Bounds(0, 2**32 - 1), bounds) def test_unbounded_signed(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.INT32 bounds = tensor_spec_utils.bounds(tensor_spec) self.assertEqual(tensor_spec_utils.Bounds(-2**31, 2**31 - 1), bounds) def test_min_n_shape(self): minimum = np.array([[1, 2], [3, 4]]) tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32 tensor_spec.min.uint32s.array[:] = minimum.flatten().data.tolist() tensor_spec.shape[:] = minimum.shape bounds = tensor_spec_utils.bounds(tensor_spec) np.testing.assert_array_equal(minimum, bounds.min) self.assertEqual(2**32 - 1, bounds.max) def test_max_n_shape(self): maximum = np.array([[1, 2], [3, 4]]) tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32 tensor_spec.max.uint32s.array[:] = maximum.flatten().data.tolist() tensor_spec.shape[:] = maximum.shape bounds = tensor_spec_utils.bounds(tensor_spec) self.assertEqual(0, bounds.min) np.testing.assert_array_equal(maximum, bounds.max) def test_invalid_min_shape(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32 tensor_spec.min.uint32s.array[:] = [1, 2] with self.assertRaisesRegex(ValueError, 'Scalar tensors must have exactly 1 element.*'): tensor_spec_utils.bounds(tensor_spec) def test_invalid_max_shape(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32 tensor_spec.max.uint32s.array[:] = [1, 2] tensor_spec.shape[:] = (2, 2) with self.assertRaisesRegex(ValueError, 'cannot reshape array of size .* into shape.*'): tensor_spec_utils.bounds(tensor_spec) def test_min(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32 tensor_spec.min.uint32s.array[:] = [1] bounds = tensor_spec_utils.bounds(tensor_spec) 
self.assertEqual(tensor_spec_utils.Bounds(1, 2**32 - 1), bounds) def test_max(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32 tensor_spec.max.uint32s.array[:] = [1] bounds = tensor_spec_utils.bounds(tensor_spec) self.assertEqual(tensor_spec_utils.Bounds(0, 1), bounds) def test_min_and_max(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.INT32 tensor_spec.min.int32s.array[:] = [-1] tensor_spec.max.int32s.array[:] = [1] bounds = tensor_spec_utils.bounds(tensor_spec) self.assertEqual(tensor_spec_utils.Bounds(-1, 1), bounds) def test_broadcast_var_shape(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.INT32 tensor_spec.min.int32s.array[:] = [-1] tensor_spec.max.int32s.array[:] = [1] tensor_spec.shape[:] = (-1,) bounds = tensor_spec_utils.bounds(tensor_spec) self.assertEqual(tensor_spec_utils.Bounds(-1, 1), bounds) def test_invalid_min_var_shape(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.INT32 tensor_spec.min.int32s.array[:] = [-1, -1] tensor_spec.max.int32s.array[:] = [1] tensor_spec.shape[:] = (-1,) with self.assertRaisesRegex( ValueError, "TensorSpec's with variable length shapes " 'can only have scalar ranges.'): tensor_spec_utils.bounds(tensor_spec) def test_min_scalar_doesnt_broadcast(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32 tensor_spec.min.uint32s.array[:] = [1] tensor_spec.shape[:] = (2, 2) bounds = tensor_spec_utils.bounds(tensor_spec) self.assertEqual(1, bounds.min) self.assertEqual(2**32 - 1, bounds.max) def test_max_scalar_doesnt_broadcast(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32 tensor_spec.max.uint32s.array[:] = [1] tensor_spec.shape[:] = (2, 2) bounds = tensor_spec_utils.bounds(tensor_spec) self.assertEqual(0, bounds.min) self.assertEqual(1, bounds.max) def test_min_max_scalars_dont_broadcast(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32 tensor_spec.min.uint32s.array[:] = [1] tensor_spec.max.uint32s.array[:] = [2] tensor_spec.shape[:] = (4,) bounds = tensor_spec_utils.bounds(tensor_spec) self.assertEqual(1, bounds.min) self.assertEqual(2, bounds.max) def test_min_mismatches_type_raises_error(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32 tensor_spec.min.int32s.array[:] = [1] tensor_spec.name = 'foo' with self.assertRaisesRegex(ValueError, 'foo.*uint32.*min.*int32'): tensor_spec_utils.bounds(tensor_spec) def test_max_mismatches_type_raises_error(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.UINT32 tensor_spec.max.int32s.array[:] = [1] tensor_spec.name = 'foo' with self.assertRaisesRegex(ValueError, 'foo.*uint32.*max.*int32'): tensor_spec_utils.bounds(tensor_spec) def test_nonnumeric_type_raises_error(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.STRING tensor_spec.max.int32s.array[:] = [1] tensor_spec.name = 'foo' with self.assertRaisesRegex(ValueError, 'foo.*non-numeric.*string'): tensor_spec_utils.bounds(tensor_spec) def test_max_0_stays_0(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.INT8 tensor_spec.max.int8s.array = b'\x00' tensor_spec.name = 'foo' self.assertEqual( tensor_spec_utils.Bounds(-128, 0), 
tensor_spec_utils.bounds(tensor_spec)) def test_min_0_stays_0(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.INT8 tensor_spec.min.int8s.array = b'\x00' tensor_spec.name = 'foo' self.assertEqual( tensor_spec_utils.Bounds(0, 127), tensor_spec_utils.bounds(tensor_spec)) def test_max_less_than_min_raises_error(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.INT32 tensor_spec.max.int32s.array[:] = [-1] tensor_spec.min.int32s.array[:] = [1] tensor_spec.name = 'foo' with self.assertRaisesRegex(ValueError, 'foo.*min 1.*max -1'): tensor_spec_utils.bounds(tensor_spec) @parameterized.parameters([ dict(minimum=0., maximum=np.inf), dict(minimum=-np.inf, maximum=0.), dict(minimum=-np.inf, maximum=np.inf), ]) def test_infinite_bounds_are_valid_for_floats(self, minimum, maximum): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.dtype = dm_env_rpc_pb2.DataType.DOUBLE tensor_spec.min.doubles.array[:] = [minimum] tensor_spec.max.doubles.array[:] = [maximum] tensor_spec.name = 'foo' tensor_spec_utils.bounds(tensor_spec) class SetBoundsTests(parameterized.TestCase): def test_set_scalar_bounds(self): tensor_spec = dm_env_rpc_pb2.TensorSpec( name='test', dtype=dm_env_rpc_pb2.DataType.INT32) tensor_spec_utils.set_bounds(tensor_spec, minimum=1, maximum=2) self.assertEqual([1], tensor_spec.min.int32s.array) self.assertEqual([2], tensor_spec.max.int32s.array) def test_set_scalar_bounds_int8(self): tensor_spec = dm_env_rpc_pb2.TensorSpec(dtype=dm_env_rpc_pb2.DataType.INT8) minimum = 1 maximum = 2 tensor_spec_utils.set_bounds(tensor_spec, minimum=minimum, maximum=maximum) self.assertEqual(np.int8(minimum).tobytes(), tensor_spec.min.int8s.array) self.assertEqual(np.int8(maximum).tobytes(), tensor_spec.max.int8s.array) def test_set_multiple_bounds(self): tensor_spec = dm_env_rpc_pb2.TensorSpec( name='test', shape=(2,), dtype=dm_env_rpc_pb2.DataType.INT32) tensor_spec_utils.set_bounds(tensor_spec, minimum=[1, 2], maximum=[3, 4]) self.assertEqual([1, 2], tensor_spec.min.int32s.array) self.assertEqual([3, 4], tensor_spec.max.int32s.array) def test_set_only_min(self): tensor_spec = dm_env_rpc_pb2.TensorSpec( name='test', dtype=dm_env_rpc_pb2.DataType.INT32) tensor_spec_utils.set_bounds(tensor_spec, minimum=1, maximum=None) self.assertEqual([1], tensor_spec.min.int32s.array) self.assertIsNone(tensor_spec.max.WhichOneof('payload')) def test_set_only_max(self): tensor_spec = dm_env_rpc_pb2.TensorSpec( name='test', dtype=dm_env_rpc_pb2.DataType.INT32) tensor_spec_utils.set_bounds(tensor_spec, minimum=None, maximum=1) self.assertIsNone(tensor_spec.min.WhichOneof('payload')) self.assertEqual([1], tensor_spec.max.int32s.array) def test_unset_min_and_max(self): tensor_spec = dm_env_rpc_pb2.TensorSpec( name='test', dtype=dm_env_rpc_pb2.DataType.INT32) tensor_spec_utils.set_bounds(tensor_spec, minimum=1, maximum=2) tensor_spec_utils.set_bounds(tensor_spec, minimum=None, maximum=None) self.assertIsNone(tensor_spec.min.WhichOneof('payload')) self.assertIsNone(tensor_spec.max.WhichOneof('payload')) def test_cannot_set_nonnumeric_bounds(self): tensor_spec = dm_env_rpc_pb2.TensorSpec( name='test', dtype=dm_env_rpc_pb2.DataType.STRING) with self.assertRaisesRegex(ValueError, 'non-numeric'): tensor_spec_utils.set_bounds(tensor_spec, minimum=None, maximum=None) def test_can_set_broadcastable_min_bounds(self): tensor_spec = dm_env_rpc_pb2.TensorSpec( name='test', shape=[2], dtype=dm_env_rpc_pb2.DataType.INT32) 
tensor_spec_utils.set_bounds(tensor_spec, minimum=1, maximum=[2, 3]) self.assertEqual([1], tensor_spec.min.int32s.array) self.assertEqual([2, 3], tensor_spec.max.int32s.array) def test_cannot_set_multiple_min_bounds_on_variable_shape(self): tensor_spec = dm_env_rpc_pb2.TensorSpec( name='test', shape=[2, -1], dtype=dm_env_rpc_pb2.DataType.INT32) with self.assertRaisesRegex(ValueError, 'incompatible'): tensor_spec_utils.set_bounds(tensor_spec, minimum=[2, 3], maximum=4) def test_cannot_set_dissimilar_shape_on_min_bounds(self): tensor_spec = dm_env_rpc_pb2.TensorSpec( name='test', shape=[2, 2], dtype=dm_env_rpc_pb2.DataType.INT32) with self.assertRaisesRegex(ValueError, 'incompatible'): tensor_spec_utils.set_bounds(tensor_spec, minimum=[1, 2, 3], maximum=4) def test_can_set_broadcastable_max_bounds(self): tensor_spec = dm_env_rpc_pb2.TensorSpec( name='test', shape=[2], dtype=dm_env_rpc_pb2.DataType.INT32) tensor_spec_utils.set_bounds(tensor_spec, minimum=[2, 3], maximum=4) self.assertEqual([2, 3], tensor_spec.min.int32s.array) self.assertEqual([4], tensor_spec.max.int32s.array) def test_cannot_set_multiple_max_bounds_on_variable_shape(self): tensor_spec = dm_env_rpc_pb2.TensorSpec( name='test', shape=[2, -1], dtype=dm_env_rpc_pb2.DataType.INT32) with self.assertRaisesRegex(ValueError, 'incompatible'): tensor_spec_utils.set_bounds(tensor_spec, minimum=1, maximum=[2, 3]) def test_cannot_set_dissimilar_shape_on_max_bounds(self): tensor_spec = dm_env_rpc_pb2.TensorSpec( name='test', shape=[2, 2], dtype=dm_env_rpc_pb2.DataType.INT32) with self.assertRaisesRegex(ValueError, 'incompatible'): tensor_spec_utils.set_bounds(tensor_spec, minimum=0, maximum=[1, 2, 3]) def test_cannot_set_any_min_bounds_to_exceed_maximum(self): tensor_spec = dm_env_rpc_pb2.TensorSpec( name='test', shape=[2], dtype=dm_env_rpc_pb2.DataType.INT32) with self.assertRaisesRegex(ValueError, 'larger than max'): tensor_spec_utils.set_bounds(tensor_spec, minimum=[0, 4], maximum=[1, 1]) @parameterized.parameters([ dict(minimum=[-1000], maximum=[1]), dict(minimum=[1], maximum=[1000]), ]) def test_new_bounds_must_be_safely_castable_to_dtype(self, minimum, maximum): name = 'test' dtype = dm_env_rpc_pb2.DataType.INT8 tensor_spec = dm_env_rpc_pb2.TensorSpec(name=name, dtype=dtype) with self.assertRaisesWithLiteralMatch( ValueError, tensor_spec_utils._BOUNDS_CANNOT_BE_SAFELY_CAST_TO_DTYPE.format( name=name, minimum=minimum, maximum=maximum, dtype=dm_env_rpc_pb2.DataType.Name(dtype))): tensor_spec_utils.set_bounds( tensor_spec, minimum=minimum, maximum=maximum) if __name__ == '__main__': absltest.main()
dm_env_rpc-master
dm_env_rpc/v1/tensor_spec_utils_test.py
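One behaviour the tests above pin down: a scalar bound on a shaped spec is returned as a scalar rather than being broadcast to the full shape. A tiny sketch:

from dm_env_rpc.v1 import dm_env_rpc_pb2
from dm_env_rpc.v1 import tensor_spec_utils

spec = dm_env_rpc_pb2.TensorSpec(
    name='pixels', shape=[2, 2], dtype=dm_env_rpc_pb2.DataType.UINT32)
spec.min.uint32s.array[:] = [1]

b = tensor_spec_utils.bounds(spec)
# The scalar minimum is returned as-is; the maximum defaults to the dtype max.
assert b.min == 1
assert b.max == 2**32 - 1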
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for Connection.""" import contextlib from unittest import mock from absl.testing import absltest import grpc from google.protobuf import any_pb2 from google.protobuf import struct_pb2 from google.rpc import status_pb2 from dm_env_rpc.v1 import connection as dm_env_rpc_connection from dm_env_rpc.v1 import dm_env_rpc_pb2 from dm_env_rpc.v1 import error from dm_env_rpc.v1 import tensor_utils _CREATE_REQUEST = dm_env_rpc_pb2.CreateWorldRequest( settings={'foo': tensor_utils.pack_tensor('bar')}) _CREATE_RESPONSE = dm_env_rpc_pb2.CreateWorldResponse() _BAD_CREATE_REQUEST = dm_env_rpc_pb2.CreateWorldRequest() _TEST_ERROR = dm_env_rpc_pb2.EnvironmentResponse( error=status_pb2.Status(message='A test error.')) _INCORRECT_RESPONSE_TEST_MSG = dm_env_rpc_pb2.DestroyWorldRequest( world_name='foo') _INCORRECT_RESPONSE = dm_env_rpc_pb2.EnvironmentResponse( leave_world=dm_env_rpc_pb2.LeaveWorldResponse()) _EXTENSION_REQUEST = struct_pb2.Value(string_value='extension request') _EXTENSION_RESPONSE = struct_pb2.Value(number_value=555) def _wrap_in_any(proto): any_proto = any_pb2.Any() any_proto.Pack(proto) return any_proto _REQUEST_RESPONSE_PAIRS = { dm_env_rpc_pb2.EnvironmentRequest( create_world=_CREATE_REQUEST).SerializeToString(): dm_env_rpc_pb2.EnvironmentResponse(create_world=_CREATE_RESPONSE), dm_env_rpc_pb2.EnvironmentRequest( create_world=_BAD_CREATE_REQUEST).SerializeToString(): _TEST_ERROR, dm_env_rpc_pb2.EnvironmentRequest( extension=_wrap_in_any(_EXTENSION_REQUEST)).SerializeToString(): dm_env_rpc_pb2.EnvironmentResponse( extension=_wrap_in_any(_EXTENSION_RESPONSE)), dm_env_rpc_pb2.EnvironmentRequest( destroy_world=_INCORRECT_RESPONSE_TEST_MSG).SerializeToString(): _INCORRECT_RESPONSE, } def _process(request_iterator, **kwargs): del kwargs for request in request_iterator: yield _REQUEST_RESPONSE_PAIRS.get(request.SerializeToString(), _TEST_ERROR) @contextlib.contextmanager def _create_mock_channel(): """Mocks out gRPC and returns a channel to be passed to Connection.""" with mock.patch.object(dm_env_rpc_connection, 'dm_env_rpc_pb2_grpc') as mock_grpc: mock_stub_class = mock.MagicMock() mock_stub_class.Process = _process mock_grpc.EnvironmentStub.return_value = mock_stub_class yield mock.MagicMock() class ConnectionTests(absltest.TestCase): def test_create(self): with _create_mock_channel() as mock_channel: with dm_env_rpc_connection.Connection(mock_channel) as connection: response = connection.send(_CREATE_REQUEST) self.assertEqual(_CREATE_RESPONSE, response) def test_error(self): with _create_mock_channel() as mock_channel: with dm_env_rpc_connection.Connection(mock_channel) as connection: with self.assertRaisesRegex(error.DmEnvRpcError, 'test error'): connection.send(_BAD_CREATE_REQUEST) def test_extension(self): with _create_mock_channel() as mock_channel: with dm_env_rpc_connection.Connection(mock_channel) 
as connection: request = any_pb2.Any() request.Pack(_EXTENSION_REQUEST) response = connection.send(request) expected_response = any_pb2.Any() expected_response.Pack(_EXTENSION_RESPONSE) self.assertEqual(expected_response, response) @mock.patch.object(grpc, 'secure_channel') @mock.patch.object(grpc, 'channel_ready_future') def test_create_secure_channel_and_connect(self, mock_channel_ready, mock_secure_channel): mock_channel = mock.MagicMock() mock_secure_channel.return_value = mock_channel self.assertIsNotNone( dm_env_rpc_connection.create_secure_channel_and_connect( 'valid_address', grpc.local_channel_credentials())) mock_channel_ready.assert_called_once_with(mock_channel) mock_secure_channel.assert_called_once() mock_channel.close.assert_called_once() @mock.patch.object(grpc, 'secure_channel') @mock.patch.object(grpc, 'channel_ready_future') def test_create_secure_channel_and_connect_context(self, mock_channel_ready, mock_secure_channel): mock_channel = mock.MagicMock() mock_secure_channel.return_value = mock_channel with dm_env_rpc_connection.create_secure_channel_and_connect( 'valid_address') as connection: self.assertIsNotNone(connection) mock_channel_ready.assert_called_once_with(mock_channel) mock_secure_channel.assert_called_once() mock_channel.close.assert_called_once() def test_create_secure_channel_and_connect_timeout(self): with self.assertRaises(grpc.FutureTimeoutError): dm_env_rpc_connection.create_secure_channel_and_connect( 'invalid_address', grpc.local_channel_credentials(), timeout=1.) def test_incorrect_response(self): with _create_mock_channel() as mock_channel: with dm_env_rpc_connection.Connection(mock_channel) as connection: with self.assertRaisesRegex(ValueError, 'Unexpected response message'): connection.send(_INCORRECT_RESPONSE_TEST_MSG) def test_with_metadata(self): expected_metadata = (('key', 'value'),) with mock.patch.object(dm_env_rpc_connection, 'dm_env_rpc_pb2_grpc') as mock_grpc: mock_stub_class = mock.MagicMock() mock_grpc.EnvironmentStub.return_value = mock_stub_class _ = dm_env_rpc_connection.Connection( mock.MagicMock(), metadata=expected_metadata) mock_stub_class.Process.assert_called_with( mock.ANY, metadata=expected_metadata) if __name__ == '__main__': absltest.main()
dm_env_rpc-master
dm_env_rpc/v1/connection_test.py
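The extension path tested above sends an `Any`-wrapped proto and receives an `Any`-wrapped reply. Against a real server the calling pattern looks roughly like the following; the address is a placeholder and any `grpc.Channel` to a dm_env_rpc server works:

import grpc
from google.protobuf import any_pb2
from google.protobuf import struct_pb2

from dm_env_rpc.v1 import connection as dm_env_rpc_connection

channel = grpc.secure_channel('localhost:9001',
                              grpc.local_channel_credentials())

with dm_env_rpc_connection.Connection(channel) as conn:
  request = any_pb2.Any()
  request.Pack(struct_pb2.Value(string_value='extension request'))

  response_any = conn.send(request)  # The reply is also an any_pb2.Any.

  response = struct_pb2.Value()
  response_any.Unpack(response)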
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for dm_env_flatten_utils.""" from absl.testing import absltest from dm_env_rpc.v1 import dm_env_flatten_utils class FlattenUtilsTest(absltest.TestCase): def test_flatten(self): input_dict = { 'foo': { 'bar': 1, 'baz': False }, 'fiz': object(), } expected = { 'foo.bar': 1, 'foo.baz': False, 'fiz': object(), } self.assertSameElements(expected, dm_env_flatten_utils.flatten_dict(input_dict, '.')) def test_unflatten(self): input_dict = { 'foo.bar.baz': True, 'fiz.buz': 1, 'foo.baz': 'val', 'buz': {} } expected = { 'foo': { 'bar': { 'baz': True }, 'baz': 'val' }, 'fiz': { 'buz': 1 }, 'buz': {}, } self.assertSameElements( expected, dm_env_flatten_utils.unflatten_dict(input_dict, '.')) def test_unflatten_different_separator(self): input_dict = {'foo::bar.baz': True, 'foo.bar::baz': 1} expected = {'foo': {'bar.baz': True}, 'foo.bar': {'baz': 1}} self.assertSameElements( expected, dm_env_flatten_utils.unflatten_dict(input_dict, '::')) def test_flatten_unflatten(self): input_output = { 'foo': { 'bar': 1, 'baz': False }, 'fiz': object(), } self.assertSameElements( input_output, dm_env_flatten_utils.unflatten_dict( dm_env_flatten_utils.flatten_dict(input_output, '.'), '.')) def test_flatten_with_key_containing_separator(self): input_dict = {'foo.bar': {'baz': 123}, 'bar': {'foo.baz': 456}} expected = {'foo.bar.baz': 123, 'bar.foo.baz': 456} self.assertSameElements( expected, dm_env_flatten_utils.flatten_dict(input_dict, '.', strict=False), ) def test_flatten_with_key_containing_separator_strict_raises_error(self): with self.assertRaisesRegex(ValueError, 'foo.bar'): dm_env_flatten_utils.flatten_dict({'foo.bar': True}, '.') def test_invalid_flattened_dict_raises_error(self): input_dict = dict(( ('foo.bar', True), ('foo', 'invalid_value_for_sub_key'), )) with self.assertRaisesRegex(ValueError, 'Duplicate key'): dm_env_flatten_utils.unflatten_dict(input_dict, '.') def test_sub_tree_has_value_raises_error(self): input_dict = dict(( ('branch', 'should_not_have_value'), ('branch.leaf', True), )) with self.assertRaisesRegex(ValueError, "Sub-tree 'branch' has already been assigned"): dm_env_flatten_utils.unflatten_dict(input_dict, '.') def test_empty_dict_values_flatten(self): input_dict = { 'foo': {}, 'bar': { 'baz': {} }, } expected = { 'foo': {}, 'bar.baz': {}, } self.assertSameElements(expected, dm_env_flatten_utils.flatten_dict(input_dict, '.')) if __name__ == '__main__': absltest.main()
dm_env_rpc-master
dm_env_rpc/v1/dm_env_flatten_utils_test.py
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Initial version of dm_env_rpc networking protocol."""
dm_env_rpc-master
dm_env_rpc/v1/__init__.py
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for dm_env_rpc helper functions.""" import struct from unittest import mock from absl.testing import absltest from absl.testing import parameterized import numpy as np from google.protobuf import any_pb2 from google.protobuf import struct_pb2 from dm_env_rpc.v1 import dm_env_rpc_pb2 from dm_env_rpc.v1 import tensor_utils class PackTensorTests(parameterized.TestCase): @parameterized.parameters( (np.float32(2.5), 'floats'), (2.5, 'doubles'), (np.int32(-25), 'int32s'), (np.int64(-25), 'int64s'), (np.frombuffer(b'\xF0\xF1\xF2\xF3', np.uint32)[0], 'uint32s'), (np.frombuffer(b'\xF0\xF1\xF2\xF3\xF4\xF5\xF6\xF7', np.uint64)[0], 'uint64s'), (True, 'bools'), (False, 'bools'), ('foo', 'strings'), ) def test_pack_scalars(self, scalar, expected_payload): tensor = tensor_utils.pack_tensor(scalar) self.assertEqual([], tensor.shape) self.assertEqual([scalar], getattr(tensor, expected_payload).array) @parameterized.parameters( (np.int8(-25), 'b', 'int8s'), (np.uint8(250), 'B', 'uint8s'), ) def test_pack_scalar_bytes(self, scalar, fmt, expected_payload): tensor = tensor_utils.pack_tensor(scalar) self.assertEqual([], tensor.shape) actual = struct.unpack(fmt, getattr(tensor, expected_payload).array) self.assertEqual(scalar, actual) def test_pack_scalar_protos(self): scalar = struct_pb2.Value(string_value='my message') tensor = tensor_utils.pack_tensor(scalar) self.assertEqual([], tensor.shape) self.assertLen(tensor.protos.array, 1) unpacked = struct_pb2.Value() self.assertTrue(tensor.protos.array[0].Unpack(unpacked)) self.assertEqual(scalar, unpacked) def test_pack_scalar_any_proto(self): scalar = struct_pb2.Value(string_value='my message') scalar_any = any_pb2.Any() scalar_any.Pack(scalar) tensor = tensor_utils.pack_tensor(scalar_any) self.assertEqual([], tensor.shape) self.assertLen(tensor.protos.array, 1) unpacked = struct_pb2.Value() self.assertTrue(tensor.protos.array[0].Unpack(unpacked)) self.assertEqual(scalar, unpacked) @parameterized.parameters( (25, np.float32, 'floats'), (25, np.float64, 'doubles'), (25, np.int32, 'int32s'), (25, np.int64, 'int64s'), (25, np.uint32, 'uint32s'), (25, np.uint64, 'uint64s'), (2**64-1, np.uint64, 'uint64s'), (True, bool, 'bools'), (False, bool, 'bools'), ('foo', str, 'strings'), # Test legacy support for numpy built-in types (no longer recommended as # of release 1.20.0 - # https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations) (True, np.bool_, 'bools'), (False, np.bool_, 'bools'), ('foo', np.str_, 'strings'), ) def test_pack_scalars_specific_dtype(self, scalar, dtype, expected_payload): tensor = tensor_utils.pack_tensor(scalar, dtype) self.assertEqual([], tensor.shape) self.assertEqual([scalar], getattr(tensor, expected_payload).array) def test_pack_with_dm_env_rpc_data_type(self): tensor = tensor_utils.pack_tensor([5], dm_env_rpc_pb2.DataType.FLOAT) self.assertEqual([5], 
tensor.floats.array) @parameterized.parameters( ([np.int8(-25), np.int8(-23)], '2b', 'int8s'), ([np.uint8(249), np.uint8(250)], '2B', 'uint8s'), ) def test_pack_bytes_array(self, scalar, fmt, expected_payload): tensor = tensor_utils.pack_tensor(scalar) self.assertEqual([2], tensor.shape) actual = struct.unpack(fmt, getattr(tensor, expected_payload).array) np.testing.assert_array_equal(scalar, actual) @parameterized.parameters( (np.array([1.0, 2.0], dtype=np.float32), 'floats'), (np.array([1.0, 2.0], dtype=np.float64), 'doubles'), ([1.0, 2.0], 'doubles'), (np.array([1, 2], dtype=np.int32), 'int32s'), (np.array([1, 2], dtype=np.int64), 'int64s'), (np.array([1, 2], dtype=np.uint32), 'uint32s'), (np.array([1, 2], dtype=np.uint64), 'uint64s'), ([True, False], 'bools'), (np.array([True, False]), 'bools'), (['foo', 'bar'], 'strings'), ) def test_pack_arrays(self, array, expected_payload): tensor = tensor_utils.pack_tensor(array) self.assertEqual([2], tensor.shape) packed_array = getattr(tensor, expected_payload).array np.testing.assert_array_equal(array, packed_array) @parameterized.parameters( ([], None, 'doubles'), ([], np.int64, 'int64s'), ([1, 2, 3], None, 'int64s'), ([1, 2, 3], np.int32, 'int32s'), ) def test_pack_override_dtype(self, value, dtype, expected_payload): tensor = tensor_utils.pack_tensor(value, dtype=dtype) array = np.asarray(value, dtype) self.assertEqual(expected_payload, tensor.WhichOneof('payload')) packed_array = getattr(tensor, expected_payload).array np.testing.assert_array_equal(array, packed_array) def test_pack_proto_arrays(self): array = np.array([ struct_pb2.Value(string_value=message) for message in ['foo', 'bar'] ]) tensor = tensor_utils.pack_tensor(array) self.assertEqual([2], tensor.shape) unpacked = struct_pb2.Value() tensor.protos.array[0].Unpack(unpacked) self.assertEqual(array[0], unpacked) tensor.protos.array[1].Unpack(unpacked) self.assertEqual(array[1], unpacked) def test_pack_mixed_proto_array_fails(self): with self.assertRaisesRegex(ValueError, 'not recognized'): tensor_utils.pack_tensor(np.array([struct_pb2.Value(), 1, 2, 3])) def test_packed_rowmajor(self): array2d = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.int32) tensor = tensor_utils.pack_tensor(array2d) self.assertEqual([3, 2], tensor.shape) np.testing.assert_array_equal([1, 2, 3, 4, 5, 6], tensor.int32s.array) def test_mixed_scalar_types_raises_exception(self): with self.assertRaises(TypeError): tensor_utils.pack_tensor(['hello!', 75], dtype=np.float32) def test_jagged_arrays_throw_exceptions(self): with self.assertRaises(ValueError): tensor_utils.pack_tensor([[1, 2], [3, 4, 5]]) @parameterized.parameters( (['foo', 'bar'], np.str_), ('baz', dm_env_rpc_pb2.DataType.STRING), (['foobar'], np.array(['foobar']).dtype), ) def test_np_object_strings(self, value, dtype): object_array = np.array(value, dtype=object) tensor = tensor_utils.pack_tensor(object_array, dtype=dtype) self.assertEqual(list(object_array.shape), tensor.shape) self.assertTrue(tensor.HasField('strings')) def test_np_object_strings_no_dtype_raises_exception(self): with self.assertRaises(ValueError): tensor_utils.pack_tensor(np.array(['foo'], dtype=object)) @parameterized.parameters( (['foo', 42, 'bar'],), ([1, 2, 3],), ) def test_np_object_to_strings_fail(self, bad_element): with self.assertRaisesRegex(TypeError, 'not all elements are Python string types'): tensor_utils.pack_tensor( np.array(bad_element, dtype=object), dtype=np.str_) def test_class_instance_throw_exception(self): class Foo(object): pass with 
self.assertRaises(ValueError): tensor_utils.pack_tensor(Foo()) def test_compress_integers_to_1_element_when_all_same(self): array = np.array([1, 1, 1, 1, 1, 1], dtype=np.uint32) packed = tensor_utils.pack_tensor(array, try_compress=True) self.assertEqual([6], packed.shape) self.assertEqual([1], packed.uint32s.array) def test_compress_floats_to_1_element_when_all_same(self): array = np.array([1.5, 1.5, 1.5, 1.5, 1.5, 1.5], dtype=np.float32) packed = tensor_utils.pack_tensor(array, try_compress=True) self.assertEqual([6], packed.shape) self.assertEqual([1.5], packed.floats.array) def test_compress_strings_to_1_element_when_all_same(self): array = np.array(['foo', 'foo', 'foo', 'foo'], dtype=np.str_) packed = tensor_utils.pack_tensor(array, try_compress=True) self.assertEqual([4], packed.shape) self.assertEqual(['foo'], packed.strings.array) def test_compress_multidimensional_arrays_to_1_element_when_all_same(self): array = np.array([[4, 4], [4, 4]], dtype=np.int32) packed = tensor_utils.pack_tensor(array, try_compress=True) self.assertEqual([2, 2], packed.shape) self.assertEqual([4], packed.int32s.array) def test_doesnt_compress_if_not_asked_to(self): array = np.array([1, 1, 1, 1, 1, 1], dtype=np.uint32) packed = tensor_utils.pack_tensor(array) self.assertEqual([6], packed.shape) self.assertEqual([1, 1, 1, 1, 1, 1], packed.uint32s.array) def test_ask_to_compress_but_cant(self): array = np.array([1, 1, 2, 1, 1, 1], dtype=np.uint32) packed = tensor_utils.pack_tensor(array, try_compress=True) self.assertEqual([6], packed.shape) self.assertEqual([1, 1, 2, 1, 1, 1], packed.uint32s.array) class UnpackTensorTests(parameterized.TestCase): @parameterized.parameters( np.float32(2.5), np.float64(2.5), np.int8(-25), np.int32(-25), np.int64(-25), np.uint8(250), np.frombuffer(b'\xF0\xF1\xF2\xF3', np.uint32)[0], np.frombuffer(b'\xF0\xF1\xF2\xF3\xF4\xF5\xF6\xF7', np.uint64)[0], True, False, 'foo', ) def test_unpack_scalars(self, scalar): tensor = tensor_utils.pack_tensor(scalar) round_trip = tensor_utils.unpack_tensor(tensor) self.assertEqual(scalar, round_trip) def test_unpack_scalar_proto(self): scalar = struct_pb2.Value(string_value='my message') tensor = tensor_utils.pack_tensor(scalar) unpacked = struct_pb2.Value() tensor_utils.unpack_tensor(tensor).Unpack(unpacked) self.assertEqual(scalar, unpacked) @parameterized.parameters( ([np.float32(2.5), np.float32(3.5)],), ([np.float64(2.5), np.float64(3.5)],), ([np.int8(-25), np.int8(-23)],), ([np.int32(-25), np.int32(-23)],), ([np.int64(-25), np.int64(-23)],), ([np.uint8(250), np.uint8(249)],), ([np.uint32(1), np.uint32(2)],), ([np.uint64(1), np.uint64(2)],), ([True, False],), (['foo', 'bar'],), ) def test_unpack_arrays(self, array): tensor = tensor_utils.pack_tensor(array) round_trip = tensor_utils.unpack_tensor(tensor) np.testing.assert_array_equal(array, round_trip) def test_unpack_proto_arrays(self): array = np.array([ struct_pb2.Value(string_value=message) for message in ['foo', 'bar'] ]) tensor = tensor_utils.pack_tensor(array) round_trip = tensor_utils.unpack_tensor(tensor) unpacked = struct_pb2.Value() round_trip[0].Unpack(unpacked) self.assertEqual(array[0], unpacked) round_trip[1].Unpack(unpacked) self.assertEqual(array[1], unpacked) def test_unpack_multidimensional_arrays(self): tensor = dm_env_rpc_pb2.Tensor() tensor.floats.array[:] = [1, 2, 3, 4, 5, 6, 7, 8] tensor.shape[:] = [2, 4] round_trip = tensor_utils.unpack_tensor(tensor) expected = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) np.testing.assert_array_equal(expected, round_trip) def 
test_too_few_elements(self): tensor = dm_env_rpc_pb2.Tensor() tensor.floats.array[:] = [1, 2, 3, 4] tensor.shape[:] = [2, 4] with self.assertRaisesRegex(ValueError, 'cannot reshape array'): tensor_utils.unpack_tensor(tensor) def test_too_many_elements(self): tensor = dm_env_rpc_pb2.Tensor() tensor.floats.array[:] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] tensor.shape[:] = [2, 4] with self.assertRaisesRegex(ValueError, 'cannot reshape array'): tensor_utils.unpack_tensor(tensor) def test_float_broadcasts_1_element_to_all_elements(self): tensor = dm_env_rpc_pb2.Tensor() tensor.floats.array[:] = [1] tensor.shape[:] = [4] unpacked = tensor_utils.unpack_tensor(tensor) expected = np.array([1, 1, 1, 1], dtype=np.float32) np.testing.assert_array_equal(expected, unpacked) def test_integer_broadcasts_1_element_to_all_elements(self): tensor = dm_env_rpc_pb2.Tensor() tensor.int32s.array[:] = [1] tensor.shape[:] = [4] unpacked = tensor_utils.unpack_tensor(tensor) expected = np.array([1, 1, 1, 1], dtype=np.int32) np.testing.assert_array_equal(expected, unpacked) def test_unsigned_integer_broadcasts_1_element_to_all_elements(self): tensor = dm_env_rpc_pb2.Tensor() tensor.uint8s.array = b'\x01' tensor.shape[:] = [4] unpacked = tensor_utils.unpack_tensor(tensor) expected = np.array([1, 1, 1, 1], dtype=np.uint8) np.testing.assert_array_equal(expected, unpacked) def test_string_broadcasts_1_element_to_all_elements(self): tensor = dm_env_rpc_pb2.Tensor() tensor.strings.array[:] = ['foo'] tensor.shape[:] = [4] unpacked = tensor_utils.unpack_tensor(tensor) expected = np.array(['foo', 'foo', 'foo', 'foo'], dtype=np.str_) np.testing.assert_array_equal(expected, unpacked) def test_broadcasts_to_multidimensional_arrays(self): tensor = dm_env_rpc_pb2.Tensor() tensor.int32s.array[:] = [4] tensor.shape[:] = [2, 2] unpacked = tensor_utils.unpack_tensor(tensor) expected = np.array([[4, 4], [4, 4]], dtype=np.int32) np.testing.assert_array_equal(expected, unpacked) def test_negative_dimension(self): tensor = dm_env_rpc_pb2.Tensor() tensor.int32s.array[:] = [1, 2, 3, 4] tensor.shape[:] = [-1] unpacked = tensor_utils.unpack_tensor(tensor) expected = np.array([1, 2, 3, 4], dtype=np.int32) np.testing.assert_array_equal(expected, unpacked) def test_negative_dimension_in_matrix(self): tensor = dm_env_rpc_pb2.Tensor() tensor.int32s.array[:] = [1, 2, 3, 4, 5, 6] tensor.shape[:] = [2, -1] unpacked = tensor_utils.unpack_tensor(tensor) expected = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32) np.testing.assert_array_equal(expected, unpacked) def test_two_negative_dimensions_in_matrix(self): tensor = dm_env_rpc_pb2.Tensor() tensor.int32s.array[:] = [1, 2, 3, 4, 5, 6] tensor.shape[:] = [-1, -2] with self.assertRaisesRegex(ValueError, 'one unknown dimension'): tensor_utils.unpack_tensor(tensor) def test_negative_dimension_single_element(self): tensor = dm_env_rpc_pb2.Tensor() tensor.int32s.array[:] = [1] tensor.shape[:] = [-1] unpacked = tensor_utils.unpack_tensor(tensor) expected = np.array([1], dtype=np.int32) np.testing.assert_array_equal(expected, unpacked) def test_unknown_type_raises_error(self): tensor = mock.MagicMock() tensor.WhichOneof.return_value = 'foo' with self.assertRaisesRegex(TypeError, 'type foo'): tensor_utils.unpack_tensor(tensor) def test_scalar_with_too_many_elements_raises_error(self): tensor = dm_env_rpc_pb2.Tensor() tensor.int32s.array[:] = [1, 2, 3] with self.assertRaisesRegex(ValueError, '3 element'): tensor_utils.unpack_tensor(tensor) class GetTensorTypeTests(absltest.TestCase): def test_float(self): tensor = 
tensor_utils.pack_tensor(1.25) self.assertEqual(np.float64, tensor_utils.get_tensor_type(tensor)) def test_unknown_tensor_type(self): mock_tensor = mock.MagicMock() mock_tensor.WhichOneof.return_value = 'foo' with self.assertRaisesRegex(TypeError, 'foo'): tensor_utils.get_tensor_type(mock_tensor) class DataTypeToNpTypeTests(absltest.TestCase): def test_float(self): self.assertEqual( np.float32, tensor_utils.data_type_to_np_type(dm_env_rpc_pb2.DataType.FLOAT)) def test_empty_object_list(self): tensor = tensor_utils.pack_tensor(np.array([], dtype=object)) self.assertEqual([0], tensor.shape) def test_unknown_type(self): with self.assertRaises(TypeError): tensor_utils.data_type_to_np_type(30) # pytype: disable=wrong-arg-types class NpTypeToDataTypeTests(absltest.TestCase): def test_float32(self): self.assertEqual( dm_env_rpc_pb2.DataType.FLOAT, tensor_utils.np_type_to_data_type(np.float32)) def test_int32(self): self.assertEqual( dm_env_rpc_pb2.DataType.INT32, tensor_utils.np_type_to_data_type(np.int32)) def test_dtype(self): self.assertEqual( dm_env_rpc_pb2.DataType.INT32, tensor_utils.np_type_to_data_type(np.dtype(np.int32))) def test_unknown_type(self): with self.assertRaisesRegex(TypeError, 'dm_env_rpc DataType.*complex64'): tensor_utils.np_type_to_data_type(np.complex64) class GetPackerTests(absltest.TestCase): def test_cannot_get_packer_for_invalid_type(self): with self.assertRaisesRegex(TypeError, 'complex64'): tensor_utils.get_packer(np.complex64) def test_can_pack(self): packer = tensor_utils.get_packer(np.int32) tensor = dm_env_rpc_pb2.Tensor() packer.pack(tensor, np.asarray([1, 2, 3], dtype=np.int32)) self.assertEqual([1, 2, 3], tensor.int32s.array) def test_can_unpack(self): packer = tensor_utils.get_packer(np.int32) tensor = dm_env_rpc_pb2.Tensor() tensor.int32s.array[:] = [1, 2, 3] np.testing.assert_array_equal([1, 2, 3], packer.unpack(tensor)) if __name__ == '__main__': absltest.main()
dm_env_rpc-master
dm_env_rpc/v1/tensor_utils_test.py
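The core round trip the tests above exercise, in sketch form:

import numpy as np

from dm_env_rpc.v1 import tensor_utils

array = np.array([[1, 2], [3, 4]], dtype=np.int32)

# Pack flattens the array row-major into the int32s payload and records shape.
tensor = tensor_utils.pack_tensor(array)
assert list(tensor.shape) == [2, 2]
assert list(tensor.int32s.array) == [1, 2, 3, 4]

# Unpack restores the original dtype and shape.
round_trip = tensor_utils.unpack_tensor(tensor)
np.testing.assert_array_equal(array, round_trip)

# Scalars pack to an empty shape and unpack back to scalar values.
assert tensor_utils.unpack_tensor(tensor_utils.pack_tensor('foo')) == 'foo'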
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A helper class to manage a connection to a dm_env_rpc server.

This helper class allows sending the Request message types and receiving the
Response message types without wrapping in an EnvironmentRequest or unwrapping
from an EnvironmentResponse. It also turns error messages into exceptions.

For most calls (such as create, join, etc.):

    with connection.Connection(grpc_channel) as channel:
      create_response = channel.send(dm_env_rpc_pb2.CreateWorldRequest(
          settings={'players': tensor_utils.pack_tensor(5)}))

For the `extension` message type, you must send an Any proto and you'll get
back an Any proto. It is up to you to wrap and unwrap these to concrete proto
types that you know how to handle.

    with connection.Connection(grpc_channel) as channel:
      request = struct_pb2.Struct()
      ...
      request_any = any_pb2.Any()
      request_any.Pack(request)
      response_any = channel.send(request_any)
      response = my_type_pb2.MyType()
      response_any.Unpack(response)

Any errors encountered in the EnvironmentResponse are turned into Python
exceptions, so explicit error handling code isn't needed per call.
"""

import queue
import sys
from typing import Optional, Sequence, Tuple

import grpc

from dm_env_rpc.v1 import dm_env_rpc_pb2
from dm_env_rpc.v1 import dm_env_rpc_pb2_grpc
from dm_env_rpc.v1 import message_utils

# pylint: disable=g-import-not-at-top
if sys.version_info < (3, 8):
  from typing_extensions import Protocol
else:
  from typing import Protocol
# pylint: enable=g-import-not-at-top

Metadata = Sequence[Tuple[str, str]]


class ConnectionType(Protocol):
  """Connection protocol definition for interacting with dm_env_rpc servers."""

  def send(
      self, request: message_utils.DmEnvRpcRequest
  ) -> message_utils.DmEnvRpcResponse:
    """Blocking call to send and receive a message from a dm_env_rpc server."""

  def close(self):
    """Closes the connection. Call when the connection is no longer needed."""


class StreamReaderWriter(object):
  """Helper class for reading/writing gRPC streams."""

  def __init__(self,
               stub: dm_env_rpc_pb2_grpc.EnvironmentStub,
               metadata: Optional[Metadata] = None):
    self._requests = queue.Queue()
    self._stream = stub.Process(
        iter(self._requests.get, None), metadata=metadata)

  def write(self, request: dm_env_rpc_pb2.EnvironmentRequest):
    """Asynchronously sends `request` to the stream."""
    self._requests.put(request)

  def read(self) -> dm_env_rpc_pb2.EnvironmentResponse:
    """Returns the response from the stream. Blocking."""
    return next(self._stream)


class Connection(object):
  """A helper class for interacting with dm_env_rpc servers."""

  def __init__(self,
               channel: grpc.Channel,
               metadata: Optional[Metadata] = None):
    """Manages a connection to a dm_env_rpc server.

    Args:
      channel: A grpc channel to connect to the dm_env_rpc server over.
      metadata: Optional sequence of 2-tuples, sent to the gRPC server as
        metadata.
    """
    self._stream = StreamReaderWriter(
        dm_env_rpc_pb2_grpc.EnvironmentStub(channel), metadata)

  def send(
      self, request: message_utils.DmEnvRpcRequest
  ) -> message_utils.DmEnvRpcResponse:
    """Sends the given request to the dm_env_rpc server and returns the response.

    The request should be an instance of one of the dm_env_rpc Request
    messages, such as CreateWorldRequest. Based on the type, the correct
    payload for the EnvironmentRequest will be constructed and sent to the
    dm_env_rpc server.

    Blocks until the server sends back its response.

    Args:
      request: An instance of a dm_env_rpc Request type, such as
        CreateWorldRequest.

    Returns:
      The response the dm_env_rpc server returned for the given RPC call,
      unwrapped from the EnvironmentResponse message. For instance, if
      `request` had type `CreateWorldRequest` this returns a message of type
      `CreateWorldResponse`.

    Raises:
      DmEnvRpcError: The dm_env_rpc server responded to the request with an
        error.
      ValueError: The dm_env_rpc server responded to the request with an
        unexpected response message.
    """
    environment_request, field_name = (
        message_utils.pack_environment_request(request))
    self._stream.write(environment_request)
    return message_utils.unpack_environment_response(self._stream.read(),
                                                     field_name)

  def close(self):
    """Closes the connection. Call when the connection is no longer needed."""
    if self._stream:
      self._stream = None

  def __exit__(self, *args, **kwargs):
    self.close()

  def __enter__(self):
    return self


def create_secure_channel_and_connect(
    server_address: str,
    credentials: grpc.ChannelCredentials = grpc.local_channel_credentials(),
    timeout: Optional[float] = None) -> Connection:
  """Creates a secure channel from server address and credentials and connects.

  We allow the created channel to have un-bounded message lengths, to support
  large observations.

  Args:
    server_address: URI server address to connect to.
    credentials: gRPC credentials necessary to connect to the server.
    timeout: Optional timeout in seconds to wait for the channel to be ready.
      Defaults to waiting indefinitely.

  Returns:
    An instance of dm_env_rpc.Connection, where the channel is closed when the
    connection is closed.
  """
  options = [('grpc.max_send_message_length', -1),
             ('grpc.max_receive_message_length', -1)]
  channel = grpc.secure_channel(server_address, credentials, options=options)
  grpc.channel_ready_future(channel).result(timeout)

  class _ConnectionWrapper(Connection):
    """Utility to ensure the channel is closed when the connection is closed."""

    def __init__(self, channel):
      super().__init__(channel)
      self._channel = channel

    def __del__(self):
      self.close()

    def close(self):
      super().close()
      self._channel.close()

  return _ConnectionWrapper(channel)
dm_env_rpc-master
dm_env_rpc/v1/connection.py
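End to end, the helpers above are typically used like this; the server address and the settings key are placeholders:

from dm_env_rpc.v1 import connection as dm_env_rpc_connection
from dm_env_rpc.v1 import dm_env_rpc_pb2
from dm_env_rpc.v1 import error
from dm_env_rpc.v1 import tensor_utils

with dm_env_rpc_connection.create_secure_channel_and_connect(
    'localhost:9001') as conn:
  try:
    response = conn.send(
        dm_env_rpc_pb2.CreateWorldRequest(
            settings={'seed': tensor_utils.pack_tensor(42)}))
    print('Created world:', response.world_name)
  except error.DmEnvRpcError as e:
    # Server-side errors surface as DmEnvRpcError exceptions.
    print('Server rejected the request:', e.message)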
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Helper Python utilities for bridging dm_env_rpc and NumPy. Note that the Tensor proto payload type is not supported, as it doesn't play well with NumPy. """ import abc from typing import Optional, Type, Union import numpy as np from google.protobuf import any_pb2 from google.protobuf import message from dm_env_rpc.v1 import dm_env_rpc_pb2 def _pack_any_proto(value): """Helper function to pack Any proto, iff it's not already packed.""" if isinstance(value, any_pb2.Any): return value elif isinstance(value, message.Message): any_proto = any_pb2.Any() any_proto.Pack(value) return any_proto else: # If we reach this exception, it is normally because the type being packed # is not supported. Raise exception with some typical examples. raise ValueError("Trying to pack an Any proto with a type that's not " f"recognized! Type: {type(value)}, value: '{value}'. " 'Is the value a jagged iterable? Is the data type not a ' 'supported primitive type like strings, floats, integers ' 'or protobuf messages? Are all elements in the array the ' 'same type?') TensorOrTensorSpecValue = Union[dm_env_rpc_pb2.Tensor, dm_env_rpc_pb2.TensorSpec.Value] class Packer(metaclass=abc.ABCMeta): """Converts between proto messages and NumPy arrays.""" def __init__(self, name: str, np_type: np.dtype): self._np_type = np_type self._name = name @property def name(self) -> str: return self._name @property def np_type(self) -> np.dtype: return self._np_type @abc.abstractmethod def pack(self, proto: TensorOrTensorSpecValue, value: np.ndarray): """Flattens and stores the given `value` array in the given `proto`.""" @abc.abstractmethod def unpack(self, proto: TensorOrTensorSpecValue) -> np.ndarray: """Retrieves a flat NumPy array for the payload from the given `proto`.""" class _RepeatedFieldPacker(Packer): """Handles packing and unpacking most data types.""" def pack(self, proto: TensorOrTensorSpecValue, value: np.ndarray): payload = getattr(proto, self._name) payload.array.extend(value.ravel().tolist()) def unpack(self, proto: TensorOrTensorSpecValue) -> np.ndarray: payload = getattr(proto, self._name) return np.fromiter(payload.array, self.np_type, len(payload.array)) class _BytesPacker(Packer): """Handles packing and unpacking int8 and uint8 arrays.""" def pack(self, proto: TensorOrTensorSpecValue, value: np.ndarray): payload = getattr(proto, self.name) payload.array = value.tobytes() def unpack(self, proto: TensorOrTensorSpecValue) -> np.ndarray: payload = getattr(proto, self.name) return np.frombuffer(payload.array, self.np_type) class _RepeatedStringFieldPacker(Packer): """Handles packing and unpacking strings.""" def pack(self, proto: TensorOrTensorSpecValue, value: np.ndarray): payload = getattr(proto, self.name) payload.array.extend(value.ravel().tolist()) def unpack(self, proto: TensorOrTensorSpecValue) -> np.ndarray: # String arrays with 
variable length strings can't be created with # np.fromiter, unlike other dtypes. payload = getattr(proto, self.name) return np.array(payload.array, self.np_type) class _RepeatedProtoFieldPacker(Packer): """Handles packing of protos.""" def pack(self, proto: TensorOrTensorSpecValue, value: np.ndarray): payload = getattr(proto, self.name) payload.array.extend( [_pack_any_proto(sub_value) for sub_value in value.ravel()]) def unpack(self, proto: TensorOrTensorSpecValue) -> np.ndarray: payload = getattr(proto, self._name) return np.array(payload.array, self.np_type) _PACKERS = ( _RepeatedFieldPacker('floats', np.dtype(np.float32)), _RepeatedFieldPacker('doubles', np.dtype(np.float64)), _BytesPacker('int8s', np.dtype(np.int8)), _RepeatedFieldPacker('int32s', np.dtype(np.int32)), _RepeatedFieldPacker('int64s', np.dtype(np.int64)), _BytesPacker('uint8s', np.dtype(np.uint8)), _RepeatedFieldPacker('uint32s', np.dtype(np.uint32)), _RepeatedFieldPacker('uint64s', np.dtype(np.uint64)), _RepeatedFieldPacker('bools', np.dtype(bool)), _RepeatedStringFieldPacker('strings', np.dtype(str)), _RepeatedProtoFieldPacker('protos', np.dtype(object)), ) _NAME_TO_NP_TYPE = { packer.name: packer.np_type for packer in _PACKERS } _TYPE_TO_PACKER = {packer.np_type: packer for packer in _PACKERS} _DM_ENV_RPC_DTYPE_TO_NUMPY_DTYPE = { dm_env_rpc_pb2.DataType.FLOAT: np.dtype(np.float32), dm_env_rpc_pb2.DataType.DOUBLE: np.dtype(np.float64), dm_env_rpc_pb2.DataType.INT8: np.dtype(np.int8), dm_env_rpc_pb2.DataType.INT32: np.dtype(np.int32), dm_env_rpc_pb2.DataType.INT64: np.dtype(np.int64), dm_env_rpc_pb2.DataType.UINT8: np.dtype(np.uint8), dm_env_rpc_pb2.DataType.UINT32: np.dtype(np.uint32), dm_env_rpc_pb2.DataType.UINT64: np.dtype(np.uint64), dm_env_rpc_pb2.DataType.BOOL: np.dtype(bool), dm_env_rpc_pb2.DataType.STRING: np.dtype(str), dm_env_rpc_pb2.DataType.PROTO: np.dtype(object), } _NUMPY_DTYPE_TO_DM_ENV_RPC_DTYPE = { **{value: key for key, value in _DM_ENV_RPC_DTYPE_TO_NUMPY_DTYPE.items()}, # Legacy support for numpy built-in types (no longer recommended as of # release 1.20.0 - # https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations) np.bool_: dm_env_rpc_pb2.DataType.BOOL, np.str_: dm_env_rpc_pb2.DataType.STRING, np.object_: dm_env_rpc_pb2.DataType.PROTO, } def get_tensor_type(tensor_proto: dm_env_rpc_pb2.Tensor) -> np.dtype: """Returns the NumPy type for the given tensor.""" payload = tensor_proto.WhichOneof('payload') np_type = _NAME_TO_NP_TYPE.get(payload) if not np_type: raise TypeError(f'Unknown NumPy type {payload}') return np_type def data_type_to_np_type(dm_env_rpc_dtype: dm_env_rpc_pb2.DataType) -> np.dtype: """Returns the NumPy type for the given dm_env_rpc DataType.""" np_type = _DM_ENV_RPC_DTYPE_TO_NUMPY_DTYPE.get(dm_env_rpc_dtype) if not np_type: raise TypeError(f'Unknown DataType {dm_env_rpc_dtype}') return np_type def np_type_to_data_type( np_type: Union[np.dtype, Type[np.generic]] ) -> dm_env_rpc_pb2.DataType: """Returns the dm_env_rpc DataType for the given NumPy type.""" data_type = _NUMPY_DTYPE_TO_DM_ENV_RPC_DTYPE.get(np.dtype(np_type)) if data_type is None: raise TypeError( f'No dm_env_rpc DataType corresponds to NumPy type "{np_type}"') return data_type def get_packer(np_type: Union[np.dtype, Type[np.generic]]) -> Packer: """Retrieves the `Packer` which can handle the given NumPy Type. Note: The returned packer is a relatively low level mechanism to convert between NumPy arrays and the repeated `payload` fields in dm_env_rpc messages. It won't set shape or type on the proto message. 
Instead of this packer, generally you should use `pack_tensor` and `unpack_tensor` to pack and unpack data to `Tensor` messages, as it will handle setting shape and type on the `Tensor` message as well. Args: np_type: The NumPy data type to retrieve a packer for. eg: np.int32. Returns: An instance of Packer which will handle conversion between NumPy arrays of `np_type` and the corresponding payload field in the dm_env_rpc message. Raises: TypeError: If the provided NumPy type has no known packer. """ packer = _TYPE_TO_PACKER.get(np.dtype(np_type)) if not packer: raise TypeError(f'Unknown NumPy type "{np_type}" has no known packer.') return packer def reshape_array(array: np.ndarray, shape): """Reshapes `array` to the given `shape` using dm_env_rpc's rules.""" if shape: if len(array) == 1: array = np.full(np.maximum(shape, 1), array[0]) else: array.shape = shape return array else: length = len(array) if length != 1: raise ValueError( 'Scalar tensors must have exactly 1 element but had {} elements.' .format(length)) return array[0] def unpack_proto(proto: TensorOrTensorSpecValue) -> np.ndarray: """Converts a proto with payload oneof to a scalar or NumPy array. Args: proto: A dm_env_rpc proto with payload oneof. Returns: Returns a NumPy array of the payload with the correct type. """ np_type = get_tensor_type(proto) packer = get_packer(np_type) return packer.unpack(proto) def unpack_tensor(tensor_proto: dm_env_rpc_pb2.Tensor): """Converts a Tensor proto to a scalar or NumPy array. Args: tensor_proto: A dm_env_rpc Tensor protobuf. Returns: If the provided tensor_proto has a non-empty `shape` attribute, returns a NumPy array of the payload with the correct type and shape. If the `shape` attribute is empty, returns a scalar (float, int, string, etc.) of the correct type and value. """ array = unpack_proto(tensor_proto) return reshape_array(array, tensor_proto.shape) def pack_tensor( value, dtype: Optional[ Union[np.dtype, Type[np.generic], 'dm_env_rpc_pb2.DataType'] ] = None, try_compress=False, ) -> dm_env_rpc_pb2.Tensor: """Encodes the given value as a tensor. Args: value: A scalar (float, int, string, etc.), protobuf message, NumPy array, or nested lists. dtype: The type to pack the data to. If set to None, will attempt to detect the correct type automatically. Either a dm_env_rpc DataType enum or NumPy type is acceptable. try_compress: A bool, whether to try and encode the tensor in less space or not. This will increase the computational cost of the packing, but may reduce the on-the-wire size of the tensor. There are no guarantees that any compression will actually happen. Raises: ValueError: If `value` is a jagged array, not a primitive type, nested iterable of primitive types or protobuf messages, or all elements can't be cast to the same type or the requested type. Returns: A dm_env_rpc Tensor proto containing the data. """ packed = dm_env_rpc_pb2.Tensor() value = np.asarray(value) if value.dtype == object: # Because Numpy doesn't truly support variable-length string arrays, users # tend to use arrays of Numpy objects instead. Iff a user provides an array # of objects and a string dtype argument, automatically convert the value to # an array of strings. if np.issubdtype( _DM_ENV_RPC_DTYPE_TO_NUMPY_DTYPE.get(dtype, dtype), np.str_): for item in value.flat: if not isinstance(item, str): raise TypeError(f'Requested string dtype but not all elements are ' 'Python string types. 
At least one element was ' f'{type(item)}.') value = np.array(value, dtype=np.str_) elif dtype is not None: # NumPy defaults to np.float64 dtype when calling np.asarray() on an empty # array. Allow unsafe casting in this particular case. value = value.astype( dtype=_DM_ENV_RPC_DTYPE_TO_NUMPY_DTYPE.get(dtype, dtype), copy=False, casting='same_kind' if value.size else 'unsafe') packed.shape[:] = value.shape packer = get_packer(value.dtype.type) if (try_compress and np.all(value == next(value.flat))): # All elements are the same. Pack in to a single value. packer.pack(packed, value.ravel()[0:1]) else: packer.pack(packed, value) return packed
dm_env_rpc-master
dm_env_rpc/v1/tensor_utils.py
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """An implementation of a dm_env environment using dm_env_rpc.""" from typing import Any, Mapping, NamedTuple, Optional, Sequence import dm_env import immutabledict from dm_env_rpc.v1 import connection as dm_env_rpc_connection from dm_env_rpc.v1 import dm_env_flatten_utils from dm_env_rpc.v1 import dm_env_rpc_pb2 from dm_env_rpc.v1 import dm_env_utils from dm_env_rpc.v1 import error from dm_env_rpc.v1 import spec_manager from dm_env_rpc.v1 import tensor_utils # Default observation names for common RL concepts. By default the dm_env # wrapper will use these for reward and discount if available, but this behavior # can be overridden. DEFAULT_REWARD_KEY = 'reward' DEFAULT_DISCOUNT_KEY = 'discount' # Default key separator, used in flattening/unflattening nested structures. DEFAULT_KEY_SEPARATOR = '.' # Format string for error message used in DmEnvAdaptor.Reset _RESET_ENVIRONMENT_ERROR = r'''Environment changed spec after reset. before: "{specs}" after: "{new_specs}"''' class DmEnvAdaptor(dm_env.Environment): """An implementation of dm_env using dm_env_rpc as the data protocol. Users can also optionally provide a mapping of objects to DmEnvAdaptor attributes. This is to accommodate user-created protocol extensions that compliment the core protocol. """ # Disable pytype attribute checking for dynamically created extension attrs. _HAS_DYNAMIC_ATTRIBUTES = True def __init__( self, connection: dm_env_rpc_connection.ConnectionType, specs: dm_env_rpc_pb2.ActionObservationSpecs, requested_observations: Optional[Sequence[str]] = None, nested_tensors: bool = True, extensions: Mapping[str, Any] = immutabledict.immutabledict()): """Initializes the environment with the provided dm_env_rpc connection. Args: connection: An instance of ConnectionType already connected to a dm_env_rpc server and after a successful JoinWorldRequest has been sent. specs: A dm_env_rpc ActionObservationSpecs message for the environment. requested_observations: List of observation names to be requested from the environment when step is called. If None is specified then all observations will be requested. nested_tensors: Boolean to determine whether to flatten/unflatten tensors. extensions: Mapping of extension instances to DmEnvAdaptor attributes. Raises ValueError if attribute already exists. 
""" self._dm_env_rpc_specs = specs self._action_specs = spec_manager.SpecManager(specs.actions) self._observation_specs = spec_manager.SpecManager(specs.observations) self._connection = connection self._last_state = dm_env_rpc_pb2.EnvironmentStateType.TERMINATED self._nested_tensors = nested_tensors if requested_observations is None: requested_observations = self._observation_specs.names() self._is_reward_requested = False self._is_discount_requested = False else: self._is_reward_requested = DEFAULT_REWARD_KEY in requested_observations self._is_discount_requested = ( DEFAULT_DISCOUNT_KEY in requested_observations) requested_observations = set(requested_observations) self._default_reward_spec = None self._default_discount_spec = None if DEFAULT_REWARD_KEY in self._observation_specs.names(): self._default_reward_spec = dm_env_utils.tensor_spec_to_dm_env_spec( self._observation_specs.name_to_spec(DEFAULT_REWARD_KEY)) requested_observations.add(DEFAULT_REWARD_KEY) if DEFAULT_DISCOUNT_KEY in self._observation_specs.names(): self._default_discount_spec = ( dm_env_utils.tensor_spec_to_dm_env_spec( self._observation_specs.name_to_spec(DEFAULT_DISCOUNT_KEY))) requested_observations.add(DEFAULT_DISCOUNT_KEY) unsupported_observations = requested_observations.difference( self._observation_specs.names()) if unsupported_observations: raise ValueError('Unsupported observations requested: {}'.format( unsupported_observations)) self._requested_observation_uids = [ self._observation_specs.name_to_uid(name) for name in requested_observations ] # Not strictly necessary but it makes the unit tests deterministic. self._requested_observation_uids.sort() self._extension_names = extensions.keys() for extension_name, extension in extensions.items(): if hasattr(self, extension_name): raise ValueError( f'DmEnvAdaptor already has attribute "{extension_name}"!') setattr(self, extension_name, extension) def reset(self): """Implements dm_env.Environment.reset.""" reset_response = self._connection.send(dm_env_rpc_pb2.ResetRequest()) if self._dm_env_rpc_specs != reset_response.specs: raise RuntimeError(_RESET_ENVIRONMENT_ERROR.format( specs=self._dm_env_rpc_specs, new_specs=reset_response.specs)) self._last_state = dm_env_rpc_pb2.EnvironmentStateType.INTERRUPTED return self.step({}) def step(self, actions): """Implements dm_env.Environment.step.""" actions = dm_env_flatten_utils.flatten_dict( actions, DEFAULT_KEY_SEPARATOR) if self._nested_tensors else actions step_response = self._connection.send( dm_env_rpc_pb2.StepRequest( requested_observations=self._requested_observation_uids, actions=self._action_specs.pack(actions))) observations = self._observation_specs.unpack(step_response.observations) if (step_response.state == dm_env_rpc_pb2.EnvironmentStateType.RUNNING and self._last_state == dm_env_rpc_pb2.EnvironmentStateType.RUNNING): step_type = dm_env.StepType.MID elif step_response.state == dm_env_rpc_pb2.EnvironmentStateType.RUNNING: step_type = dm_env.StepType.FIRST elif self._last_state == dm_env_rpc_pb2.EnvironmentStateType.RUNNING: step_type = dm_env.StepType.LAST else: # Neither response.state nor _last_state is RUNNING. 
# See common causes for state transition errors: # https://github.com/deepmind/dm_env_rpc/blob/master/docs/v1/appendix.md#common-state-transition-errors raise RuntimeError('Environment transitioned from {} to {}'.format( dm_env_rpc_pb2.EnvironmentStateType.Name(self._last_state), dm_env_rpc_pb2.EnvironmentStateType.Name(step_response.state))) self._last_state = step_response.state reward = self.reward( state=step_response.state, step_type=step_type, observations=observations) discount = self.discount( state=step_response.state, step_type=step_type, observations=observations) if not self._is_reward_requested: observations.pop(DEFAULT_REWARD_KEY, None) if not self._is_discount_requested: observations.pop(DEFAULT_DISCOUNT_KEY, None) observations = dm_env_flatten_utils.unflatten_dict( observations, DEFAULT_KEY_SEPARATOR) if self._nested_tensors else observations return dm_env.TimeStep(step_type, reward, discount, observations) def reward(self, state, step_type, observations): """Returns the reward for the given observation state. Override in inherited classes to give different reward functions. Args: state: A dm_env_rpc EnvironmentStateType enum describing the state of the environment. step_type: The dm_env StepType describing the state of the environment. observations: The unpacked observations dictionary mapping string keys to scalars and NumPy arrays. Returns: A reward for the given step. The shape and type matches that returned by `self.reward_spec()`. """ if step_type == dm_env.StepType.FIRST: return None elif self._default_reward_spec: return observations[DEFAULT_REWARD_KEY] else: return 0.0 def discount(self, state, step_type, observations): """Returns the discount for the given observation state. Override in inherited classes to give different discount functions. Args: state: A dm_env_rpc EnvironmentStateType enum describing the state of the environment. step_type: The dm_env StepType describing the state of the environment. observations: The unpacked observations dictionary mapping string keys to scalars and NumPy arrays. Returns: The discount for the given step. The shape and type matches that returned by `self.discount_spec()`. 
""" if self._default_discount_spec: return observations[DEFAULT_DISCOUNT_KEY] if step_type == dm_env.StepType.FIRST: return None elif (state == dm_env_rpc_pb2.EnvironmentStateType.RUNNING or state == dm_env_rpc_pb2.EnvironmentStateType.INTERRUPTED): return 1.0 else: return 0.0 def observation_spec(self): """Implements dm_env.Environment.observation_spec.""" specs = {} for uid in self._requested_observation_uids: name = self._observation_specs.uid_to_name(uid) specs[name] = dm_env_utils.tensor_spec_to_dm_env_spec( self._observation_specs.uid_to_spec(uid)) if not self._is_reward_requested: specs.pop(DEFAULT_REWARD_KEY, None) if not self._is_discount_requested: specs.pop(DEFAULT_DISCOUNT_KEY, None) if self._nested_tensors: return dm_env_flatten_utils.unflatten_dict(specs, DEFAULT_KEY_SEPARATOR) else: return specs def action_spec(self): """Implements dm_env.Environment.action_spec.""" action_spec = dm_env_utils.dm_env_spec(self._action_specs) if self._nested_tensors: return dm_env_flatten_utils.unflatten_dict(action_spec, DEFAULT_KEY_SEPARATOR) else: return action_spec def reward_spec(self): """Implements dm_env.Environment.reward_spec.""" return (self._default_reward_spec or super(DmEnvAdaptor, self).reward_spec()) def discount_spec(self): """Implements dm_env.Environment.discount_spec.""" return (self._default_discount_spec or super(DmEnvAdaptor, self).discount_spec()) def close(self): """Implements dm_env.Environment.close.""" # Release any extensions associated with this EnvAdaptor: for extension_name in self._extension_names: setattr(self, extension_name, None) # Leaves the world if we were joined. If not, this will be a no-op anyway. if self._connection is not None: self._connection.send(dm_env_rpc_pb2.LeaveWorldRequest()) self._connection = None def create_world(connection: dm_env_rpc_connection.ConnectionType, create_world_settings: Mapping[str, Any]) -> str: """Helper function to create a world with the provided settings. Args: connection: An instance of Connection already connected to a dm_env_rpc server. create_world_settings: Settings used to create the world. Nested settings will be automatically flattened before sending to the server. Values must be packable into a Tensor proto or already packed. Returns: Created world name. """ create_world_settings = dm_env_flatten_utils.flatten_dict( create_world_settings, DEFAULT_KEY_SEPARATOR, strict=False ) create_world_settings = { key: (value if isinstance(value, dm_env_rpc_pb2.Tensor) else tensor_utils.pack_tensor(value)) for key, value in create_world_settings.items() } return connection.send( dm_env_rpc_pb2.CreateWorldRequest( settings=create_world_settings)).world_name def join_world( connection: dm_env_rpc_connection.ConnectionType, world_name: str, join_world_settings: Mapping[str, Any], **adaptor_kwargs, ) -> DmEnvAdaptor: """Helper function to join a world with the provided settings. Args: connection: An instance of Connection already connected to a dm_env_rpc server. world_name: Name of the world to join. join_world_settings: Settings used to join the world. Nested settings will be automatically flattened before sending to the server. Values must be packable into a Tensor message or already packed. **adaptor_kwargs: Additional keyword args used to create the DmEnvAdaptor instance. Returns: Instance of DmEnvAdaptor. 
""" join_world_settings = dm_env_flatten_utils.flatten_dict( join_world_settings, DEFAULT_KEY_SEPARATOR, strict=False ) join_world_settings = { key: (value if isinstance(value, dm_env_rpc_pb2.Tensor) else tensor_utils.pack_tensor(value)) for key, value in join_world_settings.items() } specs = connection.send( dm_env_rpc_pb2.JoinWorldRequest( world_name=world_name, settings=join_world_settings)).specs try: return DmEnvAdaptor(connection, specs, **adaptor_kwargs) except ValueError: connection.send(dm_env_rpc_pb2.LeaveWorldRequest()) raise class DmEnvAndWorldName(NamedTuple): """Environment and world_name created when calling create_and_join_world.""" env: DmEnvAdaptor world_name: str def create_and_join_world(connection: dm_env_rpc_connection.ConnectionType, create_world_settings: Mapping[str, Any], join_world_settings: Mapping[str, Any], **adaptor_kwargs) -> DmEnvAndWorldName: """Helper function to create and join a world with the provided settings. Args: connection: An instance of Connection already connected to a dm_env_rpc server. create_world_settings: Settings used to create the world. Values must be packable into a Tensor proto or already packed. join_world_settings: Settings used to join the world. Nested settings will be automatically flattened before sending to the server. Values must be packable into a Tensor message. **adaptor_kwargs: Additional keyword args used to create the DmEnvAdaptor instance. Returns: Tuple of DmEnvAdaptor and the created world name. """ world_name = create_world(connection, create_world_settings) try: return DmEnvAndWorldName( join_world(connection, world_name, join_world_settings, **adaptor_kwargs), world_name) except (error.DmEnvRpcError, ValueError): connection.send(dm_env_rpc_pb2.DestroyWorldRequest(world_name=world_name)) raise
dm_env_rpc-master
dm_env_rpc/v1/dm_env_adaptor.py
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for SpecManager class.""" from absl.testing import absltest import numpy as np from dm_env_rpc.v1 import dm_env_rpc_pb2 from dm_env_rpc.v1 import spec_manager from dm_env_rpc.v1 import tensor_utils _EXAMPLE_SPECS = { 54: dm_env_rpc_pb2.TensorSpec( name='fuzz', shape=[2], dtype=dm_env_rpc_pb2.DataType.FLOAT), 55: dm_env_rpc_pb2.TensorSpec( name='foo', shape=[3], dtype=dm_env_rpc_pb2.DataType.INT32), } class SpecManagerTests(absltest.TestCase): def setUp(self): super(SpecManagerTests, self).setUp() self._spec_manager = spec_manager.SpecManager(_EXAMPLE_SPECS) def test_specs_by_uid(self): self.assertDictEqual(_EXAMPLE_SPECS, self._spec_manager.specs_by_uid) def test_specs_by_name(self): expected = {'foo': _EXAMPLE_SPECS[55], 'fuzz': _EXAMPLE_SPECS[54]} self.assertDictEqual(expected, self._spec_manager.specs_by_name) def test_name_to_uid(self): self.assertEqual(55, self._spec_manager.name_to_uid('foo')) def test_name_to_uid_no_such_name(self): with self.assertRaisesRegex(KeyError, 'bar'): self._spec_manager.name_to_uid('bar') def test_name_to_spec(self): spec = self._spec_manager.name_to_spec('foo') self.assertEqual([3], spec.shape) def test_name_to_spec_no_such_name(self): with self.assertRaisesRegex(KeyError, 'bar'): self._spec_manager.name_to_spec('bar') def test_uid_to_name(self): self.assertEqual('foo', self._spec_manager.uid_to_name(55)) def test_uid_to_name_no_such_uid(self): with self.assertRaisesRegex(KeyError, '56'): self._spec_manager.uid_to_name(56) def test_names(self): self.assertEqual(set(['foo', 'fuzz']), self._spec_manager.names()) def test_uids(self): self.assertEqual(set([54, 55]), self._spec_manager.uids()) def test_uid_to_spec(self): spec = self._spec_manager.uid_to_spec(54) self.assertEqual([2], spec.shape) def test_pack(self): packed = self._spec_manager.pack({'fuzz': [1.0, 2.0], 'foo': [3, 4, 5]}) expected = { 54: tensor_utils.pack_tensor([1.0, 2.0], dtype=np.float32), 55: tensor_utils.pack_tensor([3, 4, 5], dtype=np.int32), } self.assertDictEqual(expected, packed) def test_partial_pack(self): packed = self._spec_manager.pack({ 'fuzz': [1.0, 2.0], }) expected = { 54: tensor_utils.pack_tensor([1.0, 2.0], dtype=np.float32), } self.assertDictEqual(expected, packed) def test_pack_unknown_key_raises_error(self): with self.assertRaisesRegex(KeyError, 'buzz'): self._spec_manager.pack({'buzz': 'hello'}) def test_pack_wrong_shape_raises_error(self): with self.assertRaisesRegex(ValueError, 'shape'): self._spec_manager.pack({'foo': [1, 2]}) def test_pack_wrong_dtype_raises_error(self): with self.assertRaisesRegex(TypeError, 'int32'): self._spec_manager.pack({'foo': 'hello'}) def test_pack_cast_float_to_int_raises_error(self): with self.assertRaisesRegex(TypeError, 'int32'): self._spec_manager.pack({'foo': [0.5, 1.0, 1]}) def test_pack_cast_int_to_float_is_ok(self): packed = 
self._spec_manager.pack({'fuzz': [1, 2]}) self.assertEqual([1.0, 2.0], packed[54].floats.array) def test_unpack(self): unpacked = self._spec_manager.unpack({ 54: tensor_utils.pack_tensor([1.0, 2.0], dtype=np.float32), 55: tensor_utils.pack_tensor([3, 4, 5], dtype=np.int32), }) self.assertLen(unpacked, 2) np.testing.assert_array_equal(np.asarray([1.0, 2.0]), unpacked['fuzz']) np.testing.assert_array_equal(np.asarray([3, 4, 5]), unpacked['foo']) def test_partial_unpack(self): unpacked = self._spec_manager.unpack({ 54: tensor_utils.pack_tensor([1.0, 2.0], dtype=np.float32), }) self.assertLen(unpacked, 1) np.testing.assert_array_equal(np.asarray([1.0, 2.0]), unpacked['fuzz']) def test_unpack_unknown_uid_raises_error(self): with self.assertRaisesRegex(KeyError, '53'): self._spec_manager.unpack({53: tensor_utils.pack_tensor('foo')}) def test_unpack_wrong_shape_raises_error(self): with self.assertRaisesRegex(ValueError, 'shape'): self._spec_manager.unpack({55: tensor_utils.pack_tensor([1, 2])}) def test_unpack_wrong_type_raises_error(self): with self.assertRaisesRegex(ValueError, 'dtype'): self._spec_manager.unpack( {55: tensor_utils.pack_tensor([1, 2, 3], dtype=np.float32)}) class SpecManagerVariableSpecShapeTests(absltest.TestCase): def setUp(self): super(SpecManagerVariableSpecShapeTests, self).setUp() specs = { 101: dm_env_rpc_pb2.TensorSpec( name='foo', shape=[1, -1], dtype=dm_env_rpc_pb2.DataType.INT32), } self._spec_manager = spec_manager.SpecManager(specs) def test_variable_spec_shape(self): packed = self._spec_manager.pack({'foo': [[1, 2, 3, 4]]}) expected = { 101: tensor_utils.pack_tensor([[1, 2, 3, 4]], dtype=np.int32), } self.assertDictEqual(expected, packed) def test_invalid_variable_shape(self): with self.assertRaisesRegex(ValueError, 'shape'): self._spec_manager.pack({'foo': np.ones((1, 2, 3), dtype=np.int32)}) def test_empty_variable_shape(self): manager = spec_manager.SpecManager({ 1: dm_env_rpc_pb2.TensorSpec( name='bar', shape=[], dtype=dm_env_rpc_pb2.DataType.INT32) }) with self.assertRaisesRegex(ValueError, 'shape'): manager.pack({'bar': np.ones((1), dtype=np.int32)}) def test_invalid_variable_spec_shape(self): with self.assertRaisesRegex(ValueError, 'shape has > 1 variable length'): spec_manager.SpecManager({ 1: dm_env_rpc_pb2.TensorSpec( name='bar', shape=[1, -1, -1], dtype=dm_env_rpc_pb2.DataType.INT32) }) class SpecManagerConstructorTests(absltest.TestCase): def test_duplicate_names_raise_error(self): specs = { 54: dm_env_rpc_pb2.TensorSpec( name='fuzz', shape=[3], dtype=dm_env_rpc_pb2.DataType.FLOAT), 55: dm_env_rpc_pb2.TensorSpec( name='fuzz', shape=[2], dtype=dm_env_rpc_pb2.DataType.FLOAT), } with self.assertRaisesRegex(ValueError, 'duplicate name'): spec_manager.SpecManager(specs) if __name__ == '__main__': absltest.main()
dm_env_rpc-master
dm_env_rpc/v1/spec_manager_test.py
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for dm_env_rpc/dm_env adaptor.""" from unittest import mock from absl.testing import absltest from absl.testing import parameterized import dm_env from dm_env import specs import numpy as np from google.rpc import status_pb2 from google.protobuf import text_format from dm_env_rpc.v1 import dm_env_adaptor from dm_env_rpc.v1 import dm_env_rpc_pb2 from dm_env_rpc.v1 import error from dm_env_rpc.v1 import tensor_utils _SAMPLE_STEP_REQUEST = dm_env_rpc_pb2.StepRequest( requested_observations=[1, 2], actions={ 1: tensor_utils.pack_tensor(4, dtype=dm_env_rpc_pb2.UINT8), 2: tensor_utils.pack_tensor('hello') }) _SAMPLE_STEP_RESPONSE = dm_env_rpc_pb2.StepResponse( state=dm_env_rpc_pb2.EnvironmentStateType.RUNNING, observations={ 1: tensor_utils.pack_tensor(5, dtype=dm_env_rpc_pb2.UINT8), 2: tensor_utils.pack_tensor('goodbye') }) _TERMINATED_STEP_RESPONSE = dm_env_rpc_pb2.StepResponse( state=dm_env_rpc_pb2.EnvironmentStateType.TERMINATED, observations={ 1: tensor_utils.pack_tensor(5, dtype=dm_env_rpc_pb2.UINT8), 2: tensor_utils.pack_tensor('goodbye') }) _SAMPLE_SPEC = dm_env_rpc_pb2.ActionObservationSpecs( actions={ 1: dm_env_rpc_pb2.TensorSpec(dtype=dm_env_rpc_pb2.UINT8, name='foo'), 2: dm_env_rpc_pb2.TensorSpec(dtype=dm_env_rpc_pb2.STRING, name='bar') }, observations={ 1: dm_env_rpc_pb2.TensorSpec(dtype=dm_env_rpc_pb2.UINT8, name='foo'), 2: dm_env_rpc_pb2.TensorSpec(dtype=dm_env_rpc_pb2.STRING, name='bar') }, ) _SAMPLE_SPEC_REORDERED = dm_env_rpc_pb2.ActionObservationSpecs( observations={ 2: dm_env_rpc_pb2.TensorSpec(dtype=dm_env_rpc_pb2.STRING, name='bar'), 1: dm_env_rpc_pb2.TensorSpec(dtype=dm_env_rpc_pb2.UINT8, name='foo') }, actions={ 2: dm_env_rpc_pb2.TensorSpec(dtype=dm_env_rpc_pb2.STRING, name='bar'), 1: dm_env_rpc_pb2.TensorSpec(dtype=dm_env_rpc_pb2.UINT8, name='foo') }, ) _SAMPLE_NESTED_SPECS = dm_env_rpc_pb2.ActionObservationSpecs( actions={ 1: dm_env_rpc_pb2.TensorSpec( dtype=dm_env_rpc_pb2.INT32, name='foo.bar'), 2: dm_env_rpc_pb2.TensorSpec(dtype=dm_env_rpc_pb2.STRING, name='baz') }, observations={ 1: dm_env_rpc_pb2.TensorSpec( dtype=dm_env_rpc_pb2.INT32, name='foo.bar'), 2: dm_env_rpc_pb2.TensorSpec(dtype=dm_env_rpc_pb2.STRING, name='baz') }, ) # Ensures the equality check in reset() works if the dictionary elements are # created in a different order. 
_SAMPLE_RESET_RESPONSE = dm_env_rpc_pb2.ResetResponse( specs=_SAMPLE_SPEC_REORDERED) _RESET_CHANGES_SPEC_RESPONSE = dm_env_rpc_pb2.ResetResponse( specs=dm_env_rpc_pb2.ActionObservationSpecs()) _RESERVED_SPEC = dm_env_rpc_pb2.ActionObservationSpecs( actions={}, observations={ 1: dm_env_rpc_pb2.TensorSpec( dtype=dm_env_rpc_pb2.UINT8, name=dm_env_adaptor.DEFAULT_REWARD_KEY), 2: dm_env_rpc_pb2.TensorSpec( dtype=dm_env_rpc_pb2.STRING, name=dm_env_adaptor.DEFAULT_DISCOUNT_KEY) }) _RESERVED_STEP_RESPONSE = dm_env_rpc_pb2.StepResponse( state=dm_env_rpc_pb2.EnvironmentStateType.RUNNING, observations={ 1: tensor_utils.pack_tensor(5, dtype=dm_env_rpc_pb2.UINT8), 2: tensor_utils.pack_tensor('goodbye') }) _RESET_CHANGES_SPEC_ERROR = r'''Environment changed spec after reset. before: "actions { key: 1 value { name: "foo" dtype: UINT8 } } actions { key: 2 value { name: "bar" dtype: STRING } } observations { key: 1 value { name: "foo" dtype: UINT8 } } observations { key: 2 value { name: "bar" dtype: STRING } } " after: ""''' _EXTENSIONS = {'baz': 123, 'qux': 'quux'} class DmEnvAdaptorTests(absltest.TestCase): def setUp(self): super(DmEnvAdaptorTests, self).setUp() self._connection = mock.MagicMock() self._env = dm_env_adaptor.DmEnvAdaptor( self._connection, _SAMPLE_SPEC, extensions=_EXTENSIONS) def test_requested_observations(self): requested_observations = ['foo'] filtered_env = dm_env_adaptor.DmEnvAdaptor(self._connection, _SAMPLE_SPEC, requested_observations) expected_filtered_step_request = dm_env_rpc_pb2.StepRequest( requested_observations=[1], actions={ 1: tensor_utils.pack_tensor(4, dtype=dm_env_rpc_pb2.UINT8), 2: tensor_utils.pack_tensor('hello') }) self._connection.send = mock.MagicMock(return_value=_SAMPLE_STEP_RESPONSE) filtered_env.step({'foo': 4, 'bar': 'hello'}) self._connection.send.assert_called_once_with( expected_filtered_step_request) def test_invalid_requested_observations(self): requested_observations = ['invalid'] with self.assertRaisesRegex(ValueError, 'Unsupported observations requested'): dm_env_adaptor.DmEnvAdaptor(self._connection, _SAMPLE_SPEC, requested_observations) def test_requested_observation_spec(self): requested_observations = ['foo'] filtered_env = dm_env_adaptor.DmEnvAdaptor(self._connection, _SAMPLE_SPEC, requested_observations) observation_names = [name for name in filtered_env.observation_spec()] self.assertEqual(requested_observations, observation_names) def test_first_running_step(self): self._connection.send = mock.MagicMock(return_value=_SAMPLE_STEP_RESPONSE) timestep = self._env.step({'foo': 4, 'bar': 'hello'}) self._connection.send.assert_called_once_with(_SAMPLE_STEP_REQUEST) self.assertEqual(dm_env.StepType.FIRST, timestep.step_type) self.assertIsNone(timestep.reward) self.assertIsNone(timestep.discount) self.assertEqual({'foo': 5, 'bar': 'goodbye'}, timestep.observation) def test_mid_running_step(self): self._connection.send = mock.MagicMock(return_value=_SAMPLE_STEP_RESPONSE) self._env.step({'foo': 4, 'bar': 'hello'}) self._connection.send.assert_called_once_with(_SAMPLE_STEP_REQUEST) timestep = self._env.step({'foo': 4, 'bar': 'hello'}) self.assertEqual(dm_env.StepType.MID, timestep.step_type) self.assertEqual(0.0, timestep.reward) self.assertEqual(1.0, timestep.discount) self.assertEqual({'foo': 5, 'bar': 'goodbye'}, timestep.observation) def test_last_step(self): self._connection.send = mock.MagicMock(return_value=_SAMPLE_STEP_RESPONSE) self._env.step({'foo': 4, 'bar': 'hello'}) self._connection.send.assert_called_once_with(_SAMPLE_STEP_REQUEST) 
self._connection.send = mock.MagicMock( return_value=_TERMINATED_STEP_RESPONSE) timestep = self._env.step({'foo': 4, 'bar': 'hello'}) self.assertEqual(dm_env.StepType.LAST, timestep.step_type) self.assertEqual(0.0, timestep.reward) self.assertEqual(0.0, timestep.discount) self.assertEqual({'foo': 5, 'bar': 'goodbye'}, timestep.observation) def test_illegal_state_transition(self): self._connection.send = mock.MagicMock( return_value=_TERMINATED_STEP_RESPONSE) with self.assertRaisesRegex( RuntimeError, 'Environment transitioned from TERMINATED to TERMINATED'): self._env.step({}) def test_reset(self): self._connection.send = mock.MagicMock(return_value=_SAMPLE_STEP_RESPONSE) self._env.step({'foo': 4, 'bar': 'hello'}) self._connection.send.assert_called_once_with(_SAMPLE_STEP_REQUEST) self._connection.send = mock.MagicMock( side_effect=[_SAMPLE_RESET_RESPONSE, _SAMPLE_STEP_RESPONSE]) timestep = self._env.reset() self.assertEqual(dm_env.StepType.FIRST, timestep.step_type) self.assertIsNone(timestep.reward) self.assertIsNone(timestep.discount) self.assertEqual({'foo': 5, 'bar': 'goodbye'}, timestep.observation) def test_spec_generate_value_step(self): self._connection.send = mock.MagicMock(return_value=_SAMPLE_STEP_RESPONSE) action_spec = self._env.action_spec() actions = { name: spec.generate_value() for name, spec in action_spec.items() } self._env.step(actions) self._connection.send.assert_called_once_with( dm_env_rpc_pb2.StepRequest( requested_observations=[1, 2], actions={ 1: tensor_utils.pack_tensor(actions['foo']), 2: tensor_utils.pack_tensor(actions['bar'], dtype=str) # pytype: disable=wrong-arg-types # typed-numpy })) def test_reset_changes_spec_raises_error(self): self._connection.send = mock.MagicMock(return_value=_SAMPLE_STEP_RESPONSE) self._env.step({'foo': 4, 'bar': 'hello'}) self._connection.send.assert_called_once_with(_SAMPLE_STEP_REQUEST) self._connection.send = mock.MagicMock( side_effect=[_RESET_CHANGES_SPEC_RESPONSE, _SAMPLE_STEP_RESPONSE]) with self.assertRaisesWithLiteralMatch(RuntimeError, _RESET_CHANGES_SPEC_ERROR): self._env.reset() def test_observation_spec(self): expected_spec = { 'foo': specs.Array(shape=(), dtype=np.uint8, name='foo'), 'bar': specs.StringArray(shape=(), name='bar') } self.assertEqual(expected_spec, self._env.observation_spec()) def test_action_spec(self): expected_spec = { 'foo': specs.Array(shape=(), dtype=np.uint8, name='foo'), 'bar': specs.StringArray(shape=(), name='bar') } self.assertEqual(expected_spec, self._env.action_spec()) def test_cant_step_after_close(self): self._connection.send = mock.MagicMock( return_value=dm_env_rpc_pb2.LeaveWorldResponse()) self._env.close() with self.assertRaisesRegex(AttributeError, 'send'): self._env.step({}) def test_reward_spec_default(self): self.assertEqual( specs.Array(shape=(), dtype=np.float64), self._env.reward_spec()) def test_discount_spec_default(self): self.assertEqual( specs.BoundedArray( shape=(), dtype=np.float64, minimum=0.0, maximum=1.0), self._env.discount_spec()) def test_close_leaves_world(self): self._connection.send = mock.MagicMock( return_value=dm_env_rpc_pb2.LeaveWorldResponse()) self._env.close() self._connection.send.assert_called_once_with( dm_env_rpc_pb2.LeaveWorldRequest()) def test_close_errors_when_cannot_leave_world(self): self._connection.send = mock.MagicMock(side_effect=ValueError('foo')) with self.assertRaisesRegex(ValueError, 'foo'): self._env.close() def test_close_releases_extensions(self): for extension_name, extension in _EXTENSIONS.items(): 
self.assertEqual(getattr(self._env, extension_name), extension) self._connection.send = mock.MagicMock( return_value=dm_env_rpc_pb2.LeaveWorldResponse()) self._env.close() for extension_name in _EXTENSIONS: self.assertIsNone(getattr(self._env, extension_name)) def test_close_allows_multiple_close_calls(self): self._connection.send = mock.MagicMock( return_value=dm_env_rpc_pb2.LeaveWorldResponse(), ) self._env.close() self._env.close() # Most importantly, when calling close multiple times, we need to make sure # that any _connection.send request is called once as the conenction is # destroyed after the first close. self._connection.send.assert_called_once_with( dm_env_rpc_pb2.LeaveWorldRequest(), ) self.assertIsNone(self._env._connection) class OverrideRewardDiscount(dm_env_adaptor.DmEnvAdaptor): def __init__(self): self.connection = mock.MagicMock() self.reward = mock.MagicMock() self.discount = mock.MagicMock() super(OverrideRewardDiscount, self).__init__(self.connection, _SAMPLE_SPEC) class RewardDiscountOverrideTests(absltest.TestCase): def test_override_reward(self): env = OverrideRewardDiscount() env.reward.return_value = 0.5 env.connection.send.return_value = _SAMPLE_STEP_RESPONSE timestep = env.step({}) self.assertEqual(0.5, timestep.reward) env.reward.assert_called() self.assertEqual(dm_env_rpc_pb2.EnvironmentStateType.RUNNING, env.reward.call_args[1]['state']) self.assertEqual(dm_env.StepType.FIRST, env.reward.call_args[1]['step_type']) self.assertDictEqual({ 'foo': 5, 'bar': 'goodbye' }, env.reward.call_args[1]['observations']) def test_override_discount(self): env = OverrideRewardDiscount() env.discount.return_value = 0.5 env.connection.send.return_value = _SAMPLE_STEP_RESPONSE timestep = env.step({}) self.assertEqual(0.5, timestep.discount) env.discount.assert_called() self.assertEqual(dm_env_rpc_pb2.EnvironmentStateType.RUNNING, env.discount.call_args[1]['state']) self.assertEqual(dm_env.StepType.FIRST, env.discount.call_args[1]['step_type']) self.assertDictEqual({ 'foo': 5, 'bar': 'goodbye' }, env.discount.call_args[1]['observations']) class ReservedKeywordTests(absltest.TestCase): def setUp(self): super(ReservedKeywordTests, self).setUp() self._connection = mock.MagicMock() self._env = dm_env_adaptor.DmEnvAdaptor(self._connection, _RESERVED_SPEC) def test_reward_spec(self): self.assertEqual( specs.Array(shape=(), dtype=np.uint8), self._env.reward_spec()) def test_discount_spec(self): self.assertEqual(specs.StringArray(shape=()), self._env.discount_spec()) def test_reward_from_reserved_keyword(self): self._connection.send = mock.MagicMock(return_value=_RESERVED_STEP_RESPONSE) self._env.step({}) # Reward is None for first step. 
timestep = self._env.step({}) self.assertEqual(5, timestep.reward) def test_discount(self): self._connection.send = mock.MagicMock(return_value=_RESERVED_STEP_RESPONSE) timestep = self._env.step({}) self.assertEqual('goodbye', timestep.discount) def test_observations_empty(self): self.assertEmpty(self._env.observation_spec()) def test_explicitly_requesting_reward_and_discount(self): env = dm_env_adaptor.DmEnvAdaptor( self._connection, _RESERVED_SPEC, requested_observations=[ dm_env_adaptor.DEFAULT_REWARD_KEY, dm_env_adaptor.DEFAULT_DISCOUNT_KEY ]) expected_observation_spec = { dm_env_adaptor.DEFAULT_REWARD_KEY: env.reward_spec(), dm_env_adaptor.DEFAULT_DISCOUNT_KEY: env.discount_spec(), } self.assertEqual(env.observation_spec(), expected_observation_spec) class EnvironmentAutomaticallyRequestsReservedKeywords(absltest.TestCase): def setUp(self): super(EnvironmentAutomaticallyRequestsReservedKeywords, self).setUp() self._connection = mock.MagicMock() self._env = dm_env_adaptor.DmEnvAdaptor( self._connection, _RESERVED_SPEC, requested_observations=[]) self._connection.send = mock.MagicMock(return_value=_RESERVED_STEP_RESPONSE) def test_reward_spec_unrequested(self): self.assertEqual( specs.Array(shape=(), dtype=np.uint8), self._env.reward_spec()) def test_discount_spec_unrequested(self): self.assertEqual(specs.StringArray(shape=()), self._env.discount_spec()) def test_does_not_give_back_unrequested_observations(self): timestep = self._env.step({}) self.assertEqual({}, timestep.observation) def test_first_reward_none(self): timestep = self._env.step({}) self.assertIsNone(timestep.reward) def test_reward_piped_correctly(self): self._env.step({}) # Reward is None for first step. timestep = self._env.step({}) self.assertEqual(5, timestep.reward) def test_discount_piped_correctly(self): timestep = self._env.step({}) self.assertEqual('goodbye', timestep.discount) class EnvironmentNestedActionsObservations(absltest.TestCase): def test_nested_specs(self): env = dm_env_adaptor.DmEnvAdaptor( connection=mock.MagicMock(), specs=_SAMPLE_NESTED_SPECS) expected_actions = { 'foo': { 'bar': specs.Array(shape=(), dtype=np.int32, name='foo.bar'), }, 'baz': specs.Array(shape=(), dtype=str, name='baz'), } expected_observations = { 'foo': { 'bar': specs.Array(shape=(), dtype=np.int32, name='foo.bar'), }, 'baz': specs.Array(shape=(), dtype=str, name='baz'), } self.assertSameElements(expected_actions, env.action_spec()) self.assertSameElements(expected_observations, env.observation_spec()) def test_no_nested_specs(self): env = dm_env_adaptor.DmEnvAdaptor( connection=mock.MagicMock(), specs=_SAMPLE_NESTED_SPECS, nested_tensors=False) expected_actions = { 'foo.bar': specs.Array(shape=(), dtype=np.int32, name='foo.bar'), 'baz': specs.Array(shape=(), dtype=str, name='baz'), } expected_observations = { 'foo.bar': specs.Array(shape=(), dtype=np.int32, name='foo.bar'), 'baz': specs.Array(shape=(), dtype=str, name='baz'), } self.assertSameElements(expected_actions, env.action_spec()) self.assertSameElements(expected_observations, env.observation_spec()) def test_nested_actions_step(self): connection = mock.MagicMock() connection.send = mock.MagicMock( return_value=text_format.Parse("""state: RUNNING""", dm_env_rpc_pb2.StepResponse())) env = dm_env_adaptor.DmEnvAdaptor( connection, specs=_SAMPLE_NESTED_SPECS, requested_observations=[]) timestep = env.step({'foo': {'bar': 123}}) self.assertEqual(dm_env.StepType.FIRST, timestep.step_type) connection.send.assert_called_once_with( text_format.Parse( """actions: { key: 1, value: 
{ int32s: { array: 123 } } }""", dm_env_rpc_pb2.StepRequest())) def test_no_nested_actions_step(self): connection = mock.MagicMock() connection.send = mock.MagicMock( return_value=text_format.Parse("""state: RUNNING""", dm_env_rpc_pb2.StepResponse())) env = dm_env_adaptor.DmEnvAdaptor( connection, specs=_SAMPLE_NESTED_SPECS, requested_observations=[], nested_tensors=False) timestep = env.step({'foo.bar': 123}) self.assertEqual(dm_env.StepType.FIRST, timestep.step_type) connection.send.assert_called_once_with( text_format.Parse( """actions: { key: 1, value: { int32s: { array: 123 } } }""", dm_env_rpc_pb2.StepRequest())) def test_nested_observations_step(self): connection = mock.MagicMock() connection.send = mock.MagicMock( return_value=text_format.Parse( """state: RUNNING observations: { key: 1, value: { int32s: { array: 42 } } }""", dm_env_rpc_pb2.StepResponse())) expected = {'foo': {'bar': 42}} env = dm_env_adaptor.DmEnvAdaptor( connection, specs=_SAMPLE_NESTED_SPECS, requested_observations=['foo.bar']) timestep = env.step({}) self.assertEqual(dm_env.StepType.FIRST, timestep.step_type) self.assertSameElements(expected, timestep.observation) connection.send.assert_called_once_with( dm_env_rpc_pb2.StepRequest(requested_observations=[1])) def test_extensions(self): class _ExampleExtension: def foo(self): return 'bar' env = dm_env_adaptor.DmEnvAdaptor( connection=mock.MagicMock(), specs=_SAMPLE_SPEC, extensions={'extension': _ExampleExtension()}) self.assertEqual('bar', env.extension.foo()) def test_invalid_extension_attr(self): with self.assertRaisesRegex(ValueError, 'DmEnvAdaptor already has attribute'): dm_env_adaptor.DmEnvAdaptor( connection=mock.MagicMock(), specs=_SAMPLE_SPEC, extensions={'_connection': object()}) class CreateJoinHelpers(parameterized.TestCase): def test_create_world(self): connection = mock.MagicMock() connection.send = mock.MagicMock( return_value=dm_env_rpc_pb2.CreateWorldResponse( world_name='Damogran_01')) world_name = dm_env_adaptor.create_world(connection, {'planet': 'Damogran'}) self.assertEqual('Damogran_01', world_name) connection.send.assert_called_once_with( text_format.Parse( """settings: { key: 'planet', value: { strings: { array: 'Damogran' } } }""", dm_env_rpc_pb2.CreateWorldRequest())) def test_join_world(self): connection = mock.MagicMock() connection.send = mock.MagicMock( return_value=dm_env_rpc_pb2.JoinWorldResponse(specs=_SAMPLE_SPEC)) env = dm_env_adaptor.join_world(connection, 'Damogran_01', {'player': 'zaphod'}) self.assertIsNotNone(env) connection.send.assert_called_once_with( text_format.Parse( """world_name: 'Damogran_01' settings: { key: 'player', value: { strings: { array: 'zaphod' } } }""", dm_env_rpc_pb2.JoinWorldRequest())) def test_create_join_world(self): connection = mock.MagicMock() connection.send = mock.MagicMock(side_effect=[ dm_env_rpc_pb2.CreateWorldResponse(world_name='Damogran_01'), dm_env_rpc_pb2.JoinWorldResponse(specs=_SAMPLE_SPEC) ]) env, world_name = dm_env_adaptor.create_and_join_world( connection, create_world_settings={'planet': 'Damogran'}, join_world_settings={ 'ship_type': 1, 'player': 'zaphod', }, requested_observations=['foo']) self.assertIsNotNone(env) self.assertSameElements(env.observation_spec().keys(), ['foo']) self.assertEqual('Damogran_01', world_name) connection.send.assert_has_calls([ mock.call( text_format.Parse( """settings: { key: 'planet', value: { strings: { array: 'Damogran' } } }""", dm_env_rpc_pb2.CreateWorldRequest())), mock.call( text_format.Parse( """world_name: 'Damogran_01' settings: { key: 
'ship_type', value: { int64s: { array: 1 } } } settings: { key: 'player', value: { strings: { array: 'zaphod' } } }""", dm_env_rpc_pb2.JoinWorldRequest())), ]) @parameterized.named_parameters( ( 'nested', {'nested': {'planet': 'Damogran'}}, {'nested': {'ship_type': 1, 'player': 'zaphod'}}, ), ( 'already_flattened', {'nested.planet': 'Damogran'}, {'nested.ship_type': 1, 'nested.player': 'zaphod'}, ), ) def test_flatten_create_join_world_settings( self, create_settings, join_settings ): connection = mock.MagicMock() connection.send = mock.MagicMock(side_effect=[ dm_env_rpc_pb2.CreateWorldResponse(world_name='Damogran_01'), dm_env_rpc_pb2.JoinWorldResponse(specs=_SAMPLE_SPEC) ]) env, world_name = dm_env_adaptor.create_and_join_world( connection, create_world_settings=create_settings, join_world_settings=join_settings) self.assertIsNotNone(env) self.assertEqual('Damogran_01', world_name) connection.send.assert_has_calls([ mock.call( text_format.Parse( """settings: { key: 'nested.planet', value: { strings: { array: 'Damogran' } } }""", dm_env_rpc_pb2.CreateWorldRequest())), mock.call( text_format.Parse( """world_name: 'Damogran_01' settings: { key: 'nested.ship_type', value: { int64s: { array: 1 } } } settings: { key: 'nested.player', value: { strings: { array: 'zaphod' } } }""", dm_env_rpc_pb2.JoinWorldRequest())), ]) def test_create_join_world_with_packed_settings(self): connection = mock.MagicMock() connection.send = mock.MagicMock(side_effect=[ dm_env_rpc_pb2.CreateWorldResponse(world_name='Magrathea_02'), dm_env_rpc_pb2.JoinWorldResponse(specs=_SAMPLE_SPEC) ]) env_and_world_name = dm_env_adaptor.create_and_join_world( connection, create_world_settings={'planet': tensor_utils.pack_tensor('Magrathea')}, join_world_settings={ 'ship_type': tensor_utils.pack_tensor(2), 'player': tensor_utils.pack_tensor('arthur'), 'unpacked_setting': [1, 2, 3], }) self.assertIsNotNone(env_and_world_name.env) self.assertEqual('Magrathea_02', env_and_world_name.world_name) connection.send.assert_has_calls([ mock.call( text_format.Parse( """settings: { key: 'planet', value: { strings: { array: 'Magrathea' } } }""", dm_env_rpc_pb2.CreateWorldRequest())), mock.call( text_format.Parse( """world_name: 'Magrathea_02' settings: { key: 'ship_type', value: { int64s: { array: 2 } } } settings: { key: 'player', value: { strings: { array: 'arthur' } } } settings: { key: 'unpacked_setting', value: { int64s: { array: 1 array: 2 array: 3 } shape: 3 } }""", dm_env_rpc_pb2.JoinWorldRequest())), ]) def test_create_join_world_with_extension(self): class _ExampleExtension: def foo(self): return 'bar' connection = mock.MagicMock() connection.send = mock.MagicMock(side_effect=[ dm_env_rpc_pb2.CreateWorldResponse(world_name='foo'), dm_env_rpc_pb2.JoinWorldResponse(specs=_SAMPLE_SPEC) ]) env, _ = dm_env_adaptor.create_and_join_world( connection, create_world_settings={}, join_world_settings={}, extensions={'extension': _ExampleExtension()}) self.assertEqual('bar', env.extension.foo()) def test_create_join_world_with_unnested_tensors(self): connection = mock.MagicMock() connection.send = mock.MagicMock(side_effect=[ dm_env_rpc_pb2.CreateWorldResponse(world_name='Damogran_01'), dm_env_rpc_pb2.JoinWorldResponse(specs=_SAMPLE_NESTED_SPECS) ]) env, _ = dm_env_adaptor.create_and_join_world( connection, create_world_settings={}, join_world_settings={}, nested_tensors=False) expected_actions = { 'foo.bar': specs.Array(shape=(), dtype=np.int32, name='foo.bar'), 'baz': specs.Array(shape=(), dtype=str, name='baz'), } expected_observations = { 
'foo.bar': specs.Array(shape=(), dtype=np.int32, name='foo.bar'), 'baz': specs.Array(shape=(), dtype=str, name='baz'), } self.assertSameElements(expected_actions, env.action_spec()) self.assertSameElements(expected_observations, env.observation_spec()) def test_create_join_world_with_invalid_extension(self): connection = mock.MagicMock() connection.send = mock.MagicMock(side_effect=[ dm_env_rpc_pb2.CreateWorldResponse(world_name='foo'), dm_env_rpc_pb2.JoinWorldResponse(specs=_SAMPLE_SPEC), dm_env_rpc_pb2.LeaveWorldResponse(), dm_env_rpc_pb2.DestroyWorldRequest() ]) with self.assertRaisesRegex(ValueError, 'DmEnvAdaptor already has attribute'): _ = dm_env_adaptor.create_and_join_world( connection, create_world_settings={}, join_world_settings={}, extensions={'step': object()}) connection.send.assert_has_calls([ mock.call(dm_env_rpc_pb2.CreateWorldRequest()), mock.call(dm_env_rpc_pb2.JoinWorldRequest(world_name='foo')), mock.call(dm_env_rpc_pb2.LeaveWorldRequest()), mock.call(dm_env_rpc_pb2.DestroyWorldRequest(world_name='foo')) ]) def test_created_but_failed_to_join_world(self): connection = mock.MagicMock() connection.send = mock.MagicMock( side_effect=( dm_env_rpc_pb2.CreateWorldResponse(world_name='Damogran_01'), error.DmEnvRpcError(status_pb2.Status(message='Failed to Join.')), dm_env_rpc_pb2.DestroyWorldResponse())) with self.assertRaisesRegex(error.DmEnvRpcError, 'Failed to Join'): _ = dm_env_adaptor.create_and_join_world( connection, create_world_settings={}, join_world_settings={}) connection.send.assert_has_calls([ mock.call(dm_env_rpc_pb2.CreateWorldRequest()), mock.call(dm_env_rpc_pb2.JoinWorldRequest(world_name='Damogran_01')), mock.call(dm_env_rpc_pb2.DestroyWorldRequest(world_name='Damogran_01')) ]) def test_created_and_joined_but_adaptor_failed(self): connection = mock.MagicMock() connection.send = mock.MagicMock( side_effect=( dm_env_rpc_pb2.CreateWorldResponse(world_name='Damogran_01'), dm_env_rpc_pb2.JoinWorldResponse(specs=_SAMPLE_SPEC), dm_env_rpc_pb2.LeaveWorldResponse(), dm_env_rpc_pb2.DestroyWorldResponse())) with self.assertRaisesRegex(ValueError, 'Unsupported observations'): _ = dm_env_adaptor.create_and_join_world( connection, create_world_settings={}, join_world_settings={}, requested_observations=['invalid_observation']) connection.send.assert_has_calls([ mock.call(dm_env_rpc_pb2.CreateWorldRequest()), mock.call(dm_env_rpc_pb2.JoinWorldRequest(world_name='Damogran_01')), mock.call(dm_env_rpc_pb2.LeaveWorldRequest()), mock.call(dm_env_rpc_pb2.DestroyWorldRequest(world_name='Damogran_01')) ]) if __name__ == '__main__': absltest.main()
dm_env_rpc-master
dm_env_rpc/v1/dm_env_adaptor_test.py
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for environment_stream.proto. These aren't for testing functionality (it's assumed protobufs work) but for testing/demonstrating how the protobufs would have to be used in code. """ from absl.testing import absltest from dm_env_rpc.v1 import dm_env_rpc_pb2 class TensorTests(absltest.TestCase): def test_setting_tensor_data(self): tensor = dm_env_rpc_pb2.Tensor() tensor.floats.array[:] = [1, 2] def test_setting_tensor_data_with_wrong_type(self): tensor = dm_env_rpc_pb2.Tensor() with self.assertRaises(TypeError): tensor.floats.array[:] = ['hello!'] # pytype: disable=unsupported-operands def test_which_is_set(self): tensor = dm_env_rpc_pb2.Tensor() tensor.floats.array[:] = [1, 2] self.assertEqual('floats', tensor.WhichOneof('payload')) class TensorSpec(absltest.TestCase): def test_setting_spec(self): tensor_spec = dm_env_rpc_pb2.TensorSpec() tensor_spec.name = 'Foo' tensor_spec.min.floats.array[:] = [0.0] tensor_spec.max.floats.array[:] = [0.0] tensor_spec.shape[:] = [2, 2] tensor_spec.dtype = dm_env_rpc_pb2.DataType.FLOAT class JoinWorldResponse(absltest.TestCase): def test_setting_spec(self): response = dm_env_rpc_pb2.JoinWorldResponse() tensor_spec = response.specs.actions[1] tensor_spec.shape[:] = [1] tensor_spec.dtype = dm_env_rpc_pb2.DataType.FLOAT if __name__ == '__main__': absltest.main()
dm_env_rpc-master
dm_env_rpc/v1/dm_env_rpc_test.py
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for dm_env_rpc error module.""" import pickle from absl.testing import absltest from google.rpc import code_pb2 from google.rpc import status_pb2 from dm_env_rpc.v1 import error class ErrorTest(absltest.TestCase): def testSimpleError(self): message = status_pb2.Status( code=code_pb2.INVALID_ARGUMENT, message='A test error.') exception = error.DmEnvRpcError(message) self.assertEqual(code_pb2.INVALID_ARGUMENT, exception.code) self.assertEqual('A test error.', exception.message) self.assertEqual(str(message), str(exception)) def testPickleUnpickle(self): exception = error.DmEnvRpcError(status_pb2.Status( code=code_pb2.INVALID_ARGUMENT, message='foo.')) pickled = pickle.dumps(exception) unpickled = pickle.loads(pickled) self.assertEqual(code_pb2.INVALID_ARGUMENT, unpickled.code) self.assertEqual('foo.', unpickled.message) def testRepr(self): exception = error.DmEnvRpcError(status_pb2.Status( code=code_pb2.INVALID_ARGUMENT, message='foo.')) as_string = repr(exception) self.assertIn(exception.message, as_string) self.assertIn(str(exception.code), as_string) if __name__ == '__main__': absltest.main()
dm_env_rpc-master
dm_env_rpc/v1/error_test.py
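As a usage sketch (not from the repository), this is how calling code might construct and handle `DmEnvRpcError`, mirroring what the tests above verify; the `raise_not_found` helper and its message are hypothetical.

from google.rpc import code_pb2
from google.rpc import status_pb2

from dm_env_rpc.v1 import error


def raise_not_found(key):
  # Hypothetical helper: wrap a google.rpc Status in a DmEnvRpcError.
  raise error.DmEnvRpcError(status_pb2.Status(
      code=code_pb2.NOT_FOUND, message=f'No such key: {key}'))


try:
  raise_not_found('foo')
except error.DmEnvRpcError as e:
  assert e.code == code_pb2.NOT_FOUND
  assert 'foo' in e.message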
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A base class for ResetWorld tests for a server."""

import abc

from absl.testing import absltest

from dm_env_rpc.v1 import dm_env_rpc_pb2
from dm_env_rpc.v1 import error


class ResetWorld(absltest.TestCase, metaclass=abc.ABCMeta):
  """A base class for dm_env_rpc `ResetWorld` compliance tests."""

  @property
  @abc.abstractmethod
  def connection(self):
    """An instance of dm_env_rpc's Connection already joined to a world."""
    pass

  @property
  def required_reset_world_settings(self):
    """Settings necessary to pass to ResetWorld."""
    return {}

  @property
  def required_join_world_settings(self):
    """Settings necessary to pass to JoinWorld."""
    return {}

  @property
  def invalid_world_name(self):
    """The name of a world which doesn't exist."""
    return 'invalid_world_name'

  @property
  @abc.abstractmethod
  def world_name(self):
    """The name of the world to attempt to call ResetWorld on."""
    return ''

  def join_world(self):
    """Joins the world to call ResetWorld on."""
    self.connection.send(dm_env_rpc_pb2.JoinWorldRequest(
        world_name=self.world_name,
        settings=self.required_join_world_settings))

  def reset_world(self, world_name):
    """Resets the world."""
    self.connection.send(dm_env_rpc_pb2.ResetWorldRequest(
        world_name=world_name, settings=self.required_reset_world_settings))

  def leave_world(self):
    """Leaves the world."""
    self.connection.send(dm_env_rpc_pb2.LeaveWorldRequest())

  # pylint: disable=missing-docstring
  def test_cannot_reset_invalid_world(self):
    with self.assertRaises(error.DmEnvRpcError):
      self.reset_world(self.invalid_world_name)

  def test_can_reset_world_not_joined_to(self):
    self.reset_world(self.world_name)
    # If there are no errors the test passes.

  def test_can_reset_world_when_joined_to_it(self):
    try:
      self.join_world()
      self.reset_world(self.world_name)
      # If there are no errors the test passes.
    finally:
      self.leave_world()
  # pylint: enable=missing-docstring
dm_env_rpc-master
dm_env_rpc/v1/compliance/reset_world.py
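A hedged skeleton of how the `ResetWorld` suite above is intended to be used: a server's test module subclasses it and supplies the abstract members. The `make_connection_to_my_server` helper and the world name 'my_world' are assumptions; examples/catch_test.py later in this file set shows complete fixtures for the other compliance suites.

from dm_env_rpc.v1 import compliance


class MyServerResetWorldTest(compliance.ResetWorld):

  def setUp(self):
    # Hypothetical fixture returning a Connection to a server that has
    # already created the world named below.
    self._connection = make_connection_to_my_server()
    super().setUp()

  @property
  def connection(self):
    return self._connection

  @property
  def world_name(self):
    return 'my_world'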
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A base class for Reset tests for a server."""

import abc

from absl.testing import absltest

from dm_env_rpc.v1 import dm_env_rpc_pb2
from dm_env_rpc.v1 import error


class Reset(absltest.TestCase, metaclass=abc.ABCMeta):
  """A base class for dm_env_rpc `Reset` compliance tests."""

  @property
  @abc.abstractmethod
  def connection(self):
    """An instance of dm_env_rpc's Connection already joined to a world."""
    pass

  @property
  def required_reset_settings(self):
    return {}

  @abc.abstractmethod
  def join_world(self):
    """Joins a world, returning the specs."""
    pass

  def reset(self):
    """Resets the environment, returning the specs."""
    return self.connection.send(dm_env_rpc_pb2.ResetRequest(
        settings=self.required_reset_settings)).specs

  # pylint: disable=missing-docstring
  def test_reset_resends_the_specs(self):
    join_specs = self.join_world()
    specs = self.reset()
    self.assertEqual(join_specs, specs)

  def test_cannot_reset_if_not_joined_to_world(self):
    with self.assertRaises(error.DmEnvRpcError):
      self.reset()

  def test_can_reset_multiple_times(self):
    join_specs = self.join_world()
    self.reset()
    specs = self.reset()
    self.assertEqual(join_specs, specs)
  # pylint: enable=missing-docstring
dm_env_rpc-master
dm_env_rpc/v1/compliance/reset.py
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Compliance test base classes for dm_env_rpc."""

from dm_env_rpc.v1.compliance import create_destroy_world
from dm_env_rpc.v1.compliance import join_leave_world
from dm_env_rpc.v1.compliance import reset
from dm_env_rpc.v1.compliance import reset_world
from dm_env_rpc.v1.compliance import step

CreateDestroyWorld = create_destroy_world.CreateDestroyWorld
JoinLeaveWorld = join_leave_world.JoinLeaveWorld
Reset = reset.Reset
ResetWorld = reset_world.ResetWorld
Step = step.Step
dm_env_rpc-master
dm_env_rpc/v1/compliance/__init__.py
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """A base class for Step tests for a server.""" import abc import functools import operator from absl.testing import absltest import numpy as np from dm_env_rpc.v1 import dm_env_rpc_pb2 from dm_env_rpc.v1 import error from dm_env_rpc.v1 import tensor_spec_utils from dm_env_rpc.v1 import tensor_utils def _find_uid_not_in_set(uid_set): """Finds an example UID not in `uid_set`.""" uids = set(uid_set) uid = 0 while uid in uids: uid = uid + 1 return uid def _is_numeric_type(dtype): return (dtype != dm_env_rpc_pb2.DataType.PROTO and np.issubdtype(tensor_utils.data_type_to_np_type(dtype), np.number)) def _assert_less_equal(x, y, err_msg='', verbose=True): np.testing.assert_array_compare( operator.__le__, x, y, err_msg=err_msg, verbose=verbose, header='Arrays are not less or equal ordered', equal_inf=False) def _assert_greater_equal(x, y, err_msg='', verbose=True): np.testing.assert_array_compare( operator.__ge__, x, y, err_msg=err_msg, verbose=verbose, header='Arrays are not greater or equal ordered', equal_inf=False) def _create_test_value(spec, dtype=None): """Creates a NumPy array test value consistent with the TensorSpec `spec`.""" if _is_numeric_type(spec.dtype): value = tensor_spec_utils.bounds(spec).min else: value = tensor_utils.data_type_to_np_type(spec.dtype).type() shape = np.asarray(spec.shape) shape[shape < 0] = 1 return np.full(shape=shape, fill_value=value, dtype=dtype) def _create_test_tensor(spec, dtype=None): """Creates an arbitrary tensor consistent with the TensorSpec `spec`.""" value = _create_test_value(spec, dtype) return tensor_utils.pack_tensor(value) def _below_min(spec): """Generates values below spec's min. Args: spec: An instance of `TensorSpec`. Yields: A sequence of tuples of `(value, index)` where `index` is an indexer into `value` where the element has been set below the spec's min. """ if not spec.HasField('min'): return np_type = tensor_utils.data_type_to_np_type(spec.dtype) min_type_value = tensor_spec_utils.np_range_info(np_type).min minimum = tensor_spec_utils.bounds(spec).min for index in np.ndindex(*spec.shape): min_index_value = minimum if np.isscalar(minimum) else minimum[index] if min_type_value < min_index_value: value = _create_test_value(spec) value[index] = min_type_value yield value, index def _above_max(spec): """Generates values above spec's max. Args: spec: An instance of `TensorSpec`. Yields: A sequence of tuples of `(value, index)` where `index` is an indexer into `value` where the element has been set above the spec's max. 
""" if not spec.HasField('max'): return np_type = tensor_utils.data_type_to_np_type(spec.dtype) max_type_value = tensor_spec_utils.np_range_info(np_type).max maximum = tensor_spec_utils.bounds(spec).max for index in np.ndindex(*spec.shape): max_index_value = maximum if np.isscalar(maximum) else maximum[index] if max_type_value > max_index_value: value = _create_test_value(spec) value[index] = max_type_value yield value, index def _find_scalar_within_bounds(spec): """Returns a scalar which can satisfy the spec's bounds for any element.""" if _is_numeric_type(spec.dtype): bounds = tensor_spec_utils.bounds(spec) hard_min = np.amax(bounds.min) hard_max = np.amin(bounds.max) if hard_min < hard_max: return hard_min else: # There is no single scalar value that can satisfy all bounds. return None else: np_type = tensor_utils.data_type_to_np_type(spec.dtype) return np_type.type() def _step_before_test(function): """Decorator which calls step before test function is run.""" @functools.wraps(function) def wrapper(self, *args, **kwargs): # First step's actions are ignored, so step once to start the sequence. step_response = self.step() self.assertEqual( step_response.state, dm_env_rpc_pb2.EnvironmentStateType.RUNNING) return function(self, *args, **kwargs) return wrapper class Step(absltest.TestCase, metaclass=abc.ABCMeta): """A base class for dm_env_rpc `Step` compliance tests.""" @property @abc.abstractmethod def connection(self): """An instance of dm_env_rpc's Connection already joined to a world.""" pass @property @abc.abstractmethod def specs(self): """The specs from a JoinWorldResponse.""" pass @property def observation_uids(self): return set(self.specs.observations.keys()) @property def action_uids(self): return set(self.specs.actions.keys()) @property def required_actions(self): """A dict of required actions for a Step call.""" return {} @property def numeric_actions(self): return {uid: spec for uid, spec in self.specs.actions.items() if _is_numeric_type(spec.dtype)} @property def nonnumeric_actions(self): return {uid: spec for uid, spec in self.specs.actions.items() if not _is_numeric_type(spec.dtype)} def step(self, actions=None, **kwargs): """Sends a StepRequest and returns the StepResponse.""" actions = {**self.required_actions, **(actions or {})} return self.connection.send( dm_env_rpc_pb2.StepRequest(actions=actions, **kwargs)) # pylint: disable=missing-docstring ############################################################################## # Observations ############################################################################## def test_no_observations_returned_if_not_requested(self): observations = self.step().observations self.assertEmpty(observations) def test_requested_observations_are_returned(self): response = self.step(requested_observations=self.observation_uids) observations = response.observations self.assertEqual(self.observation_uids, set(observations.keys())) def test_cannot_request_invalid_observation_uid(self): bad_uid = _find_uid_not_in_set(self.observation_uids) with self.assertRaisesRegex(error.DmEnvRpcError, str(bad_uid)): self.step(requested_observations=[bad_uid]) def test_all_observation_dtypes_match_spec_dtypes(self): response = self.step(requested_observations=self.observation_uids) for uid, observation in response.observations.items(): spec = self.specs.observations[uid] with self.subTest(uid=uid, name=spec.name): spec_type = tensor_utils.data_type_to_np_type(spec.dtype) tensor_type = tensor_utils.get_tensor_type(observation) self.assertEqual(spec_type, 
tensor_type) def test_all_numerical_observations_in_range(self): numeric_uids = (uid for uid, spec in self.specs.observations.items() if _is_numeric_type(spec.dtype)) response = self.step(requested_observations=numeric_uids) for uid, observation in response.observations.items(): spec = self.specs.observations[uid] with self.subTest(uid=uid, name=spec.name): unpacked = tensor_utils.unpack_tensor(observation) bounds = tensor_spec_utils.bounds(spec) if spec.max.WhichOneof('payload') is not None: _assert_less_equal(unpacked, bounds.max) if spec.min.WhichOneof('payload') is not None: _assert_greater_equal(unpacked, bounds.min) def test_duplicated_requested_observations_are_redundant(self): response = self.step(requested_observations=list(self.observation_uids) * 2) self.assertEqual(self.observation_uids, set(response.observations.keys())) def test_can_request_each_observation_individually(self): for uid in self.observation_uids: spec = self.specs.observations[uid] with self.subTest(uid=uid, name=spec.name): response = self.step(requested_observations=[uid]) self.assertEqual([uid], list(response.observations.keys())) ############################################################################## # Actions ############################################################################## def test_first_step_actions_are_ignored(self): bad_uid = _find_uid_not_in_set(self.action_uids) self.step(actions={bad_uid: tensor_utils.pack_tensor(0)}) @_step_before_test def test_can_send_each_action_individually(self): for uid, spec in self.specs.actions.items(): with self.subTest(uid=uid, name=spec.name): tensor = _create_test_tensor(spec) self.step(actions={uid: tensor}) @_step_before_test def test_cannot_send_wrong_numeric_type_action(self): for uid, spec in self.numeric_actions.items(): with self.subTest(uid=uid, name=spec.name): np_type = tensor_utils.data_type_to_np_type(spec.dtype) wrong_dtype = (np.float64 if not np.issubdtype(np_type, np.float64) else np.float32) tensor = _create_test_tensor(spec, dtype=wrong_dtype) with self.assertRaises(error.DmEnvRpcError): self.step(actions={uid: tensor}) @_step_before_test def test_cannot_send_wrong_type_to_nonnumeric_actions(self): tensor = tensor_utils.pack_tensor(0, dtype=np.int32) for uid, spec in self.nonnumeric_actions.items(): with self.subTest(uid=uid, name=spec.name): shape = np.asarray(spec.shape) shape[shape < 0] = 1 tensor.shape[:] = shape with self.assertRaises(error.DmEnvRpcError): self.step(actions={uid: tensor}) @_step_before_test def test_cannot_send_invalid_action_uid(self): bad_uid = _find_uid_not_in_set(self.action_uids) with self.assertRaises(error.DmEnvRpcError): self.step(actions={bad_uid: tensor_utils.pack_tensor(0)}) @_step_before_test def test_cannot_send_action_below_min(self): for uid, spec in self.numeric_actions.items(): with self.subTest(uid=uid, name=spec.name): shape = np.asarray(spec.shape) shape[shape < 0] = 1 for value, index in _below_min(spec): with self.subTest(below_min_index=index): tensor = tensor_utils.pack_tensor(value, dtype=spec.dtype) tensor.shape[:] = shape with self.assertRaises(error.DmEnvRpcError): self.step(actions={uid: tensor}) @_step_before_test def test_cannot_send_action_above_max(self): for uid, spec in self.numeric_actions.items(): with self.subTest(uid=uid, name=spec.name): shape = np.asarray(spec.shape) shape[shape < 0] = 1 for value, index in _above_max(spec): with self.subTest(above_max_index=index): tensor = tensor_utils.pack_tensor(value, dtype=spec.dtype) tensor.shape[:] = shape with 
self.assertRaises(error.DmEnvRpcError): self.step(actions={uid: tensor}) @_step_before_test def test_cannot_send_action_with_wrong_shape(self): for uid, spec in self.specs.actions.items(): with self.subTest(uid=uid, name=spec.name): tensor = _create_test_tensor(spec) # Add too many dimensions to shape. tensor.shape[:] = tensor.shape[:] + [1] with self.assertRaises(error.DmEnvRpcError): self.step(actions={uid: tensor}) @_step_before_test def test_can_send_variable_dimension_tensor_action(self): actions_with_shape = {uid: spec for uid, spec in self.specs.actions.items() if spec.shape} for uid, spec in actions_with_shape.items(): with self.subTest(uid=uid, name=spec.name): tensor = _create_test_tensor(spec) # Set first dimension to be variable. tensor.shape[0] = -1 self.step(actions={uid: tensor}) @_step_before_test def test_cannot_send_tensor_with_too_many_variable_dimensions(self): actions_with_multidimensional_shape = { uid: spec for uid, spec in self.specs.actions.items() if len(spec.shape) >= 2} for uid, spec in actions_with_multidimensional_shape.items(): with self.subTest(uid=uid, name=spec.name): tensor = _create_test_tensor(spec) # Set multiple variable dimensions. tensor.shape[0] = -1 tensor.shape[1] = -1 with self.assertRaises(error.DmEnvRpcError): self.step(actions={uid: tensor}) @_step_before_test def test_can_send_broadcastable_actions(self): for uid, spec in self.specs.actions.items(): with self.subTest(uid=uid, name=spec.name): scalar = _find_scalar_within_bounds(spec) if scalar is None: # The action has no scalars we could feasibly broadcast. continue tensor = tensor_utils.pack_tensor(scalar, dtype=spec.dtype) shape = np.asarray(spec.shape) shape[shape < 0] = 1 tensor.shape[:] = shape self.step(actions={uid: tensor}) # pylint: enable=missing-docstring
dm_env_rpc-master
dm_env_rpc/v1/compliance/step.py
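For reference, a small sketch (assumptions: an established `connection`, plus action and observation UIDs taken from a `JoinWorldResponse`) of the raw request/response round trip that the `Step` suite above exercises.

import numpy as np

from dm_env_rpc.v1 import dm_env_rpc_pb2
from dm_env_rpc.v1 import tensor_utils


def step_once(connection, paddle_action_uid, board_observation_uid):
  # Send one action keyed by UID and request one observation by UID.
  response = connection.send(
      dm_env_rpc_pb2.StepRequest(
          actions={
              paddle_action_uid: tensor_utils.pack_tensor(0, dtype=np.int8)
          },
          requested_observations=[board_observation_uid]))
  # Observations come back as Tensor protos keyed by UID; unpack to NumPy.
  return tensor_utils.unpack_tensor(
      response.observations[board_observation_uid])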
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """A base class for JoinWorld and LeaveWord tests for a server.""" import abc from absl.testing import absltest import numpy as np from dm_env_rpc.v1 import dm_env_rpc_pb2 from dm_env_rpc.v1 import error from dm_env_rpc.v1 import tensor_spec_utils def _find_duplicates(iterable): """Returns a list of duplicate entries found in `iterable`.""" duplicates = [] seen = set() for item in iterable: if item in seen: duplicates.append(item) else: seen.add(item) return duplicates def _check_tensor_spec(tensor_spec): """Raises an error if the given `tensor_spec` is internally inconsistent.""" if np.sum(np.asarray(tensor_spec.shape) < 0) > 1: raise ValueError( f'"{tensor_spec.name}" has shape {tensor_spec.shape} which has more ' 'than one negative element.') min_type = tensor_spec.min and tensor_spec.min.WhichOneof('payload') max_type = tensor_spec.max and tensor_spec.max.WhichOneof('payload') if min_type or max_type: _ = tensor_spec_utils.bounds(tensor_spec) class JoinLeaveWorld(absltest.TestCase, metaclass=abc.ABCMeta): """A base class for `JoinWorld` and `LeaveWorld` compliance tests.""" @property def required_join_settings(self): """A dict of required settings for a Join World call.""" return {} @property def invalid_join_settings(self): """A list of dicts of Join World settings which are invalid in some way.""" return {} @abc.abstractproperty def world_name(self): """A string of the world name of an already created world.""" pass @property def invalid_world_name(self): """A string which doesn't correspond to any valid world_name.""" return 'invalid_world_name' @property @abc.abstractmethod def connection(self): """An instance of dm_env_rpc's Connection.""" pass def tearDown(self): super().tearDown() try: self.leave_world() finally: pass def join_world(self, **kwargs): """Joins the world and returns the spec.""" response = self.connection.send(dm_env_rpc_pb2.JoinWorldRequest(**kwargs)) return response.specs def leave_world(self): """Leaves currently joined world, if any.""" self.connection.send(dm_env_rpc_pb2.LeaveWorldRequest()) # pylint: disable=missing-docstring def test_can_join(self): self.join_world( world_name=self.world_name, settings=self.required_join_settings) # Success if there's no error raised. 
def test_cannot_join_with_wrong_world_name(self): with self.assertRaises(error.DmEnvRpcError): self.join_world(world_name=self.invalid_world_name) def test_cannot_join_world_with_invalid_settings(self): settings = self.required_join_settings for name, tensor in self.invalid_join_settings.items(): with self.assertRaises(error.DmEnvRpcError): self.join_world( world_name=self.world_name, settings={ name: tensor, **settings }) def test_cannot_join_world_twice(self): self.join_world( world_name=self.world_name, settings=self.required_join_settings) with self.assertRaises(error.DmEnvRpcError): self.join_world( world_name=self.world_name, settings=self.required_join_settings) def test_action_specs_have_unique_names(self): specs = self.join_world( world_name=self.world_name, settings=self.required_join_settings) self.assertEmpty(_find_duplicates( spec.name for spec in specs.actions.values())) def test_action_specs_for_consistency(self): specs = self.join_world( world_name=self.world_name, settings=self.required_join_settings) for action_spec in specs.actions.values(): _check_tensor_spec(action_spec) def test_observation_specs_have_unique_names(self): specs = self.join_world( world_name=self.world_name, settings=self.required_join_settings) self.assertEmpty(_find_duplicates( spec.name for spec in specs.observations.values())) def test_observation_specs_for_consistency(self): specs = self.join_world( world_name=self.world_name, settings=self.required_join_settings) for observation_spec in specs.observations.values(): _check_tensor_spec(observation_spec) def test_can_leave_world_if_not_joined(self): self.leave_world() # Success if there's no error raised. def test_can_leave_world_after_joining(self): self.join_world( world_name=self.world_name, settings=self.required_join_settings) self.leave_world() # Success if there's no error raised. def test_can_rejoin_world_after_leaving(self): self.join_world( world_name=self.world_name, settings=self.required_join_settings) self.leave_world() self.join_world( world_name=self.world_name, settings=self.required_join_settings) # Success if there's no error raised. # pylint: enable=missing-docstring
dm_env_rpc-master
dm_env_rpc/v1/compliance/join_leave_world.py
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A base class for CreateWorld and DestroyWorld tests for a server."""

import abc

from absl.testing import absltest

from dm_env_rpc.v1 import dm_env_rpc_pb2
from dm_env_rpc.v1 import error


class CreateDestroyWorld(absltest.TestCase, metaclass=abc.ABCMeta):
  """A base class for `CreateWorld` and `DestroyWorld` compliance tests."""

  @abc.abstractproperty
  def required_world_settings(self):
    """A string to Tensor mapping of the minimum set of required settings."""
    pass

  @abc.abstractproperty
  def invalid_world_settings(self):
    """World creation settings which are invalid in some way."""
    pass

  @abc.abstractproperty
  def has_multiple_world_support(self):
    """Does the server support creating more than one world?"""
    pass

  @abc.abstractproperty
  def connection(self):
    """An instance of dm_env_rpc's Connection."""
    pass

  def create_world(self, settings):
    """Returns the world name of the world created with the given settings."""
    response = self.connection.send(
        dm_env_rpc_pb2.CreateWorldRequest(settings=settings))
    return response.world_name

  def destroy_world(self, world_name):
    """Destroys the world named `world_name`."""
    if world_name is not None:
      self.connection.send(
          dm_env_rpc_pb2.DestroyWorldRequest(world_name=world_name))

  # pylint: disable=missing-docstring
  def test_can_create_and_destroy_world(self):
    # If this doesn't raise an exception the test passes.
    world_name = self.create_world(self.required_world_settings)
    self.destroy_world(world_name)

  def test_cannot_create_world_with_less_than_required_settings(self):
    settings = self.required_world_settings

    for name, _ in settings.items():
      sans_setting = dict(settings)
      del sans_setting[name]
      message = f'world was created without required setting "{name}"'
      with self.assertRaises(error.DmEnvRpcError, msg=message):
        self.create_world(sans_setting)

  def test_cannot_create_world_with_invalid_settings(self):
    settings = self.required_world_settings
    invalid_settings = self.invalid_world_settings
    for name, tensor in invalid_settings.items():
      message = f'world was created with invalid setting "{name}"'
      with self.assertRaises(error.DmEnvRpcError, msg=message):
        self.create_world({name: tensor, **settings})

  def test_world_name_is_unique(self):
    if not self.has_multiple_world_support:
      return
    world1_name = None
    world2_name = None
    try:
      world1_name = self.create_world(self.required_world_settings)
      world2_name = self.create_world(self.required_world_settings)
      self.assertIsNotNone(world1_name)
      self.assertIsNotNone(world2_name)
      self.assertNotEqual(world1_name, world2_name)
    finally:
      self.destroy_world(world1_name)
      self.destroy_world(world2_name)

  def test_cannot_destroy_uncreated_world(self):
    with self.assertRaises(error.DmEnvRpcError):
      self.destroy_world('foo')
  # pylint: enable=missing-docstring
dm_env_rpc-master
dm_env_rpc/v1/compliance/create_destroy_world.py
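A hedged skeleton for the `CreateDestroyWorld` suite above; the `level_name` setting, its values, and the connection fixture are purely illustrative assumptions about a hypothetical server.

from dm_env_rpc.v1 import compliance
from dm_env_rpc.v1 import tensor_utils


class MyServerCreateDestroyWorldTest(compliance.CreateDestroyWorld):

  @property
  def connection(self):
    return self._connection  # Assumed to be created in setUp.

  @property
  def required_world_settings(self):
    # Hypothetical setting the server requires for CreateWorld to succeed.
    return {'level_name': tensor_utils.pack_tensor('lobby')}

  @property
  def invalid_world_settings(self):
    # A wrongly-typed value for the same hypothetical setting.
    return {'level_name': tensor_utils.pack_tensor(123)}

  @property
  def has_multiple_world_support(self):
    return True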
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """A helper class for sending and receiving property requests and responses. This helper class provides a Pythonic interface for reading, writing and listing properties. It simplifies the packing and unpacking of property requests and responses using the provided dm_env_rpc.v1.connection.Connection instance to send and receive extension messages. Example Usage: property_extension = PropertyExtension(connection) # To read a property: value = property_extension['my_property'] # To write a property: property_extension['my_property'] = new_value # To find available properties: property_specs = property_extension.specs() spec = property_specs['my_property'] """ import contextlib from typing import Mapping, Sequence, Optional from dm_env import specs as dm_env_specs from google.protobuf import any_pb2 from google.rpc import code_pb2 from dm_env_rpc.v1 import connection as dm_env_rpc_connection from dm_env_rpc.v1 import dm_env_rpc_pb2 from dm_env_rpc.v1 import dm_env_utils from dm_env_rpc.v1 import error from dm_env_rpc.v1 import tensor_utils from dm_env_rpc.v1.extensions import properties_pb2 @contextlib.contextmanager def _convert_dm_env_rpc_error(): """Helper to convert DmEnvRpcError to a properties related exception.""" try: yield except error.DmEnvRpcError as e: if e.code == code_pb2.NOT_FOUND: raise KeyError('Property key not found!') from e elif e.code == code_pb2.PERMISSION_DENIED: raise PermissionError('Property permission denied!') from e elif e.code == code_pb2.INVALID_ARGUMENT: raise ValueError('Property value error!') from e raise class PropertySpec(object): """Class that represents a property's specification.""" def __init__(self, property_spec_proto: properties_pb2.PropertySpec): """Constructs a property specification from PropertySpec proto message. Args: property_spec_proto: A properties_pb2.PropertySpec message. """ self._property_spec_proto = property_spec_proto @property def key(self) -> str: """Return the property's key.""" return self._property_spec_proto.spec.name @property def readable(self) -> bool: """Returns True if the property is readable.""" return self._property_spec_proto.is_readable @property def writable(self) -> bool: """Returns True if the property is writable.""" return self._property_spec_proto.is_writable @property def listable(self) -> bool: """Returns True if the property is listable.""" return self._property_spec_proto.is_listable @property def spec(self) -> Optional[dm_env_specs.Array]: """Returns a dm_env spec if the property has a valid dtype. Returns: Either a dm_env spec or, if the dtype is invalid, None. 
""" if self._property_spec_proto.spec.dtype != ( dm_env_rpc_pb2.DataType.INVALID_DATA_TYPE): return dm_env_utils.tensor_spec_to_dm_env_spec( self._property_spec_proto.spec) else: return None @property def description(self) -> str: """Returns the property's description.""" return self._property_spec_proto.description def __repr__(self): return (f'PropertySpec(key={self.key}, readable={self.readable}, ' f'writable={self.writable}, listable={self.listable}, ' f'spec={self.spec}, description={self.description})') class PropertiesExtension(object): """Helper class for sending and receiving property requests and responses.""" def __init__(self, connection: dm_env_rpc_connection.Connection): """Construct extension with provided dm_env_rpc connection to the env. Args: connection: An instance of Connection already connected to a dm_env_rpc server. """ self._connection = connection def __getitem__(self, key: str): """Alias for PropertiesExtension read function.""" return self.read(key) def __setitem__(self, key: str, value) -> None: """Alias for PropertiesExtension write function.""" self.write(key, value) def specs(self, key: str = '') -> Mapping[str, PropertySpec]: """Helper to return sub-properties as a dict.""" return { sub_property.key: sub_property for sub_property in self.list(key) } def read(self, key: str): """Reads the value of a property. Args: key: A string key that represents the property to read. Returns: The value of the property, either as a scalar (float, int, string, etc.) or, if the response tensor has a non-empty `shape` attribute, a NumPy array of the payload with the correct type and shape. See tensor_utils.unpack for more details. """ response = properties_pb2.PropertyResponse() packed_request = any_pb2.Any() packed_request.Pack( properties_pb2.PropertyRequest( read_property=properties_pb2.ReadPropertyRequest(key=key))) with _convert_dm_env_rpc_error(): self._connection.send(packed_request).Unpack(response) return tensor_utils.unpack_tensor(response.read_property.value) def write(self, key: str, value) -> None: """Writes the provided value to a property. Args: key: A string key that represents the property to write. value: A scalar (float, int, string, etc.), NumPy array, or nested lists. See tensor_utils.pack for more details. """ packed_request = any_pb2.Any() packed_request.Pack( properties_pb2.PropertyRequest( write_property=properties_pb2.WritePropertyRequest( key=key, value=tensor_utils.pack_tensor(value)))) with _convert_dm_env_rpc_error(): self._connection.send(packed_request) def list(self, key: str = '') -> Sequence[PropertySpec]: """Lists properties residing under the provided key. Args: key: A string key to list properties at this location. If empty, returns properties registered at the root level. Returns: A sequence of PropertySpecs. """ response = properties_pb2.PropertyResponse() packed_request = any_pb2.Any() packed_request.Pack( properties_pb2.PropertyRequest( list_property=properties_pb2.ListPropertyRequest(key=key))) with _convert_dm_env_rpc_error(): self._connection.send(packed_request).Unpack(response) return tuple( PropertySpec(sub_property) for sub_property in response.list_property.values)
dm_env_rpc-master
dm_env_rpc/v1/extensions/properties.py
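A short usage sketch complementing the docstring example above: enumerating a server's root-level properties and relying on the extension's mapping of NOT_FOUND errors to `KeyError`. It assumes `connection` is an established dm_env_rpc Connection to a server that implements this extension, and that the property key used in the try block does not exist.

from dm_env_rpc.v1.extensions import properties


def describe_properties(connection):
  # Print what each root-level property supports, using the PropertySpec
  # accessors defined above.
  extension = properties.PropertiesExtension(connection)
  for key, spec in extension.specs().items():
    print(f'{key}: readable={spec.readable} writable={spec.writable} '
          f'listable={spec.listable} description={spec.description!r}')

  # Reads of unknown keys surface as KeyError rather than a raw DmEnvRpcError.
  try:
    _ = extension['no_such_property']
  except KeyError:
    pass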
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Extensions module for dm_env_rpc."""
dm_env_rpc-master
dm_env_rpc/v1/extensions/__init__.py
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests Properties extension.""" import contextlib from unittest import mock from absl.testing import absltest from dm_env import specs import numpy as np from google.protobuf import any_pb2 from google.rpc import code_pb2 from google.rpc import status_pb2 from google.protobuf import text_format from dm_env_rpc.v1 import connection as dm_env_rpc_connection from dm_env_rpc.v1 import dm_env_rpc_pb2 from dm_env_rpc.v1 import error from dm_env_rpc.v1.extensions import properties from dm_env_rpc.v1.extensions import properties_pb2 def _create_property_request_key(text_proto): extension_message = any_pb2.Any() extension_message.Pack( text_format.Parse(text_proto, properties_pb2.PropertyRequest())) return dm_env_rpc_pb2.EnvironmentRequest( extension=extension_message).SerializeToString() def _pack_property_response(text_proto): extension_message = any_pb2.Any() extension_message.Pack( text_format.Parse(text_proto, properties_pb2.PropertyResponse())) return dm_env_rpc_pb2.EnvironmentResponse(extension=extension_message) # Set of expected requests and associated responses for mock connection. 
_EXPECTED_REQUEST_RESPONSE_PAIRS = { _create_property_request_key('read_property { key: "foo" }'): _pack_property_response( 'read_property { value: { int32s: { array: 1 } } }'), _create_property_request_key("""write_property { key: "bar" value: { strings { array: "some_value" } } }"""): _pack_property_response('write_property {}'), _create_property_request_key('read_property { key: "bar" }'): _pack_property_response( 'read_property { value: { strings: { array: "some_value" } } }'), _create_property_request_key('list_property { key: "baz" }'): _pack_property_response("""list_property { values: { is_readable:true spec { name: "baz.fiz" dtype:UINT32 shape: 2 shape: 2 } }}"""), _create_property_request_key('list_property {}'): _pack_property_response("""list_property { values: { is_readable:true spec { name: "foo" dtype:INT32 } description: "This is a documented integer" } values: { is_readable:true is_writable:true spec { name: "bar" dtype:STRING } } values: { is_listable:true spec { name: "baz" } } }"""), _create_property_request_key('read_property { key: "bad_property" }'): dm_env_rpc_pb2.EnvironmentResponse( error=status_pb2.Status(message='invalid property request.')), _create_property_request_key('read_property { key: "invalid_key" }'): dm_env_rpc_pb2.EnvironmentResponse( error=status_pb2.Status( code=code_pb2.NOT_FOUND, message='Invalid key.')), _create_property_request_key("""write_property { key: "argument_test" value: { strings: { array: "invalid" } } }"""): dm_env_rpc_pb2.EnvironmentResponse( error=status_pb2.Status( code=code_pb2.INVALID_ARGUMENT, message='Invalid argument.')), _create_property_request_key('read_property { key: "permission_key" }'): dm_env_rpc_pb2.EnvironmentResponse( error=status_pb2.Status( code=code_pb2.PERMISSION_DENIED, message='No permission.')) } @contextlib.contextmanager def _create_mock_connection(): """Helper to create mock dm_env_rpc connection.""" with mock.patch.object(dm_env_rpc_connection, 'dm_env_rpc_pb2_grpc') as mock_grpc: def _process(request_iterator, **kwargs): del kwargs for request in request_iterator: yield _EXPECTED_REQUEST_RESPONSE_PAIRS[request.SerializeToString()] mock_stub_class = mock.MagicMock() mock_stub_class.Process = _process mock_grpc.EnvironmentStub.return_value = mock_stub_class yield dm_env_rpc_connection.Connection(mock.MagicMock()) class PropertiesTest(absltest.TestCase): def test_read_property(self): with _create_mock_connection() as connection: extension = properties.PropertiesExtension(connection) self.assertEqual(1, extension['foo']) def test_write_property(self): with _create_mock_connection() as connection: extension = properties.PropertiesExtension(connection) extension['bar'] = 'some_value' self.assertEqual('some_value', extension['bar']) def test_list_property(self): with _create_mock_connection() as connection: extension = properties.PropertiesExtension(connection) property_specs = extension.specs('baz') self.assertLen(property_specs, 1) property_spec = property_specs['baz.fiz'] self.assertTrue(property_spec.readable) self.assertFalse(property_spec.writable) self.assertFalse(property_spec.listable) self.assertEqual( specs.Array(shape=(2, 2), dtype=np.uint32), property_spec.spec) def test_root_list_property(self): with _create_mock_connection() as connection: extension = properties.PropertiesExtension(connection) property_specs = extension.specs() self.assertLen(property_specs, 3) self.assertTrue(property_specs['foo'].readable) self.assertTrue(property_specs['bar'].readable) 
self.assertTrue(property_specs['bar'].writable) self.assertTrue(property_specs['baz'].listable) def test_invalid_spec_request_on_listable_property(self): with _create_mock_connection() as connection: extension = properties.PropertiesExtension(connection) property_specs = extension.specs() self.assertTrue(property_specs['baz'].listable) self.assertIsNone(property_specs['baz'].spec) def test_invalid_request(self): with _create_mock_connection() as connection: extension = properties.PropertiesExtension(connection) with self.assertRaisesRegex(error.DmEnvRpcError, 'invalid property request.'): _ = extension['bad_property'] def test_invalid_key_raises_key_error(self): with _create_mock_connection() as connection: extension = properties.PropertiesExtension(connection) with self.assertRaises(KeyError): _ = extension['invalid_key'] def test_invalid_argument_raises_value_error(self): with _create_mock_connection() as connection: extension = properties.PropertiesExtension(connection) with self.assertRaises(ValueError): extension['argument_test'] = 'invalid' def test_permission_denied_raises_permission_error(self): with _create_mock_connection() as connection: extension = properties.PropertiesExtension(connection) with self.assertRaises(PermissionError): _ = extension['permission_key'] def test_property_description(self): with _create_mock_connection() as connection: extension = properties.PropertiesExtension(connection) property_specs = extension.specs() self.assertEqual('This is a documented integer', property_specs['foo'].description) def test_property_print(self): with _create_mock_connection() as connection: extension = properties.PropertiesExtension(connection) property_specs = extension.specs() self.assertRegex( str(property_specs['foo']), (r'PropertySpec\(key=foo, readable=True, writable=False, ' r'listable=False, spec=.*, ' r'description=This is a documented integer\)')) if __name__ == '__main__': absltest.main()
dm_env_rpc-master
dm_env_rpc/v1/extensions/properties_test.py
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for CatchEnvironment.""" from concurrent import futures from absl.testing import absltest from dm_env import test_utils import grpc import numpy as np import catch_environment from dm_env_rpc.v1 import compliance from dm_env_rpc.v1 import connection as dm_env_rpc_connection from dm_env_rpc.v1 import dm_env_adaptor from dm_env_rpc.v1 import dm_env_rpc_pb2 from dm_env_rpc.v1 import dm_env_rpc_pb2_grpc from dm_env_rpc.v1 import error from dm_env_rpc.v1 import tensor_utils class ServerConnection: def __init__(self): self._server = grpc.server( futures.ThreadPoolExecutor(max_workers=1)) servicer = catch_environment.CatchEnvironmentService() dm_env_rpc_pb2_grpc.add_EnvironmentServicer_to_server( servicer, self._server) port = self._server.add_secure_port('[::]:0', grpc.local_server_credentials()) self._server.start() self._channel = grpc.secure_channel(f'[::]:{port}', grpc.local_channel_credentials()) grpc.channel_ready_future(self._channel).result() self.connection = dm_env_rpc_connection.Connection(self._channel) def close(self): self.connection.close() self._channel.close() self._server.stop(grace=None) class JoinedServerConnection(ServerConnection): def __init__(self): super().__init__() response = self.connection.send(dm_env_rpc_pb2.CreateWorldRequest()) self.world_name = response.world_name response = self.connection.send(dm_env_rpc_pb2.JoinWorldRequest( world_name=self.world_name)) self.specs = response.specs def close(self): try: self.connection.send(dm_env_rpc_pb2.LeaveWorldRequest()) self.connection.send( dm_env_rpc_pb2.DestroyWorldRequest(world_name=self.world_name)) finally: super().close() class CatchDmEnvRpcStepTest(compliance.Step): @property def connection(self): return self._server_connection.connection @property def specs(self): return self._server_connection.specs def setUp(self): super().setUp() self._server_connection = JoinedServerConnection() def tearDown(self): self._server_connection.close() super().tearDown() class CatchDmEnvRpcCreateAndDestoryWorldTest(compliance.CreateDestroyWorld): @property def connection(self): return self._server_connection.connection @property def required_world_settings(self): """A string to Tensor mapping of the minimum set of required settings.""" return {} @property def invalid_world_settings(self): """World creation settings which are invalid in some way.""" return {'invalid_setting': tensor_utils.pack_tensor(123)} @property def has_multiple_world_support(self): """Does the server support creating more than one world?""" return False def setUp(self): self._server_connection = ServerConnection() super().setUp() def tearDown(self): super().tearDown() self._server_connection.close() class CatchDmEnvRpcJoinAndLeaveWorldTest(compliance.JoinLeaveWorld): @property def connection(self): return self._server_connection.connection @property def world_name(self): 
return self._world_name @property def invalid_join_settings(self): return {'invalid_setting': tensor_utils.pack_tensor(123)} def setUp(self): self._server_connection = ServerConnection() response = self.connection.send(dm_env_rpc_pb2.CreateWorldRequest()) self._world_name = response.world_name super().setUp() def tearDown(self): super().tearDown() try: self.connection.send( dm_env_rpc_pb2.DestroyWorldRequest(world_name=self.world_name)) finally: self._server_connection.close() class CatchDmEnvRpcResetTest(compliance.Reset): @property def connection(self): return self._server_connection.connection def join_world(self): """Joins a world, returning the specs.""" response = self.connection.send(dm_env_rpc_pb2.JoinWorldRequest( world_name=self.world_name)) return response.specs @property def world_name(self): return self._world_name def setUp(self): self._server_connection = ServerConnection() response = self.connection.send(dm_env_rpc_pb2.CreateWorldRequest()) self._world_name = response.world_name super().setUp() def tearDown(self): super().tearDown() try: self.connection.send(dm_env_rpc_pb2.LeaveWorldRequest()) self.connection.send( dm_env_rpc_pb2.DestroyWorldRequest(world_name=self.world_name)) finally: self._server_connection.close() class CatchDmEnvTest(test_utils.EnvironmentTestMixin, absltest.TestCase): def setUp(self): self._server_connection = JoinedServerConnection() self._connection = self._server_connection.connection self.world_name = self._server_connection.world_name self._dm_env = dm_env_adaptor.DmEnvAdaptor( self._connection, self._server_connection.specs) super().setUp() def tearDown(self): super().tearDown() self._server_connection.close() def make_object_under_test(self): return self._dm_env class CatchTestSettings(absltest.TestCase): def setUp(self): super().setUp() self._server_connection = ServerConnection() self._connection = self._server_connection.connection self._world_name = None def tearDown(self): try: if self._world_name: self._connection.send(dm_env_rpc_pb2.LeaveWorldRequest()) self._connection.send( dm_env_rpc_pb2.DestroyWorldRequest(world_name=self._world_name)) finally: self._server_connection.close() super().tearDown() def test_reset_world_seed_setting(self): self._world_name = self._connection.send( dm_env_rpc_pb2.CreateWorldRequest( settings={'seed': tensor_utils.pack_tensor(1234)})).world_name self._connection.send( dm_env_rpc_pb2.JoinWorldRequest(world_name=self._world_name)) step_response = self._connection.send(dm_env_rpc_pb2.StepRequest()) self._connection.send( dm_env_rpc_pb2.ResetWorldRequest( world_name=self._world_name, settings={'seed': tensor_utils.pack_tensor(1234)})) self.assertEqual(step_response, self._connection.send(dm_env_rpc_pb2.StepRequest())) def test_reset_seed_setting(self): self._world_name = self._connection.send( dm_env_rpc_pb2.CreateWorldRequest( settings={'seed': tensor_utils.pack_tensor(1234)})).world_name self._connection.send( dm_env_rpc_pb2.JoinWorldRequest(world_name=self._world_name)) step_response = self._connection.send(dm_env_rpc_pb2.StepRequest()) self._connection.send( dm_env_rpc_pb2.ResetRequest( settings={'seed': tensor_utils.pack_tensor(1234)})) self.assertEqual(step_response, self._connection.send(dm_env_rpc_pb2.StepRequest())) class CatchTest(absltest.TestCase): def setUp(self): super().setUp() self._server_connection = ServerConnection() self._connection = self._server_connection.connection response = self._connection.send(dm_env_rpc_pb2.CreateWorldRequest()) self._world_name = response.world_name def 
tearDown(self): try: self._connection.send(dm_env_rpc_pb2.LeaveWorldRequest()) self._connection.send( dm_env_rpc_pb2.DestroyWorldRequest(world_name=self._world_name)) finally: self._server_connection.close() super().tearDown() def test_can_reset_world_when_joined(self): self._connection.send( dm_env_rpc_pb2.JoinWorldRequest(world_name=self._world_name)) self._connection.send(dm_env_rpc_pb2.ResetWorldRequest()) def test_cannot_reset_world_when_not_joined(self): with self.assertRaises(error.DmEnvRpcError): self._connection.send(dm_env_rpc_pb2.ResetWorldRequest()) def test_cannot_step_when_not_joined(self): with self.assertRaises(error.DmEnvRpcError): self._connection.send(dm_env_rpc_pb2.StepRequest()) def test_cannot_reset_when_not_joined(self): with self.assertRaises(error.DmEnvRpcError): self._connection.send(dm_env_rpc_pb2.ResetRequest()) def test_cannot_join_world_with_wrong_name(self): with self.assertRaises(error.DmEnvRpcError): self._connection.send( dm_env_rpc_pb2.JoinWorldRequest(world_name='wrong_name')) def test_cannot_create_world_when_world_exists(self): with self.assertRaises(error.DmEnvRpcError): self._connection.send(dm_env_rpc_pb2.CreateWorldRequest()) def test_cannot_join_when_no_world_exists(self): self._connection.send( dm_env_rpc_pb2.DestroyWorldRequest(world_name=self._world_name)) with self.assertRaises(error.DmEnvRpcError): self._connection.send( dm_env_rpc_pb2.JoinWorldRequest(world_name=self._world_name)) self._connection.send(dm_env_rpc_pb2.CreateWorldRequest()) def test_cannot_destroy_world_when_still_joined(self): self._connection.send( dm_env_rpc_pb2.JoinWorldRequest(world_name=self._world_name)) with self.assertRaises(error.DmEnvRpcError): self._connection.send( dm_env_rpc_pb2.DestroyWorldRequest(world_name=self._world_name)) def test_cannot_destroy_world_with_wrong_name(self): with self.assertRaises(error.DmEnvRpcError): self._connection.send( dm_env_rpc_pb2.DestroyWorldRequest(world_name='wrong_name')) class CatchGameTest(absltest.TestCase): def setUp(self): super(CatchGameTest, self).setUp() self._rows = 3 self._cols = 3 self._game = catch_environment.CatchGame(self._rows, self._cols, 1) def test_draw_board_correct_initial_state(self): board = self._game.draw_board() self.assertEqual(board.shape, (3, 3)) def test_draw_board_ball_in_top_row(self): board = self._game.draw_board() self.assertIn(1, board[0]) def test_draw_board_bat_in_center_bottom_row(self): board = self._game.draw_board() self.assertTrue(np.array_equal([0, 1, 0], board[2])) def test_update_drops_ball(self): self._game.update(action=0) board = self._game.draw_board() self.assertNotIn(1, board[0]) self.assertIn(1, board[1]) def test_has_terminated_when_ball_hits_bottom(self): self.assertFalse(self._game.has_terminated()) self._game.update(action=0) self.assertFalse(self._game.has_terminated()) self._game.update(action=0) self.assertTrue(self._game.has_terminated()) def test_update_moves_paddle(self): self._game.update(action=1) board = self._game.draw_board() self.assertTrue(np.array_equal([0, 0, 1], board[2])) def test_cannot_update_game_when_has_terminated(self): self._game.update(action=0) self._game.update(action=0) with self.assertRaises(RuntimeError): self._game.update(action=0) def test_no_reward_when_not_terminated(self): self.assertEqual(0, self._game.reward()) self._game.update(action=0) self.assertEqual(0, self._game.reward()) self._game.update(action=0) def test_has_reward_when_terminated(self): self._game.update(action=0) self._game.update(action=0) self.assertNotEqual(0, 
self._game.reward()) if __name__ == '__main__': absltest.main()
dm_env_rpc-master
examples/catch_test.py
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Example Catch human agent.""" from concurrent import futures from absl import app import grpc import pygame import catch_environment from dm_env_rpc.v1 import connection as dm_env_rpc_connection from dm_env_rpc.v1 import dm_env_adaptor from dm_env_rpc.v1 import dm_env_rpc_pb2 from dm_env_rpc.v1 import dm_env_rpc_pb2_grpc _FRAMES_PER_SEC = 3 _FRAME_DELAY_MS = int(1000.0 // _FRAMES_PER_SEC) _BLACK = (0, 0, 0) _WHITE = (255, 255, 255) _ACTION_LEFT = -1 _ACTION_NOTHING = 0 _ACTION_RIGHT = 1 _ACTION_PADDLE = 'paddle' _OBSERVATION_REWARD = 'reward' _OBSERVATION_BOARD = 'board' def _draw_row(row_str, row_index, standard_font, window_surface): text = standard_font.render(row_str, True, _WHITE) text_rect = text.get_rect() text_rect.left = 50 text_rect.top = 30 + (row_index * 30) window_surface.blit(text, text_rect) def _render_window(board, window_surface, reward): """Render the game onto the window surface.""" standard_font = pygame.font.SysFont('Courier', 24) instructions_font = pygame.font.SysFont('Courier', 16) num_rows = board.shape[0] num_cols = board.shape[1] window_surface.fill(_BLACK) # Draw board. header = '* ' * (num_cols + 2) _draw_row(header, 0, standard_font, window_surface) for board_index in range(num_rows): row = board[board_index] row_str = '* ' for c in row: row_str += 'x ' if c == 1. else ' ' row_str += '* ' _draw_row(row_str, board_index + 1, standard_font, window_surface) _draw_row(header, num_rows + 1, standard_font, window_surface) # Draw footer. 
reward_str = 'Reward: {}'.format(reward) _draw_row(reward_str, num_rows + 3, standard_font, window_surface) instructions = ('Instructions: Left/Right arrow keys to move paddle, Escape ' 'to exit.') _draw_row(instructions, num_rows + 5, instructions_font, window_surface) def _start_server(): """Starts the Catch gRPC server.""" server = grpc.server(futures.ThreadPoolExecutor(max_workers=1)) servicer = catch_environment.CatchEnvironmentService() dm_env_rpc_pb2_grpc.add_EnvironmentServicer_to_server(servicer, server) port = server.add_secure_port('localhost:0', grpc.local_server_credentials()) server.start() return server, port def main(_): pygame.init() server, port = _start_server() with dm_env_rpc_connection.create_secure_channel_and_connect( f'localhost:{port}') as connection: env, world_name = dm_env_adaptor.create_and_join_world( connection, create_world_settings={}, join_world_settings={}) with env: window_surface = pygame.display.set_mode((800, 600), 0, 32) pygame.display.set_caption('Catch Human Agent') keep_running = True while keep_running: requested_action = _ACTION_NOTHING for event in pygame.event.get(): if event.type == pygame.QUIT: keep_running = False break elif event.type == pygame.KEYDOWN: if event.key == pygame.K_LEFT: requested_action = _ACTION_LEFT elif event.key == pygame.K_RIGHT: requested_action = _ACTION_RIGHT elif event.key == pygame.K_ESCAPE: keep_running = False break actions = {_ACTION_PADDLE: requested_action} timestep = env.step(actions) board = timestep.observation[_OBSERVATION_BOARD] reward = timestep.reward _render_window(board, window_surface, reward) pygame.display.update() pygame.time.wait(_FRAME_DELAY_MS) connection.send(dm_env_rpc_pb2.DestroyWorldRequest(world_name=world_name)) server.stop(None) if __name__ == '__main__': app.run(main)
dm_env_rpc-master
examples/catch_human_agent.py
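A hedged variation on the example above (reusing `_start_server`, the action and observation constants, and the imports from catch_human_agent.py, so it should be read as if appended to that module): a scripted agent that plays a fixed action until the episode ends, with no pygame dependency.

def run_scripted_episode():
  # Start the in-process Catch server, exactly as the human agent does.
  server, port = _start_server()
  with dm_env_rpc_connection.create_secure_channel_and_connect(
      f'localhost:{port}') as connection:
    env, world_name = dm_env_adaptor.create_and_join_world(
        connection, create_world_settings={}, join_world_settings={})
    with env:
      # Always send the no-op paddle action until the ball lands.
      timestep = env.step({_ACTION_PADDLE: _ACTION_NOTHING})
      while not timestep.last():
        timestep = env.step({_ACTION_PADDLE: _ACTION_NOTHING})
      print('Episode reward:', timestep.reward)
    connection.send(dm_env_rpc_pb2.DestroyWorldRequest(world_name=world_name))
  server.stop(None)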
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Catch example implemented as a gRPC EnvironmentServicer.""" import numpy as np from google.rpc import code_pb2 from google.rpc import status_pb2 from dm_env_rpc.v1 import dm_env_rpc_pb2 from dm_env_rpc.v1 import dm_env_rpc_pb2_grpc from dm_env_rpc.v1 import spec_manager from dm_env_rpc.v1 import tensor_spec_utils from dm_env_rpc.v1 import tensor_utils _ACTION_PADDLE = 'paddle' _DEFAULT_ACTION = 0 _INITIAL_SEED = 1 _NUM_ROWS = 10 _NUM_COLUMNS = 10 _OBSERVATION_REWARD = 'reward' _OBSERVATION_BOARD = 'board' _WORLD_NAME = 'catch' _VALID_ACTIONS = [-1, 0, 1] _VALID_CREATE_AND_RESET_SETTINGS = ['seed'] class CatchGame(object): """Simple Catch game environment. The agent must move a paddle to intercept falling balls. Falling balls only move downwards on the column they are in. The observation is an array shape (rows, columns), with binary values: zero if a space is empty; 1 if it contains the paddle or a ball. The actions are discrete, and there are three available: stay, move left and move right. The rewards adjusted when the ball reaches the bottom of the screen. """ def __init__(self, rows, columns, seed): """Initializes a new Catch environment. Args: rows: number of rows. columns: number of columns. seed: random seed for the RNG. """ self._rows = rows self._columns = columns self._ball_x = np.random.RandomState(seed).randint(self._columns) self._ball_y = 0 self._paddle_x = self._columns // 2 self._paddle_y = self._rows - 1 def draw_board(self): """Draw the board into a numpy array and return it.""" board = np.zeros((self._rows, self._columns), dtype=np.float32) board[self._ball_y, self._ball_x] = 1. board[self._paddle_y, self._paddle_x] = 1. return board def update(self, action): """Updates the environment according to the action.""" if self.has_terminated(): raise RuntimeError('Trying to update terminated environment') # Move the paddle. self._paddle_x = np.clip(self._paddle_x + action, 0, self._columns - 1) # Drop the ball. self._ball_y += 1 def has_terminated(self): return self._ball_y == self._paddle_y def reward(self): """Provides the incremental reward for the current frame.""" if self.has_terminated(): return 1. if self._paddle_x == self._ball_x else -1. 
else: return 0 def _check_message_type(env, is_joined, message_type): """Checks the message type is valid given the environment's world state.""" if not env: if message_type not in ['create_world', 'leave_world']: raise RuntimeError('Cannot {} when no world exists.'.format(message_type)) else: if message_type == 'create_world': raise RuntimeError( 'This example does not support creating multiple worlds.') if is_joined: if message_type == 'destroy_world': raise RuntimeError('Cannot destroy world when still joined.') else: if message_type == 'reset_world': raise RuntimeError( 'This example does not support reset_world when not joined.') elif message_type in ['step', 'reset']: raise RuntimeError( 'Cannot {} when world not joined.'.format(message_type)) def _observation_spec(): """Returns the observation spec.""" return { 1: dm_env_rpc_pb2.TensorSpec( name=_OBSERVATION_BOARD, shape=[_NUM_ROWS, _NUM_COLUMNS], dtype=dm_env_rpc_pb2.FLOAT), 2: dm_env_rpc_pb2.TensorSpec( name=_OBSERVATION_REWARD, dtype=dm_env_rpc_pb2.FLOAT) } def _action_spec(): """Returns the action spec.""" paddle_action_spec = dm_env_rpc_pb2.TensorSpec( dtype=dm_env_rpc_pb2.INT8, name=_ACTION_PADDLE) tensor_spec_utils.set_bounds( paddle_action_spec, minimum=np.min(_VALID_ACTIONS), maximum=np.max(_VALID_ACTIONS)) return {1: paddle_action_spec} def _validate_settings(settings, valid_settings): """"Validate the provided settings with list of valid setting keys.""" unrecognized_settings = [ setting for setting in settings if setting not in valid_settings ] if unrecognized_settings: raise ValueError('Unrecognized settings provided! Invalid settings:' f' {unrecognized_settings}') class CatchGameFactory(object): """Factory for creating new CatchGame instances.""" def __init__(self, initial_seed): self._seed = initial_seed def new_game(self): env = CatchGame(rows=_NUM_ROWS, columns=_NUM_COLUMNS, seed=self._seed) self._seed += 1 return env def reset_seed(self, seed): self._seed = seed class CatchEnvironmentService(dm_env_rpc_pb2_grpc.EnvironmentServicer): """Runs the Catch game as a gRPC EnvironmentServicer.""" def Process(self, request_iterator, context): """Processes incoming EnvironmentRequests. For each EnvironmentRequest the internal message is extracted and handled. The response for that message is then placed in a EnvironmentResponse which is returned to the client. An error status will be returned if an unknown message type is received or if the message is invalid for the current world state. Args: request_iterator: Message iterator provided by gRPC. context: Context provided by gRPC. Yields: EnvironmentResponse: Response for each incoming EnvironmentRequest. 
""" env_factory = CatchGameFactory(_INITIAL_SEED) env = None is_joined = False skip_next_frame = False action_manager = spec_manager.SpecManager(_action_spec()) observation_manager = spec_manager.SpecManager(_observation_spec()) for request in request_iterator: environment_response = dm_env_rpc_pb2.EnvironmentResponse() try: message_type = request.WhichOneof('payload') internal_request = getattr(request, message_type) _check_message_type(env, is_joined, message_type) if message_type == 'create_world': _validate_settings( request.create_world.settings, valid_settings=_VALID_CREATE_AND_RESET_SETTINGS) seed = request.create_world.settings.get('seed', None) if seed is not None: env_factory.reset_seed(tensor_utils.unpack_tensor(seed)) env = env_factory.new_game() skip_next_frame = True response = dm_env_rpc_pb2.CreateWorldResponse(world_name=_WORLD_NAME) elif message_type == 'join_world': _validate_settings(request.join_world.settings, valid_settings=[]) if is_joined: raise RuntimeError( f'Tried to join world "{internal_request.world_name}" but ' f'already joined to world "{_WORLD_NAME}"') if internal_request.world_name != _WORLD_NAME: raise RuntimeError( f'Tried to join world "{internal_request.world_name}" but the ' f'only supported world is "{_WORLD_NAME}"') response = dm_env_rpc_pb2.JoinWorldResponse() for uid, action in _action_spec().items(): response.specs.actions[uid].CopyFrom(action) for uid, observation in _observation_spec().items(): response.specs.observations[uid].CopyFrom(observation) is_joined = True elif message_type == 'step': # We need to skip all actions after creating or resetting the # environment. if skip_next_frame: skip_next_frame = False else: unpacked_actions = action_manager.unpack(internal_request.actions) paddle_action = unpacked_actions.get(_ACTION_PADDLE, _DEFAULT_ACTION) if paddle_action not in _VALID_ACTIONS: raise RuntimeError( f'Invalid paddle action value: "{paddle_action}"!') env.update(paddle_action) response = dm_env_rpc_pb2.StepResponse() packed_observations = observation_manager.pack({ _OBSERVATION_BOARD: env.draw_board(), _OBSERVATION_REWARD: env.reward() }) for requested_observation in internal_request.requested_observations: response.observations[requested_observation].CopyFrom( packed_observations[requested_observation]) if env.has_terminated(): response.state = dm_env_rpc_pb2.EnvironmentStateType.TERMINATED else: response.state = dm_env_rpc_pb2.EnvironmentStateType.RUNNING if env.has_terminated(): env = env_factory.new_game() skip_next_frame = True elif message_type == 'reset': _validate_settings( request.reset.settings, valid_settings=_VALID_CREATE_AND_RESET_SETTINGS) seed = request.reset.settings.get('seed', None) if seed is not None: env_factory.reset_seed(tensor_utils.unpack_tensor(seed)) env = env_factory.new_game() skip_next_frame = True response = dm_env_rpc_pb2.ResetResponse() for uid, action in _action_spec().items(): response.specs.actions[uid].CopyFrom(action) for uid, observation in _observation_spec().items(): response.specs.observations[uid].CopyFrom(observation) elif message_type == 'reset_world': _validate_settings( request.reset_world.settings, valid_settings=_VALID_CREATE_AND_RESET_SETTINGS) seed = request.reset_world.settings.get('seed', None) if seed is not None: env_factory.reset_seed(tensor_utils.unpack_tensor(seed)) env = env_factory.new_game() skip_next_frame = True response = dm_env_rpc_pb2.ResetWorldResponse() elif message_type == 'leave_world': is_joined = False response = dm_env_rpc_pb2.LeaveWorldResponse() elif 
message_type == 'destroy_world': if internal_request.world_name != _WORLD_NAME: raise RuntimeError( 'Tried to destroy world "{}" but we only support world "{}"' .format(internal_request.world_name, _WORLD_NAME)) env = None response = dm_env_rpc_pb2.DestroyWorldResponse() else: raise RuntimeError('Unhandled message: {}'.format(message_type)) getattr(environment_response, message_type).CopyFrom(response) except Exception as e: # pylint: disable=broad-except environment_response.error.CopyFrom( status_pb2.Status(code=code_pb2.INTERNAL, message=str(e))) yield environment_response
dm_env_rpc-master
examples/catch_environment.py
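Before wiring CatchGame into the gRPC servicer, its update/reward cycle can be checked directly. A minimal sketch, assuming the module-level names defined in catch_environment.py above are in scope; the fixed "always move right" policy is purely illustrative.

game = CatchGame(rows=_NUM_ROWS, columns=_NUM_COLUMNS, seed=_INITIAL_SEED)
episode_return = 0.
while not game.has_terminated():
  game.update(1)                    # -1 (left), 0 (stay) and 1 (right) are valid
  episode_return += game.reward()   # non-zero only on the terminal frame
print(game.draw_board())            # (rows, columns) float32 array of 0s and 1s
print('episode return:', episode_return)  # +1 if the paddle caught the ball, else -1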
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Setup for pip package.""" import os import setuptools _CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) def _get_version(): with open(os.path.join(_CURRENT_DIR, "kfac_jax", "__init__.py")) as fp: for line in fp: if line.startswith("__version__") and "=" in line: version = line[line.find("=") + 1:].strip(" '\"\n") if version: return version raise ValueError("`__version__` not defined in `kfac_jax/__init__.py`") def _parse_requirements(requirements_txt_path): """Parses requirements.txt and extracts all required packages.""" with open(requirements_txt_path) as f: packages = list() for line in f: if not (line.isspace() or line.startswith("#")): name = line.rstrip() if name.startswith("git+"): prefix = name.split("=")[-1] name = f"{prefix} @ {name}" packages.append(name) return packages _VERSION = _get_version() setuptools.setup( name="kfac-jax", version=_VERSION, url="https://github.com/google-deepmind/kfac-jax", license="Apache 2.0", author="DeepMind", description=( "A Jax package for approximate curvature estimation and " "optimization using KFAC." ), long_description=open(os.path.join(_CURRENT_DIR, "README.md")).read(), long_description_content_type="text/markdown", author_email="[email protected]", # Contained modules and scripts. packages=setuptools.find_namespace_packages(exclude=["tests", "examples"]), install_requires=_parse_requirements( os.path.join(_CURRENT_DIR, "requirements.txt") ), tests_require=_parse_requirements( os.path.join(_CURRENT_DIR, "requirements_tests.txt") ), extras_require={ "tests": _parse_requirements( os.path.join(_CURRENT_DIR, "requirements_tests.txt") ), }, requires_python=">=3.8", include_package_data=True, zip_safe=False, # PyPI package information. classifiers=[ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Scientific/Engineering :: Artificial Intelligence", "Topic :: Scientific/Engineering :: Mathematics", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Python Modules", ], )
kfac-jax-main
setup.py
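One detail of `_parse_requirements` above that is easy to miss: a `git+` requirement line is rewritten into PEP 508 "name @ url" form, with the name taken from the text after the last "=". A small sketch with hypothetical requirement contents, assuming `_parse_requirements` is importable from this setup.py:

import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
  path = os.path.join(tmp, 'requirements.txt')
  with open(path, 'w') as f:
    f.write('numpy>=1.21\n'
            '# a comment, skipped\n'
            'git+https://github.com/org/pkg#egg=pkg\n')
  print(_parse_requirements(path))
  # -> ['numpy>=1.21', 'pkg @ git+https://github.com/org/pkg#egg=pkg']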
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """kfac-jax public APIs.""" from kfac_jax._src import curvature_blocks from kfac_jax._src import curvature_estimator from kfac_jax._src import layers_and_loss_tags from kfac_jax._src import loss_functions from kfac_jax._src import optimizer from kfac_jax._src import patches_second_moment from kfac_jax._src import tag_graph_matcher from kfac_jax._src import tracer from kfac_jax._src import utils __version__ = "0.0.5" # Patches Second Moments patches_moments = patches_second_moment.patches_moments patches_moments_explicit = patches_second_moment.patches_moments_explicit # Layers and loss tags LossTag = layers_and_loss_tags.LossTag LayerTag = layers_and_loss_tags.LayerTag register_generic = layers_and_loss_tags.register_generic register_dense = layers_and_loss_tags.register_dense register_conv2d = layers_and_loss_tags.register_conv2d register_scale_and_shift = layers_and_loss_tags.register_scale_and_shift # Tag graph matcher auto_register_tags = tag_graph_matcher.auto_register_tags # Tracer ProcessedJaxpr = tracer.ProcessedJaxpr loss_tags_vjp = tracer.loss_tags_vjp loss_tags_jvp = tracer.loss_tags_jvp loss_tags_hvp = tracer.loss_tags_hvp layer_tags_vjp = tracer.layer_tags_vjp # Loss functions LossFunction = loss_functions.LossFunction NegativeLogProbLoss = loss_functions.NegativeLogProbLoss DistributionNegativeLogProbLoss = loss_functions.DistributionNegativeLogProbLoss NormalMeanNegativeLogProbLoss = loss_functions.NormalMeanNegativeLogProbLoss NormalMeanVarianceNegativeLogProbLoss = ( loss_functions.NormalMeanVarianceNegativeLogProbLoss) MultiBernoulliNegativeLogProbLoss = ( loss_functions.MultiBernoulliNegativeLogProbLoss) CategoricalLogitsNegativeLogProbLoss = ( loss_functions.CategoricalLogitsNegativeLogProbLoss) OneHotCategoricalLogitsNegativeLogProbLoss = ( loss_functions.OneHotCategoricalLogitsNegativeLogProbLoss) register_sigmoid_cross_entropy_loss = ( loss_functions.register_sigmoid_cross_entropy_loss) register_multi_bernoulli_predictive_distribution = ( loss_functions.register_multi_bernoulli_predictive_distribution) register_softmax_cross_entropy_loss = ( loss_functions.register_softmax_cross_entropy_loss) register_categorical_predictive_distribution = ( loss_functions.register_categorical_predictive_distribution) register_squared_error_loss = loss_functions.register_squared_error_loss register_normal_predictive_distribution = ( loss_functions.register_normal_predictive_distribution) # Curvature blocks CurvatureBlock = curvature_blocks.CurvatureBlock ScaledIdentity = curvature_blocks.ScaledIdentity Diagonal = curvature_blocks.Diagonal Full = curvature_blocks.Full KroneckerFactored = curvature_blocks.KroneckerFactored TwoKroneckerFactored = curvature_blocks.TwoKroneckerFactored NaiveDiagonal = curvature_blocks.NaiveDiagonal NaiveFull = curvature_blocks.NaiveFull DenseDiagonal = curvature_blocks.DenseDiagonal DenseFull = curvature_blocks.DenseFull DenseTwoKroneckerFactored = 
curvature_blocks.DenseTwoKroneckerFactored Conv2DDiagonal = curvature_blocks.Conv2DDiagonal Conv2DFull = curvature_blocks.Conv2DFull Conv2DTwoKroneckerFactored = curvature_blocks.Conv2DTwoKroneckerFactored ScaleAndShiftDiagonal = curvature_blocks.ScaleAndShiftDiagonal ScaleAndShiftFull = curvature_blocks.ScaleAndShiftFull set_max_parallel_elements = curvature_blocks.set_max_parallel_elements get_max_parallel_elements = curvature_blocks.get_max_parallel_elements set_default_eigen_decomposition_threshold = ( curvature_blocks.set_default_eigen_decomposition_threshold) get_default_eigen_decomposition_threshold = ( curvature_blocks.get_default_eigen_decomposition_threshold) # Curvature estimators CurvatureEstimator = curvature_estimator.CurvatureEstimator BlockDiagonalCurvature = curvature_estimator.BlockDiagonalCurvature ExplicitExactCurvature = curvature_estimator.ExplicitExactCurvature ImplicitExactCurvature = curvature_estimator.ImplicitExactCurvature set_default_tag_to_block_ctor = ( curvature_estimator.set_default_tag_to_block_ctor) get_default_tag_to_block_ctor = ( curvature_estimator.get_default_tag_to_block_ctor) # Optimizers Optimizer = optimizer.Optimizer __all__ = ( # Modules "utils", "patches_second_moment", "layers_and_loss_tags", "loss_functions", "tag_graph_matcher", "tracer", "curvature_blocks", "curvature_estimator", "optimizer", # Patches second moments "patches_moments", "patches_moments_explicit", # Layer and loss tags "LossTag", "LayerTag", "register_generic", "register_dense", "register_conv2d", "register_scale_and_shift", # Tag graph matcher "auto_register_tags", # Tracer "ProcessedJaxpr", "loss_tags_vjp", "loss_tags_jvp", "loss_tags_hvp", "layer_tags_vjp", # Loss functions "LossFunction", "NegativeLogProbLoss", "DistributionNegativeLogProbLoss", "NormalMeanNegativeLogProbLoss", "NormalMeanVarianceNegativeLogProbLoss", "MultiBernoulliNegativeLogProbLoss", "CategoricalLogitsNegativeLogProbLoss", "OneHotCategoricalLogitsNegativeLogProbLoss", "register_sigmoid_cross_entropy_loss", "register_multi_bernoulli_predictive_distribution", "register_softmax_cross_entropy_loss", "register_categorical_predictive_distribution", "register_squared_error_loss", "register_normal_predictive_distribution", # Curvature blocks "CurvatureBlock", "ScaledIdentity", "Diagonal", "Full", "KroneckerFactored", "TwoKroneckerFactored", "NaiveDiagonal", "NaiveFull", "DenseDiagonal", "DenseFull", "DenseTwoKroneckerFactored", "Conv2DDiagonal", "Conv2DFull", "Conv2DTwoKroneckerFactored", "ScaleAndShiftDiagonal", "ScaleAndShiftFull", "set_max_parallel_elements", "get_max_parallel_elements", "set_default_eigen_decomposition_threshold", "get_default_eigen_decomposition_threshold", # Estimators "CurvatureEstimator", "BlockDiagonalCurvature", "ExplicitExactCurvature", "ImplicitExactCurvature", "set_default_tag_to_block_ctor", "get_default_tag_to_block_ctor", # Optimizers "Optimizer", ) # _________________________________________ # / Please don't use symbols in `_src` they \ # \ are not part of the KFAC Jax public API./ # ----------------------------------------- # \ ^__^ # \ (oo)\_______ # (__)\ )\/\ # ||----w | # || || # try: del _src # pylint: disable=undefined-variable except NameError: pass
kfac-jax-main
kfac_jax/__init__.py
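The `__init__.py` above mainly re-exports the `_src` modules; in user code the loss registration functions are the usual entry point that tells K-FAC which distribution a model's outputs parameterize. A minimal sketch with a hypothetical linear model, where only `register_softmax_cross_entropy_loss` is taken from the public API listed above:

import jax
import jax.numpy as jnp
import kfac_jax


def loss_fn(params, batch):
  # Hypothetical model; any network producing 2D (batch, classes) logits works.
  logits = batch['x'] @ params['w'] + params['b']
  # Register the predictive distribution so the curvature estimator can find it.
  kfac_jax.register_softmax_cross_entropy_loss(logits, targets=batch['y'])
  log_p = jax.nn.log_softmax(logits)
  return -jnp.mean(jnp.take_along_axis(log_p, batch['y'][:, None], axis=1))

# loss_fn is then typically wrapped with jax.value_and_grad and handed to
# kfac_jax.Optimizer; see the project README for the full training loop.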
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """"K-FAC loss functions objects, tags and registration functions.""" import abc from typing import Optional, Sequence, Tuple import distrax import jax import jax.numpy as jnp from kfac_jax._src import layers_and_loss_tags as tags from kfac_jax._src import utils Array = utils.Array Numeric = utils.Numeric PRNGKey = utils.PRNGKey Shape = utils.Shape DType = utils.DType class LossFunction(utils.Finalizable): """Abstract base class for loss functions. Note that unlike typical loss functions used in neural networks these are neither summed nor averaged over the batch and the output of evaluate() will not be a scalar. It is up to the user to then to correctly manipulate them as needed. """ def __init__(self, weight: Numeric): """Initializes the loss instance. Args: weight: The relative weight attributed to the loss. """ if not isinstance(weight, (int, float)): if not isinstance(weight, Array) or weight.size > 1: raise ValueError("`weight` must be a scalar value.") super().__init__() self._weight = weight self.finalize() @property def dtype(self) -> DType: return self.parameter_dependants[0].dtype @property def weight(self) -> Numeric: """The relative weight of the loss.""" return self._weight @property @abc.abstractmethod def targets(self) -> Optional[Array]: """The targets (if present) used for evaluating the loss.""" @property @abc.abstractmethod def parameter_dependants(self) -> Tuple[Array, ...]: """All the parameter dependent arrays of the loss.""" @property def num_parameter_dependants(self) -> int: """Number of parameter dependent arrays of the loss.""" return len(self.parameter_dependants) @property @abc.abstractmethod def parameter_independants(self) -> Tuple[Numeric, ...]: """All the parameter independent arrays of the loss.""" @property def num_parameter_independants(self) -> int: """Number of parameter independent arrays of the loss.""" return len(self.parameter_independants) @abc.abstractmethod def copy_with_different_inputs( self, parameter_dependants: Sequence[Array], ) -> "LossFunction": """Creates a copy of the loss function object, but with different inputs.""" def evaluate( self, targets: Optional[Array] = None, coefficient_mode: str = "regular", ) -> Array: """Evaluates the loss function on the targets. Args: targets: The targets, on which to evaluate the loss. If this is set to ``None`` will use ``self.targets`` instead. coefficient_mode: Specifies how to use the relative weight of the loss in the returned value. There are three options: 1. 'regular' - returns ``self.weight * loss(targets)`` 2. 'sqrt' - returns ``sqrt(self.weight) * loss(targets)`` 3. 'off' - returns ``loss(targets)`` Returns: The value of the loss scaled appropriately by ``self.weight`` according to the coefficient mode. Raises: ValueError if both ``targets`` and ``self.targets`` are ``None``. 
""" if targets is None and self.targets is None: raise ValueError("Cannot evaluate losses with unspecified targets.") elif targets is None: targets = self.targets if coefficient_mode == "regular": multiplier = self.weight elif coefficient_mode == "sqrt": multiplier = jnp.sqrt(self.weight) elif coefficient_mode == "off": multiplier = 1.0 else: raise ValueError(f"Unrecognized coefficient_mode={coefficient_mode}.") return self._evaluate(targets) * multiplier @abc.abstractmethod def _evaluate(self, targets: Array) -> Array: """Evaluates the value of the loss, disregarding the relative weight.""" def grad_of_evaluate( self, targets: Optional[Array], coefficient_mode: str, ) -> Tuple[Array, ...]: """Evaluates the gradient of the loss function, w.r.t. its inputs. Args: targets: The targets at which to evaluate the loss. If this is ``None`` will use ``self.targets`` instead. coefficient_mode: The coefficient mode to use for evaluation. See ``self.evaluate`` for more details. Returns: The gradient of the loss function w.r.t. its inputs, at the provided targets. """ def evaluate_sum(inputs: Sequence[Array]) -> Array: """Evaluates the loss summed over all axis, including batch etc.""" instance = self.copy_with_different_inputs(inputs) return jnp.sum(instance.evaluate(targets, coefficient_mode)) return jax.grad(evaluate_sum)(self.parameter_dependants) def multiply_ggn( self, vector: Sequence[Array], ) -> Tuple[Array, ...]: """Right-multiplies a vector by the GGN of the loss function. Here the GGN is the Generalized Gauss-Newton matrix (whose definition is somewhat flexible) of the loss function with respect to its inputs. Args: vector: The vector to multiply. Must have the same shape(s) as ``self.inputs``. Returns: The vector right-multiplied by the GGN. Will have the same shape(s) as ``self.inputs``. """ return utils.scalar_mul(self.multiply_ggn_unweighted(vector), self.weight) @abc.abstractmethod def multiply_ggn_unweighted( self, vector: Sequence[Array], ) -> Tuple[Array, ...]: """Unweighted version of :func:`~LossFunction.multiply_ggn`.""" def multiply_ggn_factor( self, vector: Array, ) -> Tuple[Array, ...]: """Right-multiplies a vector by a factor B of the GGN. Here the GGN is the Generalized Gauss-Newton matrix (whose definition is somewhat flexible) of the loss function with respect to its inputs. Typically this will be block-diagonal across different cases in the batch, since the loss function is typically summed across cases. Note that B can be any matrix satisfying ``B * B^T = G`` where ``G`` is the GGN, but will agree with the one used in the other methods of this class. Args: vector: The vector to multiply. Must be of the shape(s) given by 'self.ggn_factor_inner_shape'. Returns: The vector right-multiplied by B. Will be of the same shape(s) as ``self.inputs``. """ return utils.scalar_mul( self.multiply_ggn_factor_unweighted(vector), jnp.sqrt(self.weight)) @abc.abstractmethod def multiply_ggn_factor_unweighted( self, vector: Array ) -> Tuple[Array, ...]: """Unweighted version of :func:`~LossFunction.multiply_ggn_factor`.""" def multiply_ggn_factor_transpose( self, vector: Sequence[Array], ) -> Array: """Right-multiplies a vector by the transpose of a factor B of the GGN. Here the GGN is the Generalized Gauss-Newton matrix (whose definition is somewhat flexible) of the loss function with respect to its inputs. Typically this will be block-diagonal across different cases in the batch, since the loss function is typically summed across cases. 
Note that B can be any matrix satisfying ``B * B^T = G`` where G is the GGN, but will agree with the one used in the other methods of this class. Args: vector: The vector to multiply. Must have the same shape(s) as ``self.inputs``. Returns: The vector right-multiplied by B^T. Will be of the shape(s) given by ``self.ggn_factor_inner_shape``. """ return utils.scalar_mul( self.multiply_ggn_factor_transpose_unweighted(vector), jnp.sqrt(self.weight)) @abc.abstractmethod def multiply_ggn_factor_transpose_unweighted( self, vector: Sequence[Array], ) -> Array: """Unweighted version of :func:`~LossFunction.multiply_ggn_factor_transpose`.""" def multiply_ggn_factor_replicated_one_hot( self, index: Sequence[int], ) -> Tuple[Array, ...]: """Right-multiplies a replicated-one-hot vector by a factor B of the GGN. Here the GGN is the Generalized Gauss-Newton matrix (whose definition is somewhat flexible) of the loss function with respect to its inputs. Typically this will be block-diagonal across different cases in the batch, since the loss function is typically summed across cases. A replicated-one-hot vector means a tensor which, for each slice along the batch dimension (assumed to be dimension 0), is 1.0 in the entry corresponding to the given index and 0 elsewhere. Note that B can be any matrix satisfying ``B * B^T = G`` where G is the GGN, but will agree with the one used in the other methods of this class. Args: index: A tuple representing in the index of the entry in each slice that is 1.0. Note that len(index) must be equal to the number of elements of the ``ggn_factor_inner_shape`` tensor minus one. Returns: The vector right-multiplied by B^T. Will be of the same shape(s) as the ``inputs`` property. """ return utils.scalar_mul( self.multiply_ggn_factor_replicated_one_hot_unweighted(index), jnp.sqrt(self.weight)) @abc.abstractmethod def multiply_ggn_factor_replicated_one_hot_unweighted( self, index: Sequence[int], ) -> Tuple[Array, ...]: """Unweighted version of :func:`~LossFunction.multiply_ggn_factor_replicated_one_hot`.""" @property @abc.abstractmethod def ggn_factor_inner_shape(self) -> Shape: """The shape of the array returned by `self.multiply_ggn_factor`.""" class NegativeLogProbLoss(LossFunction): """Base class for loss functions that represent negative log-probability.""" @property def parameter_dependants(self) -> Tuple[Array, ...]: return self.params @property @abc.abstractmethod def params(self) -> Tuple[Array, ...]: """Parameters to the underlying distribution.""" def multiply_fisher( self, vector: Sequence[Array], ) -> Tuple[Array, ...]: """Right-multiplies a vector by the Fisher. Args: vector: The vector to multiply. Must have the same shape(s) as ``self.inputs``. Returns: The vector right-multiplied by the Fisher. Will have of the same shape(s) as ``self.inputs``. """ return utils.scalar_mul( self.multiply_fisher_unweighted(vector), self.weight) @abc.abstractmethod def multiply_fisher_unweighted( self, vector: Sequence[Array], ) -> Tuple[Array, ...]: """Unweighted version of :func:`~LossFunction.multiply_fisher`.""" def multiply_fisher_factor( self, vector: Array, ) -> Tuple[Array, ...]: """Right-multiplies a vector by a factor B of the Fisher. Here the Fisher is the Fisher information matrix (i.e. expected outer- product of gradients) with respect to the parameters of the underlying probability distribution (whose log-prob defines the loss). 
Typically this will be block-diagonal across different cases in the batch, since the distribution is usually (but not always) conditionally iid across different cases. Note that B can be any matrix satisfying ``B * B^T = F`` where F is the Fisher, but will agree with the one used in the other methods of this class. Args: vector: The vector to multiply. Must have the same shape(s) as ``self.fisher_factor_inner_shape``. Returns: The vector right-multiplied by B. Will have the same shape(s) as ``self.inputs``. """ return utils.scalar_mul( self.multiply_fisher_factor_unweighted(vector), jnp.sqrt(self.weight)) @abc.abstractmethod def multiply_fisher_factor_unweighted( self, vector: Array, ) -> Tuple[Array, ...]: """Unweighted version of :func:`~LossFunction.multiply_fisher_factor`.""" def multiply_fisher_factor_transpose( self, vector: Sequence[Array], ) -> Array: """Right-multiplies a vector by the transpose of a factor B of the Fisher. Here the Fisher is the Fisher information matrix (i.e. expected outer- product of gradients) with respect to the parameters of the underlying probability distribution (whose log-prob defines the loss). Typically this will be block-diagonal across different cases in the batch, since the distribution is usually (but not always) conditionally iid across different cases. Note that B can be any matrix satisfying ``B * B^T = F`` where F is the Fisher, but will agree with the one used in the other methods of this class. Args: vector: The vector to multiply. Must have the same shape(s) as ``self.inputs``. Returns: The vector right-multiplied by B^T. Will have the shape given by ``self.fisher_factor_inner_shape``. """ return utils.scalar_mul( self.multiply_fisher_factor_transpose_unweighted(vector), jnp.sqrt(self.weight)) @abc.abstractmethod def multiply_fisher_factor_transpose_unweighted( self, vector: Sequence[Array], ) -> Array: """Unweighted version of :func:`~LossFunction.multiply_fisher_factor_transpose`.""" def multiply_fisher_factor_replicated_one_hot( self, index: Sequence[int], ) -> Tuple[Array, ...]: """Right-multiplies a replicated-one-hot vector by a factor B of the Fisher. Here the Fisher is the Fisher information matrix (i.e. expected outer- product of gradients) with respect to the parameters of the underlying probability distribution (whose log-prob defines the loss). Typically this will be block-diagonal across different cases in the batch, since the distribution is usually (but not always) conditionally iid across different cases. A replicated-one-hot vector means a tensor which, for each slice along the batch dimension (assumed to be dimension 0), is 1.0 in the entry corresponding to the given index and 0 elsewhere. Note that B can be any matrix satisfying ``B * B^T = H`` where H is the Fisher, but will agree with the one used in the other methods of this class. Args: index: A tuple representing in the index of the entry in each slice that is 1.0. Note that len(index) must be equal to the number of elements of the ``fisher_factor_inner_shape`` tensor minus one. Returns: The vector right-multiplied by B. Will have the same shape(s) as ``self.inputs``. 
""" return utils.scalar_mul( self.multiply_fisher_factor_replicated_one_hot_unweighted(index), jnp.sqrt(self.weight)) @abc.abstractmethod def multiply_fisher_factor_replicated_one_hot_unweighted( self, index: Sequence[int], ) -> Tuple[Array, ...]: """Unweighted version of :func:`~LossFunction.multiply_fisher_factor_replicated_one_hot`.""" @property @abc.abstractmethod def fisher_factor_inner_shape(self) -> Shape: """The shape of the array returned by :func:`~LossFunction.multiply_fisher_factor`.""" @abc.abstractmethod def sample(self, rng: PRNGKey) -> Array: """Sample ``targets`` from the underlying distribution.""" def grad_of_evaluate_on_sample( self, rng: Array, coefficient_mode: str, ) -> Tuple[Array, ...]: """Evaluates the gradient of the log probability on a random sample. Args: rng: Jax PRNG key for sampling. coefficient_mode: The coefficient mode to use for evaluation. Returns: The gradient of the log probability of targets sampled from the distribution. """ return self.grad_of_evaluate(self.sample(rng), coefficient_mode) class NaturalParamsNegativeLogProbLoss(NegativeLogProbLoss, abc.ABC): """Negative log-probability loss, whose inputs are natural parameters. We will take the GGN of the loss to be the Fisher associated with the distribution, which also happens to be equal to the Hessian for this class of loss functions. See here: https://arxiv.org/abs/1412.1193 Natural parameters are defined for exponential-family models. See for example `wikipedia <https://en.wikipedia.org/wiki/Exponential_family>`__. """ def multiply_ggn_unweighted( self, vector: Sequence[Array], ) -> Tuple[Array, ...]: return self.multiply_fisher_unweighted(vector) def multiply_ggn_factor_unweighted( self, vector: Array, ) -> Tuple[Array, ...]: return self.multiply_fisher_factor_unweighted(vector) def multiply_ggn_factor_transpose_unweighted( self, vector: Sequence[Array], ) -> Array: return self.multiply_fisher_factor_transpose_unweighted(vector) def multiply_ggn_factor_replicated_one_hot_unweighted( self, index: Sequence[int], ) -> Tuple[Array, ...]: return self.multiply_fisher_factor_replicated_one_hot_unweighted(index) @property def ggn_factor_inner_shape(self) -> Shape: return self.fisher_factor_inner_shape class DistributionNegativeLogProbLoss(NegativeLogProbLoss): """Negative log-probability loss that uses a Distrax distribution.""" @property @abc.abstractmethod def dist(self) -> distrax.Distribution: """The underlying Distrax distribution.""" def _evaluate(self, targets: Array) -> Array: # keeps leading dims intact return -self.dist.log_prob(targets) # pytype: disable=bad-return-type def sample(self, rng: PRNGKey) -> Array: return self.dist.sample(seed=rng) # pytype: disable=bad-return-type @property def fisher_factor_inner_shape(self) -> Shape: return jax.eval_shape( lambda: self.sample(rng=jax.random.PRNGKey(0))).shape class NormalMeanNegativeLogProbLoss(DistributionNegativeLogProbLoss, NaturalParamsNegativeLogProbLoss): """Loss log prob loss for a normal distribution parameterized by a mean vector. Note that the covariance is treated as the identity divided by 2. Also note that the Fisher for such a normal distribution with respect the mean parameter is given by: F = (1 / variance) * I See for example https://www.ii.pwr.edu.pl/~tomczak/PDF/[JMT]Fisher_inf.pdf. """ def __init__( self, mean: Array, targets: Optional[Array] = None, variance: Numeric = 0.5, weight: Numeric = 1.0, ): """Initializes the loss instance. Args: mean: The mean of the normal distribution. 
targets: Optional targets to use for evaluation. variance: The scalar variance of the normal distribution. weight: The relative weight of the loss. """ if not isinstance(variance, (int, float)): if not isinstance(variance, Array) or variance.size > 1: raise ValueError("`variance` must be either a python scalar or a " "scalar array.") self._mean = mean self._targets = targets self._variance = variance super().__init__(weight=weight) @property def mean(self) -> Array: return self._mean @property def variance(self) -> Numeric: return self._variance @property def targets(self) -> Optional[Array]: return self._targets @property def parameter_independants(self) -> Tuple[Numeric, ...]: arrays = (self.variance, self.weight) if self._targets is not None: arrays = (self._targets,) + arrays return arrays @property def dist(self) -> distrax.MultivariateNormalDiag: scale_diag = jnp.full_like(self.mean, jnp.sqrt(self.variance)) return distrax.MultivariateNormalDiag(loc=self.mean, scale_diag=scale_diag) @property def params(self) -> Tuple[Array]: return (self.mean,) def copy_with_different_inputs( self, parameter_dependants: Sequence[Array], ) -> "NormalMeanNegativeLogProbLoss": """Creates the same :class:`~LossFunction` object, but with different inputs. Args: parameter_dependants: The inputs to use to the constructor of a class instance. This must be a sequence of length 1. Returns: An instance of :class:`~NormalMeanNegativeLogPorLoss` with the provided inputs. Raises: A ValueError if the ``inputs`` is a sequence of different length than 1. """ [mean] = parameter_dependants return NormalMeanNegativeLogProbLoss( mean=mean, targets=self._targets, variance=self._variance, weight=self._weight, ) def multiply_fisher_unweighted( self, vector: Sequence[Array] ) -> Tuple[Array]: return (vector[0] / self._variance,) def multiply_fisher_factor_unweighted( self, vector: Array, ) -> Tuple[Array]: return (vector / jnp.sqrt(self._variance),) def multiply_fisher_factor_transpose_unweighted( self, vector: Sequence[Array], ) -> Array: # it's symmetric return self.multiply_fisher_factor_unweighted(vector[0])[0] def multiply_fisher_factor_replicated_one_hot_unweighted( self, index: Sequence[int], ) -> Tuple[Array]: index = index[0] ones_slice = jnp.ones([self._mean.shape[0]])[..., None] output_slice = ones_slice / jnp.sqrt(self._variance) return (insert_slice_in_zeros(output_slice, 1, self._mean.shape[1], index),) class NormalMeanVarianceNegativeLogProbLoss(DistributionNegativeLogProbLoss): """Negative log prob loss for a normal distribution with mean and variance. This class parameterizes a multivariate normal distribution with n independent dimensions. Unlike :class:`~NormalMeanNegativeLogProbLoss`, this class does not assume the variance is held constant. The Fisher Information for n = 1 is given by: F = [[1 / variance, 0], [ 0, 0.5 / variance^2]] where the parameters of the distribution are concatenated into a single vector as ``[mean, variance]``. For n > 1, the mean parameter vector is concatenated with the variance parameter vector. For further details checkout the Wikipedia `page <https://en.wikipedia.org/wiki/Fisher_information#Multivariate_normal_distribution>`__. """ def __init__( self, mean: Array, variance: Array, targets: Optional[Array] = None, weight: Numeric = 1.0, ): """Initializes the loss instance. Args: mean: The mean of the normal distribution. variance: The variance of the normal distribution. targets: Optional targets to use for evaluation. weight: The relative weight of the loss. 
""" if mean.ndim != 2: raise ValueError("Only 2D mean array is supported.") if variance.ndim != 2: raise ValueError("Only 2D variance array is supported.") self._mean = mean self._variance = variance self._targets = targets super().__init__(weight=weight) @property def targets(self) -> Optional[Array]: return self._targets @property def parameter_independants(self) -> Tuple[Numeric, ...]: arrays = (self.weight,) if self._targets is not None: arrays = (self._targets,) + arrays return arrays @property def dist(self) -> distrax.MultivariateNormalDiag: return distrax.MultivariateNormalDiag( loc=self._mean, scale_diag=jnp.sqrt(self._variance)) @property def params(self) -> Tuple[Array, Array]: return self._mean, self._variance def copy_with_different_inputs( self, parameter_dependants: Sequence[Array] ) -> "NormalMeanVarianceNegativeLogProbLoss": """Creates the same :class:`~LossFunction` object, but with different inputs. Args: parameter_dependants: The inputs to use to the constructor of a class instance. This must be a sequence of length 2. Returns: An instance of :class:`~NormalMeanVarianceNegativeLogProbLoss` with the provided inputs. Raises: A ValueError if the ``inputs`` is a sequence of different length than 2. """ [mean, variance] = parameter_dependants return NormalMeanVarianceNegativeLogProbLoss( mean, variance, targets=self._targets, weight=self._weight) @property def _fisher_mean(self) -> Array: """The Fisher w.r.t. to the mean parameters.""" return 1. / self._variance @property def _fisher_mean_factor(self) -> Array: """The Fisher factor w.r.t. to the mean parameters.""" return jnp.sqrt(self._fisher_mean) @property def _fisher_var(self) -> Array: """The Fisher w.r.t. to the variance parameters.""" return 1. / (2 * jnp.square(self._variance)) @property def _fisher_var_factor(self) -> Array: """The Fisher factor w.r.t. to the variance parameters.""" return 1. / (jnp.sqrt(2.) * self._variance) def multiply_fisher_unweighted( self, vector: Sequence[Array], ) -> Tuple[Array, Array]: mean_vec, var_vec = vector return self._fisher_mean * mean_vec, self._fisher_var * var_vec def multiply_fisher_factor_unweighted( self, vector: Array, ) -> Tuple[Array, Array]: mean_vec, var_vec = jnp.split(vector, 2, axis=-1) result_mean_vec = self._fisher_mean_factor * mean_vec result_var_vec = self._fisher_var_factor * var_vec return result_mean_vec, result_var_vec def multiply_fisher_factor_transpose_unweighted( self, vector: Sequence[Array], ) -> Array: mean_vec, var_vec = vector result_mean_vec = self._fisher_mean_factor * mean_vec result_var_vec = self._fisher_var_factor * var_vec return jnp.concatenate([result_mean_vec, result_var_vec], axis=-1) def multiply_fisher_factor_replicated_one_hot_unweighted( self, index: Sequence[int], ) -> Tuple[Array, Array]: [index] = index if index < int(self._mean.shape[-1]): # Index corresponds to mean parameter. mean_slice = self._fisher_mean_factor[:, index][..., None] mean_output = insert_slice_in_zeros(mean_slice, 1, int( self._mean.shape[1]), index) var_output = jnp.zeros_like(mean_output) else: index -= int(self._mean.shape[-1]) # Index corresponds to variance parameter. 
var_slice = self._fisher_var_factor[:, index][..., None] var_output = insert_slice_in_zeros(var_slice, 1, int(self._variance.shape[1]), index) mean_output = jnp.zeros_like(var_output) return mean_output, var_output @property def fisher_factor_inner_shape(self) -> Shape: return self._mean.shape[:-1] + self._mean.shape[-1:] * 2 def multiply_ggn_unweighted( self, vector: Sequence[Array], ) -> Tuple[Array, ...]: raise NotImplementedError() def multiply_ggn_factor_unweighted( self, vector: Array ) -> Tuple[Array, ...]: raise NotImplementedError() def multiply_ggn_factor_transpose_unweighted( self, vector: Sequence[Array], ) -> Array: raise NotImplementedError() def multiply_ggn_factor_replicated_one_hot_unweighted( self, index: Sequence[int], ) -> Tuple[Array, ...]: raise NotImplementedError() @property def ggn_factor_inner_shape(self) -> Shape: raise NotImplementedError() class MultiBernoulliNegativeLogProbLoss(DistributionNegativeLogProbLoss, NaturalParamsNegativeLogProbLoss): """Negative log prob loss for multiple Bernoulli distributions parametrized by logits. Represents N independent Bernoulli distributions where N = len(logits). Its Fisher Information matrix is given by ``F = diag(p * (1-p))``, where ``p = sigmoid(logits)``. As F is diagonal with positive entries, its factor B is ``B = diag(sqrt(p * (1-p)))``. """ def __init__( self, logits: Array, targets: Optional[Array] = None, weight: Numeric = 1.0, ): """Initializes the loss instance. Args: logits: The logits of the Bernoulli distribution. targets: Optional targets to use for evaluation. weight: The relative weight of the loss. """ self._logits = logits self._targets = targets super().__init__(weight=weight) @property def targets(self) -> Optional[Array]: return self._targets @property def parameter_independants(self) -> Tuple[Numeric, ...]: arrays = (self.weight,) if self._targets is not None: arrays = (self._targets,) + arrays return arrays @property def dist(self) -> distrax.Bernoulli: return distrax.Bernoulli(logits=self._logits, dtype=jnp.int32) @property def _probs(self) -> Array: """The probabilities of the underlying Bernoulli distribution.""" return self.dist.probs # pytype: disable=bad-return-type @property def params(self) -> Tuple[Array]: return (self._logits,) def copy_with_different_inputs( self, parameter_dependants: Sequence[Array] ) -> "MultiBernoulliNegativeLogProbLoss": [logits] = parameter_dependants return MultiBernoulliNegativeLogProbLoss( logits, targets=self._targets, weight=self._weight) def multiply_fisher_unweighted( self, vector: Sequence[Array] ) -> Tuple[Array]: return (self._probs * (1 - self._probs) * vector[0],) def multiply_fisher_factor_unweighted( self, vector: Array ) -> Tuple[Array]: return (jnp.sqrt(self._probs * (1 - self._probs)) * vector,) def multiply_fisher_factor_transpose_unweighted( self, vector: Sequence[Array] ) -> Array: # it's symmetric in this case return self.multiply_fisher_factor_unweighted(vector[0])[0] def multiply_fisher_factor_replicated_one_hot_unweighted( self, index: Sequence[int], ) -> Tuple[Array]: [index] = index probs_slice = self._probs[:, index][..., None] output_slice = jnp.sqrt(probs_slice * (1 - probs_slice)) return (insert_slice_in_zeros( output_slice, 1, self._logits.shape[1], index),) class CategoricalLogitsNegativeLogProbLoss(DistributionNegativeLogProbLoss, NaturalParamsNegativeLogProbLoss): """Negative log prob loss for a categorical distribution parameterized by logits. 
Note that the Fisher (for a single case) of a categorical distribution, with respect to the natural parameters (i.e. the logits), is given by ``F = diag(p) - p*p^T``, where ``p = softmax(logits)``. F can be factorized as ``F = B * B^T``, where ``B = diag(q) - p*q^T`` and ``q`` is the entry-wise square root of ``p``. This is easy to verify using the fact that ``q^T*q = 1`` . """ def __init__( self, logits: Array, targets: Optional[Array] = None, mask: Optional[Array] = None, weight: Numeric = 1.0, ): """Initializes the loss instance. Args: logits: The logits of the Categorical distribution of shape ``(batch_size, output_size)``. targets: Optional targets to use for evaluation, which specify an integer index of the correct class. Must be of shape ``(batch_size,)``. mask: Optional mask to apply to losses over the batch. Should be 0/1-valued and of shape ``(batch_size,)``. The tensors returned by ``evaluate`` and ``grad_of_evaluate``, as well as the various matrix vector products, will be multiplied by mask (with broadcasting to later dimensions). weight: The relative weight of the loss. """ if mask is not None and mask.shape != logits.shape[:1]: raise ValueError("If provided, mask.shape must be equal to " "logits.shape[:1].") self._logits = logits self._targets = targets self._mask = mask super().__init__(weight=weight) @property def targets(self) -> Optional[Array]: return self._targets @property def mask(self) -> Optional[Array]: return self._mask @property def parameter_independants(self) -> Tuple[Numeric, ...]: arrays = (self.weight,) if self.mask is not None: arrays = (self.mask,) + arrays if self.targets is not None: arrays = (self.targets,) + arrays return arrays @property def dist(self) -> distrax.Categorical: return distrax.Categorical(logits=self._logits, dtype=jnp.int32) def _evaluate(self, targets: Array) -> Array: evl = super()._evaluate(targets) if self.mask is not None: return evl * self.mask else: return evl @property def _probs(self) -> Array: """The probabilities of the underlying Bernoulli distribution.""" if self.mask is not None: return self.dist.probs * self.mask[..., None] else: return self.dist.probs @property def _sqrt_probs(self) -> Array: """The square root of ``self.probs``.""" if self.mask is not None: return jnp.sqrt(self.dist.probs) * self.mask[..., None] else: return jnp.sqrt(self.dist.probs) @property def params(self) -> Tuple[Array]: return (self._logits,) @property def fisher_factor_inner_shape(self) -> Shape: return self._logits.shape def copy_with_different_inputs( self, parameter_dependants: Sequence[Array] ) -> "CategoricalLogitsNegativeLogProbLoss": [logits] = parameter_dependants return CategoricalLogitsNegativeLogProbLoss( logits, targets=self.targets, mask=self.mask, weight=self.weight) def multiply_fisher_unweighted( self, vector: Sequence[Array] ) -> Tuple[Array]: probs = self._probs fisher_product = vector[0] * probs - probs * jnp.sum( vector[0] * probs, axis=-1, keepdims=True) return (fisher_product,) def multiply_fisher_factor_unweighted( self, vector: Array ) -> Tuple[Array]: probs = self._probs sqrt_probs = self._sqrt_probs return (sqrt_probs * vector - probs * jnp.sum( sqrt_probs * vector, axis=-1, keepdims=True),) def multiply_fisher_factor_transpose_unweighted( self, vector: Sequence[Array] ) -> Array: probs = self._probs sqrt_probs = self._sqrt_probs return sqrt_probs * vector[0] - sqrt_probs * jnp.sum( probs * vector[0], axis=-1, keepdims=True) def multiply_fisher_factor_replicated_one_hot_unweighted( self, index: Sequence[int], ) -> 
Tuple[Array]: [index] = index probs = self._probs sqrt_probs_slice = self._sqrt_probs[:, index][..., None] padded_slice = insert_slice_in_zeros(sqrt_probs_slice, 1, probs.shape[1], index) return (padded_slice - probs * sqrt_probs_slice,) class OneHotCategoricalLogitsNegativeLogProbLoss( CategoricalLogitsNegativeLogProbLoss): """Neg log prob loss for a categorical distribution with onehot targets. Identical to CategoricalLogitsNegativeLogProbLoss except that the underlying distribution is OneHotCategorical as opposed to Categorical. """ @property def dist(self) -> distrax.OneHotCategorical: return distrax.OneHotCategorical(logits=self._logits, dtype=jnp.int32) def copy_with_different_inputs( self, parameter_dependants: Sequence[Array] ) -> "OneHotCategoricalLogitsNegativeLogProbLoss": [logits] = parameter_dependants return OneHotCategoricalLogitsNegativeLogProbLoss( logits, targets=self.targets, mask=self.mask, weight=self.weight) def insert_slice_in_zeros( slice_to_insert: Array, dim: int, dim_size: int, position: int, ) -> Array: """Inserts slice into a larger array of zeros. Forms a new array which is the same shape as slice_to_insert, except that the dimension given by ``dim`` is expanded to the size given by ``dim_size``. ``position`` determines the position (index) at which to insert the slice within that dimension. Assumes slice_to_insert.shape[dim] = 1. Args: slice_to_insert: The slice to insert. dim: The dimension which to expand with zeros. dim_size: The new size of the ``dim`` dimension. position: The position of ``slice_to_insert`` in the new tensor. Returns: The new array. Raises: ValueError: If the slice's shape at the given dim is not 1. """ slice_shape = slice_to_insert.shape if slice_shape[dim] != 1: raise ValueError(f"Expected slice_to_insert.shape to have {dim} dim of 1," f" but was {slice_to_insert.shape[dim]}.") before = [0] * len(slice_shape) after = before[:] before[dim] = position after[dim] = dim_size - position - 1 return jnp.pad(slice_to_insert, list(zip(before, after))) # _______ _____ _ _ _ _ # |__ __| | __ \ (_) | | | | (_) # | | __ _ __ _ | |__) |___ __ _ _ ___| |_ _ __ __ _| |_ _ ___ _ __ # | |/ _` |/ _` | | _ // _ \/ _` | / __| __| '__/ _` | __| |/ _ \| '_ \ # | | (_| | (_| | | | \ \ __/ (_| | \__ \ |_| | | (_| | |_| | (_) | | | | # |_|\__,_|\__, | |_| \_\___|\__, |_|___/\__|_| \__,_|\__|_|\___/|_| |_| # __/ | __/ | # |___/ |___/ NormalMeanNegativeLogProbLoss_tag = tags.LossTag( NormalMeanNegativeLogProbLoss, parameter_dependants=["mean"], parameter_independants=["targets", "variance", "weight"], ) NormalMeanVarianceNegativeLogProbLoss_tag = tags.LossTag( NormalMeanVarianceNegativeLogProbLoss, parameter_dependants=["mean", "variance"], parameter_independants=["targets", "weight"], ) MultiBernoulliNegativeLogProbLoss_tag = tags.LossTag( MultiBernoulliNegativeLogProbLoss, parameter_dependants=["logits"], parameter_independants=["targets", "weight"], ) CategoricalLogitsNegativeLogProbLoss_tag = tags.LossTag( CategoricalLogitsNegativeLogProbLoss, parameter_dependants=["logits"], parameter_independants=["targets", "weight"], ) OneHotCategoricalLogitsNegativeLogProbLoss_tag = tags.LossTag( OneHotCategoricalLogitsNegativeLogProbLoss, parameter_dependants=["logits"], parameter_independants=["targets", "weight"], ) def register_normal_predictive_distribution( mean: Array, targets: Optional[Array] = None, variance: float = 0.5, weight: Numeric = 1.0, ): """Registers a normal predictive distribution. 
This corresponds to a squared error loss of the form ``weight/(2*var) * ||target - mean||^2`` NOTE: this function assumes you are *not* averaging over non-batch dimensions when computing the loss. i.e. it assumes a loss of the form ``mean(sum(target - prediction), axis=range(1,target.ndims), axis=0)`` and not ``mean(target - prediction)``. If your loss is of the latter form you can compensate for it by passing the appropriate value to ``weight``. Args: mean: A tensor defining the mean vector of the distribution. The first dimension will usually be the batch size, but doesn't need to be (unless using ``estimation_mode='fisher_exact'`` or ``estimation_mode='ggn_exact'`` in the optimizer/estimator). targets: (OPTIONAL) The targets for the loss function. Only required if using ``estimation_mode='fisher_empirical'`` in the optimizer/estimator. (Default: None) variance: float. The variance of the distribution. Note that the default value of 0.5 corresponds to a standard squared error loss weight * ||target - prediction||^2. If you want your squared error loss to be of the form ``0.5*coeff*||target - prediction||^2`` you should use variance=1.0. (Default: 0.5) weight: A scalar coefficient to multiply the log prob loss associated with this distribution. The Fisher will be multiplied by the corresponding factor. In general this is NOT equivalent to changing the temperature of the distribution, but in the ase of normal distributions it may be. (Default: 1.0) """ if targets is None: args = [mean, variance, weight] args_names = ["mean", "variance", "weight"] else: args = [mean, targets, variance, weight] args_names = ["mean", "targets", "variance", "weight"] NormalMeanNegativeLogProbLoss_tag.bind(*args, args_names=tuple(args_names)) def register_squared_error_loss( prediction: Array, targets: Optional[Array] = None, weight: Numeric = 1.0, ) -> Array: """Registers a squared error loss function. This assumes the squared error loss of the form ``||target - prediction||^2``, averaged across the mini-batch. If your loss uses a coefficient of 0.5 you need to set the "weight" argument to reflect this. NOTE: this function assumes you are *not* averaging over non-batch dimensions when computing the loss. i.e. it assumes a loss of the form ``mean(sum(target - prediction), axis=range(1,target.ndims), axis=0)`` and not ``mean(target - prediction)`` If your loss is of the latter form you can compensate for it by passing the appropriate value to ``weight``. Args: prediction: The prediction made by the network (i.e. its output). The first dimension will usually be the batch size, but doesn't need to be (unless using ``estimation_mode='fisher_exact'`` or ``estimation_mode='ggn_exact'`` in the optimizer/estimator). targets: (OPTIONAL) The targets for the loss function. Only required if using ``estimation_mode='fisher_empirical'`` in the optimizer/estimator. (Default: None) weight: A float coefficient to multiply the loss function by. (Default: 1.0) """ register_normal_predictive_distribution( prediction, targets, variance=0.5, weight=weight) # pytype: disable=bad-return-type # numpy-scalars def register_multi_bernoulli_predictive_distribution( logits: Array, targets: Optional[Array] = None, weight: Numeric = 1.0, ): """Registers a multi-Bernoulli predictive distribution. Note that this is distinct from :func:`~register_categorical_predictive_distribution` and should not be confused with it. Args: logits: The logits of the distribution (i.e. its parameters) as a 2D array of floats. 
The first dimension will usually be the batch size, but doesn't need to be (unless using ``estimation_mode='fisher_exact'`` or ``estimation_mode='ggn_exact'`` in the optimizer/estimator). targets: (OPTIONAL) The targets for the loss function. Only required if using ``estimation_mode='fisher_empirical'`` in the optimizer/estimator. (Default: None) weight: (OPTIONAL) a scalar. A coefficient to multiply the log prob loss associated with this distribution. The Fisher will be multiplied by the corresponding factor. This is NOT equivalent to changing the temperature of the distribution since we don't renormalize the log prob in the objective function. (Default: 1.0) """ if targets is None: args = [logits, weight] args_names = ["logits", "weight"] else: args = [logits, targets, weight] args_names = ["logits", "targets", "weight"] MultiBernoulliNegativeLogProbLoss_tag.bind( *args, args_names=tuple(args_names)) def register_sigmoid_cross_entropy_loss( logits: Array, targets: Optional[Array] = None, weight: Numeric = 1.0, ): """Registers a sigmoid cross-entropy loss function. Note that this is distinct from :func:`~register_softmax_cross_entropy_loss` and should not be confused with it. It is similar to :func:`~register_multi_bernoulli_predictive_distribution` but without the explicit probabilistic interpretation. It behaves identically for now. Args: logits: The input logits of the loss as a 2D array of floats. The first dimension will usually be the batch size, but doesn't need to be (unless using ``estimation_mode='fisher_exact'`` or ``estimation_mode='ggn_exact'`` in the optimizer/estimator). targets: (OPTIONAL) The targets for the loss function. Must be of the same shape as ``logits``. Only required if using ``estimation_mode='fisher_empirical'`` in the optimizer/estimator. (Default: None) weight: (OPTIONAL) a scalar. A coefficient to multiply the loss function by. (Default: 1.0) """ register_multi_bernoulli_predictive_distribution( logits, targets, weight=weight) def register_categorical_predictive_distribution( logits: Array, targets: Optional[Array] = None, mask: Optional[Array] = None, weight: Numeric = 1.0, ): """Registers a categorical predictive distribution. Note that this is distinct from :func:`~register_multi_bernoulli_predictive_distribution` and should not be confused with it. Args: logits: The logits of the distribution (i.e. its parameters) as a 2D array of floats. The first dimension will usually be the batch size, but doesn't need to be (unless using ``estimation_mode='fisher_exact'`` or ``estimation_mode='ggn_exact'`` in the optimizer/estimator). The second dimension is the one over which the softmax is computed. targets: (OPTIONAL) The values at which the log probability of this distribution is evaluated (to give the loss). Must be a 2D array of integers with shape ``(logits.shape[0],)``. Only required if using ``estimation_mode='fisher_empirical'`` in the optimizer/estimator. (Default: None) mask: (OPTIONAL) Mask to apply to log probabilities generated by the distribution. Should be 0/1-valued and of shape ``(logits.shape[0],)``. Log probablities corresponding to mask values of False will be treated as constant and equal to 0. (Default: None) weight: (OPTIONAL) a scalar. A coefficient to multiply the log prob loss associated with this distribution. The Fisher will be multiplied by the corresponding factor. This is NOT equivalent to changing the temperature of the distribution since we don't renormalize the log prob in the objective function. 
(Default: 1.0) """ if targets is not None: if targets.ndim == logits.ndim: tag_cls = OneHotCategoricalLogitsNegativeLogProbLoss_tag elif targets.ndim == logits.ndim - 1: tag_cls = CategoricalLogitsNegativeLogProbLoss_tag else: raise ValueError(f"The logits ndim is {logits.ndim} and the targets ndim " f"must be either equal or one less than it, but is " f"{targets.ndim}.") else: tag_cls = CategoricalLogitsNegativeLogProbLoss_tag args = [logits] args_names = ["logits"] if targets is not None: args = args + [targets] args_names = args_names + ["targets"] if mask is not None: args = args + [mask] args_names = args_names + ["mask"] args = args + [weight] args_names = args_names + ["weight"] tag_cls.bind(*args, args_names=tuple(args_names)) def register_softmax_cross_entropy_loss( logits: Array, targets: Optional[Array] = None, mask: Optional[Array] = None, weight: Numeric = 1.0, ) -> Array: """Registers a softmax cross-entropy loss function. Note that this is distinct from :func:`~register_sigmoid_cross_entropy_loss` and should not be confused with it. It is similar to :func:`~register_categorical_predictive_distribution` but without the explicit probabilistic interpretation. It behaves identically for now. Args: logits: The input logits of the loss as a 2D array of floats. The first dimension will usually be the batch size, but doesn't need to be (unless using ``estimation_mode='fisher_exact'`` or ``estimation_mode='ggn_exact'`` in the optimizer/estimator). The second dimension is the one over which the softmax is computed. targets: (OPTIONAL) The targets for the loss function. Must be a 1D array of integers with shape ``(logits.shape[0],)``. Only required if using ``estimation_mode='fisher_empirical'`` in the optimizer/estimator. (Default: None) mask: (OPTIONAL) Mask to apply to losses. Should be 0/1-valued and of shape ``(logits.shape[0],)``. Losses corresponding to mask values of False will be treated as constant and equal to 0. (Default: None) weight: (OPTIONAL) a scalar. A coefficient to multiply the loss function by. (Default: 1.0) """ register_categorical_predictive_distribution(logits, targets=targets, mask=mask, weight=weight) # pytype: disable=bad-return-type # numpy-scalars
kfac-jax-main
kfac_jax/_src/loss_functions.py
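The registration functions above are meant to be called inside the model's forward/loss computation, so that the corresponding loss-tag primitives appear in the traced Jax graph that the K-FAC optimizer and estimator later inspect. Below is a minimal sketch of how that might look; the model, parameter names and data layout are made up for illustration, and only register_softmax_cross_entropy_loss comes from the module above (it is re-exported at the package level).

import jax
import jax.numpy as jnp
import kfac_jax


def loss_fn(params, batch):
  x, labels = batch  # labels: integer class ids of shape (batch_size,)
  logits = jnp.dot(x, params["w"]) + params["b"]
  # Tag the loss so the curvature estimator knows its type and its inputs.
  # `targets` is only strictly needed for estimation_mode='fisher_empirical'.
  kfac_jax.register_softmax_cross_entropy_loss(logits, targets=labels)
  log_p = jax.nn.log_softmax(logits, axis=-1)
  # Average negative log-likelihood over the batch.
  return -jnp.mean(jnp.take_along_axis(log_p, labels[:, None], axis=-1)[:, 0])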
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """K-FAC optimized functions for patches second moment(PSM) computation.""" import functools from typing import Optional, Sequence, TypeVar, Tuple, Union, List import jax from jax import interpreters from jax import lax import jax.numpy as jnp from kfac_jax._src import utils # Types for annotation T = TypeVar("T") Array = utils.Array Shape = utils.Shape TracedType = interpreters.partial_eval.DynamicJaxprTracer DimNumbers = Tuple[Shape, Shape, Shape] PaddingVariants = Union[str, int, Sequence[int], Sequence[Tuple[int, int]]] # Special global variables _USE_4D_CONVOLUTION: bool = True def set_use_4d_convolution_in_psm_loop(value: bool): """Sets whether a 4D convolution is used for the PSM computation.""" if not isinstance(value, bool): raise ValueError("The value provided must be a python bool.") global _USE_4D_CONVOLUTION _USE_4D_CONVOLUTION = value def get_use_4d_convolution_in_psm_loop() -> bool: """Returns whether a 4D convolution is used for the PSM computation.""" return _USE_4D_CONVOLUTION def _ceil(x: int, y: int) -> int: """Computes `ceil(x / y)` with only integer operations.""" return - (- x // y) class _ConvSpec: """Layout specification for arrays that will be used in a convolution.""" def __init__(self, order: Sequence[int]): """Initializes the array specification with the provided order.""" self.order = tuple(order) def __len__(self): return len(self.order) @property def n_axis(self) -> int: """Returns the index of the batch axis.""" return self.order[0] @property def c_axis(self) -> int: """Returns the index of the channel axis.""" return self.order[1] @property def spatial_axes(self) -> Tuple[int]: """Returns the indices of the spatial axes.""" return self.order[2:] def get_n(self, shape: Shape) -> int: """Returns the batch size of the given shape, under this spec layout.""" return shape[self.n_axis] def get_c(self, shape: Shape) -> int: """Returns the channel size of the given shape, under this spec layout.""" return shape[self.c_axis] def get_spatial(self, shape: Shape) -> Tuple[int, ...]: """Returns the spatial sizes of the given shape, under this spec layout.""" return tuple(shape[i] for i in self.spatial_axes) def expand_spatial_axes(self) -> "_ConvSpec": """Expands the layout spatial axes by preserving `n` and `c` order.""" n_axis = self.n_axis + sum(self.n_axis > axis for axis in self.spatial_axes) c_axis = self.c_axis + sum(self.c_axis > axis for axis in self.spatial_axes) spatial_axes = [] for axis in self.spatial_axes: spatial_axes.append(axis + sum(axis > a for a in self.spatial_axes)) spatial_axes.append(spatial_axes[-1] + 1) return _ConvSpec([n_axis, c_axis, *spatial_axes]) def swap_n_and_c(self) -> "_ConvSpec": """Swaps the batch and channel indices of the layout.""" return _ConvSpec([self.c_axis, self.n_axis, *self.spatial_axes]) def create_shape(self, n: T, c: T, *spatial_dims: T) -> Tuple[T, ...]: """Creates a shape according to this layout specification.""" if 
len(spatial_dims) != len(self.order) - 2: raise ValueError("Incorrect number of spatial dimensions.") result: List[T] = [None] * len(self) result[self.n_axis] = n result[self.c_axis] = c for ax, dim in zip(self.spatial_axes, spatial_dims): result[ax] = dim assert all(r is not None for r in result) return tuple(result) def change_nhwc_to_ihwo(self) -> "_ConvSpec": """Changes the layout from `NHWC` to `IHWO` where `I=C`, `O=N`.""" # Change the spec: NHWC -> IHWO where I=C, O=N order = [i - 2 if i > self.spatial_axes[1] else i for i in self.order[:4]] return _ConvSpec(order).swap_n_and_c() def _slice_array( array: Array, indices: Sequence[Union[int, TracedType]], sizes: Sequence[int], ) -> Array: """Takes a slice from the array provided.""" if any(isinstance(x, TracedType) for x in indices): # Any of the indices are dynamic values. return lax.dynamic_slice_p.bind(array, *indices, slice_sizes=sizes) else: # All indices are static values. index = tuple(slice(i, i + size) for i, size in zip(indices, sizes)) return array[index] def _output_spatial_shape( inputs_spatial_shape: Shape, kernel_spatial_shape: Shape, spatial_strides: Shape, padding: Union[str, Sequence[Tuple[int, int]]], ) -> Shape: """Returns the output spatial shape of the corresponding convolution.""" if isinstance(padding, str): if padding.lower() == "valid": return tuple(_ceil(d - k + 1, s) for d, k, s in zip(inputs_spatial_shape, kernel_spatial_shape, spatial_strides)) elif padding.lower() == "same": return tuple(_ceil(d, s) for d, s in zip(inputs_spatial_shape, spatial_strides)) else: raise ValueError(f"Unrecognized padding string {padding}!") else: shapes_strides_padding = zip( inputs_spatial_shape, kernel_spatial_shape, spatial_strides, padding) return tuple(_ceil(d + p[0] + p[1] - k + 1, s) for d, k, s, p in shapes_strides_padding) def _normalize_padding( inputs_spatial_shape: Shape, kernel_spatial_shape: Shape, spatial_strides: Shape, padding: PaddingVariants, ) -> Tuple[Tuple[int, int], ...]: """Returns the padding as a tuple of pairs of integers.""" n = len(kernel_spatial_shape) if isinstance(padding, str): if padding.lower() == "valid": return ((0, 0),) * n elif padding.lower() == "same": # https://github.com/tensorflow/tensorflow/blob/r1.8/tensorflow/core/kernels/conv_ops.cc#L571 output_shape = _output_spatial_shape(inputs_spatial_shape, kernel_spatial_shape, spatial_strides, "same") padding = [] for out_d, d, k, s in zip(output_shape, inputs_spatial_shape, kernel_spatial_shape, spatial_strides): pad = max(0, (out_d - 1) * s + k - d) padding.append((pad // 2, pad - pad // 2)) return tuple(padding) else: raise ValueError(f"Unrecognized padding: {padding}!") elif isinstance(padding, int): return ((padding, padding),) * n else: final_padding = [] for pad in padding: if isinstance(pad, int): final_padding.append((pad, pad)) else: final_padding.append(pad) return tuple(final_padding) def _normalize_strides( kernel_spatial_shape: Shape, strides: Union[int, Shape], ) -> Tuple[int, ...]: """Returns the strides as a tuple of integers.""" n = len(kernel_spatial_shape) if strides is None: return (1,) * n elif isinstance(strides, int): return (strides,) * n else: assert len(strides) == n return tuple(strides) def _data_format_to_dim_numbers( data_format: Optional[str], kernel_format: str = "HWIO", ) -> lax.ConvDimensionNumbers: """Converts the data format in dim numbers.""" if data_format is None: data_format = "NHWC" if not isinstance(data_format, str): raise ValueError("data_format must be either a python string or `None`.") 
data_format = lax.conv_general_permutations([data_format, kernel_format, data_format]) return lax.ConvDimensionNumbers(*data_format) def _parse_simple_args( inputs_shape: Shape, kernel_spatial_shape: Union[int, Shape], strides: Union[int, Shape] = 1, padding: PaddingVariants = "VALID", data_format: Optional[str] = "NHWC", dim_numbers: Optional[Union[DimNumbers, lax.ConvDimensionNumbers]] = None, ) -> Tuple[ Tuple[int, ...], Tuple[int, ...], Tuple[Tuple[int, int], ...], lax.ConvDimensionNumbers, ]: """Parses all convolutional arguments to a single unified format. Args: inputs_shape: A sequence of ints specifying the input's shape. kernel_spatial_shape: A sequence of ints specifying the kernel's shape. strides: A sequence of ints specifying strides in each spatial dimension, or a single int specifying the strides in every spatial dimension. padding: The padding can take one of the following formats: * str - Either 'VALID' or 'SAME' * int - Specifies the padding on both of sides of every spatial dimension. * sequence of ints - Specifies the padding on both sides of each spatial dimension. * sequence of pairs of ints - Specifies the padding on each side of each spatial dimension. data_format: The data format layout of the inputs. dim_numbers: If `data_format` is `None` this can specify the layout instead. Returns: A tuple of the (kernel shape, strides, padding, dim_numbers) """ spatial_dims = len(inputs_shape) - 2 if data_format is not None and dim_numbers is not None: raise ValueError("At least one of `data_format` and `dim_numbers` " "must be None.") if dim_numbers is not None: if not isinstance(dim_numbers, lax.ConvDimensionNumbers): if not isinstance(dim_numbers, (list, tuple)): raise ValueError("The provided dim_numbers argument must be either a " "list, tuple or lax.ConvDimensionNumbers.") if len(dim_numbers) != 3: raise ValueError("When the provided dim_numbers argument is a list or " "tuple it must have length 3, but has length " f"{len(dim_numbers)}.") lax_dim_numbers = lax.ConvDimensionNumbers(*dim_numbers) else: lax_dim_numbers: lax.ConvDimensionNumbers = dim_numbers else: lax_dim_numbers = _data_format_to_dim_numbers(data_format) if isinstance(kernel_spatial_shape, int): kernel_spatial_shape = (kernel_spatial_shape,) * spatial_dims if len(kernel_spatial_shape) != spatial_dims: raise ValueError("The provided argument `kernel_spatial_shape` must have " f"length equal to the spatial dimensions {spatial_dims} of" f" the inputs, but got {len(kernel_spatial_shape)}.") inputs_spatial_shape = _ConvSpec(lax_dim_numbers.lhs_spec).get_spatial( inputs_shape) kernel_spatial_shape = _ConvSpec(lax_dim_numbers.rhs_spec).get_spatial( kernel_spatial_shape) strides = _normalize_strides(kernel_spatial_shape, strides) padding = _normalize_padding( inputs_spatial_shape, kernel_spatial_shape, strides, padding) return kernel_spatial_shape, strides, padding, lax_dim_numbers def _num_conv_locations_full_spec( input_spatial_shape: Shape, kernel_spatial_shape: Shape, spatial_strides: Shape, spatial_padding: Sequence[Tuple[int, int]], ) -> int: """The number of convolution locations from the unified spec for arguments.""" if len(kernel_spatial_shape) != len(input_spatial_shape): raise ValueError("The `kernel_spatial_shape` and `input_spatial_shape` " "must have the same number of elements, got " f"{len(kernel_spatial_shape)} and " f"{len(input_spatial_shape)}.") if len(spatial_strides) != len(input_spatial_shape): raise ValueError("The `spatial_strides` and `input_spatial_shape` " "must have the same number of 
elements, got " f"{len(spatial_strides)} and " f"{len(input_spatial_shape)}.") if len(spatial_padding) != len(input_spatial_shape): raise ValueError("The `spatial_padding` and `input_spatial_shape` " "must have the same number of elements, got " f"{len(spatial_padding)} and " f"{len(input_spatial_shape)}.") num_locations = 1 for in_dim, k_dim, stride, padding in zip( input_spatial_shape, kernel_spatial_shape, spatial_strides, spatial_padding): num_locations *= _ceil(in_dim + padding[0] + padding[1] - k_dim + 1, stride) return num_locations def num_conv_locations( inputs_spatial_shape: Shape, kernel_spatial_shape: Union[int, Shape], spatial_strides: Union[int, Shape], spatial_padding: Union[str, int, Sequence[Tuple[int, int]]], ) -> int: """Returns the number of convolution locations for the provided shapes.""" inputs_spatial_shape = tuple(inputs_spatial_shape) n = len(inputs_spatial_shape) if isinstance(kernel_spatial_shape, int): kernel_spatial_shape = (kernel_spatial_shape,) * n spatial_strides = _normalize_strides(kernel_spatial_shape, spatial_strides) spatial_padding = _normalize_padding( inputs_spatial_shape, kernel_spatial_shape, spatial_strides, spatial_padding) return _num_conv_locations_full_spec( inputs_spatial_shape, kernel_spatial_shape, spatial_strides, spatial_padding) @utils.auto_scope_function def _the_conv4d( lhs: Array, lhs_spec: _ConvSpec, rhs: Array, rhs_spec: _ConvSpec, pad_h: int, pad_w: int, stride_h: int, stride_w: int, per_channel: bool = False, precision: Optional[jax.lax.Precision] = None, ) -> Array: """Performs a special conv4d or conv2d based on the global flag.""" assert len(rhs_spec) == 6 if get_use_4d_convolution_in_psm_loop(): # Reshape lhs to 6D array - (n, extra_h, 1, extra_w, 1, c) lhs_shape = list(lhs.shape) lhs_shape.insert(lhs_spec.spatial_axes[1] + 1, 1) lhs_shape.insert(lhs_spec.spatial_axes[0] + 1, 1) lhs = jnp.reshape(lhs, lhs_shape) # Change the spec: NHAWBC -> CHAWBN lhs_spec = rhs_spec.swap_n_and_c() # Change the spec: NHAWBC -> IHAWBO where I=C, O=N rhs_spec = rhs_spec.swap_n_and_c() dim_specs = (lhs_spec.order, rhs_spec.order, lhs_spec.order) if per_channel: @functools.partial(jax.vmap, in_axes=(lhs_spec.n_axis, rhs_spec.n_axis), out_axes=-1) def single_conv(x, y): return lax.conv_general_dilated( lhs=jnp.expand_dims(x, lhs_spec.n_axis), rhs=jnp.expand_dims(y, rhs_spec.n_axis), window_strides=(1, 1, 1, 1), padding=((0, pad_h), (0, 0), (0, pad_w), (0, 0)), lhs_dilation=(1, 1, 1, 1), rhs_dilation=(stride_h, 1, stride_w, 1), dimension_numbers=lax.ConvDimensionNumbers(*dim_specs), precision=precision, ) result = single_conv(lhs, rhs) assert result.shape[lhs_spec.n_axis] == 1 result = jnp.squeeze(result, lhs_spec.n_axis) assert result.shape[2] == 1 assert result.shape[4] == 1 result = jnp.squeeze(result, (2, 4)) return result[None] else: result = lax.conv_general_dilated( lhs=lhs, rhs=rhs, window_strides=(1, 1, 1, 1), padding=((0, pad_h), (0, 0), (0, pad_w), (0, 0)), lhs_dilation=(1, 1, 1, 1), rhs_dilation=(stride_h, 1, stride_w, 1), dimension_numbers=lax.ConvDimensionNumbers(*dim_specs), precision=precision, ) # Order the result such that one of the channel dims is after spatial dims if lhs_spec != (5, 0, 1, 2, 3, 4): min_index = 0 if lhs_spec.n_axis < lhs_spec.c_axis else 1 max_index = 1 - min_index axes = list(range(6)) if lhs_spec.order[min_index] != 0: axes.insert(0, axes.pop(lhs_spec.order[min_index])) if lhs_spec.order[max_index] != 5: axes.insert(5, axes.pop(lhs_spec.order[max_index])) result = jnp.transpose(result, axes=axes) assert 
result.shape[2] == 1 assert result.shape[4] == 1 result = jnp.squeeze(result, (2, 4)) return result[None, None] else: # Change the spec: NHWC -> CHWN lhs_spec = lhs_spec.swap_n_and_c() # Index rhs and remove the trivial dimensions rhs_slice: List[Union[slice, int]] = [slice(None)] * rhs.ndim rhs_slice[rhs_spec.spatial_axes[1]] = 0 rhs_slice[rhs_spec.spatial_axes[3]] = 0 rhs = rhs[tuple(rhs_slice)] rhs_spec = rhs_spec.change_nhwc_to_ihwo() dim_specs = (lhs_spec.order, rhs_spec.order, lhs_spec.order) if per_channel: vmap_single_conv = jax.vmap(lambda x, y: lax.conv_general_dilated( # pylint: disable=g-long-lambda lhs=jnp.expand_dims(x, lhs_spec.n_axis), rhs=jnp.expand_dims(y, rhs_spec.n_axis), window_strides=(1, 1), padding=((0, pad_h), (0, pad_w)), lhs_dilation=(1, 1), rhs_dilation=(stride_h, stride_w), dimension_numbers=lax.ConvDimensionNumbers(*dim_specs), precision=precision, ), in_axes=(lhs_spec.n_axis, rhs_spec.n_axis), out_axes=-1) result = vmap_single_conv(lhs, rhs) assert result.shape[lhs_spec.n_axis] == 1 result = jnp.squeeze(result, lhs_spec.n_axis) return result[None] else: result = lax.conv_general_dilated( lhs=lhs, rhs=rhs, window_strides=(1, 1), padding=((0, pad_h), (0, pad_w)), lhs_dilation=(1, 1), rhs_dilation=(stride_h, stride_w), dimension_numbers=lax.ConvDimensionNumbers(*dim_specs), precision=precision, ) # Order the result such that one of the channel dims is after spatial dims if lhs_spec != (3, 0, 1, 2): min_index = 0 if lhs_spec.n_axis < lhs_spec.c_axis else 1 max_index = 1 - min_index axes = list(range(4)) if lhs_spec.order[min_index] != 0: axes.insert(0, axes.pop(lhs_spec.order[min_index])) if lhs_spec.order[max_index] != 5: axes.insert(5, axes.pop(lhs_spec.order[max_index])) result = jnp.transpose(result, axes=axes) return result[None, None] def _validate_inputs_lengths( inputs: Array, kernel_spatial_shape: Shape, strides: Shape, padding: Tuple[Tuple[int, int], ...], ) -> None: """Checks that the provided arguments are valid.""" spatial_dims = inputs.ndim - 2 if spatial_dims != 2: raise ValueError("Currently `patches_second_moment` supports only 2D " "convolution, hence the input is expected to have rank 4," f" but has rank {inputs.ndim}.") if len(kernel_spatial_shape) != spatial_dims: raise ValueError("The argument `kernel_spatial_shape` must have length " f"equal to the number of spatial dimensions of the input -" f" {spatial_dims}, but instead has length " f"{len(kernel_spatial_shape)}.") if len(padding) != spatial_dims: raise ValueError("The argument `padding` must have length equal to the " "number of spatial dimensions of the input - " f"{spatial_dims}, but instead has length " f"{len(kernel_spatial_shape)}.") if len(strides) != 2: raise ValueError("The argument `strides` must have length equal to the " "number of spatial dimensions of the input - " f"{spatial_dims}, but instead has length " f"{len(kernel_spatial_shape)}.") @functools.partial(jax.jit, static_argnums=list(range(1, 12)), static_argnames=( "kernel_spatial_shape", "strides", "padding", "data_format", "dim_numbers", "inputs_dilation", "kernel_dilation", "feature_group_count", "batch_group_count", "unroll_loop", "precision")) @utils.auto_scope_function def patches_moments_explicit( inputs: Array, kernel_spatial_shape: Union[int, Shape], strides: Union[int, Shape] = 1, padding: PaddingVariants = "VALID", data_format: Optional[str] = "NHWC", dim_numbers: Optional[Union[DimNumbers, lax.ConvDimensionNumbers]] = None, inputs_dilation: Optional[Sequence[int]] = None, kernel_dilation: 
Optional[Sequence[int]] = None, feature_group_count: int = 1, batch_group_count: int = 1, unroll_loop: bool = False, precision: Optional[jax.lax.Precision] = None, weighting_array: Optional[Array] = None, ) -> Tuple[Array, Array]: """The exact same functionality as :func:`~patches_moments`, but explicitly extracts the patches via :func:`jax.lax.conv_general_dilated_patches`, potentially having a higher memory usage.""" kernel_spatial_shape, strides, padding, dim_numbers = _parse_simple_args( inputs.shape, kernel_spatial_shape, padding=padding, strides=strides, data_format=data_format, dim_numbers=dim_numbers) _validate_inputs_lengths(inputs, kernel_spatial_shape, strides, padding) in_spec = _ConvSpec(dim_numbers.lhs_spec) out_spec = _ConvSpec(dim_numbers.out_spec) n = in_spec.get_n(inputs.shape) c = in_spec.get_c(inputs.shape) inputs_spatial_shape = in_spec.get_spatial(inputs.shape) spec = _ConvSpec(dim_numbers.out_spec).swap_n_and_c().order matmul_dim_numbers = lax.ConvDimensionNumbers(spec, spec, spec) if feature_group_count not in (1, in_spec.get_c(inputs.shape)): raise ValueError("`patches_moments_explicit` does not support " "`feature_group_count` different from 1 or the number of " "channels of the inputs.") if batch_group_count != 1: raise ValueError("`patches_moments_explicit` does not support " "`batch_group_count` different from 1.") per_channel = feature_group_count != 1 vector_target_shape = kernel_spatial_shape + (c,) leading_shape = kernel_spatial_shape if per_channel else vector_target_shape matrix_target_shape = leading_shape + vector_target_shape vector_axis = tuple(a for a in range(4) if a != out_spec.c_axis) # Broadcast the weighting function if weighting_array is not None: if weighting_array.ndim == inputs.ndim: pass elif weighting_array.ndim == inputs.ndim - 1: axis = dim_numbers.lhs_spec[1] weighting_array = jnp.expand_dims(weighting_array, axis=axis) elif weighting_array.ndim == 1: while weighting_array.ndim < inputs.ndim: weighting_array = weighting_array[:, None] else: raise ValueError(f"`weighting_array` shape {weighting_array.shape} is " f"not compatible with the inputs shape {inputs.shape}" ".") if not per_channel: vector_shape = (c,) + kernel_spatial_shape matrix_shape = vector_shape + vector_shape if weighting_array is None: weighting_array = jnp.ones([], dtype=inputs.dtype) # Standard explicit patches calculation extracted_patches = lax.conv_general_dilated_patches( inputs, filter_shape=kernel_spatial_shape, window_strides=strides, padding=padding, lhs_dilation=inputs_dilation, rhs_dilation=kernel_dilation, dimension_numbers=dim_numbers, precision=precision, ) weighted_patches = extracted_patches * weighting_array matrix_results = lax.conv_general_dilated( extracted_patches, weighted_patches, window_strides=strides, padding="VALID", dimension_numbers=matmul_dim_numbers, precision=precision, ) matrix_results = jnp.reshape(matrix_results, matrix_shape) vector_results = jnp.reshape( jnp.sum(weighted_patches, axis=vector_axis), vector_shape) if c > 1: # The output of `conv_general_dilated_patches` is ordered `chw` return (jnp.transpose(matrix_results, (1, 2, 0, 4, 5, 3)), jnp.transpose(vector_results, [1, 2, 0])) else: return (jnp.reshape(matrix_results, matrix_target_shape), jnp.reshape(vector_results, vector_target_shape)) # Loop over channels def general_loop_body(i, image): index = in_spec.create_shape(0, i, 0, 0) sizes = in_spec.create_shape(n, 1, *inputs_spatial_shape) image_channel = _slice_array(image, index, sizes) # Index the weighting function if 
weighting_array is not None: if weighting_array.shape[in_spec.c_axis] == 1: wf_i = weighting_array else: wf_n = weighting_array[in_spec.n_axis] wf_spatial = [weighting_array.shape[a] for a in in_spec.spatial_axes] wf_sizes = in_spec.create_shape(wf_n, jnp.ones([]), *wf_spatial) wf_i = _slice_array(weighting_array, index, wf_sizes) else: wf_i = None matrix, vector = patches_moments_explicit( image_channel, kernel_spatial_shape=kernel_spatial_shape, strides=strides, padding=padding, data_format=None, dim_numbers=dim_numbers, precision=precision, weighting_array=wf_i, ) return jnp.squeeze(matrix, axis=2), vector if unroll_loop: results = [general_loop_body(ii, inputs) for ii in range(c)] matrix_results, vector_results = zip(*results) matrix_results = jnp.concatenate(matrix_results, axis=-1) vector_results = jnp.concatenate(vector_results, axis=-1) return matrix_results, vector_results def loop_cond(args): return args[0] < c def loop_body(args): i, image, matrix_result, vector_result = args matrix_update, vector_update = general_loop_body(i, image) matrix_result = lax.dynamic_update_slice( matrix_result, matrix_update, (0, 0, 0, 0, i)) vector_result = lax.dynamic_update_slice( vector_result, vector_update, (0, 0, i)) return i + 1, image, matrix_result, vector_result init_vals = (0, inputs, jnp.zeros(matrix_target_shape, dtype=inputs.dtype), jnp.zeros(vector_target_shape, dtype=inputs.dtype)) return lax.while_loop(loop_cond, loop_body, init_vals)[-2:] @functools.partial(jax.jit, static_argnums=list(range(1, 12)), static_argnames=( "kernel_spatial_shape", "strides", "padding", "data_format", "dim_numbers", "inputs_dilation", "kernel_dilation", "feature_group_count", "batch_group_count", "unroll_loop", "precision")) @utils.auto_scope_function def patches_moments( inputs: Array, kernel_spatial_shape: Union[int, Shape], strides: Union[int, Shape] = 1, padding: PaddingVariants = "VALID", data_format: Optional[str] = "NHWC", dim_numbers: Optional[Union[DimNumbers, lax.ConvDimensionNumbers]] = None, inputs_dilation: Optional[Sequence[int]] = None, kernel_dilation: Optional[Sequence[int]] = None, feature_group_count: int = 1, batch_group_count: int = 1, unroll_loop: bool = False, precision: Optional[jax.lax.Precision] = None, weighting_array: Optional[Array] = None, ) -> Tuple[Array, Array]: """Computes the first and second moment of the convolutional patches. Since the code is written to support arbitrary convolution data formats, e.g. both NHWC and NCHW, in comments above any of the procedures is written the simplified version of what the statements below do, if the data format was fixed to NHWC. Args: inputs: The batch of images. kernel_spatial_shape: The spatial dimensions of the filter (int or list of ints). strides: The spatial dimensions of the strides (int or list of ints). padding: The padding (str or list of pairs of ints). data_format: The data format of the inputs (None, NHWC, NCHW). dim_numbers: Instance of :class:`jax.lax.ConvDimensionNumbers` instead of data_format. inputs_dilation: An integer or sequence of integers, specifying the dilation for the image. Currently, `patches_moments` does not support dilation, so the only allowed values are `None, 1, (1,1)`. kernel_dilation: An integer or sequence of integers, specifying the dilation for the kernel. Currently, `patches_moments` does not support dilation, so the only allowed values are `None, 1, (1,1)`. feature_group_count: The feature grouping for grouped convolutions. 
Currently, `patches_moments` supports only 1 and number of channels of the inputs. batch_group_count: The batch grouping for grouped convolutions. Currently, `patches_moments` supports only 1. unroll_loop: Whether to unroll the loop in python. precision: In what precision to run the computation. For more details please read Jax documentation of :func:`jax.lax.conv_general_dilated`. weighting_array: A tensor specifying additional weighting of each element of the moment's average. Returns: The matrix of the patches' second and first moment as a pair. The tensor of the patches' second moment has a shape `kernel_spatial_shape + (, channels) + kernel_spatial_shape + (, channels)`. The tensor of the patches' first moment has a shape `kernel_spatial_shape + (, channels)`. """ kernel_spatial_shape, strides, padding, dim_numbers = _parse_simple_args( inputs.shape, kernel_spatial_shape, padding=padding, strides=strides, data_format=data_format, dim_numbers=dim_numbers) _validate_inputs_lengths(inputs, kernel_spatial_shape, strides, padding) # Extract useful fixed integer values from the inputs in_spec = _ConvSpec(dim_numbers.lhs_spec) rhs_spec = _ConvSpec(dim_numbers.rhs_spec) inputs_spatial_shape = in_spec.get_spatial(inputs.shape) n = in_spec.get_n(inputs.shape) c = in_spec.get_c(inputs.shape) in_h, in_w = inputs_spatial_shape ker_h, ker_w = kernel_spatial_shape pad_h, pad_w = padding s_h, s_w = strides if inputs_dilation not in (None, 1, (1, 1)): raise ValueError("`patches_second_moment` does not support input dilation.") if kernel_dilation not in (None, 1, (1, 1)): raise ValueError("`patches_second_moment` does not support kernel " "dilation.") if feature_group_count not in (1, in_spec.get_c(inputs.shape)): raise ValueError("`patches_second_moment` does not support " "`feature_group_count` different from 1 or the number of " "channels of the inputs.") if batch_group_count != 1: raise ValueError("PSM does not support `batch_group_count` different from " "1.") per_channel = feature_group_count != 1 # Sanity check if in_h + pad_h[0] + pad_h[1] < ker_h or in_w + pad_w[0] + pad_w[1] < ker_w: padded_h = in_h + pad_h[0] + pad_h[1] padded_w = in_w + pad_w[0] + pad_w[1] raise ValueError("The provided image has spatial padded shape " f"({padded_h}, {padded_w}) while the kernel has a larger " f"shape ({ker_h}, {ker_w}). This means a convolution is " "not possible.") # First we calculate the maximum number of times the kernel can be applied # into the image, including the padding and ignoring the stride. ker_max_h = in_h + pad_h[0] + pad_h[1] - ker_h + 1 ker_max_w = in_w + pad_w[0] + pad_w[1] - ker_w + 1 # Second we calculate the size of the image that is covered when performing # a VALID convolution with the kernel, provided the padding. out_h = _ceil(ker_max_h, s_h) * s_h - s_h + ker_h out_w = _ceil(ker_max_w, s_w) * s_w - s_w + ker_w # Finally, we potentially add extra padding on the right in order to make the # padded image sizes divisible by their strides. This is needed so we can use # later reshape the image into multiples of the strides, which allows us to # execute a strided slice via XLA's dynamic slice. Note that # in certain cases this could lead to negative padding, which is correct. # Example: image (9, 9), kernel (2, 2), strides (2, 2), padding (0, 0) # Then ker_max = 8, out_h = 8, padded_height = 8 and the padding is -1. 
padded_h = _ceil(out_h, s_h) * s_h padded_w = _ceil(out_w, s_w) * s_w # Actually pad the image (extra 0 for internal padding has to be added) extra_pad_h = (pad_h[0], padded_h - in_h - pad_h[0], 0) extra_pad_w = (pad_w[0], padded_w - in_w - pad_w[0], 0) spatial_padding = in_spec.create_shape( (0, 0, 0), (0, 0, 0), extra_pad_h, extra_pad_w) padded_image = lax.pad(inputs, jnp.asarray(0.0, dtype=inputs.dtype), spatial_padding) # Reshape the input based on strides # rhs_shape = [n, out_h // str_h, str_h, out_w // str_w, str_w, c] rhs_spec = in_spec.expand_spatial_axes() rhs_shape = rhs_spec.create_shape( n, c, padded_h // s_h, s_h, padded_w // s_w, s_w) # sizes = (n, rhs_h, 1, rhs_w, 1, c) rhs_h = (padded_h - ker_h) // s_h + 1 rhs_w = (padded_w - ker_w) // s_w + 1 sizes = rhs_spec.create_shape(n, c, rhs_h, 1, rhs_w, 1) # Broadcast the weighting function if weighting_array is not None: if weighting_array.ndim == inputs.ndim: shape = rhs_spec.create_shape(n, c, rhs_h, 1, rhs_w, 1) elif weighting_array.ndim == inputs.ndim - 1: shape = rhs_spec.create_shape(n, 1, rhs_h, 1, rhs_w, 1) elif weighting_array.ndim == 1: shape = rhs_spec.create_shape(n, 1, 1, 1, 1, 1) else: raise ValueError(f"`weighting_array` shape {weighting_array.shape} is " f"not compatible with the inputs shape {inputs.shape}" ".") reshaped_weighting_array = jnp.reshape(weighting_array, shape) else: reshaped_weighting_array = 1 def general_loop_body(i, image): reshaped_image = jnp.reshape(image, rhs_shape) # Slice the reshaped input iw = i % ker_w ih = i // ker_w # index = (0, ih // sh, ih % sh, iw // sw, iw % sw, 0) index = rhs_spec.create_shape( 0, 0, ih // s_h, ih % s_h, iw // s_w, iw % s_w) conv_rhs = _slice_array(reshaped_image, index, sizes) conv_rhs = conv_rhs * reshaped_weighting_array # Compute the correct padding for the convolution dilated_bound_h = 0 if rhs_h == 0 else (rhs_h - 1) * s_h + 1 dilated_bound_w = 0 if rhs_w == 0 else (rhs_w - 1) * s_w + 1 conv_pad_h = ker_h - (padded_h - dilated_bound_h + 1) conv_pad_w = ker_w - (padded_w - dilated_bound_w + 1) # Compute matrix update matrix_update = _the_conv4d( lhs=image, lhs_spec=in_spec, rhs=conv_rhs, rhs_spec=rhs_spec, pad_h=conv_pad_h, pad_w=conv_pad_w, stride_h=s_h, stride_w=s_w, per_channel=per_channel, precision=precision, ) # Compute vector update axis = tuple(i for i in range(len(rhs_spec)) if i != rhs_spec.c_axis) vector_update = jnp.sum(conv_rhs, axis=axis) vector_update = lax.broadcast_in_dim(vector_update, (1, 1, c), (2,)) return ih, iw, matrix_update, vector_update vector_shape = kernel_spatial_shape + (c,) leading_shape = kernel_spatial_shape if per_channel else vector_shape matrix_shape = leading_shape + vector_shape if unroll_loop: matrix_results, vector_results = zip( *[general_loop_body(ii, padded_image)[-2:] for ii in range(ker_h * ker_w)]) matrix_results = jnp.stack(matrix_results, axis=0) matrix_results = jnp.reshape(matrix_results, matrix_shape) vector_results = jnp.stack(vector_results, axis=0) vector_results = jnp.reshape(vector_results, vector_shape) return matrix_results, vector_results else: def loop_cond(args): return args[0] < ker_h * ker_w def loop_body(loop_inputs): i, image, matrix_result, vector_result = loop_inputs ih, iw, matrix_update, vector_update = general_loop_body(i, image) # Update matrix value indices = (ih, iw, 0, 0, 0) + (() if per_channel else (0,)) matrix_result = lax.dynamic_update_slice_p.bind( matrix_result, matrix_update, *indices) # Update vector value vector_result = lax.dynamic_update_slice_p.bind( vector_result, 
vector_update, ih, iw, 0) return i + 1, image, matrix_result, vector_result # Initialize loop states with zeros matrix_init = jnp.zeros(matrix_shape, dtype=inputs.dtype) vector_init = jnp.zeros(vector_shape, dtype=inputs.dtype) init_vals = (0, padded_image, matrix_init, vector_init) return lax.while_loop(loop_cond, loop_body, init_vals)[-2:]
kfac-jax-main
kfac_jax/_src/patches_second_moment.py
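For intuition, the quantities computed by patches_moments can be reproduced (much less memory-efficiently) by extracting every convolution patch explicitly and accumulating its first and second moments. The following is an independent reference sketch, not a call into the jitted functions above; it returns the moments in flattened form, whereas the module reshapes them into the kernel-spatial-plus-channel layout described in its docstring.

import jax.numpy as jnp
from jax import lax


def patches_moments_reference(inputs, kernel_shape, strides, padding):
  # inputs: (N, H, W, C); patches: (N, out_H, out_W, C * kh * kw).
  patches = lax.conv_general_dilated_patches(
      inputs, filter_shape=kernel_shape, window_strides=strides,
      padding=padding, dimension_numbers=("NHWC", "HWIO", "NHWC"))
  p = patches.reshape((-1, patches.shape[-1]))  # flatten all patch locations
  # Second moment sum_i p_i p_i^T and first moment sum_i p_i.
  return p.T @ p, p.sum(axis=0)


x = jnp.ones((2, 8, 8, 3))
second_moment, first_moment = patches_moments_reference(x, (3, 3), (1, 1), "VALID")
print(second_moment.shape, first_moment.shape)  # (27, 27) (27,)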
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kfac-jax-main
kfac_jax/_src/__init__.py
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """K-FAC losses and layers tagging Jax primitives.""" import types from typing import Any, Generic, Optional, Sequence, Type, TypeVar, Tuple, Union import jax from jax import core from jax.interpreters import batching as jax_batching # Types for annotation T = TypeVar("T") Array = jax.Array Arrays = Tuple[Array, ...] ArrayOrXla = TypeVar("ArrayOrXla", Array, jax.interpreters.xla.XlaOp) class LossTag(core.Primitive, Generic[T]): """A Jax primitive for tagging K-FAC losses. The primitive is no-op at runtime, however its goal is to tag (annotate) the Jax computation graph what expression exactly is the loss and what type of loss it represents. This is the only way for K-FAC to know how to compute the curvature matrix. """ # Whether the primitive returns multiple outputs (from core.Primitive) multiple_results = True def __init__( self, cls: Type[T], parameter_dependants: Sequence[str], parameter_independants: Sequence[str], ): """Initializes a loss tag primitive for the given :class:`~LossFunction` class. When the primitive is created, the constructor automatically registers it with the standard Jax machinery for differentiation, :func:`jax.vmap` and XLA lowering. For further details see please take a look at the JAX documentation on `primitives <https://jax.readthedocs.io/en/latest/notebooks/How_JAX_primitives_work.html>`__. Args: cls: The corresponding class of :class:`~LossFunction` that this tag represents. parameter_dependants: The names of each of the parameter **dependent** inputs to the tag. parameter_independants: The names of each of the parameter **independent** inputs to the tag. """ super().__init__(cls.__name__ + "_tag") self._cls = cls self._parameter_dependants = tuple(parameter_dependants) self._parameter_independants = tuple(parameter_independants) jax.interpreters.mlir.register_lowering(self, self._mlir_lowering) jax.interpreters.ad.primitive_jvps[self] = self._jvp # This line defines how does the tag behave under vmap. It is required for # any primitive that can be used inside a vmap. The reason why we want to # allow this is two fold - one to not break user code when the tags are not # used at all, and two - to be able to define a network with code for a # single example which is the vmap-ed for a batch. 
jax_batching.primitive_batchers[self] = self._batching @property def parameter_dependants_names(self) -> Tuple[str, ...]: """The number of parameter dependent inputs to the tag primitive.""" return self._parameter_dependants @property def parameter_independants_names(self) -> Tuple[str, ...]: """The number of parameter **independent** inputs to the tag primitive.""" return self._parameter_independants @property def arguments_names(self): return self.parameter_dependants_names + self.parameter_independants_names def extract_parameter_dependants( self, *args: T, args_names: Sequence[str], ) -> Tuple[T, ...]: assert len(args) == len(args_names) arg_map = dict(zip(args_names, args)) return tuple(arg_map[name] for name in self.parameter_dependants_names) def loss(self, *args: Array, args_names: Sequence[str]) -> T: """Constructs an instance of the corresponding :class:`~LossFunction` class.""" assert len(args) == len(args_names) arg_map = dict(zip(args_names, args)) return self._cls(**arg_map) def get_outputs( self, *args: ArrayOrXla, args_names: Sequence[str], ) -> Tuple[ArrayOrXla, ...]: """Verifies that the number of arguments matches expectations.""" assert len(args) == len(args_names) return tuple(arg for name, arg in zip(args_names, args) if name in self.parameter_dependants_names) def impl(self, *operands: Array, args_names: Sequence[str]) -> Arrays: return self.get_outputs(*operands, args_names=args_names) def abstract_eval( self, *operands: Array, args_names: Sequence[str], ) -> Tuple[Arrays, jax.core.Effects]: return (self.get_outputs(*operands, args_names=args_names), jax.core.no_effects) def _mlir_lowering( self, context: jax.interpreters.mlir.LoweringRuleContext, *args, args_names: Sequence[str], ) -> Tuple[Any, ...]: """The XLA translation rule for this primitive (creates a no-op tuple).""" return self.get_outputs(*args, args_names=args_names) def _jvp( self, arg_values: Sequence[Array], arg_tangents: Sequence[Array], args_names: Sequence[str], ) -> Tuple[Arrays, Arrays]: """Computes the Jacobian-vector product for the primitive.""" if len(arg_values) != len(arg_tangents): raise ValueError("Values and tangents are not the same length.") primal_output = self.bind(*arg_values, args_names=tuple(args_names)) tangent_output = self.get_outputs(*arg_tangents, args_names=args_names) return primal_output, tangent_output def _batching( self, batched_args: Sequence[Array], batched_dims: Union[int, Tuple[int, ...]], args_names: Sequence[str], ) -> Tuple[Array, Union[int, Tuple[int, ...]]]: """Defines how the primitive behaves under :func:`jax.vmap`.""" return self.bind(*batched_args, args_names=tuple(args_names)), batched_dims class LayerTag(core.Primitive): """A Jax primitive for tagging K-FAC layers. The primitive is no-op at runtime, however its goal is to tag (annotate) the Jax computation graph what expressions represents a single unique layer type. This is the only way for K-FAC to know how to compute the curvature matrix. """ def __init__(self, name: str, num_inputs: int, num_outputs: int): """Initializes a layer tag primitive with the given name. Any layer tag primitive must have the following interface `layer_tag( *outputs, *inputs, *parameters, **kwargs)`. We refer collectively to ``inputs`` , ``outputs`` and ``parameters`` as operands. All operands must be Jax arrays, while any of the values in ``kwargs`` must be hashable fixed constants. 
When the primitive is created, the constructor automatically registers it with the standard Jax machinery for differentiation, :func:`jax.vmap` and XLA lowering. For further details see please take a look at the JAX documentation on `primitives <https://jax.readthedocs.io/en/latest/notebooks/How_JAX_primitives_work.html>`__. Args: name: The name of the layer primitive. num_inputs: The number of inputs to the layer. num_outputs: The number of outputs to the layer. """ super().__init__(name) if num_outputs > 1: raise NotImplementedError( f"Only single outputs are supported, got: num_outputs={num_outputs}.") self._num_outputs = num_outputs self._num_inputs = num_inputs jax.interpreters.mlir.register_lowering(self, self._mlir_lowering) # pytype: disable=wrong-arg-types # numpy-scalars jax.interpreters.ad.deflinear(self, self._transpose) jax.interpreters.ad.primitive_transposes[self] = self._transpose # This line defines how does the tag behave under vmap. It is required for # any primitive that can be used inside a vmap. The reason why we want to # allow this is two fold - one to not break user code when the tags are not # used at all, and two - to be able to define a network with code for a # single example which is the vmap-ed for a batch. jax_batching.primitive_batchers[self] = self._batching @property def num_outputs(self) -> int: """The number of outputs of the layer tag that this primitive represents.""" return self._num_outputs @property def num_inputs(self) -> int: """The number of inputs of the layer tag that this primitive represents.""" return self._num_inputs def split_all_inputs( self, all_inputs: Sequence[T], ) -> Tuple[ Tuple[T, ...], Tuple[T, ...], Tuple[T, ...] ]: """Splits the operands of the primitive into ``(outputs, inputs, params)``.""" outputs = tuple(all_inputs[:self.num_outputs]) inputs = tuple(all_inputs[self.num_outputs:self.num_outputs + self.num_inputs]) params = tuple(all_inputs[self.num_outputs + self.num_inputs:]) return outputs, inputs, params def get_outputs(self, *operands: Array, **_: Any) -> Array: """Extracts the ``outputs`` of a layer from the operands of the primitive.""" outputs = self.split_all_inputs(operands)[0] assert self.num_outputs == len(outputs) == 1 return outputs[0] def _mlir_lowering( self, context: jax.interpreters.mlir.LoweringRuleContext, *args, **_: Any, ) -> Tuple[Any, ...]: """The XLA translation rule for this primitive - returns the ``outputs`` .""" # Need to return a sequence return (self.get_outputs(*args),) @classmethod def _transpose( cls, cotangent: Array, *operands: Array, **_: Any, ) -> Tuple[Union[Array, None], ...]: """Computes the cotangents of the operands from those of the primitive.""" del cls # not used return (cotangent,) + (None,) * (len(operands) - 1) def impl(self, *operands: Array, **_: Any) -> Array: return self.get_outputs(*operands) def abstract_eval(self, *operands: Array, **_: Any) -> Array: jax_version = ( jax.__version_info__ if hasattr(jax, "__version_info__") else tuple(map(int, jax.__version__.split(".")))) if jax_version > (0, 3, 4): return self.get_outputs(*operands), jax.core.no_effects # pytype: disable=bad-return-type # numpy-scalars return self.get_outputs(*operands) def _batching( self, batched_operands: Sequence[Array], batched_dims: Union[int, Tuple[int, ...]], **kwargs: Any ) -> Tuple[Array, int]: """Defines how the primitive behaves under :func:`jax.vmap`.""" return self.bind(*batched_operands, **kwargs), batched_dims[0] def generic_get_outputs( self: LayerTag, *operands: Array, ) -> Array: 
"""Special logic for generic tag's ``get_outputs``.""" # The generic tags have no `inputs` and `outputs` so instead they return just # the parameters. assert self.num_inputs == self.num_outputs == 0 params = self.split_all_inputs(operands)[2] if len(params) != 1: raise ValueError("A generic tag can have only one parameter.") return params[0] generic = LayerTag(name="generic_tag", num_inputs=0, num_outputs=0) setattr(generic, "get_outputs", types.MethodType(generic_get_outputs, generic)) def register_generic(parameter: Array) -> Array: """Registers a generic tag around the provided parameter array.""" return generic.bind(parameter) dense = LayerTag(name="dense_tag", num_inputs=1, num_outputs=1) def register_dense( y: Array, x: Array, w: Array, b: Optional[Array] = None, **kwargs, ) -> Array: """Registers a dense layer: ``y = matmul(x, w) + b``.""" if b is None: return dense.bind(y, x, w, **kwargs) return dense.bind(y, x, w, b, **kwargs) conv2d = LayerTag(name="conv2d_tag", num_inputs=1, num_outputs=1) def register_conv2d( y: Array, x: Array, w: Array, b: Optional[Array] = None, **kwargs: Any ) -> Array: """Registers a 2d convolution layer: ``y = conv2d(x, w) + b``.""" if b is None: return conv2d.bind(y, x, w, **kwargs) return conv2d.bind(y, x, w, b, **kwargs) scale_and_shift = LayerTag( name="scale_and_shift_tag", num_inputs=1, num_outputs=1) def register_scale_and_shift( y: Array, x: Array, scale: Optional[Array] = None, shift: Optional[Array] = None, ) -> Array: """Registers a scale and shift layer: ``y = x * scale + shift``.""" if scale is not None and shift is not None: args = (scale, shift) elif scale is not None: args = (scale,) elif shift is not None: args = (shift,) else: raise ValueError("At least one of `scale` and `shift` must be provided.") return scale_and_shift.bind( y, x, *args, has_scale=scale is not None, has_shift=shift is not None) class LossTagEqn(core.JaxprEqn): """A class used only for annotation purposes.""" primitive: LossTag class LayerTagEqn(core.JaxprEqn): """A class used only for annotation purposes.""" primitive: LayerTag
kfac-jax-main
kfac_jax/_src/layers_and_loss_tags.py
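Layer tags are likewise inserted into the forward pass, either automatically by the graph matcher or manually via the helper functions above. A minimal sketch of manually tagging a dense layer follows; the model and parameter names are hypothetical, the import follows the module path shown above (the helper may also be re-exported at the package level), and register_dense is a no-op at runtime that simply returns y while annotating the jaxpr with (output, input, parameters).

import jax.numpy as jnp
from kfac_jax._src import layers_and_loss_tags as tags


def forward(params, x):
  w, b = params["w"], params["b"]
  y = jnp.matmul(x, w) + b
  # Annotate y = x @ w + b as a dense layer so the curvature estimator can
  # assign it a Kronecker-factored block; inputs/outputs/params are recorded.
  y = tags.register_dense(y, x, w, b)
  return y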
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """K-FAC curvature explicit and implicit estimators. Curvature matrices are always defined in terms of some single differentiable function of the parameters and inputs. In all cases in this module this quantity is not the output from the model function (usually provided as argument to the constructor of each curvature matrix), but is the sum of all losses (weighted accordingly) which have been registered with a loss tag in the computation graph of the model function. This quantity is referred to as the ``total_loss``. In this module there are three curvature matrices considered: ``H`` - the Hessian matrix ``F`` - the Fisher matrix ``G`` - The Generalized Gauss-Newton(GGN) matrix Vectors that are multiplied by a curvature matrix (or any of its matrix powers) are always represented as a PyTree structure, equivalent to the parameters of the model function. In all functions such vector is named ``parameter_structured_vector`` in the argument list. Factors of a matrix ``M`` are defined as matrices ``B`` such that ``BB^T = M``. If we have to left-multiply ``B`` with a vector ``v``, than ``v`` has the same format as if we have to multiply the whole curvature matrix ``M``. However the second size of ``B`` is not clearly defined (and can be different for the different curvature matrices). In all methods working with factors, e.g. if we need to right multiply ``B`` with a vector ``v`` or the result of left multiplying ``B`` by a parameter structured vector, then the provided vector ``v`` should be a list of lists of arrays. Each element of ``v`` corresponds to a single loss registered in the model function, and its elements should have the shapes as the corresponding ``loss.XXX_inner_shapes`` (XXX=Hessian, Fisher or GGN). In all function such vector is named ``loss_vectors`` in the argument list. See for example: www.cs.utoronto.ca/~jmartens/docs/HF_book_chapter.pdf and https://arxiv.org/abs/1412.1193 for more information about the Hessian, Fisher and GGN matrices and how to compute matrix-vector products. 
""" import abc import functools from typing import Any, Callable, Optional, Sequence, Mapping, Generic, TypeVar, Tuple, Union, Dict import jax from jax import scipy import jax.numpy as jnp from kfac_jax._src import curvature_blocks from kfac_jax._src import layers_and_loss_tags as tags from kfac_jax._src import loss_functions from kfac_jax._src import tracer from kfac_jax._src import utils import numpy as np # Types for annotation Array = utils.Array PRNGKey = utils.PRNGKey Numeric = utils.Numeric Scalar = utils.Scalar Shape = utils.Shape CurvatureBlockCtor = Callable[ [tags.LayerTagEqn, str], curvature_blocks.CurvatureBlock ] StateType = TypeVar("StateType") # Special global variables _ESTIMATION_MODES = ("fisher_gradients", "fisher_empirical", "fisher_exact", "fisher_curvature_prop", "ggn_exact", "ggn_curvature_prop") _DEFAULT_TAG_TO_BLOCK_CTOR: Dict[str, CurvatureBlockCtor] = dict( dense_tag=curvature_blocks.DenseTwoKroneckerFactored, conv2d_tag=curvature_blocks.Conv2DTwoKroneckerFactored, generic_tag=curvature_blocks.NaiveDiagonal, scale_and_shift_tag=curvature_blocks.ScaleAndShiftDiagonal, ) def get_default_tag_to_block_ctor( tag_name: str ) -> Optional[CurvatureBlockCtor]: """Returns the default curvature block constructor for the give tag name.""" return _DEFAULT_TAG_TO_BLOCK_CTOR.get(tag_name) def set_default_tag_to_block_ctor( tag_name: str, block_ctor: CurvatureBlockCtor ) -> None: """Sets the default curvature block constructor for the given tag.""" _DEFAULT_TAG_TO_BLOCK_CTOR[tag_name] = block_ctor def set_multi_default_tag_to_block_ctor( tags_to_block_ctor: Mapping[str, CurvatureBlockCtor] ): _DEFAULT_TAG_TO_BLOCK_CTOR.update(tags_to_block_ctor) class ImplicitExactCurvature: """Represents all exact curvature matrices never constructed explicitly.""" def __init__( self, func: utils.Func, params_index: int = 0, batch_size_extractor: Callable[[utils.Batch], Numeric] = utils.default_batch_size_extractor, ): """Initializes the ImplicitExactCurvature instance. Args: func: The model function, which should have at least one registered loss. params_index: The index of the parameters argument in arguments list of ``func``. batch_size_extractor: A function that takes as input the function arguments and returns the batch size for a single device. 
(Default: ``kfac.utils.default_batch_size_extractor``) """ self._loss_tags_vjp = tracer.loss_tags_vjp( func=func, params_index=params_index ) self._loss_tags_jvp = tracer.loss_tags_jvp( func=func, params_index=params_index, ) self._loss_tags_hvp = tracer.loss_tags_hvp( func=func, params_index=params_index, ) self._batch_size_extractor = batch_size_extractor def batch_size(self, func_args: utils.FuncArgs) -> Numeric: """The expected batch size given a list of loss instances.""" return self._batch_size_extractor(func_args[-1]) @classmethod def _multiply_loss_fisher( cls, losses: Sequence[loss_functions.NegativeLogProbLoss], loss_vectors: Sequence[Sequence[Array]] ) -> Tuple[Tuple[Array, ...], ...]: """Multiplies ``loss_vectors`` by the Fisher of the total loss.""" assert len(losses) == len(loss_vectors) return tuple(loss.multiply_fisher(vec) for loss, vec in zip(losses, loss_vectors)) @classmethod def _multiply_loss_ggn( cls, losses: Sequence[loss_functions.LossFunction], loss_vectors: Sequence[Sequence[Array]] ) -> Tuple[Tuple[Array, ...], ...]: """Multiplies ``loss_vectors`` by the GGN of the total loss.""" return tuple(loss.multiply_ggn(vec) for loss, vec in zip(losses, loss_vectors)) @classmethod def _multiply_loss_fisher_factor( cls, losses: Sequence[loss_functions.NegativeLogProbLoss], loss_inner_vectors: Sequence[Array], ) -> Tuple[Tuple[Array, ...], ...]: """Multiplies the vectors with the Fisher factors of each loss. Args: losses: A sequence of loss instances. loss_inner_vectors: A sequence of vectors, each corresponding to one instance of a loss in losses. Returns: The product of all vectors with the factors of the Fisher of each the losses. """ assert len(losses) == len(loss_inner_vectors) return tuple(loss.multiply_fisher_factor(vec) for loss, vec in zip(losses, loss_inner_vectors)) @classmethod def _multiply_loss_ggn_factor( cls, losses: Sequence[loss_functions.LossFunction], loss_inner_vectors: Sequence[Array], ) -> Tuple[Tuple[Array, ...], ...]: """Multiplies the vectors with the GGN factors of each loss. Args: losses: A sequence of loss instances. loss_inner_vectors: A sequence of vectors, each corresponding to one instance of a loss in losses. Returns: The product of all vectors with the factors of the GGN of each the losses. """ return tuple(loss.multiply_ggn_factor(vec) for loss, vec in zip(losses, loss_inner_vectors)) @classmethod def _multiply_loss_fisher_factor_transpose( cls, losses: Sequence[loss_functions.NegativeLogProbLoss], loss_vectors: Sequence[Sequence[Array]] ) -> Tuple[Array, ...]: """Multiplies the vectors with the transposed Fisher factors of each loss. Args: losses: A sequence of loss instances. loss_vectors: A sequence of vectors, each corresponding to one instance of a loss in losses. Returns: The product of all vectors with the factors of the Fisher of each the losses. """ assert len(losses) == len(loss_vectors) return tuple(loss.multiply_fisher_factor_transpose(vec) for loss, vec in zip(losses, loss_vectors)) @classmethod def _multiply_loss_ggn_factor_transpose( cls, losses: Sequence[loss_functions.LossFunction], loss_vectors: Sequence[Sequence[Array]] ) -> Tuple[Array, ...]: """Multiplies the vectors with the transposed GGN factors of each loss. Args: losses: A sequence of loss instances. loss_vectors: A sequence of vectors, each corresponding to one instance of a loss in losses. Returns: The product of all vectors with the factors of the GGN of each the losses. 
""" return tuple(loss.multiply_ggn_factor_transpose(vec) for loss, vec in zip(losses, loss_vectors)) @classmethod def _assert_losses_same( cls, losses1: Sequence[loss_functions.LossFunction], losses2: Sequence[loss_functions.LossFunction], ) -> None: """Asserts that the two losses sequence are equivalent.""" assert len(losses1) == len(losses2) for loss1, loss2 in zip(losses1, losses2): assert isinstance(loss1, type(loss2)) inputs1 = jax.tree_util.tree_leaves(loss1.parameter_dependants) inputs2 = jax.tree_util.tree_leaves(loss2.parameter_dependants) for in1, in2 in zip(inputs1, inputs2): assert in1.shape == in2.shape assert in1.dtype == in2.dtype @utils.auto_scope_method def multiply_hessian( self, func_args: utils.FuncArgs, parameter_structured_vector: utils.Params, ) -> utils.Params: """Multiplies the vector with the Hessian matrix of the total loss. Args: func_args: The inputs to the model function, on which to evaluate the Hessian matrix. parameter_structured_vector: The vector which to multiply with the Hessian matrix. Returns: The product ``Hv``. """ vector, _ = self._loss_tags_hvp(func_args, parameter_structured_vector) batch_size = self.batch_size(func_args) assert utils.abstract_objects_equal(parameter_structured_vector, vector) return utils.scalar_div(vector, batch_size) @utils.auto_scope_method def multiply_fisher( self, func_args: utils.FuncArgs, parameter_structured_vector: utils.Params, ) -> utils.Params: """Multiplies the vector with the Fisher matrix of the total loss. Args: func_args: The inputs to the model function, on which to evaluate the Fisher matrix. parameter_structured_vector: The vector which to multiply with the Fisher matrix. Returns: The product ``Fv``. """ losses: Sequence[loss_functions.NegativeLogProbLoss] losses, jacobian_vectors = self._loss_tags_jvp( func_args, parameter_structured_vector) if any(not isinstance(l, loss_functions.NegativeLogProbLoss) for l in losses): raise ValueError("To use `multiply_fisher` all registered losses must " "be a subclass of `NegativeLogProbLoss`.") _, vjp = self._loss_tags_vjp(func_args) self._assert_losses_same(losses, _) loss_fisher_jacobian_vectors = self._multiply_loss_fisher( losses, jacobian_vectors) vector = vjp(loss_fisher_jacobian_vectors) batch_size = self.batch_size(func_args) assert utils.abstract_objects_equal(parameter_structured_vector, vector) return utils.scalar_div(vector, batch_size) @utils.auto_scope_method def multiply_ggn( self, func_args: utils.FuncArgs, parameter_structured_vector: utils.Params, ) -> utils.Params: """Multiplies the vector with the GGN matrix of the total loss. Args: func_args: The inputs to the model function, on which to evaluate the GGN matrix. parameter_structured_vector: The vector which to multiply with the GGN matrix. Returns: The product ``Gv``. """ losses, jacobian_vectors = self._loss_tags_jvp( func_args, parameter_structured_vector) _, vjp = self._loss_tags_vjp(func_args) self._assert_losses_same(losses, _) loss_ggn_jacobian_vectors = self._multiply_loss_ggn( losses, jacobian_vectors) vector = vjp(loss_ggn_jacobian_vectors) batch_size = self.batch_size(func_args) assert utils.abstract_objects_equal(parameter_structured_vector, vector) return utils.scalar_div(vector, batch_size) @utils.auto_scope_method def multiply_fisher_factor_transpose( self, func_args: utils.FuncArgs, parameter_structured_vector: utils.Params, ) -> Tuple[Array, ...]: """Multiplies the vector with the transposed factor of the Fisher matrix. 
Args: func_args: The inputs to the model function, on which to evaluate the Fisher matrix. parameter_structured_vector: The vector which to multiply with the Fisher matrix. Returns: The product ``B^T v``, where ``F = BB^T``. """ losses: Sequence[loss_functions.NegativeLogProbLoss] losses, jacobian_vectors = self._loss_tags_jvp( func_args, parameter_structured_vector) if any(not isinstance(l, loss_functions.NegativeLogProbLoss) for l in losses): raise ValueError("To use `multiply_fisher` all registered losses must " "be a subclass of `NegativeLogProbLoss`.") loss_vectors = self._multiply_loss_fisher_factor_transpose( losses, jacobian_vectors) batch_size = self.batch_size(func_args) return utils.scalar_div(loss_vectors, jnp.sqrt(batch_size)) @utils.auto_scope_method def multiply_ggn_factor_transpose( self, func_args: utils.FuncArgs, parameter_structured_vector: utils.Params, ) -> Tuple[Array, ...]: """Multiplies the vector with the transposed factor of the GGN matrix. Args: func_args: The inputs to the model function, on which to evaluate the GGN matrix. parameter_structured_vector: The vector which to multiply with the GGN matrix. Returns: The product ``B^T v``, where ``G = BB^T``. """ losses, jacobian_vectors = self._loss_tags_jvp( func_args, parameter_structured_vector) vectors = self._multiply_loss_ggn_factor_transpose(losses, jacobian_vectors) batch_size = self.batch_size(func_args) return utils.scalar_div(vectors, jnp.sqrt(batch_size)) @utils.auto_scope_method def multiply_fisher_factor( self, func_args: utils.FuncArgs, loss_inner_vectors: Sequence[Array], ) -> utils.Params: """Multiplies the vector with the factor of the Fisher matrix. Args: func_args: The inputs to the model function, on which to evaluate the Fisher matrix. loss_inner_vectors: The vector which to multiply with the Fisher factor matrix. Returns: The product ``Bv``, where ``F = BB^T``. """ losses: Sequence[loss_functions.NegativeLogProbLoss] losses, vjp = self._loss_tags_vjp(func_args) if any(not isinstance(l, loss_functions.NegativeLogProbLoss) for l in losses): raise ValueError("To use `multiply_fisher` all registered losses must " "be a subclass of `NegativeLogProbLoss`.") fisher_factor_vectors = self._multiply_loss_fisher_factor( losses, loss_inner_vectors) vectors = vjp(fisher_factor_vectors) batch_size = self.batch_size(func_args) return utils.scalar_div(vectors, jnp.sqrt(batch_size)) @utils.auto_scope_method def multiply_ggn_factor( self, func_args: utils.FuncArgs, loss_inner_vectors: Sequence[Array], ) -> utils.Params: """Multiplies the vector with the factor of the GGN matrix. Args: func_args: The inputs to the model function, on which to evaluate the GGN matrix. loss_inner_vectors: The vector which to multiply with the GGN factor matrix. Returns: The product ``Bv``, where ``G = BB^T``. """ losses, vjp = self._loss_tags_vjp(func_args) ggn_factor_vectors = self._multiply_loss_ggn_factor( losses, loss_inner_vectors) vectors = vjp(ggn_factor_vectors) batch_size = self.batch_size(func_args) return utils.scalar_div(vectors, jnp.sqrt(batch_size)) @utils.auto_scope_method def multiply_jacobian_transpose( self, func_args: utils.FuncArgs, loss_input_vectors: Sequence[Sequence[Array]], ) -> utils.Params: """Multiplies a vector by the model's transposed Jacobian. Args: func_args: The inputs to the model function. loss_input_vectors: A sequence over losses of sequences of arrays that are the size of the loss's inputs. This represents the vector to be multiplied. 
Returns: The product ``J^T v``, where ``J`` is the model's Jacobian and ``v`` is is given by ``loss_inner_vectors``. """ _, vjp = self._loss_tags_vjp(func_args) return vjp(loss_input_vectors) def get_loss_inner_vector_shapes_and_batch_size( self, func_args: utils.FuncArgs, mode: str ) -> Tuple[Tuple[Shape, ...], int]: """Get shapes of loss inner vectors, and the batch size. Args: func_args: The inputs to the model function. mode: A string representing the type of curvature matrix for the loss inner vectors. Can be "fisher" or "ggn". Returns: Shapes of loss inner vectors in a tuple, and the batch size as an int. """ losses, _ = self._loss_tags_vjp(func_args) # pytype: disable=attribute-error # always-use-return-annotations batch_size = self.batch_size(func_args) if mode == "fisher": return (tuple(loss.fisher_factor_inner_shape for loss in losses), # pytype: disable=bad-return-type # numpy-scalars batch_size) elif mode == "ggn": return tuple(loss.ggn_factor_inner_shape for loss in losses), batch_size # pytype: disable=bad-return-type # numpy-scalars else: raise ValueError(f"Unrecognized mode: {mode}") def get_loss_input_shapes_and_batch_size( self, func_args: utils.FuncArgs ) -> Tuple[Tuple[Tuple[Shape, ...], ...], int]: """Get shapes of loss input vectors, and the batch size. Args: func_args: The inputs to the model function. Returns: A tuple over losses of tuples containing the shapes of their different inputs, and the batch size (as an int). """ losses, _ = self._loss_tags_vjp(func_args) # pytype: disable=attribute-error # always-use-return-annotations batch_size = self.batch_size(func_args) return (tuple(tuple(x.shape for x in loss.parameter_dependants) # pytype: disable=bad-return-type # numpy-scalars for loss in losses), batch_size) class CurvatureEstimator(Generic[StateType], utils.Finalizable): """An abstract curvature estimator class. This is a class that abstracts away the process of estimating a curvature matrix and provides many useful functionalities for interacting with it. The state of the estimator contains two parts: the estimated curvature internal representation, as well as potential cached values of different expression involving the curvature matrix (for example matrix powers). The cached values are only updated once you call the method :func:`~CurvatureEstimator.update_cache`. Multiple methods contain the keyword argument ``use_cached`` which specify whether you want to compute the corresponding expression using the current curvature estimate or used a cached version. Attributes: func: The model evaluation function. params_index: The index of the parameters argument in arguments list of ``func``. default_estimation_mode: The estimation mode which to use by default when calling :func:`~CurvatureEstimator.update_curvature_matrix_estimate`. """ def __init__( self, func: utils.Func, params_index: int = 0, default_estimation_mode: str = "fisher_gradients", ): """Initializes the CurvatureEstimator instance. Args: func: The model function, which should have at least one registered loss. params_index: The index of the parameters argument in arguments list of ``func``. default_estimation_mode: The estimation mode which to use by default when calling :func:`~CurvatureEstimator.update_curvature_matrix_estimate`. 
""" if default_estimation_mode not in _ESTIMATION_MODES: raise ValueError("Unrecognised default_estimation_mode " f"{default_estimation_mode}.") super().__init__() self.func = func self.params_index = params_index self.default_estimation_mode = default_estimation_mode @property def default_mat_type(self) -> str: """The type of matrix that this estimator is approximating.""" idx = self.default_estimation_mode.index("_") return self.default_estimation_mode[:idx] @property @abc.abstractmethod def dim(self) -> int: """The number of elements of all parameter variables together.""" @abc.abstractmethod def init( self, rng: PRNGKey, func_args: utils.FuncArgs, exact_powers_to_cache: Optional[curvature_blocks.ScalarOrSequence], approx_powers_to_cache: Optional[curvature_blocks.ScalarOrSequence], cache_eigenvalues: bool = False, ) -> StateType: """Initializes the state for the estimator. Args: rng: The PRNGKey which to be used for any randomness of the initialization. func_args: Example function arguments, which to be used to trace the model function and initialize the state. exact_powers_to_cache: A single value, or multiple values in a list, which specify which exact matrix powers that each block should be caching. Matrix powers for which you intend to call ``self.multiply_matrix_power``, ``self.multiply_inverse`` or ``self.multiply`` with ``exact_power=True`` and ``use_cached=True`` must be provided here. approx_powers_to_cache: A single value, or multiple values in a list, which specify approximate matrix powers that each block should be caching. Matrix powers for which you intend to call ``self.multiply_matrix_power``, ``self.multiply_inverse`` or ``self.multiply`` with ``exact_power=False`` and ``use_cached=True`` must be provided here. cache_eigenvalues: Specifies whether each block should be caching the eigenvalues of its approximate curvature. Returns: The initialized state of the estimator. """ @abc.abstractmethod def sync( self, state: StateType, pmap_axis_name: Optional[str], ) -> StateType: """Synchronizes across devices the state of the estimator.""" @abc.abstractmethod def multiply_matpower( self, state: StateType, parameter_structured_vector: utils.Params, identity_weight: Numeric, power: Scalar, exact_power: bool, use_cached: bool, pmap_axis_name: Optional[str], ) -> utils.Params: """Computes ``(CurvatureMatrix + identity_weight I)**power`` times ``vector``. Args: state: The state of the estimator. parameter_structured_vector: A vector in the same structure as the parameters of the model. identity_weight: Specifies the weight of the identity element that is added to the curvature matrix. This can be either a scalar value or a list/tuple of scalar in which case each value specifies the weight individually for each block. power: The power to which you want to raise the matrix ``(EstimateCurvature + identity_weight I)``. exact_power: When set to ``True`` the matrix power of ``EstimateCurvature + identity_weight I`` is computed exactly. Otherwise this method might use a cheaper approximation, which *may* vary across different blocks. use_cached: Whether to use a cached (and possibly stale) version of the curvature matrix estimate. pmap_axis_name: The name of any pmap axis, which will be used for aggregating any computed values over multiple devices, as well as parallelizing the computation over devices in a block-wise fashion. Returns: A parameter structured vector containing the product. 
""" def multiply( self, state: StateType, parameter_structured_vector: utils.Params, identity_weight: Numeric, exact_power: bool, use_cached: bool, pmap_axis_name: Optional[str], ) -> utils.Params: """Computes ``(CurvatureMatrix + identity_weight I)`` times ``vector``.""" return self.multiply_matpower( state=state, parameter_structured_vector=parameter_structured_vector, identity_weight=identity_weight, power=1, exact_power=exact_power, use_cached=use_cached, pmap_axis_name=pmap_axis_name ) def multiply_inverse( self, state: StateType, parameter_structured_vector: utils.Params, identity_weight: Numeric, exact_power: bool, use_cached: bool, pmap_axis_name: Optional[str], ) -> utils.Params: """Computes ``(CurvatureMatrix + identity_weight I)^-1`` times ``vector``.""" return self.multiply_matpower( state=state, parameter_structured_vector=parameter_structured_vector, identity_weight=identity_weight, power=-1, exact_power=exact_power, use_cached=use_cached, pmap_axis_name=pmap_axis_name ) @abc.abstractmethod def eigenvalues( self, state: StateType, use_cached: bool, ) -> Array: """Computes the eigenvalues of the curvature matrix. Args: state: The state of the estimator. use_cached: Whether to use a cached versions of the eigenvalues or to use the most recent curvature estimates to compute them. The cached version are going to be *at least* as fresh as the last time you called :func:`~CurvatureEstimator.update_cache` with ``eigenvalues=True``. Returns: A single array containing the eigenvalues of the curvature matrix. """ @abc.abstractmethod def update_curvature_matrix_estimate( self, state: StateType, ema_old: Numeric, ema_new: Numeric, batch_size: Numeric, rng: PRNGKey, func_args: utils.FuncArgs, estimation_mode: Optional[str] = None, ) -> StateType: """Updates the estimator's curvature estimates. Args: state: The state of the estimator to update. ema_old: Specifies the weight of the old value when computing the updated estimate in the moving average. ema_new: Specifies the weight of the new value when computing the updated estimate in the moving average. batch_size: The batch size. rng: A PRNGKey to be used for any potential sampling in the estimation process. func_args: A structure with the values of the inputs to the traced function (the ``tagged_func`` passed into the constructor) which to be used for the estimation process. Should have the same structure as the argument ``func_args`` passed in the constructor. estimation_mode: The type of curvature estimator to use. By default (e.g. if ``None``) will use ``self.default_estimation_mode``. One of: * fisher_gradients - the basic estimation approach from the original K-FAC paper. * fisher_curvature_prop - method which estimates the Fisher using self-products of random 1/-1 vectors times "half-factors" of the Fisher, as described `here <https://arxiv.org/abs/1206.6464>`__. * fisher_exact - is the obvious generalization of Curvature Propagation to compute the exact Fisher (modulo any additional diagonal or Kronecker approximations) by looping over one-hot vectors for each coordinate of the output instead of using 1/-1 vectors. It is more expensive to compute than the other three options by a factor equal to the output dimension, roughly speaking. * fisher_empirical - computes the 'empirical' Fisher information matrix (which uses the data's distribution for the targets, as opposed to the true Fisher which uses the model's distribution) and requires that each registered loss have specified targets. 
* ggn_curvature_prop - Analogous to fisher_curvature_prop, but estimates the Generalized Gauss-Newton matrix (GGN). * ggn_exact - Analogous to fisher_exact, but estimates the Generalized Gauss-Newton matrix (GGN). Returns: The updated state. """ @abc.abstractmethod def update_cache( self, state: StateType, identity_weight: Numeric, exact_powers: Optional[curvature_blocks.ScalarOrSequence], approx_powers: Optional[curvature_blocks.ScalarOrSequence], eigenvalues: bool, pmap_axis_name: Optional[str], ) -> StateType: """Updates the estimator cached values. Args: state: The state of the estimator to update. identity_weight: Specified the weight of the identity element that is added to the curvature matrix. This can be either a scalar value or a list/tuple of scalar in which case each value specifies the weight individually for each block. exact_powers: Specifies which exact matrix powers in the cache should be updated. approx_powers: Specifies which approximate matrix powers in the cache should be updated. eigenvalues: Specifies whether to update the cached eigenvalues of each block. If they have not been cached before, this will create an entry with them in the block's cache. pmap_axis_name: The name of any pmap axis, which will be used for aggregating any computed values over multiple devices, as well as parallelizing the computation over devices in a block-wise fashion. Returns: The updated state. """ @abc.abstractmethod def to_dense_matrix(self, state: StateType) -> Array: """Returns an explicit dense array representing the curvature matrix.""" class BlockDiagonalCurvature( CurvatureEstimator["BlockDiagonalCurvature.State"]): """Block diagonal curvature estimator class.""" @utils.register_state_class class State(utils.State): """Persistent state of the estimator. Attributes: synced: A Jax boolean, specifying if the state has been synced across devices (this does not include the cache, which is never explicitly synced). blocks_states: A tuple of the state of the estimator corresponding to each block. """ synced: Array blocks_states: Tuple[curvature_blocks.CurvatureBlock.State, ...] def __init__( self, func: utils.Func, params_index: int = 0, default_estimation_mode: str = "fisher_gradients", layer_tag_to_block_ctor: Optional[Mapping[str, CurvatureBlockCtor]] = None, index_to_block_ctor: Optional[Mapping[Tuple[int, ...], CurvatureBlockCtor]] = None, auto_register_tags: bool = True, distributed_multiplies: bool = True, distributed_cache_updates: bool = True, num_samples: int = 1, should_vmap_samples: bool = False, **auto_register_kwargs: Any, ): """Initializes the curvature instance. Args: func: The model function, which should have at least one registered loss. params_index: The index of the parameters argument in arguments list of ``func``. default_estimation_mode: The estimation mode which to use by default when calling ``self.update_curvature_matrix_estimate``. layer_tag_to_block_ctor: An optional dict mapping tags to specific classes of block approximations, which to override the default ones. index_to_block_ctor: An optional dict mapping a specific block parameter indices to specific classes of block approximation, which to override the default ones. To get the correct indices check ``estimator.indices_to_block_map``. auto_register_tags: Whether to automatically register layer tags for parameters that have not been manually registered. For further details see ``tag_graph_matcher.auto_register_tags``. 
distributed_multiplies: Whether to distribute the curvature matrix multiplication operations across the different devices in a block-wise fashion. If False, each device will (redundantly) perform the operations for all of the blocks. distributed_cache_updates: Whether to distribute the cache update multiplication operations across the different devices in a block-wise fashion. If False, each device will (redundantly) perform the operations for all of the blocks. num_samples: Number of samples (per case) to use when computing stochastic curvature matrix estimates. This option is only used when ``estimation_mode == 'fisher_gradients'`` or ``estimation_mode == '[fisher,ggn]_curvature_prop'``. should_vmap_samples: Whether to use ``jax.vmap`` to compute samples when ``num_samples > 1``. **auto_register_kwargs: Any keyword arguments to pass to into the auto registration function. """ super().__init__(func, params_index, default_estimation_mode) self._index_to_block_ctor = index_to_block_ctor or dict() self._layer_tag_to_block_ctor = layer_tag_to_block_ctor or dict() self._auto_register_tags = auto_register_tags self._auto_register_kwargs = auto_register_kwargs self._vjp = tracer.layer_tags_vjp( func=func, params_index=params_index, auto_register_tags=auto_register_tags, **auto_register_kwargs ) # Initialized during finalization self._jaxpr: Optional[tracer.ProcessedJaxpr] = None self._blocks: Optional[Tuple[curvature_blocks.CurvatureBlock]] = None self._distributed_multiplies = distributed_multiplies self._distributed_cache_updates = distributed_cache_updates self._num_samples = num_samples self._should_vmap_samples = should_vmap_samples def _check_finalized(self): if not self.finalized: raise ValueError("The estimator has not been finalized. Call `init` or " "`finalize` first.") def _create_blocks(self): """Creates all the curvature blocks instances in ``self._blocks``.""" assert self._jaxpr is not None blocks_list = [] counters = dict() for tag_eqn, idx in zip(self._jaxpr.layer_tags, self._jaxpr.layer_indices): # pytype: disable=attribute-error # always-use-return-annotations # Correctly get the block class if idx in self._index_to_block_ctor: cls = self._index_to_block_ctor[idx] elif tag_eqn.primitive.name in self._layer_tag_to_block_ctor: cls = self._layer_tag_to_block_ctor[tag_eqn.primitive.name] elif tag_eqn.primitive.name in _DEFAULT_TAG_TO_BLOCK_CTOR: cls = _DEFAULT_TAG_TO_BLOCK_CTOR[tag_eqn.primitive.name] else: raise ValueError(f"Did not find anywhere a block class for tag " f"{tag_eqn.primitive.name}.") if "name" in tag_eqn.params: block_name = tag_eqn.params["name"] assert block_name not in counters counters[block_name] = 1 else: if isinstance(cls, functools.partial): block_name = cls.func.__name__ else: block_name = cls.__name__ c = counters.get(block_name, 0) counters[block_name] = c + 1 block_name += "__" + str(c) blocks_list.append(cls(tag_eqn, block_name)) self._blocks = tuple(blocks_list) @property def blocks(self) -> Optional[Tuple[curvature_blocks.CurvatureBlock]]: """The tuple of :class:`~CurvatureBlock` instances used for each layer.""" self._check_finalized() return self._blocks @property def num_blocks(self) -> int: """The number of separate blocks that this estimator has.""" return len(self.blocks) @property def block_dims(self) -> Shape: """The number of elements of all parameter variables for each block.""" return tuple(block.dim for block in self.blocks) @property def dim(self) -> int: """The number of elements of all parameter variables together.""" return 
sum(self.block_dims) @property def jaxpr(self) -> tracer.ProcessedJaxpr: self._check_finalized() return self._jaxpr # pytype: disable=bad-return-type # always-use-return-annotations @property def params_structure_vector_of_indices(self) -> utils.Params: """A tree structure with parameters replaced by their indices.""" return jax.tree_util.tree_unflatten( self.jaxpr.params_tree, range(len(self.jaxpr.params_vars_flat)) ) @property def indices_to_block_map( self ) -> Mapping[Tuple[int, ...], curvature_blocks.CurvatureBlock]: """A mapping of parameter indices to their associated blocks.""" return dict(zip(self.jaxpr.layer_indices, self.blocks)) @property def params_block_index(self) -> utils.Params: """A structure, which shows each parameter to which block it corresponds. Returns: A parameter-like structure, where each parameter is replaced by an integer index. This index specifies the block (found by ``self.blocks[index]``) which approximates the part of the curvature matrix associated with the parameter. """ params_block_index: list[Optional[int]] = [None] * self.num_params_variables for i, block_indices in enumerate(self.jaxpr.layer_indices): for index in block_indices: params_block_index[index] = i assert all(x is not None for x in params_block_index) return jax.tree_util.tree_unflatten( self.jaxpr.params_tree, params_block_index) @property def num_params_variables(self) -> int: """The number of separate parameter variables of the model.""" return len(self.jaxpr.params_vars_flat) @utils.auto_scope_method def _compute_losses_vjp(self, func_args: utils.FuncArgs): """Computes all model statistics needed for estimating the curvature.""" return self._vjp(func_args) def params_vector_to_blocks_vectors( self, parameter_structured_vector: utils.Params, ) -> Tuple[Tuple[Array, ...]]: """Splits the parameters to values for each corresponding block.""" params_values_flat = jax.tree_util.tree_leaves(parameter_structured_vector) blocks_vectors: list[Tuple[Array, ...]] = [] for indices in self.jaxpr.layer_indices: blocks_vectors.append(tuple(params_values_flat[i] for i in indices)) return tuple(blocks_vectors) def blocks_vectors_to_params_vector( self, blocks_vectors: Sequence[Sequence[Array]], ) -> utils.Params: """Reverses the effect of ``self.vectors_to_blocks``.""" if len(blocks_vectors) != self.num_blocks: raise ValueError("Incorrect number of block vectors. 
Expected " f"{self.num_blocks}, but got {len(blocks_vectors)}.") values_flat: list[Optional[Array]] = [None] * self.num_params_variables for idx, (indices, vectors) in enumerate( zip(self.jaxpr.layer_indices, blocks_vectors)): if len(indices) != len(vectors): raise ValueError(f"Expected len(block_vectors[{idx}])=={len(indices)}, " f"not {len(vectors)}.") for i, v in zip(indices, vectors): assert values_flat[i] is None values_flat[i] = v assert not any(v is None for v in values_flat) return jax.tree_util.tree_unflatten(self.jaxpr.params_tree, values_flat) def _finalize(self, func_args: utils.FuncArgs): self._jaxpr = self._vjp(func_args, return_only_jaxpr=True) # pytype: disable=annotation-type-mismatch # always-use-return-annotations self._create_blocks() @utils.auto_scope_method def init( self, rng: PRNGKey, func_args: utils.FuncArgs, exact_powers_to_cache: Optional[curvature_blocks.ScalarOrSequence], approx_powers_to_cache: Optional[curvature_blocks.ScalarOrSequence], cache_eigenvalues: bool = False, ) -> "BlockDiagonalCurvature.State": if not self.finalized: self.finalize(func_args) blocks_init = [] blocks_rng = jax.random.split(rng, self.num_blocks) for block, block_rng in zip(self.blocks, blocks_rng): block_init = block.init( rng=block_rng, exact_powers_to_cache=exact_powers_to_cache, approx_powers_to_cache=approx_powers_to_cache, cache_eigenvalues=cache_eigenvalues) blocks_init.append(block_init) return BlockDiagonalCurvature.State( synced=jnp.asarray(True), blocks_states=tuple(blocks_init), ) def _sync_state( self, state: "BlockDiagonalCurvature.State", pmap_axis_name: Optional[str], ) -> "BlockDiagonalCurvature.State": block_states = [] for block, block_state in zip(self.blocks, state.blocks_states): block_states.append(block.sync(block_state.copy(), pmap_axis_name)) return BlockDiagonalCurvature.State( synced=jnp.asarray(True), blocks_states=tuple(block_states), ) @utils.auto_scope_method def sync( self, state: "BlockDiagonalCurvature.State", pmap_axis_name: Optional[str], ) -> "BlockDiagonalCurvature.State": return jax.lax.cond( state.synced, lambda s: s, functools.partial(self._sync_state, pmap_axis_name=pmap_axis_name), state, ) @utils.auto_scope_method def multiply_matpower( self, state: "BlockDiagonalCurvature.State", parameter_structured_vector: utils.Params, identity_weight: Union[Numeric, Sequence[Numeric]], power: Scalar, exact_power: bool, use_cached: bool, pmap_axis_name: Optional[str], ) -> utils.Params: blocks_vectors = self.params_vector_to_blocks_vectors( parameter_structured_vector) identity_weight = utils.to_tuple_or_repeat(identity_weight, self.num_blocks) thunks = [] for block, block_state, block_vector, block_identity_weight in zip( self.blocks, state.blocks_states, blocks_vectors, identity_weight): thunks.append( functools.partial( block.multiply_matpower, state=block_state, vector=block_vector, identity_weight=block_identity_weight, power=power, exact_power=exact_power, use_cached=use_cached, ) ) if self._distributed_multiplies and pmap_axis_name is not None: result = utils.distribute_thunks(thunks, pmap_axis_name) else: result = tuple(thunk() for thunk in thunks) parameter_structured_result = self.blocks_vectors_to_params_vector(result) assert utils.abstract_objects_equal( parameter_structured_vector, parameter_structured_result) return parameter_structured_result @utils.auto_scope_method def block_eigenvalues( self, state: "BlockDiagonalCurvature.State", use_cached: bool, ) -> Tuple[Array, ...]: """Computes the eigenvalues for each block of the curvature 
estimator. Args: state: The state of the estimator. use_cached: Whether to use a cached versions of the eigenvalues or to use the most recent curvature estimates to compute them. The cached version are going to be *at least* as fresh as the last time you called :func:`~CurvatureEstimator.update_cache` with ``eigenvalues=True``. Returns: A tuple of arrays containing the eigenvalues for each block. The order of this tuple corresponds to the ordering of ``self.blocks``. To understand which parameters correspond to which block you can call ``self.parameters_block_index``. """ return tuple(block.eigenvalues(b_state, use_cached=use_cached) for block, b_state in zip(self.blocks, state.blocks_states)) @utils.auto_scope_method def eigenvalues( self, state: "BlockDiagonalCurvature.State", use_cached: bool, ) -> Array: blocks_eigenvalues = self.block_eigenvalues(state, use_cached) return jnp.concatenate(blocks_eigenvalues, axis=0) @utils.auto_scope_method def update_curvature_matrix_estimate( self, state: "BlockDiagonalCurvature.State", ema_old: Numeric, ema_new: Numeric, batch_size: Numeric, rng: PRNGKey, func_args: utils.FuncArgs, estimation_mode: Optional[str] = None, ) -> "BlockDiagonalCurvature.State": if not self.finalized: self.finalize(func_args) estimation_mode = estimation_mode or self.default_estimation_mode # Compute the losses and the VJP function from the function inputs losses, losses_vjp = self._compute_losses_vjp(func_args) if "fisher" in estimation_mode: if any(not isinstance(l, loss_functions.NegativeLogProbLoss) for l in losses): raise ValueError( f"One of the losses in the function is not an instance of " f"`loss_functions.NegativeLogProbLoss`, which is incompatible " f"with the estimation mode provided - {estimation_mode}.") # Helper function that updates the blocks given a vjp vector def update_blocks(vjp_vec_, state_, ema_old_, ema_new_): blocks_info_ = losses_vjp(vjp_vec_) assert len(blocks_info_) == self.num_blocks new_state = [] for block_, block_state_, block_info_ in zip( self.blocks, state_.blocks_states, blocks_info_): new_state.append(block_.update_curvature_matrix_estimate( block_state_, block_info_, ema_old_, ema_new_, batch_size)) return BlockDiagonalCurvature.State( synced=jnp.asarray(False), blocks_states=tuple(new_state), ) def maybe_do_multiple_updates(update_func): if self._num_samples > 1 and self._should_vmap_samples: def f(rng_i): return update_func(state, rng_i, ema_old) states = jax.vmap(f)(jax.random.split(rng, self._num_samples)) # This implementation is quick and hacky and might break in the future. 
return jax.tree_util.tree_map( lambda x: ( # pylint: disable=g-long-lambda jnp.mean(x, axis=0) if jnp.issubdtype(x.dtype, jnp.floating) else x[0]), states) elif self._num_samples > 1: def f(carry, rng_i): state_i, ema_old_i = carry new_state_i = update_func(state_i, rng_i, ema_old_i) return (new_state_i, jnp.ones_like(ema_old_i)), None (new_state, _), _ = jax.lax.scan( f, init=(state, jnp.asarray(ema_old)), xs=jax.random.split(rng, self._num_samples) ) return new_state else: return update_func(state, rng, ema_old) if estimation_mode == "fisher_gradients": def update_func(state_i, rng_i, ema_old_i): keys = jax.random.split( rng_i, len(losses)) if len(losses) > 1 else [rng_i] vjp_vec = tuple( loss.grad_of_evaluate_on_sample(key, coefficient_mode="sqrt") for loss, key in zip(losses, keys)) return update_blocks(vjp_vec, state_i, ema_old_i, ema_new) return maybe_do_multiple_updates(update_func) elif estimation_mode == "fisher_empirical": vjp_vec = tuple( loss.grad_of_evaluate(None, coefficient_mode="regular") for loss in losses) return update_blocks(vjp_vec, state, ema_old, ema_new) elif estimation_mode in ("fisher_curvature_prop", "ggn_curvature_prop"): def update_func(state_i, rng_i, ema_old_i): keys = jax.random.split( rng_i, len(losses)) if len(losses) > 1 else [rng_i] vjp_vec = [] for loss, key in zip(losses, keys): if estimation_mode == "fisher_curvature_prop": shape = loss.fisher_factor_inner_shape random_b = jax.random.bernoulli(key, shape=shape) vjp_vec.append(loss.multiply_fisher_factor(random_b * 2.0 - 1.0)) else: shape = loss.ggn_factor_inner_shape random_b = jax.random.bernoulli(key, shape=shape) vjp_vec.append(loss.multiply_ggn_factor(random_b * 2.0 - 1.0)) return update_blocks(tuple(vjp_vec), state_i, ema_old_i, ema_new) return maybe_do_multiple_updates(update_func) elif estimation_mode in ("fisher_exact", "ggn_exact"): # We use the following trick to simulate summation. The equation is: # estimate = ema_old * estimate + ema_new * (sum_i estimate_index_i^2) # weight = ema_old * weight + ema_new # Instead we update the estimate n times with the following updates: # for k = 1 # estimate_k = ema_old * estimate + (ema_new/n) * n*estimate_index_k^2 # weight_k = ema_old * weight + (ema_new/n) # for k > 1: # estimate_k = 1.0 * estimate_k-1 + (ema_new/n) * n*estimate_index_k^2 # weight_k = 1.0 * weight_k-1 + (ema_new/n) # Which is mathematically equivalent to the original version. zero_tangents = jax.tree_util.tree_map( jnp.zeros_like, list(loss.parameter_dependants for loss in losses)) if estimation_mode == "fisher_exact": shapes = [l.fisher_factor_inner_shape[1:] for l in losses] else: shapes = [l.ggn_factor_inner_shape[1:] for l in losses] total_num_indices = sum(sum(s) for s in shapes) ema_new = ema_new / total_num_indices # For now we support only inner shapes of 1 dimension, hence below the # (loss_num_indices,). assert all(len(s) == 1 for s in shapes) for i, (loss, (loss_num_indices,)) in enumerate(zip(losses, shapes)): for index in range(loss_num_indices): vjp_vec = zero_tangents.copy() if estimation_mode == "fisher_exact": vjp_vec[i] = loss.multiply_fisher_factor_replicated_one_hot([index]) else: vjp_vec[i] = loss.multiply_ggn_factor_replicated_one_hot([index]) if isinstance(vjp_vec[i], Array): # In the special case of only one parameter, it still needs to be a # tuple for the tangents. 
vjp_vec[i] = (vjp_vec[i],) vjp_vec[i] = jax.tree_util.tree_map( lambda x: x * jnp.sqrt(total_num_indices), vjp_vec[i]) state = update_blocks(tuple(vjp_vec), state, ema_old, ema_new) ema_old = 1.0 return state else: raise ValueError(f"Unrecognised estimation_mode {estimation_mode}.") @utils.auto_scope_method def update_cache( self, state: "BlockDiagonalCurvature.State", identity_weight: Union[Numeric, Sequence[Numeric]], exact_powers: Optional[curvature_blocks.ScalarOrSequence], approx_powers: Optional[curvature_blocks.ScalarOrSequence], eigenvalues: bool, pmap_axis_name: Optional[str], ) -> "BlockDiagonalCurvature.State": identity_weight = utils.to_tuple_or_repeat(identity_weight, self.num_blocks) thunks = [] for block, block_state, block_identity_weight in zip(self.blocks, state.blocks_states, identity_weight): thunks.append( functools.partial( block.update_cache, state=block_state, identity_weight=block_identity_weight, exact_powers=exact_powers, approx_powers=approx_powers, eigenvalues=eigenvalues, ) ) if self._distributed_cache_updates and pmap_axis_name is not None: assert utils.in_pmap(pmap_axis_name) def filter_outputs(thunk, vals): # We must precompute the matches outside of the thunk itself, as the # thunk will be traced separately from the current compiled context # (since it's called within a lax.switch statement). matches = jax.tree_util.tree_map(lambda o, v: o is v, thunk(), vals) def new_thunk(): return jax.tree_util.tree_map( lambda o, m: None if m else o, thunk(), matches ) return new_thunk # Create new thunks that only return the state arrays that they actually # modify. This should reduce the communication costs associated with the # syncs performed by utils.distribute_thunks. filtered_thunks = tuple( filter_outputs(thunk, block_state) for thunk, block_state in zip(thunks, state.blocks_states)) new_states = utils.distribute_thunks(filtered_thunks, pmap_axis_name) # Restore all of the unmodified state arrays. new_states = jax.tree_util.tree_map(lambda s, n: s if n is None else n, state.blocks_states, new_states) else: new_states = tuple(thunk() for thunk in thunks) return BlockDiagonalCurvature.State( synced=state.synced, blocks_states=new_states, ) @utils.auto_scope_method def to_diagonal_block_dense_matrix( self, state: "BlockDiagonalCurvature.State", ) -> Tuple[Array, ...]: """Returns a tuple of arrays with explicit dense matrices of each block.""" return tuple(block.to_dense_matrix(block_state) for block, block_state in zip(self.blocks, state.blocks_states)) @utils.auto_scope_method def to_dense_matrix( self, state: "BlockDiagonalCurvature.State" ) -> Array: return scipy.linalg.block_diag(*self.to_diagonal_block_dense_matrix(state)) class ExplicitExactCurvature(BlockDiagonalCurvature): """Explicit exact full curvature estimator class. This class estimates the full curvature matrix by looping over the batch dimension of the input data and for each single example computes an estimate of the curvature matrix and then averages over all examples in the input data. This implies that the computation scales linearly (without parallelism) with the batch size. The class stores the estimated curvature as a dense matrix, hence its memory requirement is (number of parameters)^2. If ``estimation_mode`` is ``fisher_exact`` or ``ggn_exact`` than this would compute the exact curvature, but other modes are also supported. 
As a result of looping over the input data this class needs to know the index of the batch in the arguments to the model function and additionally, since the loop is achieved through indexing, each array leaf of that argument must have the same first dimension size, which will be interpreted as the batch size. """ def __init__( self, func: utils.Func, params_index: int = 0, batch_index: int = 1, default_estimation_mode: str = "fisher_exact", layer_tag_to_block_ctor: Optional[Mapping[str, CurvatureBlockCtor]] = None, index_to_block_ctor: Optional[Mapping[Tuple[int, ...], CurvatureBlockCtor]] = None, auto_register_tags: bool = True, **auto_register_kwargs ): """Initializes the curvature instance. Args: func: The model function, which should have at least one registered loss. params_index: The index of the parameters argument in arguments list of ``func``. batch_index: Specifies at which index of the inputs to ``func`` is the batch, representing data over which we average the curvature. default_estimation_mode: The estimation mode which to use by default when calling ``self.update_curvature_matrix_estimate``. layer_tag_to_block_ctor: An optional dict mapping tags to specific classes of block approximations, which to override the default ones. index_to_block_ctor: An optional dict mapping a specific block parameter indices to specific classes of block approximation, which to override the default ones. To get the correct indices check ``estimator.indices_to_block_map``. auto_register_tags: Whether to automatically register layer tags for parameters that have not been manually registered. For further details see :func:``~auto_register_tags``. **auto_register_kwargs: Any keyword arguments to pass to into the auto registration function. """ super().__init__( func=func, default_estimation_mode=default_estimation_mode, params_index=params_index, layer_tag_to_block_ctor=layer_tag_to_block_ctor, index_to_block_ctor=index_to_block_ctor, auto_register_tags=auto_register_tags, **auto_register_kwargs ) self._batch_index = batch_index @property def batch_index(self) -> int: """The index in the inputs of the model function, which is the batch.""" return self._batch_index def _create_blocks(self): # Here in order to be able to have a block together for all parameters, we # create a non-existing (in the original graph) generic layer tag equation. assert self._jaxpr is not None jax_version = ( jax.__version_info__ if hasattr(jax, "__version_info__") else tuple(map(int, jax.__version__.split(".")))) if jax_version > (0, 3, 4): self._blocks = (curvature_blocks.NaiveFull( layer_tag_eq=tags.LayerTagEqn( primitive=tags.generic, invars=list(self._jaxpr.params_vars_flat), outvars=list(self._jaxpr.params_vars_flat), params={}, effects=jax.core.no_effects, source_info=jax.core.source_info_util.new_source_info() ), name="ExactCurvature" ),) else: self._blocks = (curvature_blocks.NaiveFull( layer_tag_eq=tags.LayerTagEqn( primitive=tags.generic, invars=list(self._jaxpr.params_vars_flat), outvars=list(self._jaxpr.params_vars_flat), params={}, source_info=jax.core.source_info_util.new_source_info() # pytype: disable=missing-parameter ), name="ExactCurvature" ),) def _compute_losses_vjp(self, func_args): # For some reason pytype can't detect that this attribute exists from the # super class. 
losses, losses_vjp = self._vjp(func_args) # pytype: disable=attribute-error def modified_losses_jvp(vjp_vec): blocks_info = losses_vjp(vjp_vec) tangents = [block["params_tangent"] for block in blocks_info] tangents = jax.tree_util.tree_leaves(tangents) # Need to reorder all of the block information to follow the canonical # order of variables params_vars = BlockDiagonalCurvature.params_vector_to_blocks_vectors( self, self.jaxpr.params_vars) # pytype: disable=wrong-arg-types order = np.argsort([p.count for p in jax.tree_util.tree_leaves(params_vars)]) return [dict(params_tangent=tuple(tangents[i] for i in order))] return losses, modified_losses_jvp def params_vector_to_blocks_vectors( self, parameter_structured_vector: utils.Params, ) -> Tuple[Tuple[Array, ...]]: return (tuple(jax.tree_util.tree_leaves(parameter_structured_vector)),) def blocks_vectors_to_params_vector( self, blocks_vectors: Sequence[Sequence[Array]], ) -> utils.Params: assert len(blocks_vectors) == self.num_blocks return jax.tree_util.tree_unflatten( self.jaxpr.params_tree, blocks_vectors[0]) def update_curvature_matrix_estimate( self, state: BlockDiagonalCurvature.State, ema_old: Numeric, ema_new: Numeric, batch_size: Numeric, rng: PRNGKey, func_args: utils.FuncArgs, estimation_mode: Optional[str] = None, ) -> curvature_blocks.Full.State: rng = jax.random.split(rng, batch_size) def single_state_update( index: Numeric, state_: curvature_blocks.Full.State ) -> curvature_blocks.Full.State: is_first = index == 0 args = list(func_args) # Index the batch for the `index` arguments. args[self._batch_index] = jax.tree_util.tree_map( lambda x: x[index][None], args[self._batch_index]) return BlockDiagonalCurvature.update_curvature_matrix_estimate( self, state=state_, ema_old=is_first * ema_old + (1 - is_first) * 1.0, ema_new=ema_new / batch_size, batch_size=1, rng=rng[index], func_args=args, estimation_mode=estimation_mode, ) return jax.lax.fori_loop(0, batch_size, single_state_update, state) def update_cache( self, state: BlockDiagonalCurvature.State, identity_weight: Numeric, exact_powers: Optional[curvature_blocks.ScalarOrSequence], approx_powers: Optional[curvature_blocks.ScalarOrSequence], eigenvalues: bool, pmap_axis_name: Optional[str], ) -> curvature_blocks.Full.State: block_state = self.blocks[0].update_cache( state=state.blocks_states[0], identity_weight=identity_weight, exact_powers=exact_powers, approx_powers=approx_powers, eigenvalues=eigenvalues, ) return BlockDiagonalCurvature.State(blocks_states=(block_state,))
kfac-jax-main
kfac_jax/_src/curvature_estimator.py
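The curvature estimator API documented in the file above is easiest to follow with a small end-to-end sketch. The code below is not part of either source file in this dump: it assumes a user-defined ``loss_fn(params, batch)`` whose loss is registered via ``kfac_jax.register_softmax_cross_entropy_loss``, and the shapes, damping value (``identity_weight``) and EMA weights are illustrative placeholders. It exercises the ``BlockDiagonalCurvature`` methods described above: ``init``, ``update_curvature_matrix_estimate``, ``update_cache`` and ``multiply_inverse``.

# Minimal usage sketch (illustrative only): a hand-rolled preconditioning step
# with BlockDiagonalCurvature, outside of the Optimizer class.
import jax
import jax.numpy as jnp
import kfac_jax
from kfac_jax._src import curvature_estimator

def loss_fn(params, batch):
  # A toy model; the loss registration is what the estimator traces.
  logits = jnp.dot(batch["x"], params["w"])
  kfac_jax.register_softmax_cross_entropy_loss(logits, batch["y"])
  log_p = jax.nn.log_softmax(logits)
  one_hot = jax.nn.one_hot(batch["y"], logits.shape[-1])
  return -jnp.mean(jnp.sum(one_hot * log_p, axis=-1))

params = {"w": jnp.zeros((8, 4))}
batch = {"x": jnp.ones((32, 8)), "y": jnp.zeros((32,), dtype=jnp.int32)}

estimator = curvature_estimator.BlockDiagonalCurvature(
    func=loss_fn, params_index=0, default_estimation_mode="fisher_gradients")

rng = jax.random.PRNGKey(0)
state = estimator.init(
    rng=rng,
    func_args=(params, batch),
    exact_powers_to_cache=None,
    approx_powers_to_cache=-1,  # cache an approximate inverse (power -1)
    cache_eigenvalues=False,
)

# Refresh the curvature estimate from one batch, then refresh the cache.
state = estimator.update_curvature_matrix_estimate(
    state=state, ema_old=0.95, ema_new=0.05, batch_size=32,
    rng=rng, func_args=(params, batch))
state = estimator.update_cache(
    state=state, identity_weight=1e-3, exact_powers=None,
    approx_powers=-1, eigenvalues=False, pmap_axis_name=None)

# Apply the damped, cached inverse curvature to a gradient.
grads = jax.grad(loss_fn)(params, batch)
precond_grads = estimator.multiply_inverse(
    state=state, parameter_structured_vector=grads, identity_weight=1e-3,
    exact_power=False, use_cached=True, pmap_axis_name=None)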
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """K-FAC optimizer.""" import functools from typing import Callable, Iterator, Optional, Sequence, Any, Generic, Tuple, Union, Dict import jax from jax import lax import jax.numpy as jnp from kfac_jax._src import curvature_estimator from kfac_jax._src import utils from typing_extensions import TypeAlias # Types for annotation Array = utils.Array PRNGKey = utils.PRNGKey Numeric = utils.Numeric Params = utils.Params Batch = utils.Batch FuncState = Any # FuncState = utils.FuncState FuncAux = utils.FuncAux OptimizerState: TypeAlias = "Optimizer.State" ScheduleType = Union[Callable[[Numeric, Optional[Numeric]], Numeric], Callable[[Numeric], Numeric]] FuncArgsVariants = Union[ Tuple[Params, Batch], Tuple[Params, FuncState, Batch], Tuple[Params, PRNGKey, Batch], Tuple[Params, FuncState, PRNGKey, Batch], ] FuncOutputs = Union[ Array, Tuple[Array, FuncState], Tuple[Array, FuncAux], Tuple[Array, Tuple[FuncState, FuncAux]], ] ValueFunc = Callable[..., FuncOutputs] ValueAndGradFunc = Callable[..., Tuple[FuncOutputs, Params]] ReturnWithFuncState = Tuple[ Params, OptimizerState, FuncState, Dict[str, Array] ] ReturnWithoutFuncState = Tuple[ Params, OptimizerState, Dict[str, Array] ] ReturnEither = Union[ReturnWithFuncState, ReturnWithoutFuncState] class Optimizer(utils.WithStagedMethods): """The K-FAC optimizer.""" @utils.register_state_class class State(Generic[Params], utils.State): r"""Persistent state of the optimizer. Attributes: velocities: The update to the parameters from the previous step - :math:`\theta_t - \theta_{t-1}`. estimator_state: The persistent state for the curvature estimator. damping: When using damping adaptation, this will contain the current value. data_seen: The number of training cases that the optimizer has processed. step_counter: An integer giving the current step number :math:`t`. 
""" velocities: Params estimator_state: curvature_estimator.BlockDiagonalCurvature.State damping: Optional[Array] data_seen: Numeric step_counter: Numeric @classmethod def from_dict(cls, dict_representation: Dict[str, Any]) -> OptimizerState: dict_representation["estimator_state"] = ( curvature_estimator.BlockDiagonalCurvature.State.from_dict( dict_representation["estimator_state"] ) ) return cls(**dict_representation) def __init__( self, value_and_grad_func: ValueAndGradFunc, l2_reg: Numeric, value_func_has_aux: bool = False, value_func_has_state: bool = False, value_func_has_rng: bool = False, use_adaptive_learning_rate: bool = False, learning_rate_schedule: Optional[ScheduleType] = None, use_adaptive_momentum: bool = False, momentum_schedule: Optional[ScheduleType] = None, use_adaptive_damping: bool = False, damping_schedule: Optional[ScheduleType] = None, initial_damping: Optional[Numeric] = None, min_damping: Numeric = 1e-8, max_damping: Numeric = jnp.inf, include_damping_in_quad_change: bool = False, damping_adaptation_interval: int = 5, damping_adaptation_decay: Numeric = 0.9, damping_lower_threshold: Numeric = 0.25, damping_upper_threshold: Numeric = 0.75, always_use_exact_qmodel_for_damping_adjustment: bool = False, norm_constraint: Optional[Numeric] = None, num_burnin_steps: int = 10, estimation_mode: str = "fisher_gradients", curvature_ema: Numeric = 0.95, curvature_update_period: int = 1, inverse_update_period: int = 5, use_exact_inverses: bool = False, batch_process_func: Optional[Callable[[Batch], Batch]] = None, register_only_generic: bool = False, patterns_to_skip: Sequence[str] = (), auto_register_kwargs: Optional[Dict[str, Any]] = None, layer_tag_to_block_ctor: Optional[ Dict[str, curvature_estimator.CurvatureBlockCtor] ] = None, multi_device: bool = False, debug: bool = False, batch_size_extractor: Callable[ [Batch], Numeric ] = utils.default_batch_size_extractor, pmap_axis_name: str = "kfac_axis", forbid_setting_attributes_after_finalize: bool = True, modifiable_attribute_exceptions: Sequence[str] = (), include_norms_in_stats: bool = False, include_per_param_norms_in_stats: bool = False, distributed_precon_apply: bool = True, distributed_inverses: bool = True, num_estimator_samples: int = 1, should_vmap_estimator_samples: bool = False, ): """Initializes the K-FAC optimizer with the provided settings. NOTE: Please read the docstring for this constructor carefully. Especially the description of ``value_and_grad_func``. A note on the "damping" parameter: One of the main complications of using second-order optimizers like K-FAC is the "damping" parameter. This parameter is multiplied by the identity matrix and (approximately) added to the curvature matrix (i.e. the Fisher or GGN) before it is inverted and multiplied by the gradient when computing the update (before any learning rate scaling). The damping should follow the scale of the objective, so that if you multiply your loss by some factor you should do the same for the damping. Roughly speaking, larger damping values constrain the update vector to a smaller region around zero, which is needed in general since the second-order approximations that underly second-order methods can break down for large updates. (In gradient descent the learning rate plays an analogous role.) The relationship between the damping parameter and the radius of this region is complicated and depends on the scale of the objective amongst other things. 
The optimizer provides a system for adjusting the damping automatically via the ``use_adaptive_damping`` argument, although this system is not reliable, especially for highly stochastic objectives. Using a fixed value or a manually tuned schedule can work as good or better for some problems, while it can be a very poor choice for others (like deep autoencoders). Empirically we have found that using a fixed value works well enough for common architectures like convnets and transformers. Args: value_and_grad_func: Python callable. This function should return the value of the loss to be optimized and its gradients, and optionally the model state and auxiliary information (usually statistics to log). Note that it should *not* be jitted/pmapped or otherwise compiled by JAX, as this can lead to errors. (Compilation is done internally by the optimizer.) interface of this function should be should be: ``out_args, loss_grads = value_and_grad_func(*in_args)``. Here, ``in_args`` is ``(params, func_state, rng, batch)``, with ``rng`` omitted if ``value_func_has_rng`` is ``False``, and with ``func_state`` omitted if ``value_func_has_state`` is ``False``. Meanwhile, ``out_args`` is ``(loss, (func_state, aux))`` if ``value_func_has_state`` and ``value_func_has_aux`` are both ``True``, ``(loss, func_state)`` if if ``value_func_has_state`` is ``True`` and ``value_func_has_aux`` is ``False``, ``(loss, aux)`` if ``value_func_has_state`` is ``False`` and ``value_func_has_aux`` is ``True``, and finally ``loss`` if ``value_func_has_state`` and ``value_func_has_aux`` are both ``False``. This should be consistent with how JAX's ``value_and_grad`` API function is typically used. l2_reg: Scalar. Set this value to tell the optimizer what L2 regularization coefficient you are using (if any). Note the coefficient appears in the regularizer as ``coeff / 2 * sum(param**2)``. This adds an additional diagonal term to the curvature and hence will affect the quadratic model when using adaptive damping. Note that the user is still responsible for adding regularization to the loss. value_func_has_aux: Boolean. Specifies whether the provided callable ``value_and_grad_func`` returns auxiliary data. (Default: ``False``) value_func_has_state: Boolean. Specifies whether the provided callable ``value_and_grad_func`` has a persistent state that is passed in and out. (Default: ``False``) value_func_has_rng: Boolean. Specifies whether the provided callable ``value_and_grad_func`` additionally takes as input an rng key. (Default: ``False``) use_adaptive_learning_rate: Boolean. Specifies whether to use the special rule from the original K-FAC paper for picking the learning rate at each step. Note that this won't work well for stochastic objectives. If this is ``False``, the user must use the ``learning_rate`` argument of the step function, or the constructor argument ``learning_rate_schedule``. (Default: ``False``) learning_rate_schedule: Callable. A schedule for the learning rate. This should take as input the current step number, and optionally the amount of data seen so far as a keyword argument ``data_seen``, and return a single array that represents the learning rate. (Default: ``None``) use_adaptive_momentum: Boolean. Specifies whether to use the special rule from the original K-FAC paper for picking the momentum "decay" parameter at each step. Note that this won't work well for stochastic objectives. 
If this is ``False``, the user must use the ``momentum`` argument of the step function, or the constructor argument ``momentum_schedule``. (Default: ``False``) momentum_schedule: Callable. A schedule for the momentum parameter. This should take as input the current step number, and optionally the amount of data seen so far as a keyword argument ``data_seen``, and return a single array that represents the momentum. (Default: ``None``) use_adaptive_damping: Boolean. Specifies whether the optimizer will use the Levenberg-Marquardt method to automatically adjust the damping every ``damping_adaptation_interval`` iterations. If this is set to ``False`` the user must provide a value to the damping argument of the step function at each iteration, or use the ``damping_schedule`` constructor argument. Note that the effectiveness of this technique seems to vary between problems. (Default: ``False``) damping_schedule: Callable. A schedule for the damping. This should take as input the current step number, and optionally the amount of data seen so far as a keyword argument ``data_seen``, and return a single array that represents the learning rate. (Default: ``None``) initial_damping: Scalar or None. This specifies the initial value of the damping that the optimizer will use when using automatic damping adaptation. (Default: ``None``) min_damping: Scalar. Minimum value the damping parameter can take when using automatic damping adaptation. Note that the default value of 1e-8 is quite arbitrary, and you may have to adjust this up or down for your particular problem. If you are using a non-zero value of l2_reg you *may* be able to set this to zero. (Default: ``1e-8``) max_damping: Scalar. Maximum value the damping parameter can take when using automatic damping adaptation. (Default: ``Infinity``) include_damping_in_quad_change: Boolean. Whether to include the contribution of the damping in the quadratic model for the purposes computing the reduction ration ("rho") in the Levenberg-Marquardt scheme used for adapting the damping. Note that the contribution from the ``l2_reg`` argument is always included. (Default: ``False``) damping_adaptation_interval: Int. The number of steps in between adapting the damping parameter. (Default: ``5``) damping_adaptation_decay: Scalar. The damping parameter will be adjusted up or down by ``damping_adaptation_decay ** damping_adaptation_interval``, or remain unchanged, every ``damping_adaptation_interval`` number of iterations. (Default: ``0.9``) damping_lower_threshold: Scalar. The damping parameter is increased if the reduction ratio is below this threshold. (Default: ``0.25``) damping_upper_threshold: Scalar. The damping parameter is decreased if the reduction ratio is below this threshold. (Default: ``0.75``) always_use_exact_qmodel_for_damping_adjustment: Boolean. When using learning rate and/or momentum adaptation, the quadratic model change used for damping adaption is always computed using the exact curvature matrix. Otherwise, there is an option to use either the exact or approximate curvature matrix to compute the quadratic model change, which is what this argument controls. When True, the exact curvature matrix will be used, which is more expensive, but could possibly produce a better damping schedule. (Default: ``False``) norm_constraint: Scalar. If specified, the update is scaled down so that its approximate squared Fisher norm ``v^T F v`` is at most the specified value. (Note that here ``F`` is the approximate curvature matrix, not the exact.) 
May only be used when ``use_adaptive_learning_rate`` is ``False``. (Default: ``None``) num_burnin_steps: Int. At the start of optimization, e.g. the first step, before performing the actual step the optimizer will perform this many times updates to the curvature approximation without updating the actual parameters. (Default: ``10``) estimation_mode: String. The type of estimator to use for the curvature matrix. See the documentation for :class:`~CurvatureEstimator` for a detailed description of the possible options. (Default: ``fisher_gradients``). curvature_ema: The decay factor used when calculating the covariance estimate moving averages. (Default: ``0.95``) curvature_update_period: Int. The number of steps in between updating the the curvature estimates. (Default: ``1``) inverse_update_period: Int. The number of steps in between updating the the computation of the inverse curvature approximation. (Default: ``5``) use_exact_inverses: Bool. If ``True``, preconditioner inverses are computed "exactly" without the pi-adjusted factored damping approach. Note that this involves the use of eigendecompositions, which can sometimes be much more expensive. (Default: ``False``) batch_process_func: Callable. A function which to be called on each batch before feeding to the KFAC on device. This could be useful for specific device input optimizations. (Default: ``None``) register_only_generic: Boolean. Whether when running the auto-tagger to register only generic parameters, or allow it to use the graph matcher to automatically pick up any kind of layer tags. (Default: ``False``) patterns_to_skip: Tuple. A list of any patterns that should be skipped by the graph matcher when auto-tagging. (Default: ``()``) auto_register_kwargs: Any additional kwargs to be passed down to :func:`~auto_register_tags`, which is called by the curvature estimator. (Default: ``None``) layer_tag_to_block_ctor: Dictionary. A mapping from layer tags to block classes which to override the default choices of block approximation for that specific tag. See the documentation for :class:`~CurvatureEstimator` for a more detailed description. (Default: ``None``) multi_device: Boolean. Whether to use pmap and run the optimizer on multiple devices. (Default: ``False``) debug: Boolean. If neither the step or init functions should be jitted. Note that this also overrides ``multi_device`` and prevents using pmap. (Default: ``False``) batch_size_extractor: A function that takes as input the function arguments and returns the batch size for a single device. (Default: ``kfac.utils.default_batch_size_extractor``) pmap_axis_name: String. The name of the pmap axis to use when ``multi_device`` is set to True. (Default: ``kfac_axis``) forbid_setting_attributes_after_finalize: Boolean. By default after the object is finalized, you can not set any of its properties. This is done in order to protect the user from making changes to the object attributes that would not be picked up by various internal methods after they have been compiled. However, if you are extending this class, and clearly understand the risks of modifying attributes, setting this to ``False`` will remove the restriction. (Default: ``True``) modifiable_attribute_exceptions: Sequence of strings. Gives a list of names for attributes that can be modified after finalization even when ``forbid_setting_attributes_after_finalize`` is ``True``. (Default: ``()``) include_norms_in_stats: Boolean. 
If True, the vector norms of the gradient, preconditioned gradient, and parameter update are included in the statistics returned by the step function. (Default: ``False``) include_per_param_norms_in_stats: Boolean. If True, the per-parameter vector norms of the gradient, preconditioned gradient, and parameter update are included in the statistics returned by the step function. (Default: ``False``) distributed_precon_apply: Boolean. Whether to distribute the application of the preconditioner across the different devices in a layer-wise fashion. If False, each device will (redundantly) perform the required operations for all of the layers. (Default: True) distributed_inverses: Boolean. Whether to distribute the inverse computations (required to compute the preconditioner) across the different devices in a layer-wise fashion. If False, each device will (redundantly) perform the required computations for all of the layers. (Default: True) num_estimator_samples: Number of samples (per case) to use when computing stochastic curvature matrix estimates. This option is only used when ``estimation_mode == 'fisher_gradients'`` or ``estimation_mode == '[fisher,ggn]_curvature_prop'``. (Default: 1) should_vmap_estimator_samples: Whether to use ``jax.vmap`` to compute samples when ``num_estimator_samples > 1``. (Default: False) """ super().__init__( multi_device=multi_device, pmap_axis_name=pmap_axis_name if multi_device else None, debug=debug, forbid_setting_attributes_after_finalize= forbid_setting_attributes_after_finalize, excluded_attribute_names=modifiable_attribute_exceptions, ) if use_adaptive_damping and initial_damping is None: raise ValueError("When use_adaptive_damping is True you must provide a " "value for initial_damping.") if not use_adaptive_damping and initial_damping is not None: raise ValueError("When use_adaptive_damping is False you should not " "provide a value for initial_damping.") if use_adaptive_learning_rate and learning_rate_schedule is not None: raise ValueError("If you are using adaptive learning rate then " "`learning_rate_schedule` should be None.") if use_adaptive_momentum and momentum_schedule is not None: raise ValueError("If you are using adaptive momentum then " "`momentum_schedule` should be None.") if use_adaptive_damping and damping_schedule is not None: raise ValueError("If you are using adaptive damping then " "`damping_schedule` should be None.") self._value_and_grad_func = value_and_grad_func self._value_func_has_aux = value_func_has_aux self._value_func_has_state = value_func_has_state self._value_func_has_rng = value_func_has_rng self._value_func: ValueFunc = convert_value_and_grad_to_value_func( value_and_grad_func, has_aux=value_func_has_aux, ) self._l2_reg = jnp.asarray(l2_reg) self._use_adaptive_learning_rate = use_adaptive_learning_rate self._learning_rate_schedule = learning_rate_schedule self._use_adaptive_momentum = use_adaptive_momentum if momentum_schedule is not None: def schedule_with_first_step_zero( global_step: Array, data_seen: Optional[Numeric] = None, ) -> Array: value = utils.call_func_with_conditional_kwargs( momentum_schedule, global_step, data_seen=data_seen) check = jnp.equal(global_step, 0) return check * jnp.zeros_like(value) + (1 - check) * value self._momentum_schedule = schedule_with_first_step_zero else: self._momentum_schedule = None self._use_adaptive_damping = use_adaptive_damping self._damping_schedule = damping_schedule self._initial_damping = initial_damping self._min_damping = min_damping self._max_damping = max_damping
self._include_damping_in_quad_change = include_damping_in_quad_change self._damping_adaptation_decay = damping_adaptation_decay self._damping_adaptation_interval = damping_adaptation_interval self._damping_lower_threshold = damping_lower_threshold self._damping_upper_threshold = damping_upper_threshold self._always_use_exact_qmodel_for_damping_adjustment = ( always_use_exact_qmodel_for_damping_adjustment) self._norm_constraint = norm_constraint self._num_burnin_steps = num_burnin_steps self._estimation_mode = estimation_mode self._curvature_ema = curvature_ema if curvature_update_period > inverse_update_period: raise ValueError( "curvature_update_period ({}) cannot be larger than" " inverse_update_period ({}) as the identical matrix inversion would" " be redundantly performed. Set inverse_update_period larger instead." .format(curvature_update_period, inverse_update_period) ) self._curvature_update_period = curvature_update_period self._inverse_update_period = inverse_update_period self._register_only_generic = register_only_generic self._layer_tag_to_block_cls = layer_tag_to_block_ctor self._patterns_to_skip = patterns_to_skip self._batch_process_func = batch_process_func or (lambda x: x) self._include_norms_in_stats = include_norms_in_stats self._include_per_param_norms_in_stats = include_per_param_norms_in_stats self._batch_size_extractor = batch_size_extractor self._use_cached_inverses = (self._inverse_update_period != 1) self._use_exact_inverses = use_exact_inverses # Curvature estimator self._estimator = curvature_estimator.BlockDiagonalCurvature( func=self._value_func, default_estimation_mode=estimation_mode, params_index=0, layer_tag_to_block_ctor=layer_tag_to_block_ctor, register_only_generic=register_only_generic, patterns_to_skip=patterns_to_skip, distributed_multiplies=distributed_precon_apply, distributed_cache_updates=distributed_inverses, num_samples=num_estimator_samples, should_vmap_samples=should_vmap_estimator_samples, **(auto_register_kwargs or {}), ) self._implicit = curvature_estimator.ImplicitExactCurvature( self._value_func, params_index=0, batch_size_extractor=batch_size_extractor, ) # Each subclass should call finalize on its own, so this gets called only # for instances of exactly this class type. 
if type(self) == Optimizer: # pylint: disable=unidiomatic-typecheck self.finalize() @property def num_burnin_steps(self) -> int: """The number of burnin steps to run before the first parameter update.""" return self._num_burnin_steps @property def l2_reg(self) -> Array: """The weight of the additional diagonal term added to the curvature.""" return self._l2_reg @property def estimator(self) -> curvature_estimator.BlockDiagonalCurvature: """The underlying curvature estimator used by the optimizer.""" return self._estimator @property def damping_decay_factor(self) -> Numeric: """How fast to decay the damping, when using damping adaptation.""" return self._damping_adaptation_decay ** self._damping_adaptation_interval @property def _exact_powers_to_cache(self) -> Optional[Union[int, Sequence[int]]]: if self._use_exact_inverses and self._use_cached_inverses: return -1 else: return None @property def _approx_powers_to_cache(self) -> Optional[Union[int, Sequence[int]]]: if not self._use_exact_inverses and self._use_cached_inverses: return -1 else: return None def should_update_damping( self, state: "Optimizer.State", ) -> Array: """Whether at the current step the optimizer should update the damping.""" return (state.step_counter + 1) % self._damping_adaptation_interval == 0 def should_update_estimate_curvature( self, state: "Optimizer.State" ) -> Union[Array, bool]: """Whether at the current step the optimizer should update the curvature estimates.""" if self._curvature_update_period == 1: return True return state.step_counter % self._curvature_update_period == 0 def should_update_inverse_cache( self, state: "Optimizer.State" ) -> Union[Array, bool]: """Whether at the current step the optimizer should update the inverse curvature approximation.""" if self._inverse_update_period == 1: return True return state.step_counter % self._inverse_update_period == 0 @functools.partial(utils.staged, static_argnums=1) def _rng_split( self, rng: PRNGKey, num: int, ) -> Tuple[Array, ...]: """Splits the ``rng`` key.""" return tuple(jax.random.split(rng, num)) @utils.auto_scope_method def compute_loss_value(self, func_args: FuncArgsVariants) -> Array: """Computes the value of the loss function being optimized.""" return self._value_func(*func_args) def verify_args_and_get_step_counter( self, step_counter: Array, learning_rate: Optional[Array] = None, momentum: Optional[Array] = None, damping: Optional[Array] = None, global_step_int: Optional[int] = None, ) -> int: """Verifies that the arguments passed to the step function are correct.""" # Verify correct arguments invocation if self._use_adaptive_learning_rate and learning_rate is not None: raise ValueError("When use_adaptive_learning_rate is set to True you " "should not pass a value to the step function.") elif not self._use_adaptive_learning_rate and ( self._learning_rate_schedule is None and learning_rate is None): raise ValueError("When use_adaptive_learning_rate is set to False and " "`learning_rate_schedule` is None you must provide a " "value to the step function.") elif self._learning_rate_schedule is not None and learning_rate is not None: raise ValueError("When you have passed a `learning_rate_schedule` you " "should not pass a value to the step function.") if self._use_adaptive_momentum and momentum is not None: raise ValueError("When use_adaptive_momentum is set to True you " "should not pass a value to the step function.") elif not self._use_adaptive_momentum and ( self._momentum_schedule is None and momentum is None): raise ValueError("When 
use_adaptive_momentum is set to False and " "`momentum_schedule` is None you must provide a value to" " the step function.") elif self._momentum_schedule is not None and momentum is not None: raise ValueError("When you have passed a `momentum_schedule` you should " "not pass a value to the step function.") if self._use_adaptive_damping and damping is not None: raise ValueError("When use_adaptive_damping is set to True you " "should not pass a value to the step function.") elif not self._use_adaptive_damping and ( self._damping_schedule is None and damping is None): raise ValueError("When use_adaptive_damping is set to False and " "`damping_schedule` is None you must provide a value to " "the step function.") elif self._damping_schedule is not None and damping is not None: raise ValueError("When you have passed a `damping_schedule` you should " "not pass a value to the step function.") if global_step_int is None: if self.multi_device: return int(utils.get_first(step_counter)) else: return int(step_counter) return global_step_int @utils.staged def _setup_state_and_schedules( self, learning_rate: Optional[Array], momentum: Optional[Array], damping: Optional[Array], step_counter: Array, data_seen: Array, ) -> Tuple[Optional[Array], Optional[Array], Array]: """Helper function for setting up learning rate, momentum and damping.""" # Compute schedules if applicable if self._learning_rate_schedule is not None: assert learning_rate is None learning_rate = utils.call_func_with_conditional_kwargs( self._learning_rate_schedule, step_counter, data_seen=data_seen) if self._momentum_schedule is not None: assert momentum is None momentum = utils.call_func_with_conditional_kwargs( self._momentum_schedule, step_counter, data_seen=data_seen) if self._damping_schedule is not None: assert damping is None damping = utils.call_func_with_conditional_kwargs( self._damping_schedule, step_counter, data_seen=data_seen) else: assert damping is not None return learning_rate, momentum, damping def _setup_func_args_and_rng( self, params: Params, rng: PRNGKey, batch: Batch, func_state: Optional[FuncState], ) -> Tuple[FuncArgsVariants, Array]: """Helper function for setting up the model function arguments correctly.""" # Preprocess the batch and construct correctly the function arguments batch = self._batch_process_func(batch) # Correctly split rng if self._value_func_has_rng: rng, func_rng = jax.random.split(rng) else: func_rng = None # Make the function args func_args = make_func_args( params=params, func_state=func_state, rng=func_rng, batch=batch, has_state=self._value_func_has_state, has_rng=self._value_func_has_rng, ) return func_args, rng def _maybe_update_estimator_state( self, state: "Optimizer.State", should_update: Union[Array, bool], update_func: Callable[ ..., curvature_estimator.BlockDiagonalCurvature.State ], **update_func_kwargs, ) -> "Optimizer.State": """Updates the estimator state if it is the right iteration.""" # Copy this first since we mutate it later in this function. 
state = state.copy() state.estimator_state = lax.cond( should_update, functools.partial(update_func, **update_func_kwargs), lambda state_: state_, state.estimator_state, ) return state def _update_estimator_curvature( self, estimator_state: curvature_estimator.BlockDiagonalCurvature.State, func_args: FuncArgsVariants, rng: PRNGKey, ema_old: Numeric, ema_new: Numeric, sync: Union[Array, bool] = True ) -> curvature_estimator.BlockDiagonalCurvature.State: """Updates the curvature estimator state.""" state = self.estimator.update_curvature_matrix_estimate( state=estimator_state, ema_old=ema_old, ema_new=ema_new, # Note that the batch is always the last entry of FuncArgsVariants batch_size=self._batch_size_extractor(func_args[-1]), rng=rng, func_args=func_args, ) return jax.lax.cond( sync, functools.partial(self.estimator.sync, pmap_axis_name=self.pmap_axis_name), lambda state_: state_, state, ) def _maybe_update_estimator_curvature( self, state: "Optimizer.State", func_args: FuncArgsVariants, rng: PRNGKey, ema_old: Numeric, ema_new: Numeric, sync: Union[Array, bool] = True, ) -> "Optimizer.State": """Updates the curvature estimates if it is the right iteration.""" return self._maybe_update_estimator_state( state, self.should_update_estimate_curvature(state), self._update_estimator_curvature, func_args=func_args, rng=rng, ema_old=ema_old, ema_new=ema_new, sync=sync, ) @utils.auto_scope_method def _compute_loss_and_grads( self, func_args: FuncArgsVariants, ) -> Tuple[Array, Params, FuncState, FuncAux]: """Computes the model loss value and its gradients.""" out, grads = self._value_and_grad_func(*func_args) loss, func_state, aux = extract_func_outputs( out, self._value_func_has_aux, self._value_func_has_state) return loss, grads, func_state, aux def _maybe_update_inverse_cache( self, state: "Optimizer.State", damping: Array, ) -> "Optimizer.State": """Updates the estimator state cache if it is the right iteration.""" return self._maybe_update_estimator_state( state, self.should_update_inverse_cache(state), self.estimator.update_cache, identity_weight=self.l2_reg + damping, exact_powers=self._exact_powers_to_cache, approx_powers=self._approx_powers_to_cache, eigenvalues=False, pmap_axis_name=self.pmap_axis_name, ) # TODO(jamesmartens, botev): It's ugly that this method implements the norm # constraint on top of computing the preconditioned gradient. Should refactor.
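  # A small worked example of the norm-constraint rescaling implemented by
  # _compute_preconditioned_gradient below (illustrative numbers only, not part
  # of the original module). Suppose norm_constraint = 1e-3, the approximate
  # squared Fisher norm of the update is <precon_grad, grad> = 4.0, and the
  # learning-rate coefficient is 0.1. Then sq_norm_scaled_grads =
  # 4.0 * 0.1**2 = 0.04, the rescaling factor is min(sqrt(1e-3 / 0.04), 1)
  # ~= 0.158, and after rescaling the scaled squared norm is
  # 0.04 * 0.158**2 ~= 1e-3, i.e. the update is pulled back onto the
  # constraint boundary.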
@utils.staged def _compute_preconditioned_gradient( self, state: "Optimizer.State", grads: Params, coefficient: Optional[Array], damping: Array, ) -> Tuple[Params, Optional[Array]]: """Computes the preconditioned gradient, maybe applying norm-constraint.""" preconditioned_grads = self.estimator.multiply_inverse( state=state.estimator_state, parameter_structured_vector=grads, identity_weight=self.l2_reg + damping, exact_power=self._use_exact_inverses, use_cached=self._use_cached_inverses, pmap_axis_name=self.pmap_axis_name, ) if self._norm_constraint is not None: assert not self._use_adaptive_learning_rate assert coefficient is not None sq_norm_grads = utils.inner_product(preconditioned_grads, grads) sq_norm_scaled_grads = sq_norm_grads * coefficient ** 2 max_coefficient = jnp.sqrt(self._norm_constraint / sq_norm_scaled_grads) coefficient = jnp.minimum(max_coefficient, 1) preconditioned_grads = utils.scalar_mul(preconditioned_grads, coefficient) else: sq_norm_scaled_grads = None return preconditioned_grads, sq_norm_scaled_grads def _compute_quad_change_for_damping( self, state: "Optimizer.State", delta: Params, grads: Params, damping: Array, func_args: FuncArgsVariants, ) -> Array: """The quadratic model change, when lr and momentum are non-adaptive.""" assert not (self._use_adaptive_learning_rate or self._use_adaptive_momentum) if self._always_use_exact_qmodel_for_damping_adjustment: quad_model = self.compute_exact_quad_model( [delta], grads, func_args) else: quad_model = self.compute_approx_quad_model(state, [delta], grads) w = jnp.ones([]) return self._solve_quad_model(quad_model, damping, [delta], [w])[1] def _coefficients_and_quad_change( self, state: "Optimizer.State", vectors: Sequence[Params], grads: Params, learning_rate: Optional[Array], momentum: Optional[Array], damping: Array, func_args: Optional[FuncArgsVariants] = None, ) -> Tuple[Tuple[Optional[Array], Optional[Array]], Array]: """The correct update coefficients and corresponding quadratic change.""" # Compute the coefficients of the update vectors # The learning rate is defined as the negative of the coefficient by which # we multiply the gradients, while the momentum is the coefficient by # which we multiply the velocities. 
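    # Concretely, the update later formed from these coefficients (see `_step`)
    # is delta = -learning_rate * preconditioned_gradient + momentum * velocities,
    # where the two scalars are either taken from the arguments/schedules or
    # solved for from the quadratic model below.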
neg_learning_rate = -learning_rate if learning_rate is not None else None coefficients = (neg_learning_rate, momentum) if self._use_adaptive_learning_rate or self._use_adaptive_momentum: quad_model = self.compute_exact_quad_model(vectors, grads, func_args) return self._solve_quad_model(quad_model, damping, vectors, coefficients) else: assert all(c is not None for c in coefficients) if self._use_adaptive_damping: delta = self.weighted_sum_of_objects(vectors, coefficients) quad_change = lax.cond( self.should_update_damping(state), lambda args: self._compute_quad_change_for_damping(*args), lambda args: jnp.nan, (state, delta, grads, damping, func_args), ) else: quad_change = jnp.nan return coefficients, quad_change @utils.auto_scope_method def _update_damping( self, old_damping: Array, old_loss: Array, quad_change: Array, new_func_args: FuncArgsVariants, ) -> Tuple[Array, Array, Array]: """Updates the damping parameter.""" new_loss = self.compute_loss_value(new_func_args) # Sync new_loss = utils.pmean_if_pmap(new_loss, self.pmap_axis_name) damping, rho = self._compute_new_damping_and_rho( old_loss, new_loss, quad_change, old_damping) return damping, rho, new_loss @utils.staged def _init( self, params: Params, rng: PRNGKey, batch: Batch, func_state: Optional[FuncState] = None, ) -> "Optimizer.State": """A staged function to initialize the optimizer state .""" return Optimizer.State( velocities=jax.tree_util.tree_map(jnp.zeros_like, params), estimator_state=self.estimator.init( rng=rng, func_args=make_func_args( params=params, func_state=func_state, rng=rng, batch=self._batch_process_func(batch), has_state=self._value_func_has_state, has_rng=self._value_func_has_rng, ), exact_powers_to_cache=self._exact_powers_to_cache, approx_powers_to_cache=self._approx_powers_to_cache, cache_eigenvalues=False ), damping=(jnp.array(self._initial_damping, dtype=float) if self._use_adaptive_damping else None), data_seen=jnp.array(0, dtype=int), step_counter=jnp.array(0, dtype=int) ) def init( self, params: Params, rng: PRNGKey, batch: Batch, func_state: Optional[FuncState] = None, ) -> "Optimizer.State": """Initializes the optimizer and returns the appropriate optimizer state.""" if not self.finalized: self.finalize(params, rng, batch, func_state) return self._init(params, rng, batch, func_state) @functools.partial(utils.staged, donate_argnums=[1, 3, 5]) def _burnin( self, params: Params, state: "Optimizer.State", rng: Array, batch: Batch, func_state: Optional[FuncState], accumulator: utils.MultiChunkAccumulator ) -> Tuple["Optimizer.State", utils.MultiChunkAccumulator]: """A single burnin step, updating only the curvature estimate.""" # Copy this first since we mutate it later in this function. 
accumulator = accumulator.copy() func_args, rng = self._setup_func_args_and_rng( params, rng, batch, func_state) # Update curvature estimate state.estimator_state = self._update_estimator_curvature( state.estimator_state, func_args, rng, 1.0, 1.0) # Optionally update func_state if func_state is not None: out, _ = self._value_and_grad_func(*func_args) _, func_state, _ = extract_func_outputs( out, self._value_func_has_aux, self._value_func_has_state) accumulator.add(func_state) return state, accumulator def burnin( self, num_steps: int, params: Params, state: "Optimizer.State", rng: PRNGKey, data_iterator: Iterator[Batch], func_state: Optional[FuncState] = None, ) -> Tuple["Optimizer.State", Optional[FuncState]]: """Runs all burnin steps required.""" if num_steps > 0: rng = self._rng_split(rng, num_steps) accumulator = utils.MultiChunkAccumulator.zeros_like( func_state, self.multi_device) for rng_i in rng: batch = next(data_iterator) state, accumulator = self._burnin( params, state, rng_i, batch, func_state, accumulator) func_state = accumulator.value_and_clear() return state, func_state @functools.partial(utils.staged, donate_argnums=(0, 1, 4)) @utils.auto_scope_method def _step( self, params: Params, state: "Optimizer.State", rng: Array, batch: Batch, func_state: Optional[FuncState], learning_rate: Optional[Array], momentum: Optional[Array], damping: Optional[Array] )-> ReturnEither: """A single full step of the optimizer.""" # Copy this first since we mutate it later in this function. state = state.copy() # Setup arguments learning_rate, momentum, damping = self._setup_state_and_schedules( learning_rate, momentum, state.damping if self._use_adaptive_damping else damping, state.step_counter, state.data_seen) func_args, rng = self._setup_func_args_and_rng( params, rng, batch, func_state) # Update curvature estimate state = self._maybe_update_estimator_curvature( state, func_args, rng, self._curvature_ema, 1.0, sync=self.should_update_inverse_cache( state ), # sync curvature estimates only before inverses are updated. ) del rng # should not be used after this point! 
# Compute loss and gradients loss, grads, func_state, aux = self._compute_loss_and_grads(func_args) # Sync loss, grads = utils.pmean_if_pmap((loss, grads), self.pmap_axis_name) # Update the inverse curvature state = self._maybe_update_inverse_cache(state, damping) # Compute proposed directions preconditioned_gradient, sq_norm_scaled_grads = ( self._compute_preconditioned_gradient(state, grads, learning_rate, damping) ) vectors = (preconditioned_gradient, state.velocities) # Compute the coefficients for the vectors coefficients, quad_model_change = self._coefficients_and_quad_change( state=state, vectors=vectors, grads=grads, learning_rate=learning_rate, momentum=momentum, damping=damping, func_args=func_args) # Compute delta and update velocities delta = self.weighted_sum_of_objects(vectors, coefficients) state.velocities = delta # Update parameters params = jax.tree_util.tree_map(jnp.add, params, delta) # Optionally compute the reduction ratio and update the damping if self._use_adaptive_damping: state.damping, rho, new_loss = lax.cond( self.should_update_damping(state), lambda args: self._update_damping(*args), lambda args: (args[0], jnp.nan, jnp.nan), operand=(state.damping, loss, quad_model_change, (params,) + func_args[1:]) ) else: new_loss, rho = jnp.nan, jnp.nan # Compute per-device and total batch size batch_size = self._batch_size_extractor(func_args[-1]) if self.multi_device: total_batch_size = batch_size * jax.device_count() else: total_batch_size = batch_size # Update data seen and step counter state.data_seen = state.data_seen + total_batch_size state.step_counter = state.step_counter + 1 # Statistics with useful information # Unlike other norm stats, sq_norm_scaled_grads has to be computed if # norm_constraint is not None, so log it by default even if the other # norm stats are not logged. This reduces the overall computational cost if # no other grad stats are desired. stats = dict( step=state.step_counter, batch_size=jnp.asarray(total_batch_size, dtype=jnp.int32), data_seen=state.data_seen, loss=loss, new_loss=new_loss, learning_rate=-coefficients[0], momentum=coefficients[1], damping=damping, rho=rho, quad_model_change=quad_model_change, scaled_grad_norm_sq=sq_norm_scaled_grads, ) if self._value_func_has_aux: stats["aux"] = utils.pmean_if_pmap(aux, self.pmap_axis_name) if self._include_norms_in_stats: stats["param_norm"] = utils.norm(params) stats["grad_norm"] = utils.norm(grads) stats["precon_grad_norm"] = utils.norm(preconditioned_gradient) stats["update_norm"] = utils.norm(delta) if self._include_per_param_norms_in_stats: stats.update(utils.per_parameter_norm(params, "param_norm")) stats.update(utils.per_parameter_norm(grads, "grad_norm")) stats.update( utils.per_parameter_norm(preconditioned_gradient, "precon_grad_norm") ) stats.update(utils.per_parameter_norm(delta, "update_norm")) if self._value_func_has_state: return params, state, func_state, stats else: assert func_state is None return params, state, stats def step( self, params: Params, state: "Optimizer.State", rng: PRNGKey, data_iterator: Optional[Iterator[Batch]] = None, batch: Optional[Batch] = None, func_state: Optional[FuncState] = None, learning_rate: Optional[Array] = None, momentum: Optional[Array] = None, damping: Optional[Array] = None, global_step_int: Optional[int] = None )-> ReturnEither: """Performs a single update step using the optimizer. NOTE: please do not jit/pmap or otherwise compile this function with JAX, as this can lead to errors. Compilation is handled internally by the optimizer. 
Args: params: The current parameters of the model. state: The current state of the optimizer. rng: A Jax PRNG key. Should be different for each iteration and each Jax process/host. data_iterator: A data iterator to use (if not passing ``batch``). batch: A single batch used to compute the update. Should only pass one of ``data_iterator`` or ``batch``. func_state: Any function state that gets passed in and returned. learning_rate: Learning rate to use if the optimizer was created with ``use_adaptive_learning_rate=True``, ``None`` otherwise. momentum: Momentum to use if the optimizer was created with ``use_adaptive_momentum=True``, ``None`` otherwise. damping: Damping to use if the optimizer was created with ``use_adaptive_damping=True``, ``None`` otherwise. See discussion of constructor argument ``initial_damping`` for more information about damping. global_step_int: The global step as a python int. Note that this must match the step internal to the optimizer that is part of its state. Returns: (params, state, stats) if ``value_func_has_state=False`` and (params, state, func_state, stats) otherwise, where * params is the updated model parameters. * state is the updated optimizer state. * func_state is the updated function state. * stats is a dictionary of useful statistics including the loss. """ if (data_iterator is None) == (batch is None): raise ValueError("Exactly one of the arguments ``data_iterator`` and " "``batch`` must be provided.") step_counter_int = self.verify_args_and_get_step_counter( step_counter=state.step_counter, learning_rate=learning_rate, momentum=momentum, damping=damping, global_step_int=global_step_int, ) if step_counter_int == 0: if data_iterator is not None: rng, burnin_rng = self._rng_split(rng, 2) state, func_state = self.burnin( num_steps=self.num_burnin_steps, params=params, state=state, rng=burnin_rng, data_iterator=data_iterator, func_state=func_state, ) if data_iterator is not None: batch = next(data_iterator) return self._step(params, state, rng, batch, func_state, learning_rate, momentum, damping) def compute_l2_quad_matrix( self, vectors: Sequence[Params] ) -> Array: """Computes the matrix corresponding to the prior/regularizer. Args: vectors: A sequence of parameter-like PyTree structures, each one representing a different vector. Returns: A matrix with i,j entry equal to ``self.l2_reg * v_i^T v_j``. 
""" return self.l2_reg * utils.matrix_of_inner_products(vectors) @utils.auto_scope_method def compute_exact_quad_model( self, vectors: Sequence[Params], grads: Params, func_args: Optional[FuncArgsVariants] = None, ) -> Tuple[Array, Array, Array]: """Computes the components of the exact quadratic model.""" if func_args is None: raise ValueError("When you have not provided `c_factor_v` you must " "provide `func_args`.") if self.estimator.default_mat_type == "fisher": c_factor_v = tuple(self._implicit.multiply_fisher_factor_transpose (func_args, vi) for vi in vectors) elif self.estimator.default_mat_type == "ggn": c_factor_v = tuple(self._implicit.multiply_ggn_factor_transpose (func_args, vi) for vi in vectors) else: raise ValueError(f"Unrecognized estimator.mat_type=" f"{self.estimator.default_mat_type}.") return (utils.matrix_of_inner_products(c_factor_v), utils.matrix_of_inner_products(vectors), utils.vector_of_inner_products(grads, vectors)) @functools.partial(utils.staged, donate_argnums=2) @utils.auto_scope_method def compute_approx_quad_model( self, state: "Optimizer.State", vectors: Sequence[Params], grads: Params, ) -> Tuple[Array, Array, Array]: """Computes the components of the approximate quadratic model.""" # v_i^T C v_j def c_times_v(v): return self.estimator.multiply( state=state.estimator_state, parameter_structured_vector=v, identity_weight=0.0, exact_power=True, use_cached=False, pmap_axis_name=self.pmap_axis_name, ) c_vectors = [c_times_v(v_i) for v_i in vectors] return (utils.symmetric_matrix_inner_products(c_vectors, vectors), utils.matrix_of_inner_products(vectors), utils.vector_of_inner_products(grads, vectors)) @utils.staged def compute_quadratic_model_value( self, a: Array, a_damped: Array, b: Array, w: Array, ) -> Array: """Computes the quadratic model value from the inputs provided.""" a_final = a_damped if self._include_damping_in_quad_change else a return jnp.dot(w, jnp.dot(a_final, w)) / 2 + jnp.dot(w, b) @utils.staged def _solve_quad_model( self, quad_model_parameters: Tuple[Array, Array, Array], damping: Array, vectors: Sequence[Params], fixed_coefficients: Optional[Sequence[Union[Numeric, None]]] = None, ) -> Tuple[Tuple[Array, ...], Array]: """Solves for the optimal learning rate and momentum of the quadratic model. The quadratic model is represented as: Q(w) = w^T V^T (C + damping * I) V w / 2.0 + w^T V^T g where (n - number of vectors, d - dimensions of each vector): w (n,) - the vector of free weights (learning rate and momentum) V (d, n) - the matrix of proposed vectors for each weight C (d, d) - the true curvature matrix (GGN/Fisher/Hessian) g (d,) - the true gradient damping - the damping value at the current iteration In the implementation we have: A = V^T C V D = V^T V b = V^T g Args: quad_model_parameters: The computed matrices A, D and vector b. damping: The damping to use for evaluating the quadratic model. vectors: The parameter-like vectors for which to evaluate. fixed_coefficients: A list of values and None indicating which weights are fixed, and the quadratic is solved only for those that aren't. Returns: A list of coefficients which are the solution (and include any values that are not None from fixed_weights) and the value of the quadratic model function for this solution (as a scalar). Raises: The function currently supports only up to two vectors, hence if you provide more, it will raise a ``NotImplementedError``. """ # TODO(jamesmartens,botev): it would be better if this method didn't need # to have 'vectors' passed. 
We could instead use the 'D' matrix to get the # to get the matrix for the l2 regularization. if fixed_coefficients is None: fixed_coefficients = (None,) * len(vectors) if len(vectors) != len(fixed_coefficients): raise ValueError("The length of `vectors` must be equal to the length of " "`fixed_coefficients`.") # pylint: disable=invalid-name A_no_diag, D, b = quad_model_parameters A = A_no_diag + self.compute_l2_quad_matrix(vectors) A_damped = A + damping * D # Sync. # TODO(jamesmartens, botev): we should perform this earlier since it's # dangerous to have the convention of doing it right before use (especially # since the convention everywhere else is to sync quantities immediately # after they are first computed). A, A_damped, b = utils.pmean_if_pmap((A, A_damped, b), self.pmap_axis_name) # This needs explicit annotation A_damped: Array if all(c is None for c in fixed_coefficients): # Adapt all coefficients if len(fixed_coefficients) == 1: # This special case arises at the first iteration, because all # velocities are zeros. special_case = jnp.logical_and(A_damped[0, 0] == 0, b[0] == 0) w = - lax.cond(special_case, lambda: b, lambda: b / A_damped[0]) elif len(fixed_coefficients) == 2: # This special case arises at the first iteration, because all # velocities are zeros. to_check = jnp.asarray([A_damped[0, 1], A_damped[1, 0], A_damped[1, 1], b[1]]) w = - lax.cond(jnp.all(to_check == 0), lambda: jnp.stack([b[0] / A_damped[0, 0], b[1]]), lambda: jnp.linalg.solve(A_damped, b)) else: raise NotImplementedError() elif all(c is not None for c in fixed_coefficients): # No coefficients adapted w = jnp.asarray(fixed_coefficients) elif len(vectors) == 2: # Exactly one adapted coefficient w = [None, None] index = fixed_coefficients.index(None) w[1 - index] = jnp.asarray([fixed_coefficients[1 - index]]) b_extra = A_damped[1 - index, index: index + 1] * w[1 - index] A_solve = A_damped[index: index + 1, index: index + 1] b_solve = b[index: index + 1] + b_extra # pylint: enable=invalid-name w[index] = - b_solve / A_solve[0] w = jnp.concatenate(w, axis=0) else: raise NotImplementedError() quadratic_value = self.compute_quadratic_model_value(A, A_damped, b, w) return tuple(w), quadratic_value @utils.staged def _compute_new_damping_and_rho( self, old_loss: Array, new_loss: Array, quad_change: Array, current_damping: Array, ) -> Tuple[Array, Array]: """Computes the reduction ratio and the updated value of the damping.""" # Reduction ratio rho = (new_loss - old_loss) / quad_change rho_not_nan = jnp.nan_to_num(rho, nan=-100.0) # Update damping should_increase = rho_not_nan < self._damping_lower_threshold increased_damping = current_damping / self.damping_decay_factor should_decrease = rho_not_nan > self._damping_upper_threshold decreased_damping = current_damping * self.damping_decay_factor # This is basically an if-else statement damping = (should_decrease * decreased_damping + should_increase * increased_damping + (1 - should_increase - should_decrease) * current_damping) return jnp.clip(damping, self._min_damping, self._max_damping), rho @utils.staged def weighted_sum_of_objects( self, objects: Sequence[utils.PyTree], coefficients: Sequence[Numeric], ) -> utils.PyTree: """Returns the weighted sum of the objects in the sequence.""" return utils.weighted_sum_of_objects(objects, coefficients) def convert_value_and_grad_to_value_func( value_and_grad_func: ValueAndGradFunc, has_aux: bool = False, ) -> ValueFunc: """Converts a value_and_grad function to value_func only. 
Args: value_and_grad_func: The function which computes the loss value and the gradients w.r.t. parameters. has_aux: Similar to the meaning in :func:`jax.grad`, whether the ``value_and_grad_func`` returns any auxiliary data along with the loss value. Returns: A function that returns only the loss value. """ def value_func(*args, **kwargs) -> Array: out, _ = value_and_grad_func(*args, **kwargs) return out[0] if has_aux else out return value_func def make_func_args( params: Params, func_state: Optional[FuncState], rng: Optional[PRNGKey], batch: Batch, has_state: bool, has_rng: bool, ) -> FuncArgsVariants: """Constructs the arguments to the model function in the pre-assumed order. The model function is assumed to take arguments in the following order: params, func_state, rng, batch If it has no function state or does not use an rng, those two arguments are discarded. Args: params: The model parameters. func_state: The function state, if ``has_state`` is ``True``, ``None`` otherwise. rng: The PRNG, if ``has_rng`` is ``True``, ``None`` otherwise. batch: The batch of data. has_state: Whether the function has a function state. has_rng: Whether the function uses an rng. Returns: The arguments that need to be passed to the model function. """ if has_state and func_state is None: raise ValueError("`func_state=None`, but argument `has_state=True`.") if has_rng and rng is None: raise ValueError("`rng=None`, but argument `has_rng=True`.") if not has_state and not has_rng: return params, batch elif not has_rng: return params, func_state, batch elif not has_state: return params, rng, batch else: return params, func_state, rng, batch def extract_func_outputs( raw_outputs: FuncOutputs, has_aux: bool, has_state: bool, ) -> Tuple[Array, Optional[FuncState], Optional[FuncAux]]: """Converts the raw output of the model function into loss, func_state and aux. Args: raw_outputs: The direct output of the model function. has_aux: Whether the model function also returns some auxiliary data. has_state: Whether the model function has a function state. Returns: A triple ``(loss, func_state, aux)``. If the model function does not return any auxiliary data then ``aux`` will be ``None``, and if it does not have a state, ``func_state`` will be ``None``. """ if not has_aux and not has_state: return raw_outputs, None, None loss, other = raw_outputs if has_aux and has_state: func_state, aux = other elif has_aux: func_state, aux = None, other else: func_state, aux = other, None return loss, func_state, aux
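# The following is a minimal, illustrative sketch of how the Optimizer above
# might be driven in a training loop; it is not part of the original module.
# `model_loss`, `params`, `data_iterator` and `num_steps` are placeholder
# names, and it assumes that `model_loss(params, batch)` returns a scalar loss
# whose constituent losses have been registered with the library's loss
# registration functions, so that the automatic layer tagging can find them.
def _example_training_loop_sketch(model_loss, params, data_iterator, rng,
                                  num_steps):
  """Sketch of a typical init/step loop with fully adaptive hyper-parameters."""
  optimizer = Optimizer(
      value_and_grad_func=jax.value_and_grad(model_loss),
      l2_reg=1e-5,
      # With all three adaptive options enabled, no learning_rate, momentum or
      # damping argument is passed to `step` (see
      # `verify_args_and_get_step_counter`); only `initial_damping` is given.
      use_adaptive_learning_rate=True,
      use_adaptive_momentum=True,
      use_adaptive_damping=True,
      initial_damping=1.0,
      multi_device=False,
  )
  rng, init_rng = jax.random.split(rng)
  state = optimizer.init(params, init_rng, next(data_iterator))
  stats = None
  for i in range(num_steps):
    rng, step_rng = jax.random.split(rng)
    params, state, stats = optimizer.step(
        params, state, step_rng, batch=next(data_iterator), global_step_int=i)
  return params, state, stats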
kfac-jax-main
kfac_jax/_src/optimizer.py
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """K-FAC curvature approximation to single layer blocks.""" import abc import collections import functools import string from typing import Optional, Sequence, Any, Set, Tuple, Union, Dict, Mapping import jax import jax.numpy as jnp from kfac_jax._src import layers_and_loss_tags as tags from kfac_jax._src import patches_second_moment as psm from kfac_jax._src import tag_graph_matcher as tgm from kfac_jax._src import utils import numpy as np # Types for annotation Array = utils.Array Scalar = utils.Scalar Numeric = utils.Numeric PRNGKey = utils.PRNGKey Shape = utils.Shape DType = utils.DType ScalarOrSequence = Union[Scalar, Sequence[Scalar]] Cache = Dict[str, Union[Array, Dict[str, Array]]] # Special global variables # This is used for einsum strings _ALPHABET = string.ascii_lowercase # The default value that would be used for the argument # ``max_elements_for_vmap``, when it is set to ``None`` in the # ``Conv2DDiagonal`` and ``Conv2DFull` curvature blocks. _MAX_PARALLEL_ELEMENTS: int = 2 ** 23 # The default value that would be used for the argument # ``eigen_decomposition_threshold``, when it is set to ``None`` in any of the # curvature blocks that inherit from ``Full`. _DEFAULT_EIGEN_DECOMPOSITION_THRESHOLD = 5 def set_max_parallel_elements(value: int): """Sets the default value of maximum parallel elements in the module. This value is used to determine the parallel-to-memory tradeoff in the curvature estimation procedure of :class:`~Conv2DDiagonal` and :class:`~Conv2DFull`. See their corresponding docs for further details. Args: value: The default value for maximum number of parallel elements. """ global _MAX_PARALLEL_ELEMENTS _MAX_PARALLEL_ELEMENTS = value def get_max_parallel_elements() -> int: """Returns the default value of maximum parallel elements in the module. This value is used to determine the parallel-to-memory tradeoff in the curvature estimation procedure of :class:`~Conv2DDiagonal` and :class:`~Conv2DFull`. See their corresponding docs for further details. Returns: The default value for maximum number of parallel elements. """ return _MAX_PARALLEL_ELEMENTS def set_default_eigen_decomposition_threshold(value: int): """Sets the default value of the eigen decomposition threshold. This value is used in :class:`~Full` to determine when updating the cache, at what number of different powers to switch the implementation from a simple matrix power to an eigenvector decomposition. Args: value: The default value for eigen decomposition threshold. """ global _DEFAULT_EIGEN_DECOMPOSITION_THRESHOLD _DEFAULT_EIGEN_DECOMPOSITION_THRESHOLD = value def get_default_eigen_decomposition_threshold() -> int: """Returns the default value of the eigen decomposition threshold. This value is used in :class:`~Full` to determine when updating the cache, at what number of different powers to switch the implementation from a simple matrix power to an eigenvector decomposition. 
Returns: The default value of the eigen decomposition threshold. """ return _DEFAULT_EIGEN_DECOMPOSITION_THRESHOLD def _to_real_set( number_or_sequence: Optional[ScalarOrSequence] ) -> Set[Scalar]: """Converts the optional number or sequence to a set.""" if number_or_sequence is None: return set() elif isinstance(number_or_sequence, set): return number_or_sequence elif isinstance(number_or_sequence, (float, int)): return {number_or_sequence} # pytype: disable=bad-return-type elif (isinstance(number_or_sequence, collections.abc.Sequence) and all(isinstance(x, (int, float)) for x in number_or_sequence)): return set(number_or_sequence) else: raise ValueError(f"Expecting a real-number or a sequence of reals, but got " f"{type(number_or_sequence)}.") class CurvatureBlock(utils.Finalizable): """Abstract class for curvature approximation blocks. A CurvatureBlock defines a curvature matrix to be estimated, and gives methods to multiply powers of this with a vector. Powers can be computed exactly or with a class-determined approximation. Cached versions of the powers can be pre-computed to make repeated multiplications cheaper. During initialization, you would have to explicitly specify all powers that you will need to cache. """ @utils.register_state_class class State(utils.State): """Persistent state of the block. Any subclasses of :class:`~CurvatureBlock` should also internally extend this class, with any attributes needed for the curvature estimation. Attributes: cache: A dictionary, containing any state data that is updated on irregular intervals, such as inverses, eigenvalues, etc. Elements of this are updated via calls to :func:`~CurvatureBlock.update_cache`, and do not necessarily correspond to the most up-to-date curvature estimate. """ cache: Optional[Dict[str, Union[Array, Dict[str, Array]]]] def __init__(self, layer_tag_eq: tags.LayerTagEqn, name: str): """Initializes the block. Args: layer_tag_eq: The Jax equation corresponding to the layer tag that this block will approximate the curvature to. name: The name of this block. 
""" super().__init__() self._layer_tag_eq = layer_tag_eq self._name = name self.finalize() @property def layer_tag_primitive(self) -> tags.LayerTag: """The :class:`jax.core.Primitive` corresponding to the block's tag equation.""" primitive = self._layer_tag_eq.primitive assert isinstance(primitive, tgm.tags.LayerTag) return primitive @property def parameter_variables(self) -> Tuple[jax.core.Var, ...]: """The parameter variables of the underlying Jax equation.""" param_vars = [] for p in self.layer_tag_primitive.split_all_inputs( self._layer_tag_eq.invars)[2]: assert isinstance(p, jax.core.Var) param_vars.append(p) return tuple(param_vars) @property def outputs_shapes(self) -> Tuple[Shape, ...]: """The shapes of the output variables of the block's tag equation.""" output_vars = self.layer_tag_primitive.split_all_inputs( self._layer_tag_eq.invars)[0] return jax.tree_util.tree_map(lambda x: x.aval.shape, output_vars) @property def inputs_shapes(self) -> Tuple[Shape, ...]: """The shapes of the input variables of the block's tag equation.""" input_vars = self.layer_tag_primitive.split_all_inputs( self._layer_tag_eq.invars)[1] return jax.tree_util.tree_map(lambda x: x.aval.shape, input_vars) @property def parameters_shapes(self) -> Tuple[Shape, ...]: """The shapes of the parameter variables of the block's tag equation.""" return tuple(jax.tree_util.tree_map( lambda x: tuple(x.aval.shape), self.parameter_variables)) @property def dtype(self) -> DType: dtypes = set(p.aval.dtype for p in self.parameter_variables) # pytype: disable=attribute-error if len(dtypes) > 1: raise ValueError("Not all parameters are the same dtype.") return dtypes.pop() @property def parameters_canonical_order(self) -> Tuple[int, ...]: """The canonical order of the parameter variables.""" return tuple(np.argsort([p.count for p in self.parameter_variables])) @property def layer_tag_extra_params(self) -> Dict[str, Any]: """Any extra parameters of passed into the Jax primitive of this block.""" return self._layer_tag_eq.params @property def number_of_parameters(self) -> int: """Number of parameter variables of this block.""" return len(self.parameters_shapes) @property def dim(self) -> int: """The number of elements of all parameter variables together.""" return sum(utils.product(shape) for shape in self.parameters_shapes) def scale(self, state: "CurvatureBlock.State", use_cache: bool) -> Numeric: """A scalar pre-factor of the curvature approximation. Importantly, all methods assume that whenever a user requests cached values, any state dependant scale is taken into account by the cache (e.g. either stored explicitly and used or mathematically added to values). Args: state: The state for this block. use_cache: Whether the method requesting this is using cached values or not. Returns: A scalar value to be multiplied with any unscaled block representation. """ if use_cache: return self.fixed_scale() return self.fixed_scale() * self.state_dependent_scale(state) def fixed_scale(self) -> Numeric: """A fixed scalar pre-factor of the curvature (e.g. 
constant).""" return 1.0 def state_dependent_scale(self, state: "CurvatureBlock.State") -> Numeric: """A scalar pre-factor of the curvature, computed from the most fresh curvature estimate.""" del state # Unused return 1.0 def __str__(self): return f"{self._name!r}[{self.parameters_shapes!r}]" @utils.auto_scope_method def init( self, rng: PRNGKey, exact_powers_to_cache: Optional[ScalarOrSequence], approx_powers_to_cache: Optional[ScalarOrSequence], cache_eigenvalues: bool, ) -> "CurvatureBlock.State": """Initializes the state for this block. Args: rng: The PRNGKey which to be used for any randomness of the initialization exact_powers_to_cache: A single value, or multiple values in a list, which specify which exact matrix powers the block should be caching. Matrix powers, which are expected to be used in :func:`~CurvatureBlock.multiply_matpower`, :func:`~CurvatureBlock.multiply_inverse` or :func:`~CurvatureBlock.multiply` with ``exact_power=True`` and ``use_cached=True`` must be provided here. approx_powers_to_cache: A single value, or multiple values in a list, which specify approximate matrix powers the block should be caching. Matrix powers, which are expected to be used in :func:`~CurvatureBlock.multiply_matrix_power`, :func:`~CurvatureBlock.multiply_inverse` or :func:`~CurvatureBlock.multiply` with ``exact_power=False`` and ``use_cached=True`` must be provided here. cache_eigenvalues: Specifies whether the block should be caching the eigenvalues of its approximate curvature. Returns: A dictionary with the initialized state. """ return self._init( rng=rng, exact_powers_to_cache=_to_real_set(exact_powers_to_cache), approx_powers_to_cache=_to_real_set(approx_powers_to_cache), cache_eigenvalues=cache_eigenvalues) @abc.abstractmethod def _init( self, rng: PRNGKey, exact_powers_to_cache: Set[Scalar], approx_powers_to_cache: Set[Scalar], cache_eigenvalues: bool, ) -> "CurvatureBlock.State": """The non-public interface of ``init``.""" @abc.abstractmethod def sync( self, state: "CurvatureBlock.State", pmap_axis_name: str, ) -> "CurvatureBlock.State": """Syncs the state across different devices (does not sync the cache).""" @utils.auto_scope_method def multiply_matpower( self, state: "CurvatureBlock.State", vector: Sequence[Array], identity_weight: Numeric, power: Scalar, exact_power: bool, use_cached: bool, ) -> Tuple[Array, ...]: """Computes ``(BlockMatrix + identity_weight I)**power`` times ``vector``. Args: state: The state for this block. vector: A tuple of arrays that should have the same shapes as the block's parameters_shapes, which represent the vector you want to multiply. identity_weight: A scalar specifying the weight on the identity matrix that is added to the block matrix before raising it to a power. If ``use_cached=False`` it is guaranteed that this argument will be used in the computation. When returning cached values, this argument *may* be ignored in favor whatever value was last passed to :func:`~CurvatureBlock.update_cache`. The precise semantics of this depend on the concrete subclass and its particular behavior in regard to caching. power: The power to which to raise the matrix. exact_power: Specifies whether to compute the exact matrix power of ``BlockMatrix + identity_weight I``. When this argument is ``False`` the exact behaviour will depend on the concrete subclass and the result will *in general* be an approximation to ``(BlockMatrix + identity_weight I)^power``, although some subclasses may still compute the exact matrix power. 
use_cached: Whether to use a cached version for computing the product or to use the most recent curvature estimates. The cached version is going to be *at least* as fresh as the value provided to the last call to :func:`~CurvatureBlock.update_cache` with the same value of ``power`` Returns: A tuple of arrays, representing the result of the matrix-vector product. """ scale = self.scale(state, use_cached) result = self._multiply_matpower_unscaled( state=state, vector=vector, identity_weight=identity_weight / scale, power=power, exact_power=exact_power, use_cached=use_cached, ) return utils.scalar_mul(result, jnp.power(scale, power)) @abc.abstractmethod def _multiply_matpower_unscaled( self, state: "CurvatureBlock.State", vector: Sequence[Array], identity_weight: Numeric, power: Scalar, exact_power: bool, use_cached: bool, ) -> Tuple[Array, ...]: """Performs matrix-vector multiplication, ignoring ``self.scale``.""" def multiply( self, state: "CurvatureBlock.State", vector: Sequence[Array], identity_weight: Numeric, exact_power: bool, use_cached: bool, ) -> Tuple[Array, ...]: """Computes ``(BlockMatrix + identity_weight I)`` times ``vector``.""" return self.multiply_matpower( state=state, vector=vector, identity_weight=identity_weight, power=1, exact_power=exact_power, use_cached=use_cached, ) def multiply_inverse( self, state: "CurvatureBlock.State", vector: Sequence[Array], identity_weight: Numeric, exact_power: bool, use_cached: bool, ) -> Tuple[Array, ...]: """Computes ``(BlockMatrix + identity_weight I)^-1`` times ``vector``.""" return self.multiply_matpower( state=state, vector=vector, identity_weight=identity_weight, power=-1, exact_power=exact_power, use_cached=use_cached, ) @utils.auto_scope_method def eigenvalues( self, state: "CurvatureBlock.State", use_cached: bool, ) -> Array: """Computes the eigenvalues for this block approximation. Args: state: The state dict for this block. use_cached: Whether to use a cached versions of the eigenvalues or to use the most recent curvature estimates to compute them. The cached version are going to be *at least* as fresh as the last time you called :func:`~CurvatureBlock.update_cache` with ``eigenvalues=True``. Returns: An array containing the eigenvalues of the block. """ eigenvalues = self._eigenvalues_unscaled(state, use_cached) assert eigenvalues.size == self.dim return self.scale(state, use_cached) * eigenvalues @abc.abstractmethod def _eigenvalues_unscaled( self, state: "CurvatureBlock.State", use_cached: bool, ) -> Array: """Computes the eigenvalues for this block, ignoring `self.scale`.""" @abc.abstractmethod def update_curvature_matrix_estimate( self, state: "CurvatureBlock.State", estimation_data: Dict[str, Sequence[Array]], ema_old: Numeric, ema_new: Numeric, batch_size: Numeric, ) -> "CurvatureBlock.State": """Updates the block's curvature estimates using the ``info`` provided. Each block *in general* estimates a moving average of its associated curvature matrix. If you don't want a moving average you can set ``ema_old=0`` and ``ema_new=1``. Args: state: The state dict for this block to update. estimation_data: A map containing data used for updating the curvature matrix estimate for this block. This can be computed by calling the function returned from :func:`~layer_tags_vjp`. Please see its implementation for more details on the name of the fields and how they are constructed. ema_old: Specifies the weight of the old value when computing the updated estimate in the moving average. 
ema_new: Specifies the weight of the new value when computing the updated estimate in the moving average. batch_size: The batch size used in computing the values in ``info``. """ @utils.auto_scope_method def update_cache( self, state: "CurvatureBlock.State", identity_weight: Numeric, exact_powers: Optional[ScalarOrSequence], approx_powers: Optional[ScalarOrSequence], eigenvalues: bool, ) -> "CurvatureBlock.State": """Updates the cached estimates of the different powers specified. Args: state: The state dict for this block to update. identity_weight: The weight of the identity added to the block's curvature matrix before computing the cached matrix power. exact_powers: Specifies any cached exact matrix powers to be updated. approx_powers: Specifies any cached approximate matrix powers to be updated. eigenvalues: Specifies whether to update the cached eigenvalues of the block. If they have not been cached before, this will create an entry with them in the block's cache. Returns: The updated state. """ return self._update_cache( state=state, identity_weight=identity_weight / self.scale(state, False), exact_powers=_to_real_set(exact_powers), approx_powers=_to_real_set(approx_powers), eigenvalues=eigenvalues, ) @abc.abstractmethod def _update_cache( self, state: "CurvatureBlock.State", identity_weight: Numeric, exact_powers: Set[Scalar], approx_powers: Set[Scalar], eigenvalues: bool, ) -> "CurvatureBlock.State": """The cache updating function, ignoring ``self.scale``.""" @utils.auto_scope_method def to_dense_matrix(self, state: "CurvatureBlock.State") -> Array: """Returns a dense representation of the approximate curvature matrix.""" return self.scale(state, False) * self._to_dense_unscaled(state) @abc.abstractmethod def _to_dense_unscaled(self, state: "CurvatureBlock.State") -> Array: """A dense representation of the curvature, ignoring ``self.scale``.""" class ScaledIdentity(CurvatureBlock): """A block that assumes that the curvature is a scaled identity matrix.""" def __init__( self, layer_tag_eq: tags.LayerTagEqn, name: str, scale: Numeric = 1.0, ): """Initializes the block. Args: layer_tag_eq: The Jax equation corresponding to the layer tag, that this block will approximate the curvature to. name: The name of this block. scale: The scale of the identity matrix. 
""" self._scale = scale super().__init__(layer_tag_eq, name) def fixed_scale(self) -> Numeric: return self._scale def _init( self, rng: PRNGKey, exact_powers_to_cache: Set[Scalar], approx_powers_to_cache: Set[Scalar], cache_eigenvalues: bool, ) -> CurvatureBlock.State: del rng, exact_powers_to_cache, approx_powers_to_cache # Unused return CurvatureBlock.State( cache=None, ) def sync( self, state: CurvatureBlock.State, pmap_axis_name: str, ) -> CurvatureBlock.State: return state def _multiply_matpower_unscaled( self, state: CurvatureBlock.State, vector: Sequence[Array], identity_weight: Numeric, power: Scalar, exact_power: bool, use_cached: bool, ) -> Tuple[Array, ...]: del exact_power, use_cached # Unused identity_weight = identity_weight + 1.0 if power == 1: return jax.tree_util.tree_map(lambda x: identity_weight * x, vector) elif power == -1: return jax.tree_util.tree_map(lambda x: x / identity_weight, vector) else: identity_weight = jnp.power(identity_weight, power) return jax.tree_util.tree_map(lambda x: identity_weight * x, vector) def _eigenvalues_unscaled( self, state: "CurvatureBlock.State", use_cached: bool, ) -> Array: return jnp.ones([self.dim]) @utils.auto_scope_method def update_curvature_matrix_estimate( self, state: CurvatureBlock.State, estimation_data: Dict[str, Sequence[Array]], ema_old: Numeric, ema_new: Numeric, batch_size: Numeric, ) -> CurvatureBlock.State: return state.copy() def _update_cache( self, state: CurvatureBlock.State, identity_weight: Numeric, exact_powers: Set[Scalar], approx_powers: Set[Scalar], eigenvalues: bool, ) -> CurvatureBlock.State: return state.copy() def _to_dense_unscaled(self, state: CurvatureBlock.State) -> Array: del state # not used return jnp.eye(self.dim) class Diagonal(CurvatureBlock, abc.ABC): """An abstract class for approximating only the diagonal of curvature.""" @utils.register_state_class class State(CurvatureBlock.State): """Persistent state of the block. Attributes: diagonal_factors: A tuple of the moving averages of the estimated diagonals of the curvature for each parameter that is part of the associated layer. """ diagonal_factors: Tuple[utils.WeightedMovingAverage] def _init( self, rng: PRNGKey, exact_powers_to_cache: Set[Scalar], approx_powers_to_cache: Set[Scalar], cache_eigenvalues: bool, ) -> "Diagonal.State": del rng return Diagonal.State( cache=None, diagonal_factors=tuple( utils.WeightedMovingAverage.zeros_array(shape, self.dtype) for shape in self.parameters_shapes ), ) def sync( self, state: "Diagonal.State", pmap_axis_name: str, ) -> "Diagonal.State": # Copy this first since we mutate it later in this function. 
state = state.copy() for factor in state.diagonal_factors: factor.sync(pmap_axis_name) return state def _multiply_matpower_unscaled( self, state: "Diagonal.State", vector: Sequence[Array], identity_weight: Numeric, power: Scalar, exact_power: bool, use_cached: bool, ) -> Tuple[Array, ...]: factors = tuple(f.value + identity_weight for f in state.diagonal_factors) assert len(factors) == len(vector) if power == 1: return tuple(f * v for f, v in zip(factors, vector)) elif power == -1: return tuple(v / f for f, v in zip(factors, vector)) else: return tuple(jnp.power(f, power) * v for f, v in zip(factors, vector)) def _eigenvalues_unscaled( self, state: "Diagonal.State", use_cached: bool, ) -> Array: return jnp.concatenate([f.value.flatten() for f in state.diagonal_factors], axis=0) def _update_cache( self, state: "Diagonal.State", identity_weight: Numeric, exact_powers: Set[Scalar], approx_powers: Set[Scalar], eigenvalues: bool, ) -> "Diagonal.State": return state.copy() def _to_dense_unscaled(self, state: "Diagonal.State") -> Array: # Extract factors in canonical order factors = [state.diagonal_factors[i].value.flatten() for i in self.parameters_canonical_order] # Construct diagonal matrix return jnp.diag(jnp.concatenate(factors, axis=0)) class Full(CurvatureBlock, abc.ABC): """An abstract class for approximating the block matrix with a full matrix.""" @utils.register_state_class class State(CurvatureBlock.State): """Persistent state of the block. Attributes: matrix: A moving average of the estimated curvature matrix for all parameters that are part of the associated layer. """ matrix: utils.WeightedMovingAverage def __init__( self, layer_tag_eq: tags.LayerTagEqn, name: str, eigen_decomposition_threshold: Optional[int] = None, ): """Initializes the block. Args: layer_tag_eq: The Jax equation corresponding to the layer tag that this block will approximate the curvature to. name: The name of this block. eigen_decomposition_threshold: During calls to ``init`` and ``update_cache`` if higher number of matrix powers than this threshold are requested, instead of computing individual approximate powers, will directly compute the eigen-decomposition instead (which provide access to any matrix power). If this is ``None`` will use the value returned from :func:`~get_default_eigen_decomposition_threshold()`. 
""" if eigen_decomposition_threshold is None: threshold = get_default_eigen_decomposition_threshold() self._eigen_decomposition_threshold = threshold else: self._eigen_decomposition_threshold = eigen_decomposition_threshold super().__init__(layer_tag_eq, name) def parameters_list_to_single_vector( self, parameters_shaped_list: Sequence[Array], ) -> Array: """Converts values corresponding to parameters of the block to vector.""" if len(parameters_shaped_list) != self.number_of_parameters: raise ValueError(f"Expected a list of {self.number_of_parameters} values," f" but got {len(parameters_shaped_list)} instead.") for array, shape in zip(parameters_shaped_list, self.parameters_shapes): if array.shape != shape: raise ValueError(f"Expected a value of shape {shape}, but got " f"{array.shape} instead.") return jnp.concatenate([v.flatten() for v in parameters_shaped_list]) def single_vector_to_parameters_list( self, vector: Array, ) -> Tuple[Array, ...]: """Reverses the transformation ``self.parameters_list_to_single_vector``.""" if vector.ndim != 1: raise ValueError(f"Expecting a vector, got {vector.ndim}-tensor.") if vector.size != self.dim: raise ValueError(f"Expected a vector of size {self.dim}, but got " f"{vector.size} instead.") parameters_shaped_list = [] index = 0 for shape in self.parameters_shapes: size = utils.product(shape) parameters_shaped_list.append(vector[index: index + size].reshape(shape)) index += size assert index == self.dim return tuple(parameters_shaped_list) def _init( self, rng: PRNGKey, exact_powers_to_cache: Set[Scalar], approx_powers_to_cache: Set[Scalar], cache_eigenvalues: bool, ) -> "Full.State": del rng # This block does not have any notion of "approximate" powers exact_powers_to_cache = exact_powers_to_cache | approx_powers_to_cache cache = {} if len(exact_powers_to_cache) > self._eigen_decomposition_threshold: cache["eigenvalues"] = jnp.zeros([self.dim], self.dtype) cache["eigen_vectors"] = jnp.zeros([self.dim, self.dim], self.dtype) elif cache_eigenvalues: cache["eigenvalues"] = jnp.zeros([self.dim], self.dtype) if len(exact_powers_to_cache) <= self._eigen_decomposition_threshold: for power in exact_powers_to_cache: cache[str(power)] = jnp.zeros([self.dim, self.dim], self.dtype) return Full.State( cache=cache, matrix=utils.WeightedMovingAverage.zeros_array( [self.dim, self.dim], self.dtype), ) def sync( self, state: "Full.State", pmap_axis_name: str, ) -> "Full.State": # Copy this first since we mutate it later in this function. 
state = state.copy() state.matrix.sync(pmap_axis_name) return state def _multiply_matpower_unscaled( self, state: "Full.State", vector: Sequence[Array], identity_weight: Numeric, power: Scalar, exact_power: bool, use_cached: bool, ) -> Tuple[Array, ...]: vector = self.parameters_list_to_single_vector(vector) if power == 1: result = jnp.matmul(state.matrix.value, vector) + identity_weight * vector elif not use_cached: matrix = state.matrix.value + identity_weight * jnp.eye(self.dim) if power == -1: result = jnp.linalg.solve(matrix, vector) else: result = jnp.matmul(jnp.linalg.matrix_power(matrix, power), vector) else: if str(power) in state.cache: result = jnp.matmul(state.cache[str(power)], vector) else: s = state.cache["eigenvalues"] q = state.cache["eigen_vectors"] result = jnp.matmul(jnp.transpose(q), vector) result = jnp.power(s + identity_weight, power) * result result = jnp.matmul(q, result) return self.single_vector_to_parameters_list(result) def _eigenvalues_unscaled( self, state: "Full.State", use_cached: bool, ) -> Array: if not use_cached: return utils.safe_psd_eigh(state.matrix.value)[0] else: return state.cache["eigenvalues"] def _update_cache( self, state: "Full.State", identity_weight: Numeric, exact_powers: Set[Scalar], approx_powers: Set[Scalar], eigenvalues: bool, ) -> "Full.State": # Copy this first since we mutate it later in this function. state = state.copy() scale = self.state_dependent_scale(state) # This block does not have any notion of "approximate" powers exact_powers = exact_powers | approx_powers if len(exact_powers) > self._eigen_decomposition_threshold: s, q = utils.safe_psd_eigh(state.matrix.value) state.cache = dict(eigenvalues=scale * s, eigen_vectors=q) else: if eigenvalues: state.cache["eigenvalues"] = scale * utils.safe_psd_eigh( state.matrix.value)[0] for power in exact_powers: if power == -1: state.cache[str(power)] = utils.psd_inv_cholesky( state.matrix.value + identity_weight * jnp.eye(self.dim)) / scale else: matrix = state.matrix.value + identity_weight * jnp.eye(self.dim) state.cache[str(power)] = ( (scale ** power) * jnp.linalg.matrix_power(matrix, power)) return state def _to_dense_unscaled(self, state: "Full.State") -> Array: # Permute the matrix according to the parameters canonical order return utils.block_permuted( state.matrix.value, block_sizes=[utils.product(shape) for shape in self.parameters_shapes], block_order=self.parameters_canonical_order ) class KroneckerFactored(CurvatureBlock, abc.ABC): """An abstract class for approximating the block with a Kronecker product.""" @utils.register_state_class class State(CurvatureBlock.State): """Persistent state of the block. Attributes: factors: A tuple of the moving averages of the estimated factors of the curvature for each axis group. """ factors: Tuple[utils.WeightedMovingAverage, ...] 
@classmethod def from_dict(cls, dict_rep: Dict[str, Any]) -> "KroneckerFactored.State": class_name = dict_rep.pop("__class__", cls.__name__) assert class_name == cls.__name__ return cls( factors=tuple( utils.WeightedMovingAverage.from_dict(rep) for rep in dict_rep["factor"] ) ) def __init__( self, layer_tag_eq: tags.LayerTagEqn, name: str, axis_groups: Optional[Sequence[Sequence[int]]] = None, ): self._layer_tag_eq = layer_tag_eq if axis_groups is None: self.axis_groups = tuple((i,) for i in range(self.array_ndim)) else: self.axis_groups = tuple(tuple(g) for g in axis_groups) all_axis = sum(self.axis_groups, ()) # Make sure the axis groups are sorted if sorted(all_axis) != list(range(min(all_axis), max(all_axis) + 1)): # We currently don't support out of order axis groups raise NotImplementedError() super().__init__(layer_tag_eq, name) @abc.abstractmethod def parameters_shaped_list_to_array( self, parameters_shaped_list: Sequence[Array], ) -> Array: """Combines all parameters to a single non axis grouped array.""" @abc.abstractmethod def array_to_parameters_shaped_list(self, array: Array) -> Tuple[Array, ...]: """An inverse transformation of ``self.parameters_shaped_list_to_array``.""" @property def array_shape(self) -> Shape: """The shape of the single non axis grouped array.""" avals = [jnp.zeros(v.aval.shape) for v in self.parameter_variables] return self.parameters_shaped_list_to_array(avals).shape @property def array_ndim(self) -> int: """The number of dimensions of the single non axis grouped array.""" return len(self.array_shape) @property def grouped_array_shape(self) -> Shape: """The shape of the single axis grouped array.""" return tuple( utils.product([self.array_shape[i] for i in group]) for group in self.axis_groups ) @property def grouped_array_ndim(self) -> int: """The number of dimensions of the grouped array.""" return len(self.axis_groups) def parameter_shaped_list_to_grouped_array( self, parameters_shaped_list: Sequence[Array], ) -> Array: """Combines all parameters to a single grouped array.""" array = self.parameters_shaped_list_to_array(parameters_shaped_list) return jnp.reshape(array, self.grouped_array_shape) def grouped_array_to_parameters_shaped_list( self, grouped_array: Array, ) -> Tuple[Array, ...]: """An inverse transformation of ``self.parameter_shaped_list_to_grouped_array``.""" array = jnp.reshape(grouped_array, self.array_shape) return self.array_to_parameters_shaped_list(array) def _init( self, rng: PRNGKey, exact_powers_to_cache: Set[Scalar], approx_powers_to_cache: Set[Scalar], cache_eigenvalues: bool, ) -> "KroneckerFactored.State": cache = {} factors = [] for i, d in enumerate(self.grouped_array_shape): factors.append( utils.WeightedMovingAverage.zeros_array((d, d), self.dtype) ) if cache_eigenvalues or exact_powers_to_cache: cache[f"{i}_factor_eigenvalues"] = jnp.zeros((d,), dtype=self.dtype) if exact_powers_to_cache: cache[f"{i}_factor_eigen_vectors"] = jnp.zeros((d, d), dtype=self.dtype) for power in approx_powers_to_cache: if power != -1: raise NotImplementedError( f"Approximations for power {power} is not yet implemented." ) if str(power) not in cache: cache[str(power)] = {} cache[str(power)][f"{i}_factor"] = jnp.zeros((d, d), dtype=self.dtype) return KroneckerFactored.State( cache=cache, factors=tuple(factors), ) def sync( self, state: "KroneckerFactored.State", pmap_axis_name: str, ) -> "KroneckerFactored.State": # Copy this first since we mutate it later in this function. 
state = state.copy() for factor in state.factors: factor.sync(pmap_axis_name) return state def _multiply_matpower_unscaled( self, state: "KroneckerFactored.State", vector: Sequence[Array], identity_weight: Numeric, power: Scalar, exact_power: bool, use_cached: bool, ) -> Tuple[Array, ...]: assert len(state.factors) == len(self.axis_groups) vector = self.parameter_shaped_list_to_grouped_array(vector) if power == 1: factors = [f.value for f in state.factors] if exact_power: result = utils.kronecker_product_axis_mul_v(factors, vector) result = result + identity_weight * vector else: # If compute pi_adjusted_kronecker_factors used a more expensive matrix # norm in its computation, it might make sense to cache it. But we # currently don't do that. result = utils.kronecker_product_axis_mul_v( utils.pi_adjusted_kronecker_factors(*factors, damping=identity_weight), vector) elif exact_power: if use_cached: s = [ state.cache[f"{i}_factor_eigenvalues"] for i in range(len(state.factors)) ] q = [ state.cache[f"{i}_factor_eigen_vectors"] for i in range(len(state.factors)) ] else: s, q = zip( *[utils.safe_psd_eigh(factor.value) for factor in state.factors] ) eigenvalues = utils.outer_product(*s) + identity_weight eigenvalues = jnp.power(eigenvalues, power) result = utils.kronecker_eigen_basis_axis_mul_v(q, eigenvalues, vector) else: if power != -1: raise NotImplementedError( f"Approximations for power {power} is not yet implemented." ) if use_cached: factors = [ state.cache[str(power)][f"{i}_factor"] for i in range(len(state.factors)) ] else: factors = utils.pi_adjusted_kronecker_inverse( *[factor.value for factor in state.factors], damping=identity_weight, ) result = utils.kronecker_product_axis_mul_v(factors, vector) return self.grouped_array_to_parameters_shaped_list(result) def _eigenvalues_unscaled( self, state: "KroneckerFactored.State", use_cached: bool, ) -> Array: assert len(state.factors) == len(self.axis_groups) if use_cached: s = [ state.cache[f"{i}_factor_eigenvalues"] for i in range(len(state.factors)) ] else: s_q = [utils.safe_psd_eigh(factor.value) for factor in state.factors] s, _ = zip(*s_q) return utils.outer_product(*s) @utils.auto_scope_method def update_curvature_matrix_estimate( self, state: "KroneckerFactored.State", estimation_data: Mapping[str, Sequence[Array]], ema_old: Numeric, ema_new: Numeric, batch_size: Numeric, ) -> "KroneckerFactored.State": assert len(state.factors) == len(self.axis_groups) # This function call will return a copy of state: return self._update_curvature_matrix_estimate( state, estimation_data, ema_old, ema_new, batch_size ) def _update_cache( # pytype: disable=signature-mismatch # numpy-scalars self, state: "KroneckerFactored.State", identity_weight: Numeric, exact_powers: Numeric, approx_powers: Numeric, eigenvalues: bool, ) -> "KroneckerFactored.State": assert len(state.factors) == len(self.axis_groups) # Copy this first since we mutate it later in this function. state = state.copy() scale = self.state_dependent_scale(state) factor_scale = jnp.power(scale, 1.0 / len(self.axis_groups)) if eigenvalues or exact_powers: s_q = [utils.safe_psd_eigh(factor.value) for factor in state.factors] s, q = zip(*s_q) for i in range(len(state.factors)): state.cache[f"{i}_factor_eigenvalues"] = factor_scale * s[i] if exact_powers: state.cache[f"{i}_factor_eigen_vectors"] = q[i] for power in approx_powers: if power != -1: raise NotImplementedError( f"Approximations for power {power} is not yet implemented." 
        )

      cache = state.cache[str(power)]

      # This computes the approximate inverse factors using the generalization
      # of the pi-adjusted inversion from the original KFAC paper.
      inv_factors = utils.pi_adjusted_kronecker_inverse(
          *[factor.value for factor in state.factors],
          damping=identity_weight,
      )

      for i in range(len(state.factors)):
        cache[f"{i}_factor"] = inv_factors[i] / factor_scale

    return state


class TwoKroneckerFactored(KroneckerFactored):
  """A Kronecker factored block for layers with weights and an optional bias."""

  def __init__(
      self,
      layer_tag_eq: tags.LayerTagEqn,
      name: str,
  ):
    super().__init__(layer_tag_eq, name, ((0,), (1,)))

  @property
  def has_bias(self) -> bool:
    """Whether this layer's equation has a bias."""
    return len(self._layer_tag_eq.invars) == 4

  def parameters_shaped_list_to_array(
      self,
      parameters_shaped_list: Sequence[Array],
  ) -> Array:
    for p, s in zip(parameters_shaped_list, self.parameters_shapes):
      assert p.shape == s

    if self.has_bias:
      w, b = parameters_shaped_list
      return jnp.concatenate([w.reshape([-1, w.shape[-1]]), b[None]], axis=0)

    else:
      # This correctly reshapes the parameters of both dense and conv2d blocks
      [w] = parameters_shaped_list
      return w.reshape([-1, w.shape[-1]])

  def array_to_parameters_shaped_list(self, array: Array) -> Tuple[Array, ...]:
    if self.has_bias:
      w, b = array[:-1], array[-1]
      return w.reshape(self.parameters_shapes[0]), b

    else:
      return tuple([array.reshape(self.parameters_shapes[0])])

  def _to_dense_unscaled(self, state: "KroneckerFactored.State") -> Array:
    assert 0 < self.number_of_parameters <= 2
    inputs_factor = state.factors[0].value

    if self.has_bias and self.parameters_canonical_order[0] != 0:
      # Permute the matrix according to the parameters canonical order
      inputs_factor = utils.block_permuted(
          state.factors[0].value,
          block_sizes=[state.factors[0].raw_value.shape[0] - 1, 1],
          block_order=(1, 0),
      )

    return jnp.kron(inputs_factor, state.factors[1].value)


class NaiveDiagonal(Diagonal):
  """Approximates the diagonal of the curvature in the most obvious way.

  The update to the curvature estimate is computed by ``(sum_i g_i) ** 2 / N``,
  where ``g_i`` is the gradient of each individual data point, and ``N`` is the
  batch size.
  """

  @utils.auto_scope_method
  def update_curvature_matrix_estimate(
      self,
      state: "NaiveDiagonal.State",
      estimation_data: Dict[str, Sequence[Array]],
      ema_old: Numeric,
      ema_new: Numeric,
      batch_size: Numeric,
  ) -> "NaiveDiagonal.State":
    # Copy this first since we mutate it later in this function.
    state = state.copy()

    for factor, dw in zip(state.diagonal_factors,
                          estimation_data["params_tangent"]):
      factor.update(dw * dw / batch_size, ema_old, ema_new)

    return state


class NaiveFull(Full):
  """Approximates the full curvature in the most obvious way.

  The update to the curvature estimate is computed by
  ``(sum_i g_i) (sum_i g_i)^T / N``, where ``g_i`` is the gradient of each
  individual data point, and ``N`` is the batch size.
  """

  @utils.auto_scope_method
  def update_curvature_matrix_estimate(
      self,
      state: Full.State,
      estimation_data: Dict[str, Sequence[Array]],
      ema_old: Numeric,
      ema_new: Numeric,
      batch_size: Numeric,
  ) -> Full.State:
    # Copy this first since we mutate it later in this function.
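    # The update below flattens every parameter tangent in `estimation_data`
    # into a single vector g and accumulates outer(g, g) / batch_size into the
    # moving average, matching the formula in the class docstring.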
state = state.copy() params_grads = jax.tree_util.tree_leaves(estimation_data["params_tangent"]) params_grads = jax.tree_map(lambda x: x.flatten(), params_grads) grads = jnp.concatenate(params_grads, axis=0) state.matrix.update(jnp.outer(grads, grads) / batch_size, ema_old, ema_new) return state # _____ # | __ \ # | | | | ___ _ __ ___ ___ # | | | |/ _ \ '_ \/ __|/ _ \ # | |__| | __/ | | \__ \ __/ # |_____/ \___|_| |_|___/\___| # class DenseDiagonal(Diagonal): """A `Diagonal` block specifically for dense layers.""" @property def has_bias(self) -> bool: """Whether the layer has a bias parameter.""" return len(self.parameters_shapes) == 2 @utils.auto_scope_method def update_curvature_matrix_estimate( self, state: "Diagonal.State", estimation_data: Dict[str, Sequence[Array]], ema_old: Numeric, ema_new: Numeric, batch_size: Numeric, ) -> "Diagonal.State": # Copy this first since we mutate it later in this function. state = state.copy() x, = estimation_data["inputs"] dy, = estimation_data["outputs_tangent"] assert utils.first_dim_is_size(batch_size, x, dy) diagonals = (jnp.matmul((x * x).T, dy * dy) / batch_size,) if self.has_bias: diagonals += (jnp.mean(dy * dy, axis=0),) assert len(diagonals) == self.number_of_parameters for diagonal_factor, diagonal in zip(state.diagonal_factors, diagonals): diagonal_factor.update(diagonal, ema_old, ema_new) return state class DenseFull(Full): """A `Full` block specifically for dense layers.""" @utils.auto_scope_method def update_curvature_matrix_estimate( self, state: "Full.State", estimation_data: Dict[str, Sequence[Array]], ema_old: Numeric, ema_new: Numeric, batch_size: Numeric, ) -> "Full.State": # Copy this first since we mutate it later in this function. state = state.copy() x, = estimation_data["inputs"] dy, = estimation_data["outputs_tangent"] assert utils.first_dim_is_size(batch_size, x, dy) params_tangents = x[:, :, None] * dy[:, None, :] if self.number_of_parameters == 2: params_tangents = jnp.concatenate([params_tangents, dy[:, None]], axis=1) params_tangents = jnp.reshape(params_tangents, [batch_size, -1]) matrix_update = jnp.matmul(params_tangents.T, params_tangents) / batch_size state.matrix.update(matrix_update, ema_old, ema_new) return state class DenseTwoKroneckerFactored(TwoKroneckerFactored): """A :class:`~TwoKroneckerFactored` block specifically for dense layers.""" @utils.auto_scope_method def update_curvature_matrix_estimate( self, state: KroneckerFactored.State, estimation_data: Mapping[str, Sequence[Array]], ema_old: Numeric, ema_new: Numeric, batch_size: Numeric, ) -> KroneckerFactored.State: # Copy this first since we mutate it later in this function. state = state.copy() [x] = estimation_data["inputs"] [dy] = estimation_data["outputs_tangent"] assert utils.first_dim_is_size(batch_size, x, dy) if self.has_bias: x_one = jnp.ones_like(x[:, :1]) x = jnp.concatenate([x, x_one], axis=1) input_stats = jnp.einsum("ay,az->yz", x, x) / batch_size output_stats = jnp.einsum("ay,az->yz", dy, dy) / batch_size state.factors[0].update(input_stats, ema_old, ema_new) state.factors[1].update(output_stats, ema_old, ema_new) return state # _____ ___ _____ # / ____| |__ \| __ \ # | | ___ _ ____ __ ) | | | | # | | / _ \| '_ \ \ / // /| | | | # | |___| (_) | | | \ V // /_| |__| | # \_____\___/|_| |_|\_/|____|_____/ # class Conv2DDiagonal(Diagonal): """A :class:`~Diagonal` block specifically for 2D convolution layers.""" def __init__( self, layer_tag_eq: tags.LayerTagEqn, name: str, max_elements_for_vmap: Optional[int] = None, ): """Initializes the block. 
    Since there is no 'nice' formula for computing the average of the tangents
    for a 2D convolution, what we do is that we have a function -
    ``self.conv2d_tangent_squared`` - that computes for a single feature map
    the square of the tangents for the kernel of the convolution. To average
    over the batch we have two choices - vmap or loop over the batch
    sequentially using scan. This utility function provides a trade-off by
    being able to specify the maximum batch size that we can vmap over. This
    means that the maximum memory usage will be ``max_elements_for_vmap``
    times the memory needed when calling ``self.conv2d_tangent_squared``. And
    the actual ``vmap`` will be called
    ``ceil(total_batch_size / max_elements_for_vmap)`` number of times in a
    loop to find the final average.

    Args:
      layer_tag_eq: The Jax equation corresponding to the layer tag, that this
        block will approximate the curvature to.
      name: The name of this block.
      max_elements_for_vmap: The threshold used for determining how much
        computation to do in parallel and how much in a serial manner. If
        ``None`` will use the value returned by
        :func:`~get_max_parallel_elements`.
    """
    self._averaged_kernel_squared_tangents = utils.loop_and_parallelize_average(
        func=self.conv2d_tangent_squared,
        max_parallel_size=max_elements_for_vmap or get_max_parallel_elements(),
    )

    super().__init__(layer_tag_eq, name)

  @property
  def has_bias(self) -> bool:
    return len(self.parameters_shapes) == 2

  def conv2d_tangent_squared(
      self,
      image_features_map: Array,
      output_tangent: Array,
  ) -> Array:
    """Computes the elementwise square of a tangent for a single feature map."""
    extra_params = {k: v for k, v in self.layer_tag_extra_params.items()
                    if k not in ("lhs_shape", "rhs_shape")}

    _, vjp = jax.vjp(
        functools.partial(
            jax.lax.conv_general_dilated,
            **extra_params
        ),
        image_features_map[None],
        jnp.zeros(self.parameters_shapes[0])
    )

    return jnp.square(vjp(output_tangent[None])[1])

  @utils.auto_scope_method
  def update_curvature_matrix_estimate(
      self,
      state: Diagonal.State,
      estimation_data: Dict[str, Sequence[Array]],
      ema_old: Numeric,
      ema_new: Numeric,
      batch_size: Numeric,
  ) -> Diagonal.State:
    # Copy this first since we mutate it later in this function.
    state = state.copy()

    x, = estimation_data["inputs"]
    dy, = estimation_data["outputs_tangent"]
    assert utils.first_dim_is_size(batch_size, x, dy)

    diagonals = (self._averaged_kernel_squared_tangents(x, dy),)

    if self.has_bias:
      sum_axis = tuple(range(1, dy.ndim - len(self.parameters_shapes[1])))
      bias_dy = jnp.sum(dy, axis=sum_axis)
      diagonals += (jnp.mean(bias_dy * bias_dy, axis=0),)

    assert len(diagonals) == self.number_of_parameters

    for diagonal_factor, diagonal in zip(state.diagonal_factors, diagonals):
      diagonal_factor.update(diagonal, ema_old, ema_new)

    return state


class Conv2DFull(Full):
  """A :class:`~Full` block specifically for 2D convolution layers."""

  def __init__(
      self,
      layer_tag_eq: tags.LayerTagEqn,
      name: str,
      max_elements_for_vmap: Optional[int] = None,
  ):
    """Initializes the block.

    Since there is no 'nice' formula for computing the average of the tangents
    for a 2D convolution, what we do is that we have a function -
    ``self.conv2d_tangent_outer_product`` - that computes for a single feature
    map the outer product of the tangents for the kernel of the convolution.
    To average over the batch we have two choices - vmap or loop over the
    batch sequentially using scan. This utility function provides a trade-off
    by being able to specify the maximum batch size that will be handled in a
    single iteration of the loop.
    This means that the maximum memory usage will be ``max_elements_for_vmap``
    times the memory needed when calling ``self.conv2d_tangent_outer_product``.
    And the actual ``vmap`` will be called
    ``ceil(total_batch_size / max_elements_for_vmap)`` number of times in a
    loop to find the final average.

    Args:
      layer_tag_eq: The Jax equation corresponding to the layer tag, that this
        block will approximate the curvature to.
      name: The name of this block.
      max_elements_for_vmap: The threshold used for determining how much
        computation to do in parallel and how much in a serial manner. If
        ``None`` will use the value returned by
        :func:`~get_max_parallel_elements`.
    """
    self._averaged_tangents_outer_product = utils.loop_and_parallelize_average(
        func=self.conv2d_tangent_outer_product,
        max_parallel_size=max_elements_for_vmap or get_max_parallel_elements(),
    )

    super().__init__(layer_tag_eq, name)

  def conv2d_tangent_outer_product(
      self,
      inputs: Array,
      tangent_of_outputs: Array,
  ) -> Array:
    """Computes the outer product of a tangent for a single feature map."""
    extra_params = {k: v for k, v in self.layer_tag_extra_params.items()
                    if k not in ("lhs_shape", "rhs_shape")}

    _, vjp = jax.vjp(
        functools.partial(
            jax.lax.conv_general_dilated,
            **extra_params
        ),
        inputs[None],
        jnp.zeros(self.parameters_shapes[0])
    )

    tangents = (vjp(tangent_of_outputs[None])[1],)

    if self.number_of_parameters == 2:
      num_axis = tangent_of_outputs.ndim - len(self.parameters_shapes[1])
      sum_axis = tuple(range(num_axis))
      tangents += (jnp.sum(tangent_of_outputs, axis=sum_axis),)

    flat_tangents = self.parameters_list_to_single_vector(tangents)

    return jnp.outer(flat_tangents, flat_tangents)

  @utils.auto_scope_method
  def update_curvature_matrix_estimate(
      self,
      state: Full.State,
      estimation_data: Dict[str, Sequence[Array]],
      ema_old: Numeric,
      ema_new: Numeric,
      batch_size: Numeric,
  ) -> Full.State:
    # Copy this first since we mutate it later in this function.
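    # `self._averaged_tangents_outer_product` applies
    # `conv2d_tangent_outer_product` to every example (vmap-ing over at most
    # `max_elements_for_vmap` examples at a time) and returns the average of
    # the per-example outer products of the flattened parameter tangents.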
state = state.copy() x, = estimation_data["inputs"] dy, = estimation_data["outputs_tangent"] assert utils.first_dim_is_size(batch_size, x, dy) matrix_update = self._averaged_tangents_outer_product(x, dy) state.matrix.update(matrix_update, ema_old, ema_new) return state class Conv2DTwoKroneckerFactored(TwoKroneckerFactored): """A :class:`~TwoKroneckerFactored` block specifically for 2D convolution layers.""" def fixed_scale(self) -> Numeric: return float(self.num_locations) @property def kernel_output_axis(self) -> int: return self._layer_tag_eq.params["dimension_numbers"].rhs_spec[0] @property def outputs_channel_index(self) -> int: """The ``channels`` index in the outputs of the layer.""" return self._layer_tag_eq.params["dimension_numbers"].out_spec[1] @property def inputs_channel_index(self) -> int: """The ``channels`` index in the inputs of the layer.""" return self._layer_tag_eq.params["dimension_numbers"].lhs_spec[1] @property def weights_output_channel_index(self) -> int: """The ``channels`` index in weights of the layer.""" return self._layer_tag_eq.params["dimension_numbers"].rhs_spec[0] @property def weights_spatial_shape(self) -> Shape: spatial_index = self._layer_tag_eq.params["dimension_numbers"].rhs_spec[2:] return tuple(self.parameters_shapes[0][i] for i in spatial_index) @property def weights_spatial_size(self) -> int: """The spatial filter size of the weights.""" return utils.product(self.weights_spatial_shape) # pytype: disable=bad-return-type # numpy-scalars @property def inputs_spatial_shape(self) -> Shape: spatial_index = self._layer_tag_eq.params["dimension_numbers"].lhs_spec[2:] return tuple(self.inputs_shapes[0][i] for i in spatial_index) @property def num_locations(self) -> int: """The number of spatial locations that each filter is applied to.""" return psm.num_conv_locations( self.inputs_spatial_shape, self.weights_spatial_shape, self._layer_tag_eq.params["window_strides"], self._layer_tag_eq.params["padding"]) def input_size(self) -> int: if self.has_bias: return self.num_inputs_channels * self.weights_spatial_size + 1 else: return self.num_inputs_channels * self.weights_spatial_size def output_size(self) -> int: return self.num_outputs_channels @property def num_inputs_channels(self) -> int: """The number of channels in the inputs to the layer.""" return self._layer_tag_eq.invars[0].aval.shape[ # pytype: disable=attribute-error self.inputs_channel_index] @property def num_outputs_channels(self) -> int: """The number of channels in the outputs to the layer.""" return self._layer_tag_eq.invars[1].aval.shape[ # pytype: disable=attribute-error self.weights_output_channel_index] def compute_inputs_stats( self, inputs: Array, weighting_array: Optional[Array] = None, ) -> Array: """Computes the statistics for the inputs factor.""" batch_size = inputs.shape[0] input_cov_m, input_cov_v = psm.patches_moments( inputs, kernel_spatial_shape=self.weights_spatial_shape, strides=self._layer_tag_eq.params["window_strides"], padding=self._layer_tag_eq.params["padding"], data_format=None, dim_numbers=self._layer_tag_eq.params["dimension_numbers"], precision=self._layer_tag_eq.params.get("precision"), weighting_array=weighting_array, ) # Flatten the kernel and channels dimensions k, h, c = input_cov_v.shape input_cov_v = jnp.reshape(input_cov_v, (k * h * c,)) input_cov_m = jnp.reshape(input_cov_m, (k * h * c, k * h * c)) # Normalize by the `batch size` * `num_locations` normalizer = batch_size * self.num_locations input_cov_m = input_cov_m / normalizer input_cov_v = input_cov_v / 
normalizer if not self.has_bias: return input_cov_m if weighting_array is None: corner = jnp.ones([1], dtype=input_cov_m.dtype) else: corner = jnp.mean(weighting_array).reshape([1]) input_cov = jnp.concatenate([input_cov_m, input_cov_v[None]], axis=0) input_cov_v = jnp.concatenate([input_cov_v, corner], axis=0) return jnp.concatenate([input_cov, input_cov_v[:, None]], axis=1) def compute_outputs_stats(self, tangent_of_output: Array) -> Array: """Computes the statistics for the outputs factor.""" lhs_str = utils.replace_char(_ALPHABET[:4], "y", self.outputs_channel_index) rhs_str = utils.replace_char(_ALPHABET[:4], "z", self.outputs_channel_index) ein_str = f"{lhs_str},{rhs_str}->yz" stats = jnp.einsum(ein_str, tangent_of_output, tangent_of_output) # Normalize by the `batch size` * `num_locations` normalizer = tangent_of_output.shape[0] * self.num_locations return stats / normalizer @utils.auto_scope_method def update_curvature_matrix_estimate( self, state: TwoKroneckerFactored.State, estimation_data: Mapping[str, Sequence[Array]], ema_old: Numeric, ema_new: Numeric, batch_size: Numeric, ) -> TwoKroneckerFactored.State: # Copy this first since we mutate it later in this function. state = state.copy() [x] = estimation_data["inputs"] [dy] = estimation_data["outputs_tangent"] assert utils.first_dim_is_size(batch_size, x, dy) input_stats = self.compute_inputs_stats(x) output_stats = self.compute_outputs_stats(dy) state.factors[0].update(input_stats, ema_old, ema_new) state.factors[1].update(output_stats, ema_old, ema_new) return state # _____ _ _ _____ _ _ __ _ # / ____| | | /\ | |/ ____| | (_)/ _| | # | (___ ___ __ _| | ___ / \ _ __ __| | (___ | |__ _| |_| |_ # \___ \ / __/ _` | |/ _ \ / /\ \ | '_ \ / _` |\___ \| '_ \| | _| __| # ____) | (_| (_| | | __// ____ \| | | | (_| |____) | | | | | | | |_ # |_____/ \___\__,_|_|\___/_/ \_\_| |_|\__,_|_____/|_| |_|_|_| \__| # def compatible_shapes(ref_shape, target_shape): if len(target_shape) > len(ref_shape): raise ValueError("Target shape should be smaller.") for ref_d, target_d in zip(reversed(ref_shape), reversed(target_shape)): if ref_d != target_d and target_d != 1: raise ValueError(f"{target_shape} is incompatible with {ref_shape}.") def compatible_sum(tensor, target_shape, skip_axes): """Compute sum over ``tensor`` to achieve shape given by ``target_shape``.""" compatible_shapes(tensor.shape, target_shape) n = tensor.ndim - len(target_shape) axis = [i + n for i, t in enumerate(target_shape) if t == 1 and i + n not in skip_axes] tensor = jnp.sum(tensor, axis=axis, keepdims=True) axis = [i for i in range(tensor.ndim - len(target_shape)) if i not in skip_axes] return jnp.sum(tensor, axis=axis) class ScaleAndShiftDiagonal(Diagonal): """A diagonal approximation specifically for a scale and shift layers.""" @property def has_scale(self) -> bool: """Whether this layer's equation has a scale.""" return self._layer_tag_eq.params["has_scale"] @property def has_shift(self) -> bool: """Whether this layer's equation has a shift.""" return self._layer_tag_eq.params["has_shift"] @utils.auto_scope_method def update_curvature_matrix_estimate( self, state: Diagonal.State, estimation_data: Dict[str, Sequence[Array]], ema_old: Numeric, ema_new: Numeric, batch_size: Numeric, ) -> Diagonal.State: # Copy this first since we mutate it later in this function. 
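    # For every example the scale tangent is the sum of `x * dy` and the shift
    # tangent is the sum of `dy`, reduced over all axes that do not appear in
    # the corresponding parameter shape; the diagonal update is the batch mean
    # of their squares.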
state = state.copy() x, = estimation_data["inputs"] dy, = estimation_data["outputs_tangent"] assert utils.first_dim_is_size(batch_size, x, dy) if self.has_scale: assert (state.diagonal_factors[0].raw_value.shape == self.parameters_shapes[0]) scale_shape = estimation_data["params"][0].shape d_scale = compatible_sum(x * dy, scale_shape, skip_axes=[0]) scale_diag_update = jnp.sum( d_scale * d_scale, axis=0, keepdims=d_scale.ndim == len(scale_shape) ) / batch_size state.diagonal_factors[0].update(scale_diag_update, ema_old, ema_new) if self.has_shift: shift_shape = estimation_data["params"][-1].shape d_shift = compatible_sum(dy, shift_shape, skip_axes=[0]) shift_diag_update = jnp.sum( d_shift * d_shift, axis=0, keepdims=d_shift.ndim == len(shift_shape) ) / batch_size state.diagonal_factors[-1].update(shift_diag_update, ema_old, ema_new) return state class ScaleAndShiftFull(Full): """A full dense approximation specifically for a scale and shift layers.""" @property def _has_scale(self) -> bool: """Whether this layer's equation has a scale.""" return self._layer_tag_eq.params["has_scale"] @property def _has_shift(self) -> bool: """Whether this layer's equation has a shift.""" return self._layer_tag_eq.params["has_shift"] @utils.auto_scope_method def update_curvature_matrix_estimate( self, state: Full.State, estimation_data: Dict[str, Sequence[Array]], ema_old: Numeric, ema_new: Numeric, batch_size: Numeric, ) -> Full.State: # Copy this first since we mutate it later in this function. state = state.copy() x, = estimation_data["inputs"] dy, = estimation_data["outputs_tangent"] assert utils.first_dim_is_size(batch_size, x, dy) tangents = [] if self._has_scale: # Scale tangent scale_shape = estimation_data["params"][0].shape d_scale = compatible_sum(x * dy, scale_shape, skip_axes=[0]) d_scale = d_scale.reshape([batch_size, -1]) tangents.append(d_scale) if self._has_shift: # Shift tangent shift_shape = estimation_data["params"][-1].shape d_shift = compatible_sum(dy, shift_shape, skip_axes=[0]) d_shift = d_shift.reshape([batch_size, -1]) tangents.append(d_shift) tangents = jnp.concatenate(tangents, axis=1) matrix_update = jnp.matmul(tangents.T, tangents) / batch_size state.matrix.update(matrix_update, ema_old, ema_new) return state
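

# -----------------------------------------------------------------------------
# Editor's note: the following is an illustrative sketch only, not part of the
# kfac-jax API. It assumes the module-level `jnp` import and `Array` alias are
# available, and shows what the two-factor Kronecker approximation amounts to
# for a dense layer: the factors accumulated by
# `DenseTwoKroneckerFactored.update_curvature_matrix_estimate` are the second
# moments of the layer inputs and of the output tangents, and
# `TwoKroneckerFactored._to_dense_unscaled` expands them with `jnp.kron`.
def _dense_two_kronecker_sketch(x: Array, dy: Array) -> Array:
  """Hypothetical helper: kron(A, B) with A = x^T x / N and B = dy^T dy / N."""
  batch_size = x.shape[0]
  # Second moment of the inputs (append a column of ones to `x` beforehand if
  # the layer has a bias).
  input_factor = jnp.einsum("ay,az->yz", x, x) / batch_size
  # Second moment of the output tangents.
  output_factor = jnp.einsum("ay,az->yz", dy, dy) / batch_size
  # The dense representation of the block is the Kronecker product of the
  # input factor with the output factor.
  return jnp.kron(input_factor, output_factor)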
kfac-jax-main
kfac_jax/_src/curvature_blocks.py
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """K-FAC tracing functionality for functions needed for curvature estimation.""" import functools from typing import Any, Callable, Sequence, TypeVar, Tuple, Union, Dict, List import jax import jax.numpy as jnp from kfac_jax._src import layers_and_loss_tags as tags from kfac_jax._src import loss_functions from kfac_jax._src import tag_graph_matcher as tgm from kfac_jax._src import utils from typing_extensions import TypeAlias # Types for annotations Array = utils.Array Shape = utils.Shape Params = utils.Params FuncArgs = utils.FuncArgs FuncOuts = utils.FuncOuts Var = jax.core.Var T = TypeVar("T") # J = TypeVar("J", jax.core.Jaxpr, jax.core.ClosedJaxpr) ProcJaxpr: TypeAlias = "ProcessedJaxpr" TaggedFunction = Callable[..., Tuple[loss_functions.LossFunction, ...]] FuncWithTags = Callable[..., Any] LossTagInputs = Tuple[Array, ...] LayerTagInputs = Tuple[Array, ...] FunctionTransformation = Callable[..., Union[ProcJaxpr, T]] TransformedFunction = Callable[..., Union[ProcJaxpr, T]] LossTagsVjp = Tuple[ Tuple[loss_functions.LossFunction, ...], Callable[[Sequence[LossTagInputs]], Params] ] LossTagsJvp = Tuple[ Tuple[loss_functions.LossFunction, ...], Tuple[LossTagInputs, ...], ] LayerTagVjp = Tuple[ Tuple[loss_functions.LossFunction, ...], Callable[[Tuple[LossTagInputs, ...]], Tuple[Dict[str, Array], ...]] ] JaxprOrClosedJaxpr = Union[jax.core.Jaxpr, jax.core.ClosedJaxpr] def shape_and_type(x: Array) -> Tuple[Shape, jnp.dtype]: """Returns the shape and type of the given array.""" return x.shape, x.dtype def make_cache_key( func_args: FuncArgs, *args: Any ) -> Tuple[utils.PyTreeDef, Tuple[Tuple[Shape, jnp.dtype], ...]]: """Creates a key for caching Jax function arguments.""" args_flat, tree_structure = jax.tree_util.tree_flatten((func_args, args)) return tree_structure, tuple(map(shape_and_type, args_flat)) def extract_tags( jaxpr: jax.core.Jaxpr ) -> Tuple[Tuple[tags.LayerTagEqn, ...], Tuple[tags.LossTagEqn, ...]]: """Extracts the layer and the loss tags from the given Jaxpr.""" return (tuple(eqn for eqn in jaxpr.eqns if isinstance(eqn.primitive, tags.LayerTag)), tuple(eqn for eqn in jaxpr.eqns if isinstance(eqn.primitive, tags.LossTag))) def order_layer_tags( params_vars_flat: Sequence[Var], layer_tags: Sequence[tags.LayerTagEqn], allow_left_out_params: bool = False, ) -> Tuple[Tuple[tags.LayerTagEqn, ...], Tuple[Tuple[int, ...], ...]]: """Sorts the layer tags based on the index of the parameters they contain. Args: params_vars_flat: A sequence of all parameter variables. layer_tags: A sequence of all layer tags. allow_left_out_params: Whether to raise an error if there are any parameter variables which are not part of a layer tag. 
Returns: A pair of tuples ``(layer_tags, tags_indices)``, where ``layer_tags`` has the ordered sequence of the input ``layer_tags`` and ``tags_indices`` contains a sequence of tuples, where each tuple has the indices of the parameters associated with the corresponding layer tag. """ tags_param_indices = [] used_indices = set() for eqn in layer_tags: # Collect the equation parameter indices _, _, tag_vars = eqn.primitive.split_all_inputs(eqn.invars) vars_indices = tuple(params_vars_flat.index(v) for v in tag_vars) if any(i in used_indices for i in vars_indices): raise ValueError("Reusing variable in a second block.") used_indices = used_indices.union(vars_indices) tags_param_indices.append(vars_indices) left_out_indices = set(range(len(params_vars_flat))) - used_indices if left_out_indices and not allow_left_out_params: raise ValueError("The following parameter indices were not assigned a " f"block: {left_out_indices}.") if not layer_tags: return (), () else: # Sort by the vars minimum index sorted_index_and_blocks = sorted(zip(layer_tags, tags_param_indices), key=lambda x: min(x[1])) return tuple(zip(*sorted_index_and_blocks)) class ProcessedJaxpr(utils.Finalizable): """A wrapper around Jaxpr, with useful additional data. Attributes: jaxpr: The original Jaxpr that is being wrapped. consts: The constants returned from the tracing of the original Jaxpr. in_tree: The PyTree structure of the inputs to the function that the original Jaxpr has been created from. params_index: Specifies, which inputs to the function are to be considered a parameter variable. Specifically - ``inputs[params_index]``. loss_tags: A tuple of all of the loss tags in the original Jaxpr. layer_tags: A sorted tuple of all of the layer tags in the original Jaxpr. The sorting order is based on the indices of the parameters associated with each layer tag. layer_indices: A sequence of tuples, where each tuple has the indices of the parameters associated with the corresponding layer tag. """ def __init__( self, jaxpr: jax.core.Jaxpr, consts: Sequence[Any], in_tree: utils.PyTreeDef, params_index: int, allow_left_out_params: bool = False, ): """Initializes the instance. Args: jaxpr: The raw Jaxpr. consts: The constants needed for evaluation of the raw Jaxpr. in_tree: The PyTree structure of the inputs to the function that the ``jaxpr`` has been created from. params_index: Specifies, which inputs to the function are to be considered a parameter variable. Specifically - ``inputs[params_index]``. allow_left_out_params: Whether to raise an error if any of the parameter variables is not included in any layer tag. 
""" super().__init__() self.jaxpr = jaxpr self.consts = consts self.in_tree = in_tree self.params_index = params_index self.layer_tags, self.loss_tags = extract_tags(jaxpr) self.layer_tags, self.layer_indices = order_layer_tags( params_vars_flat=self.params_vars_flat, layer_tags=self.layer_tags, allow_left_out_params=allow_left_out_params, ) self.finalize() @property def in_vars_flat(self) -> List[Var]: """A flat list of all of the abstract input variables.""" return self.jaxpr.invars @property def in_vars(self) -> utils.PyTree[Var]: """The abstract input variables, as an un-flatten structure.""" return jax.tree_util.tree_unflatten(self.in_tree, self.in_vars_flat) @property def params_vars(self) -> utils.PyTree[Var]: """The abstract parameter variables, as an un-flatten structure.""" return self.in_vars[self.params_index] @property def params_vars_flat(self) -> List[Var]: """A flat list of all abstract parameter variables.""" return jax.tree_util.tree_leaves(self.params_vars) @property def params_tree(self) -> utils.PyTreeDef: """The PyTree structure of the parameter variables.""" return jax.tree_util.tree_structure(self.params_vars) @classmethod def make_from_func( cls, func: utils.Func, func_args: FuncArgs, params_index: int = 0, auto_register_tags: bool = True, allow_left_out_params: bool = False, ** auto_registration_kwargs: Any, ) -> ProcJaxpr: """Constructs a :class:`~ProcessedJaxpr` from a the given function. Args: func: The model function, which will be traced. func_args: Function arguments to use for tracing. params_index: The variables from the function arguments which are at this index (e.g. ``func_args[params_index]``) are to be considered model parameters. auto_register_tags: Whether to run an automatic layer registration on the function (e.g. :func:`~auto_register_tags`). allow_left_out_params: If this is set to ``False`` an error would be raised if there are any model parameters that have not be assigned to a layer tag. **auto_registration_kwargs: Any additional keyword arguments, to be passed to the automatic registration pass. Returns: A :class:`~ProcessedJaxpr` representing the model function. 
""" func_args = tuple(func_args) if auto_register_tags: func = tgm.auto_register_tags( func=func, func_args=func_args, params_index=params_index, **auto_registration_kwargs) typed_jaxpr = jax.make_jaxpr(func)(*func_args) jaxpr, consts = typed_jaxpr.jaxpr, typed_jaxpr.literals in_tree = jax.tree_util.tree_structure(func_args) return ProcessedJaxpr( jaxpr=jaxpr, consts=consts, in_tree=in_tree, params_index=params_index, allow_left_out_params=allow_left_out_params, ) def __eq__(self, other: ProcJaxpr) -> bool: """Compares two ProcessedJaxpr instances by tree structure.""" # Verify whether input trees are equivalent if self.in_tree != other.in_tree: return False # Verify whether layer indices are equivalent if len(self.layer_indices) != len(other.layer_indices): return False for ref_l_index, l_index in zip(self.layer_indices, other.layer_indices): if len(ref_l_index) != len(l_index): return False if any(p_i != p_j for p_i, p_j in zip(ref_l_index, l_index)): return False # Verify layer tags are equivalent if len(self.layer_tags) != len(other.layer_tags): return False if any(ref_tag.primitive != tag.primitive for ref_tag, tag in zip(self.layer_tags, other.layer_tags)): return False # Verify whether parameter shapes are equivalent if any(p_i.aval.shape != p_j.aval.shape for p_i, p_j in zip(self.params_vars_flat, other.params_vars_flat)): return False return True def cached_transformation( func: utils.Func, transformation: FunctionTransformation[T], params_index: int = 0, auto_register_tags: bool = True, allow_left_out_params: bool = False, allow_no_losses: bool = False, raise_error_on_diff_jaxpr: bool = True, **auto_registration_kwargs: Any, ) -> TransformedFunction[T]: """Caches ``transformation(preprocessed_jaxpr, func_args, *args)``. The caching mechanism uses the ``func_args`` PyTree, dtypes and shapes for hashing. Args: func: The main model function, which will be transformed. transformation: The actual transformation of ``func``. params_index: The variables from the function arguments which are at this index (e.g. ``func_args[params_index]``) are to be considered model parameters. auto_register_tags: Whether to run an automatic layer registration on the function (e.g. :func:`~auto_register_tags`). allow_left_out_params: If this is set to ``False`` an error would be raised if there are any model parameters that have not be assigned to a layer tag. allow_no_losses: If this is set to ``False`` an error would be raised if no registered losses have been found when tracing the function. raise_error_on_diff_jaxpr: Whether to raise an exception if the function has been traced before, with different arguments, and the new Jaxpr graph differs in more than just the shapes and dtypes of the Jaxpr equations. **auto_registration_kwargs: Any additional keyword arguments, to be passed to the automatic registration pass. Returns: A function with a signature ``f(func_args, *args, return_only_jaxpr)`` which evaluates the transformation of ``func`` at ``func_args``. The extra ``args`` are any additional array arguments passed to the transformation, while the last flag indicates whether to just return the :class:`~ProcessedJaxpr` instead of the transformation output. 
""" cache = {} @functools.wraps(transformation) def wrapped_transformation( func_args: FuncArgs, *args: Any, return_only_jaxpr: bool = False, ) -> Union[ProcessedJaxpr, T]: # Construct a key and check cache for hits key = make_cache_key(func_args) jaxpr, f = cache.get(key, (None, None)) if jaxpr is None: assert f is None # Process the function jaxpr = ProcessedJaxpr.make_from_func( func=func, func_args=func_args, params_index=params_index, auto_register_tags=auto_register_tags, allow_left_out_params=allow_left_out_params, **auto_registration_kwargs ) if not allow_no_losses and not jaxpr.loss_tags: raise ValueError("No registered losses have been found during tracing.") if cache and raise_error_on_diff_jaxpr: # If any previous `ProcessedJaxpr` exists verify that it is equivalent ref_jaxpr, _ = cache[next(iter(cache))] if ref_jaxpr != jaxpr: raise ValueError("The consecutive tracing of the provided function " "yielded a non-equivalent `ProcessedJaxpr`.") f = functools.partial(transformation, jaxpr) cache[key] = (jaxpr, f) if return_only_jaxpr: return jaxpr else: return f(func_args, *args) return wrapped_transformation def construct_compute_losses_inputs( jaxpr: jax.core.Jaxpr, consts: Sequence[Any], num_losses: int, primal_func_args: FuncArgs, params_index: int ) -> Callable[ [Params], Tuple[Tuple[LossTagInputs, ...], Tuple[LossTagInputs, ...]] ]: """Constructs a function that computes the inputs to all loss tags. The returned function takes as input only the parameters, as specified by ``params_index``, and returns a tuple containing the input values to the first ``num_losses`` loss tags in the Jaxpr. This is done by iterating sequentially over all equations in the Jaxpr, evaluating each equation, until the correct number of loss tags have been discovered and returning the values of their inputs. Args: jaxpr: The Jaxpr to be iterated over. consts: Any constants to be used for the computation (see docs on Jaxpr). num_losses: The number of loss tags after which to terminate iteration. If the Jaxpr has less loss tags, it will return all of them. primal_func_args: The concrete values for the inputs to the Jaxpr. params_index: The variables from the function arguments which are at this index (e.g. ``func_args[params_index]``) are to be considered model parameters. Returns: A function which computes the inputs to the first ``num_losses`` loss tags. """ def forward_compute_losses( primal_params: Params ) -> Tuple[Tuple[LossTagInputs, ...], Tuple[LossTagInputs, ...]]: """Computes and returns the inputs to the first ``num_losses`` loss tags.""" # Check the provided inputs match the original primals. 
local_func_args = list(primal_func_args) original_params = local_func_args[params_index] if not utils.abstract_objects_equal(original_params, primal_params): raise ValueError("The `primal_params` should have the same abstract " "structure as the original parameters passed in to the " "function.") local_func_args[params_index] = primal_params flat_args = jax.tree_util.tree_leaves(local_func_args) # Mapping from variable -> value env = {} read = functools.partial(tgm.read_env, env) write = functools.partial(tgm.write_env, env) # Bind args and consts to environment write(jaxpr.invars, flat_args) write(jaxpr.constvars, consts) # Loop through equations and evaluate primitives using `bind` losses_so_far = 0 losses_p_deps = [] losses_inputs = [] for eqn in jaxpr.eqns: write(eqn.outvars, tgm.eval_jaxpr_eqn(eqn, read(eqn.invars))) if isinstance(eqn.primitive, tags.LossTag): losses_inputs.append(read(eqn.invars)) losses_p_deps.append(eqn.primitive.extract_parameter_dependants( *losses_inputs[-1], **eqn.params)) losses_so_far += 1 if num_losses is not None and losses_so_far == num_losses: break return tuple(losses_p_deps), tuple(losses_inputs) return forward_compute_losses def _loss_tags_vjp( p_jaxpr: ProcessedJaxpr, primal_func_args: FuncArgs, ) -> LossTagsVjp: """Computes a (backward-mode) vector-Jacobian product w.r.t. all loss tags. The function has similar interface to :func:`jax.vjp`. It takes as inputs the concrete values of the primals at which the Jacobian will be evaluated. It returns a pair of ``(losses, losses_vjp)``, where losses is a tuple of :class:`~LossFunction` objects and ``vjp_func`` is a function taking as inputs the concrete values of the tangents of the inputs for each loss tag (corresponding to a loss object in ``losses``) and returns the corresponding tangents of the parameters. Args: p_jaxpr: The :class:``~ProcessedJaxpr`` representing the model function. This must include at least one loss tag. primal_func_args: The primals at which to evaluate the Jacobian. Returns: The computed ``losses`` and ``losses_vjp`` pair. """ if not p_jaxpr.loss_tags: raise ValueError("The provided `ProcessedJaxpr` has no loss tags.") losses_func = construct_compute_losses_inputs( jaxpr=p_jaxpr.jaxpr, consts=p_jaxpr.consts, num_losses=len(p_jaxpr.loss_tags), primal_func_args=primal_func_args, params_index=p_jaxpr.params_index) primal_params = primal_func_args[p_jaxpr.params_index] (_, losses_inputs), full_vjp_func = jax.vjp(losses_func, primal_params) losses = tuple(tag.primitive.loss(*inputs, **tag.params) for tag, inputs in zip(p_jaxpr.loss_tags, losses_inputs)) zero_tangents = jax.tree_util.tree_map(jnp.zeros_like, losses_inputs) def losses_vjp_func(losses_tangents: Sequence[LossTagInputs]) -> Params: """Computes the vector-Jacobian product w.r.t. the parameters. Args: losses_tangents: The tangents to all loss tag's inputs. Returns: The parameters' tangents, as a result of the vector-Jacobian product. """ if len(losses_tangents) != len(p_jaxpr.loss_tags): raise ValueError("The argument `tangents` must be a sequence of the " "tangents to each loss tag in the same order as the " "loss objects that have been returned. 
The number of " f"loss_tags is {len(p_jaxpr.loss_tags)}, but the length " f"of `tangents` is {len(losses_tangents)}.") for i, loss_tangents in enumerate(losses_tangents): if not isinstance(loss_tangents, Sequence): raise ValueError("Each element of the argument `tangents` must be " f"a sequence, but tangents[{i}] has type " f"{type(loss_tangents)}.") # The tangents of the second entry are always zero, as we compute this only # for the parameter dependent arrays. params_tangents, = full_vjp_func((losses_tangents, zero_tangents)) return params_tangents return losses, losses_vjp_func def _loss_tags_jvp( p_jaxpr: ProcessedJaxpr, primal_func_args: FuncArgs, params_tangents: Params, ) -> LossTagsJvp: """Computes a (forward-mode) Jacobian-vector product w.r.t. all loss tags. The function has similar interface to :func:`jax.jvp`. It takes as inputs the concrete values of the primals at which the Jacobian will be evaluated at and the concrete values of the tangents for the **parameters**, as specified by ``processed_jaxpr.params_index``. It returns a pair of ``(losses, losses_tangents)``, where ``losses`` is a tuple of :class:`~LossFunction` objects, and ``losses_tangents`` is a tuple containing the tangents of the inputs for each loss tag (corresponding to a loss object in ``losses``). Args: p_jaxpr: The :class:`~ProcessedJaxpr` representing the model function. This must include at least one loss tag. primal_func_args: The primals at which to evaluate the Jacobian. params_tangents: The vector of tangents which to multiply with the Jacobian. Returns: The computed ``losses`` and ``losses_tangents`` pair. """ if not p_jaxpr.loss_tags: raise ValueError("The provided `ProcessedJaxpr` has no loss tags.") losses_func = construct_compute_losses_inputs( jaxpr=p_jaxpr.jaxpr, consts=p_jaxpr.consts, num_losses=len(p_jaxpr.loss_tags), primal_func_args=primal_func_args, params_index=p_jaxpr.params_index) primal_params = (primal_func_args[p_jaxpr.params_index],) tangents = (params_tangents,) (primals_out, tangents_out) = jax.jvp(losses_func, primal_params, tangents) _, losses_inputs_primals = primals_out losses_tangents, _ = tangents_out losses = tuple( tag.primitive.loss(*inputs, **tag.params) for tag, inputs in zip(p_jaxpr.loss_tags, losses_inputs_primals) ) return losses, losses_tangents def _loss_tags_hvp( processed_jaxpr: ProcessedJaxpr, primal_func_args: FuncArgs, params_tangents: Params, ) -> Tuple[Params, Tuple[loss_functions.LossFunction, ...]]: """Computes a Hessian-vector product of the function w.r.t. all loss tags. The function takes as inputs the concrete values of the primals for the function arguments at which the Hessian will be evaluated at and the concrete values of the tangents for the **parameters**, as specified by ``processed_jaxpr.params_index``. It returns the product of the Hessian with this tangents via backward-over-forward mode. Args: processed_jaxpr: The :class:`~ProcessedJaxpr` representing the model function. This must include at least one loss tag. primal_func_args: The primals at which to evaluate the Hessian. params_tangents: The vector of tangents which to multiply with the Hessian. Returns: The parameter-structured vector representing the Hessian-vector product and the resulting :class:`~LossFunction` objects that correspond to every loss tag. 
""" if not processed_jaxpr.loss_tags: raise ValueError("The provided `ProcessedJaxpr` has no loss tags.") losses_func = construct_compute_losses_inputs( jaxpr=processed_jaxpr.jaxpr, consts=processed_jaxpr.consts, num_losses=len(processed_jaxpr.loss_tags), primal_func_args=primal_func_args, params_index=processed_jaxpr.params_index) def compute_losses( param_primals: Params ) -> Tuple[loss_functions.LossFunction, ...]: """Computes the sum of all losses as a scalar.""" _, loss_inputs = losses_func(param_primals) return tuple(tag.primitive.loss(*inputs, **tag.params) for tag, inputs in zip(processed_jaxpr.loss_tags, loss_inputs)) def losses_sum(param_primals: Params) -> Array: # This computes the sum of losses evaluated. Makes it easier because we can # now use jax.grad rather than jax.vjp for taking derivatives. return sum(jnp.sum(loss.evaluate()) for loss in compute_losses(param_primals)) # Directional derivative function df_dot_dv = lambda p: (jax.jvp(losses_sum, [p], [params_tangents])[1]) hvp = jax.grad(df_dot_dv)(primal_func_args[processed_jaxpr.params_index]) return hvp, compute_losses(primal_func_args[processed_jaxpr.params_index]) def _layer_tag_vjp( processed_jaxpr: ProcessedJaxpr, primal_func_args: FuncArgs, ) -> LayerTagVjp: """Computes primal values and tangents w.r.t. all layer tags. The function has similar interface to :func:`jax.vjp`. It takes as inputs the concrete values of the primals at which the Jacobian will be evaluated. It returns a pair of ``(losses, vjp_func)``, where losses is a tuple of :class:`~LossFunction` objects and ``vjp_func`` is a function taking as inputs the concrete values of the tangents of the inputs for each loss tag (corresponding to a loss object in ``losses``) and returns a list of quantities computed for each layer tag in ``processed_jaxpr``. Each entry of the list is a dictionary with the following self-explanatory keys: ``inputs, outputs, params, outputs_tangents, params_tangents``. Args: processed_jaxpr: The :class:`~ProcessedJaxpr` representing the model function. This must include at least one loss tag. primal_func_args: The primals at which to evaluate the Hessian. Returns: The computed ``losses`` and ``vjp_func`` pair. """ layer_vars_flat = jax.tree_util.tree_leaves( [tag.invars for tag in processed_jaxpr.layer_tags]) layer_input_vars = tuple(set(layer_vars_flat)) def forward() -> Tuple[Array, ...]: """Computes the values of all inputs to all **layer** tags.""" own_func_args = primal_func_args # Mapping from variable -> value env = {} read = functools.partial(tgm.read_env, env) write = functools.partial(tgm.write_env, env) # Bind args and consts to environment write(processed_jaxpr.jaxpr.invars, jax.tree_util.tree_leaves(own_func_args)) write(processed_jaxpr.jaxpr.constvars, processed_jaxpr.consts) # Loop through equations and evaluate them num_losses_passed = 0 for eqn in processed_jaxpr.jaxpr.eqns: write(eqn.outvars, tgm.eval_jaxpr_eqn(eqn, read(eqn.invars))) if isinstance(eqn.primitive, tags.LossTag): num_losses_passed += 1 if num_losses_passed == len(processed_jaxpr.loss_tags): break assert num_losses_passed == len(processed_jaxpr.loss_tags) return read(layer_input_vars) def forward_aux( aux: Dict[Var, Array] ) -> Tuple[Tuple[LossTagInputs, ...], Tuple[LossTagInputs, ...]]: """Computes the inputs and kwargs of all **loss** tags. Args: aux: A mapping from an Jaxpr variable to an additional auxiliary value. For each variable in this mapping, we add to the value computed during standard evaluation the auxiliary value. 
This is done in order to be able to compute gradients wrt all intermediate expressions corresponding to the Jaxpr variables in this mapping Returns: The pair of ``(losses_inputs, losses_kwargs)`` where ``losses_inputs`` is a tuple of the input values for each loss tag, and ``losses_kwargs`` is a tuple of the kwargs values of each loss tag. """ own_func_args = primal_func_args # Mapping from variable -> value env = {} read = functools.partial(tgm.read_env, env) def write(var, val): tgm.write_env(env, var, val) # pytype: disable=wrong-arg-types # numpy-scalars if not isinstance(var, list): var = [var] assert isinstance(var, list) for v in var: if not isinstance(v, jax.core.Literal) and v in aux: env[v] = env[v] + aux[v] # Bind args and consts to environment write(processed_jaxpr.jaxpr.invars, jax.tree_util.tree_leaves(own_func_args)) write(processed_jaxpr.jaxpr.constvars, processed_jaxpr.consts) # Loop through equations and evaluate primitives using `bind` num_losses_passed = 0 losses_p_dependants = [] losses_inputs_values = [] for eqn in processed_jaxpr.jaxpr.eqns: input_values = tuple(read(eqn.invars)) write(eqn.outvars, tgm.eval_jaxpr_eqn(eqn, read(eqn.invars))) if isinstance(eqn.primitive, tags.LossTag): loss = eqn.primitive.loss(*input_values, **eqn.params) losses_p_dependants.append(loss.parameter_dependants) losses_inputs_values.append(input_values) num_losses_passed += 1 if num_losses_passed == len(processed_jaxpr.loss_tags): break assert num_losses_passed == len(processed_jaxpr.loss_tags) # Read the inputs to the loss functions, but also return the target values return tuple(losses_p_dependants), tuple(losses_inputs_values) # First compute the primal values for the inputs to all layer tags layer_input_values = forward() primals_dict = dict(zip(layer_input_vars, layer_input_values)) # Update with the values of all parameters, which are inputs to the function primals_dict.update(zip(processed_jaxpr.jaxpr.invars, jax.tree_util.tree_leaves(primal_func_args))) # Create auxiliary values all equal to zero. aux_values = jax.tree_util.tree_map(jnp.zeros_like, layer_input_values) # Create a mapping from all layer tag inputs to the zero values aux_dict = dict(zip(layer_input_vars, aux_values)) # These values would now allow us to compute gradients wrt the layer tags # inputs, which are intermediate expressions in the Jaxpr. _, aux_vjp, losses_inputs = jax.vjp( forward_aux, aux_dict, has_aux=True) # Compute the actual loss objects. losses = tuple(tag.primitive.loss(*inputs, **tag.params) for tag, inputs in zip(processed_jaxpr.loss_tags, losses_inputs)) def vjp_func( tangents: Tuple[LossTagInputs, ...] ) -> Tuple[Dict[str, Array], ...]: """Computes a (reverse-mode) vector-Jacobian product w.r.t. all layer tags. Args: tangents: The concrete tangent values for the tangents of the inputs to all **loss** tags. Returns: A tuple containing both the primal and tangent values for the inputs to all **layer** tags. The values are provided as a dictionary with keys: ``inputs, outputs, params, outputs_tangent, params_tangent``. 
""" all_tangents = aux_vjp(tangents) tangents_dict, inputs_tangents = all_tangents[0], all_tangents[1:] inputs_tangents = jax.tree_util.tree_leaves(inputs_tangents) tangents_dict.update(zip(processed_jaxpr.jaxpr.invars, inputs_tangents)) read_primals = functools.partial(tgm.read_env, primals_dict) read_tangents = functools.partial(tgm.read_env, tangents_dict) layers_info = [] for tag in processed_jaxpr.layer_tags: info = {} primals = jax.util.safe_map(read_primals, tuple(tag.invars)) (info["outputs"], info["inputs"], info["params"]) = tag.primitive.split_all_inputs(primals) # Due to the ability to preprocess inputs for tags the input gradients # could be potentially wrong (e.g. zero) so we don't include them. tangents = jax.util.safe_map(read_tangents, tuple(tag.invars)) # inputs_tangent won't be correct for BN layers, but that won't matter (info["outputs_tangent"], info["inputs_tangent"], info["params_tangent"]) = tag.primitive.split_all_inputs(tangents) layers_info.append(info) return tuple(layers_info) # pytype: disable=bad-return-type # numpy-scalars return losses, vjp_func def loss_tags_vjp( func: utils.Func, params_index: int = 0, ) -> TransformedFunction[LossTagsVjp]: """Creates a function for the vector-Jacobian product w.r.t. all loss tags. The returned function has a similar interface to :func:`jax.vjp`. It takes as inputs the concrete values of the primals at which the Jacobian will be evaluated. It returns a pair ``(losses, losses_vjp)``, where losses is a tuple of :class:`~LossFunction` objects and ``vjp_func`` is a function taking as inputs the concrete values of the tangents of the inputs for each loss tag (corresponding to a loss object in ``losses``) and returns the corresponding tangents of the parameters. Args: func: The model function, which must include at least one loss registration. params_index: The variables from the function arguments which are at this index (e.g. `func_args[params_index]`) are to be considered model parameters. Returns: A function that computes the vector-Jacobian product with signature `Callable[[FuncArgs], LossTagsVjp]`. """ # Note that this function is independent of any layer tags, hence we can avoid # calling the auto registration. return cached_transformation( func=func, transformation=_loss_tags_vjp, verifier=lambda: None, params_index=params_index, auto_register_tags=False, allow_left_out_params=True, ) def loss_tags_jvp( func: utils.Func, params_index: int = 0, ) -> ...: """Creates a function for the Jacobian-vector product w.r.t. all loss tags. The returned function has a similar interface to :func:`jax.jvp`. It takes as inputs the concrete values of the primals at which the Jacobian will be evaluated at and the concrete values of the tangents for the **parameters**, as specified by ``processed_jaxpr.params_index``. It returns a pair ``(losses, losses_tangents)``, where ``losses`` is a tuple of :class:`~LossFunction` objects, and ``losses_tangents`` is a tuple containing the tangents of the inputs for each loss tag (corresponding to a loss object in ``losses``). Args: func: The model function, which must include at least one loss registration. params_index: The variables from the function arguments which are at this index (e.g. `func_args[params_index]`) are to be considered model parameters. Returns: A function that computes the Jacobian-vector product with signature `Callable[[FuncArgs, Params], LossTagsVjp]`. """ # Note that this function is independent of any layer tags, hence we can avoid # calling the auto registration. 
return cached_transformation( func=func, transformation=_loss_tags_jvp, verifier=lambda: None, params_index=params_index, auto_register_tags=False, allow_left_out_params=True, ) def loss_tags_hvp( func: utils.Func, params_index: int = 0, ) -> ...: """Creates a function for the Hessian-vector product w.r.t. all loss tags. The returned function takes as inputs the concrete values of the primals for the function arguments at which the Hessian will be evaluated at and the concrete values of the tangents for the **parameters**, as specified by ``processed_jaxpr.params_index``. It returns the product of the Hessian with these tangents via backward-over-forward mode autodiff. Args: func: The model function, which must include at least one loss registration. params_index: The variables from the function arguments which are at this index (e.g. `func_args[params_index]`) are to be considered model parameters. Returns: A function that computes the Hessian-vector product and also returns all losses, with signature `Callable[[FuncArgs, Params], Tuple[LossTagsVjp, Tuple[loss_functions.LossFunction, ...]]`. """ # Note that this function is independent of any layer tags, hence we can avoid # calling the auto registration. return cached_transformation( func=func, transformation=_loss_tags_hvp, verifier=lambda: None, params_index=params_index, auto_register_tags=False, allow_left_out_params=True, ) def layer_tags_vjp( func: utils.Func, params_index: int = 0, auto_register_tags: bool = True, raise_error_on_diff_jaxpr: bool = True, **auto_registration_kwargs, ) -> ...: """Creates a function for primal values and tangents w.r.t. all layer tags. The returned function has a similar interface to :func:`jax.vjp`. It takes as inputs the concrete values of the primals at which the Jacobian will be evaluated. It returns a pair ``(losses, vjp_func)``, where ``losses`` is a tuple of :class:`~LossFunction` objects, and ``vjp_func`` is a function taking as inputs the concrete values of the tangents of the inputs for each loss tag (corresponding to a loss object in ``losses``) and returns a list of quantities computed for each layer tag in ``processed_jaxpr``. Each entry of the list is a dictionary with the following self-explanatory keys: ``inputs, outputs, params, outputs_tangents, params_tangents``. Args: func: The model function, which must include at least one loss registration. params_index: The variables from the function arguments which are at this index (e.g. ``func_args[params_index]``) are to be considered model parameters. auto_register_tags: Whether to run an automatic layer registration on the function (e.g. :func:`~auto_register_tags`). raise_error_on_diff_jaxpr: When tracing with different arguments, if the returned jaxpr has a different graph will raise an exception. **auto_registration_kwargs: Any additional keyword arguments, to be passed to the automatic registration pass. Returns: Returns a function that computes primal values and tangents wrt all layer tags, with signature `Callable[[FuncArgs, Params], LossTagsVjp]`. """ return cached_transformation( func=func, transformation=_layer_tag_vjp, params_index=params_index, auto_register_tags=auto_register_tags, allow_left_out_params=False, raise_error_on_diff_jaxpr=raise_error_on_diff_jaxpr, **auto_registration_kwargs )
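# A minimal usage sketch for the loss-tag transformations defined above. It
# relies only on the signatures documented in this module (the returned
# callables take the full tuple of function arguments as their first input)
# and assumes the loss is registered with
# `kfac_jax.register_squared_error_loss`; the names `toy_loss`, `params`,
# `batch` and `tangents` are illustrative assumptions, not part of the library.
if __name__ == "__main__":
  import kfac_jax

  def toy_loss(params, batch):
    preds = batch["x"] @ params["w"]
    kfac_jax.register_squared_error_loss(preds, batch["y"])
    return jnp.sum((preds - batch["y"]) ** 2)

  params = {"w": jnp.ones([3, 2])}
  batch = {"x": jnp.ones([5, 3]), "y": jnp.zeros([5, 2])}
  tangents = jax.tree_util.tree_map(jnp.ones_like, params)

  # Reverse-mode VJP through the loss tags: the loss objects plus a function
  # mapping loss-input tangents back to parameter tangents.
  losses, vjp_func = loss_tags_vjp(toy_loss, params_index=0)((params, batch))

  # Hessian-vector product w.r.t. the parameters (backward-over-forward mode),
  # returned together with the loss objects.
  hvp, losses = loss_tags_hvp(toy_loss, params_index=0)(
      (params, batch), tangents)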
kfac-jax-main
kfac_jax/_src/tracer.py
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """K-FAC functionality for auto-detecting layer tags and graph matching.""" import dataclasses import functools import itertools import pprint from typing import Any, Callable, Mapping, Optional, Sequence, TypeVar, Tuple, Union, Dict, Set from absl import logging import immutabledict import jax import jax.numpy as jnp from kfac_jax._src import layers_and_loss_tags as tags from kfac_jax._src import utils import numpy as np HIGHER_ORDER_NAMES = ("cond", "while", "scan", "xla_call", "xla_pmap") # Types for annotation Array = utils.Array PyTreeDef = utils.PyTreeDef Var = jax.core.Var Vars = Sequence[Var] Jaxpr = jax.core.Jaxpr ClosedJaxpr = jax.core.ClosedJaxpr JaxprEqn = jax.core.JaxprEqn JaxprEqns = Sequence[JaxprEqn] T = TypeVar("T") J = TypeVar("J", Jaxpr, ClosedJaxpr) JaxprOrClosedJaxpr = Union[Jaxpr, ClosedJaxpr] EquivalenceFunction = Callable[[JaxprEqn, JaxprEqn], bool] MakeVarFunc = Callable[[jax.core.AbstractValue], Var] VarProcessor = Callable[[Vars, MakeVarFunc], Tuple[Vars, JaxprEqns]] PatternComputeFunc = Callable[[Array, Sequence[Array]], Array] ParameterExtractorFunc = Callable[[JaxprEqns], Mapping[str, Any]] TagCtor = Callable[[Vars, Vars, JaxprEqns, MakeVarFunc], JaxprEqn] def eval_jaxpr_eqn(eqn: JaxprEqn, in_values: Vars) -> Var: """Computes the outputs of the given Jaxpr equation.""" subfuns, bind_params = eqn.primitive.get_bind_params(eqn.params) with jax.core.source_info_util.user_context( eqn.source_info.traceback): return eqn.primitive.bind(*subfuns, *in_values, **bind_params) def reshape_equivalent( equation1: JaxprEqn, equation2: JaxprEqn, ) -> bool: """Equivalence rule for :func:`~jax.numpy.reshape` primitives.""" if not (equation1.primitive.name == "reshape" and equation2.primitive.name == "reshape"): raise ValueError("This is only applicable to `reshape` primitive.") return equation1.params["dimensions"] == equation2.params["dimensions"] def broadcast_in_dim_equivalent( equation1: JaxprEqn, equation2: JaxprEqn, ) -> bool: """Equivalence rule for :func:`~jax.numpy.broadcast` primitives.""" if not (equation1.primitive.name == "broadcast_in_dim" and equation2.primitive.name == "broadcast_in_dim"): raise ValueError("This is only applicable to `broadcast_in_dim` primitive.") return True def conv_general_dilated_equivalent( equation1: JaxprEqn, equation2: JaxprEqn, ) -> bool: """Equivalence rule for :func:`~jax.lax.conv_general_dilated` primitives.""" if not (equation1.primitive.name == "conv_general_dilated" and equation2.primitive.name == "conv_general_dilated"): raise ValueError("This is only applicable to `conv_general_dilated` " "primitive.") params1 = equation1.params params2 = equation2.params for k in ("window_strides", "padding", "lhs_dilation", "rhs_dilation"): if len(params1[k]) != len(params2[k]): return False if (len(params1["dimension_numbers"].lhs_spec) != len(params2["dimension_numbers"].lhs_spec)): return False if 
(len(params1["dimension_numbers"].rhs_spec) != len(params2["dimension_numbers"].rhs_spec)): return False if (len(params1["dimension_numbers"].out_spec) != len(params2["dimension_numbers"].out_spec)): return False if ((params1["feature_group_count"] > 1) != (params2["feature_group_count"] > 1)): return False if ((params1["batch_group_count"] > 1) != (params2["batch_group_count"] > 1)): return False return True def dot_general_equivalent( equation1: JaxprEqn, equation2: JaxprEqn, ) -> bool: if not (equation1.primitive.name == "dot_general" and equation2.primitive.name == "dot_general"): raise ValueError("This is only applicable to `conv_general_dilated` " "primitive.") # We ignore precision and preferred_element_type return (equation1.params["dimension_numbers"] == equation2.params["dimension_numbers"]) DEFAULT_SPECIAL_EQUIVALENCE_RULES = immutabledict.immutabledict({ "reshape": reshape_equivalent, "broadcast_in_dim": broadcast_in_dim_equivalent, "conv_general_dilated": conv_general_dilated_equivalent, "dot_general": dot_general_equivalent, }) class GraphMatcherComparator: """A class to compare and determine equivalence of abstract Jax equations.""" def __init__( self, commutative_ops_names: Sequence[str] = ("add", "mul"), special_eqn_equivalence_rules: Mapping[str, EquivalenceFunction] = DEFAULT_SPECIAL_EQUIVALENCE_RULES, ): """Initializes the instance. Args: commutative_ops_names: A sequence of all Jax primitive names, which are consider commutative ops and the order of their arguments is irrelevant. special_eqn_equivalence_rules: A mapping of a Jax primitive names to a comparison rule, which to be used instead of the default comparator, which looks that the whole dictionaries of extra parameters to the primitives match. """ self._commutative_ops_names = set(commutative_ops_names) self._special_eqn_equivalence_rules = dict(**special_eqn_equivalence_rules) @property def commutative_ops_names(self) -> Set[str]: """The set of commutative ops.""" return self._commutative_ops_names @property def special_eqn_equivalence_rules(self) -> Mapping[str, EquivalenceFunction]: """The special equivalence rules.""" return self._special_eqn_equivalence_rules def add_commutative_op_name(self, name: str): """Adds a name to the set of primitive ops considered to be commutative.""" if name in self.commutative_ops_names: raise ValueError(f"Commutative op {name!r} has already been added.") self._commutative_ops_names.add(name) def add_special_equivalence_rule( self, name: str, equivalence_rule: EquivalenceFunction, ): """Adds the special equivalence rule for ``name`` to the global store.""" if name in self.special_eqn_equivalence_rules: raise ValueError( f"Special equation equivalence rule already exists for name: {name}") self._special_eqn_equivalence_rules[name] = equivalence_rule def are_equivalent( self, equation1: JaxprEqn, equation2: JaxprEqn, ) -> bool: """Returns whether the two equations are considered equivalent.""" if equation1.primitive.name != equation2.primitive.name: return False equivalence_rule = self.special_eqn_equivalence_rules.get( equation1.primitive.name) if equivalence_rule is not None: return equivalence_rule(equation1, equation2) # Default comparison return equation1.params == equation2.params @dataclasses.dataclass(frozen=True) class JaxprGraph: """A wrapper around Jaxpr as a graph for pattern matching. Attributes: name: The name for this Jaxpr graph. closed_jaxpr: The original `ClosedJaxpr` that is being wrapped. params_tree: The PyTreeDef of the parameter variables. 
params_vars: A flat list of all the abstract parameter variables. out_tree: The PyTreeDef of the outputs of the function. tag_ctor: This is an optional attribute, that defines if this is used during automatic layer tag registration, how to construct the corresponding layer tag primitive from the subgraph matching this pattern. losses_eqns: A tuple of all the Jaxpr equations corresponding to a loss tag. var_to_creation_op: A mapping of variables to the Jax equation that created it. manual_registrations: Any layer tag equations that have been manually registered. jaxpr: The underlying :class:`jax.core.Jaxpr` part of ``self.closed_jaxpr``. consts: The underlying constants part ``self.closed_jaxpr``. outvars: The output variables of the underlying :class:`jax.core.Jaxpr` part of ``self.closed_jaxpr``. """ name: str closed_jaxpr: ClosedJaxpr params_tree: PyTreeDef params_vars: Vars out_tree: PyTreeDef tag_ctor: Optional[TagCtor] # Until we stop supporting Python 3.7 we can't use @functools.cached_property, # so we set these attributes in __post_init__ losses_eqns: Tuple[tags.LossTagEqn, ...] = () var_to_creation_op: immutabledict.immutabledict = None # pytype:disable=annotation-type-mismatch manual_registrations: Tuple[tags.LayerTagEqn, ...] = () def __post_init__(self): losses_eqns = tuple( eqn for eqn in self.closed_jaxpr.jaxpr.eqns if isinstance(eqn.primitive, tags.LossTag) ) var_to_creation_op = immutabledict.immutabledict( sum(([(var, eqn) for var in eqn.outvars] for eqn in self.jaxpr.eqns), []) ) registered_tags = [] for eqn in self.jaxpr.eqns: if isinstance(eqn.primitive, tags.LayerTag): for param in eqn.primitive.split_all_inputs(eqn.invars)[2]: if param not in self.params_vars: raise ValueError(f"One of the parameters of the manual layer " f"registration equation: {eqn} is not part of the " f"parameters of the global function.") registered_tags.append(eqn) manual_registrations = tuple(registered_tags) object.__setattr__(self, "losses_eqns", losses_eqns) object.__setattr__(self, "var_to_creation_op", var_to_creation_op) object.__setattr__(self, "manual_registrations", manual_registrations) @property def jaxpr(self) -> Jaxpr: return self.closed_jaxpr.jaxpr @property def consts(self) -> Sequence[Any]: return self.closed_jaxpr.consts @property def outvars(self) -> Vars: return self.jaxpr.outvars # pytype:disable=bad-return-type def sub_graph_eqns(self, root_vars: Vars, leaf_vars: Vars) -> JaxprEqns: """Returns the sub-graph equations between root vars and leaf vars.""" eqns = [] # Extract the subgraph equations such that they both depend on root_vars and # leaf_vars depends on them to_process_eqns = [self.var_to_creation_op[v] for v in leaf_vars] processed_vars = set() while to_process_eqns: next_eqn = to_process_eqns.pop() eqns.append(next_eqn) for v in next_eqn.invars: if (not isinstance(v, jax.core.Literal) and v not in root_vars and v not in processed_vars and v in self.var_to_creation_op): to_process_eqns.append(self.var_to_creation_op[v]) processed_vars.add(v) return tuple(eqns) # # @functools.cached_property # def losses_eqns(self) -> Tuple[tags.LossTagEqn, ...]: # return tuple( # eqn for eqn in self.closed_jaxpr.jaxpr.eqns # if isinstance(eqn.primitive, tags.LossTag) # ) # # @functools.cached_property # def var_to_creation_op(self) -> immutabledict.immutabledict: # return immutabledict.immutabledict( # sum(([(var, eqn) for var in eqn.outvars] # for eqn in self.jaxpr.eqns), [])) # # @functools.cached_property # def manual_registrations(self) -> Tuple[tags.LayerTagEqn, ...]: # 
"""Returns all manually registered tags.""" # registered_tags = [] # for eqn in self.jaxpr.eqns: # if isinstance(eqn.primitive, tags.LayerTag): # for param in eqn.primitive.split_all_inputs(eqn.invars)[2]: # if param not in self.params_vars: # raise ValueError("One of the parameters of the manual layer " # f"registration equation: {eqn} is not part of " # "the parameters of the global function.") # registered_tags.append(eqn) # return tuple(registered_tags) def make_jax_graph( func: utils.Func, func_args: utils.FuncArgs, params_index: Union[int, Sequence[int]], name: str, compute_only_loss_tags: bool, clean_broadcasts: bool, tag_ctor: Optional[TagCtor] = None, ) -> JaxprGraph: """Creates a :class:`~JaxGraph` instance from the provided function and arguments.""" in_tree = jax.tree_util.tree_structure(func_args) closed_jaxpr, out_shapes = jax.make_jaxpr(func, return_shape=True)(*func_args) if compute_only_loss_tags: make_var_func = jax.core.gensym([closed_jaxpr.jaxpr]) eqns = [] sub_graph_vars = set() loss_tags_output_vars = [] for eqn in reversed(closed_jaxpr.jaxpr.eqns): if (isinstance(eqn.primitive, tags.LossTag) or any(v in sub_graph_vars for v in eqn.outvars)): if isinstance(eqn.primitive, tags.LossTag): new_out_vars = [] for v in eqn.outvars: if isinstance(v, jax.core.DropVar): new_out_vars.append(make_var_func(v.aval)) else: new_out_vars.append(v) loss_tags_output_vars.extend(new_out_vars[::-1]) eqns.append(eqn.replace(outvars=new_out_vars)) else: eqns.append(eqn) sub_graph_vars.update( v for v in eqn.invars if not isinstance(v, jax.core.Literal)) consts_i = [i for i, c in enumerate(closed_jaxpr.jaxpr.constvars) if c in sub_graph_vars] closed_jaxpr = ClosedJaxpr( jaxpr=closed_jaxpr.jaxpr.replace( eqns=eqns[::-1], constvars=[closed_jaxpr.jaxpr.constvars[i] for i in consts_i], outvars=loss_tags_output_vars[::-1], ), consts=[closed_jaxpr.consts[i] for i in consts_i], ) out_shapes = [jax.ShapeDtypeStruct(shape=v.aval.shape, dtype=v.aval.dtype) for v in closed_jaxpr.jaxpr.outvars] # pytype:disable=attribute-error if clean_broadcasts: closed_jaxpr: ClosedJaxpr = merge_broadcasts_jaxpr(closed_jaxpr) # pytype:disable=annotation-type-mismatch in_vars = jax.tree_util.tree_unflatten(in_tree, closed_jaxpr.jaxpr.invars) if isinstance(params_index, int): params_vars = in_vars[params_index] else: params_vars = tuple(in_vars[i] for i in params_index) params_vars, params_tree = jax.tree_util.tree_flatten(params_vars) return JaxprGraph( name=name, closed_jaxpr=closed_jaxpr, params_tree=params_tree, params_vars=params_vars, out_tree=jax.tree_util.tree_structure(out_shapes), tag_ctor=tag_ctor ) @dataclasses.dataclass(frozen=True) class GraphPattern: """A graph pattern used for automatically detecting layers. The graph matcher needs to trace at least once the full function, which means the caller needs to provide it with dummy arguments. The shapes of the arguments do not matter, as the graph matcher ignores their values, however the rank does. Especially if there is some broadcasting happening you should register with every possible broadcast pattern. As a general advice avoid using a shape to be 1, unless you want the pattern to specifically match that, as some operations, like squeeze for example, can have special behaviour then. Attributes: name: The name of the pattern that is being registered to. tag_primitive: The primitive tag to bind. compute_func: The function that performs the computation. 
parameters_extractor_func: A function that extracts from the traced Jaxpr any parameters that are passed into the tag. example_args: Example arguments that can be inputted into ``func``. in_values_preprocessor: A function that can optionally modify the in_vals passed to the tag_primitive, from those that are usually the input to the jaxpr. jaxpr: The underlying :class:`jax.core.Jaxpr` represented by the pattern. param_vars: The list of :class:`jax.core.Var` that correspond to parameters in the pattern. graph: A :class:`JaxprGraph` representation of the pattern. """ name: str tag_primitive: tags.LayerTag compute_func: PatternComputeFunc parameters_extractor_func: ParameterExtractorFunc example_args: utils.FuncArgs in_values_preprocessor: Optional[VarProcessor] = None # Until we stop supporting Python 3.7 we can't use @functools.cached_property, # so we set this attribute in the property _graph: Optional[JaxprGraph] = None @property def jaxpr(self) -> Jaxpr: return self.graph.jaxpr @property def param_vars(self) -> Vars: return self.graph.params_vars @property def graph(self) -> JaxprGraph: """A :class:`JaxprGraph` representation of the pattern.""" if self._graph is None: jnp_args = jax.tree_util.tree_map(jnp.asarray, self.example_args) graph = make_jax_graph( func=self.compute_func, func_args=jnp_args, params_index=1, name=self.name, compute_only_loss_tags=False, clean_broadcasts=True, ) object.__setattr__(self, "_graph", graph) assert self._graph is not None return self._graph def tag_ctor( self, in_vars: Vars, out_vars: Vars, graph_eqns: JaxprEqns, make_var_func: MakeVarFunc, ) -> JaxprEqns: """Registers the layer tag for this graph pattern. Args: in_vars: The input variables to the pattern. out_vars: The output variables to the pattern. graph_eqns: The real graph equations corresponding to the pattern. make_var_func: A function to create correctly new variables. Returns: A sequence of any additional equations that are created from creating the tag. """ assert len(out_vars) == 1 if self.in_values_preprocessor is not None: in_vars, eqns = self.in_values_preprocessor(in_vars, make_var_func) else: eqns = [] new_out_vars = [make_var_func(v.aval) for v in out_vars] tag_eqn = jax.core.new_jaxpr_eqn( invars=[*out_vars, *in_vars], outvars=new_out_vars, primitive=self.tag_primitive, params=self.parameters_extractor_func(graph_eqns), effects=set(), ) return [*eqns, tag_eqn] @dataclasses.dataclass(frozen=True) class GraphMatch: """Represents a match of the pattern on some graph. Attributes: pattern: The pattern that has been matched. variables_map: Mapping of variables from the pattern to the original graph, on which it has been matched. graph_eqns: All the equations in the original graph, that correspond to computation of the pattern. output_var: The variable in the original graph, that correspond to the output variable of the pattern. param_graph_variables: All variables in the original graph, that correspond to parameters of the pattern. name: The name of the pattern that has been matched. """ pattern: GraphPattern variables_map: Mapping[Var, Var] graph_eqns: JaxprEqns # Until we stop supporting Python 3.7 we can't use @functools.cached_property, # so we set these attributes in __post_init__ output_var: Var = None # pytype:disable=annotation-type-mismatch param_graph_variables: Vars = () def __post_init__(self): # Until we stop supporting Python 3.7 we can't use # @functools.cached_property, so we set here additional attributes. 
output_var = self.variables_map[self.pattern.jaxpr.outvars[0]] param_graph_variables = [self.variables_map[p] for p in self.pattern.graph.params_vars] object.__setattr__(self, "output_var", output_var) object.__setattr__(self, "param_graph_variables", param_graph_variables) @property def name(self) -> str: return self.pattern.name # # @functools.cached_property # def output_var(self) -> Var: # return self._variables_map[self.pattern.jaxpr.outvars[0]] # # @functools.cached_property # def param_graph_variables(self) -> Vars: # return [self._variables_map[p] for p in self.pattern.graph.params_vars] def create_eqn( self, env: Dict[Var, Var], make_var_func: MakeVarFunc, ) -> JaxprEqns: """Creates a new ``JaxprEqn`` for the this match.""" in_vars = [self.variables_map[k] for k in self.pattern.graph.jaxpr.invars] in_vars = [env.get(v, v) for v in in_vars] out_vars = [self.variables_map[k] for k in self.pattern.graph.jaxpr.outvars] out_vars = [env.get(v, v) for v in out_vars] eqns = self.pattern.tag_ctor( in_vars, out_vars, self.graph_eqns, make_var_func) assert len(out_vars) == len(eqns[-1].outvars) # Reinsert the output in the environment for k, v in zip(out_vars, eqns[-1].outvars): env[k] = v return eqns def match_equations( graph: JaxprGraph, current_variables_map: Mapping[Var, Var], reversed_eqns_to_match: Sequence[JaxprEqn], input_vars: Vars, param_variables: Vars, graph_matcher_rules: GraphMatcherComparator, ) -> Optional[Dict[Var, Var]]: """Tries to continue matching the remaining equations to the Jaxpr graph. Args: graph: The :class:`~JaxprGraph` on which we are searching for matching equations. current_variables_map: A mapping from a pattern variables to graph variables, which describes what is the current partial mapping between the pattern and the graph. reversed_eqns_to_match: The remaining equations of the pattern that have not yet been matched to the graph. input_vars: The input variables of the pattern. param_variables: The parameter variables of the pattern. graph_matcher_rules: A :class:`~GraphMatcherRules` instance, which is used for determining equivalence of individual Jax primitives. Returns: ``None`` if it is not possible to finish matching the remaining equations in the graph. Otherwise, returns the full match of the pattern onto the graph, in terms of a variable to variable mapping. """ # Copy the variables mapping current_variables_map = dict(current_variables_map) def add_vars_if_possible( eqn_vars: Sequence[Var], graph_vars: Sequence[Var] ) -> bool: """Tries to update the current variables map. If at least one of the pattern variables is a parameter, but the corresponding graph variable is not or vise-versa, the method does not update the current variables map and returns ``False``. Similarly, if at least one of the graph variables is a :class:`~jax.core.Literal` (meaning a constant, independent of the function inputs) and the corresponding pattern variable is not an input to the pattern, it returns ``False``. In all other cases it updates the map and returns ``True``. Args: eqn_vars: The variables from a single equation of the pattern. graph_vars: The variables from a corresponding equation of the graph. Returns: A boolean describing whether the method succeeded to update the current variables map. 
""" for var1, var2 in zip(eqn_vars, graph_vars): if (var1 in param_variables and var2 not in graph.params_vars or var1 not in param_variables and var2 in graph.params_vars or (isinstance(var2, jax.core.Literal) and var1 not in input_vars)): return False current_variables_map.update(zip(eqn_vars, graph_vars)) return True # Loop over all remaining equations to match for i, eqn in enumerate(reversed_eqns_to_match): assert all(v in current_variables_map for v in eqn.outvars) # Retrieve the graph equation, whose output currently corresponds to the # first output variable of the pattern equation. first_output_var = current_variables_map[eqn.outvars[0]] graph_eqn = graph.var_to_creation_op.get(first_output_var) if graph_eqn is None: assert first_output_var in (graph.jaxpr.invars + graph.jaxpr.constvars) # Clearly the pattern equation is not an input or parameter return None assert isinstance(graph_eqn, JaxprEqn) # For equations with more than one output, make sure all output variables # in the graph are generated from the same graph equation. for v in eqn.outvars[1:]: if graph_eqn != graph.var_to_creation_op.get(current_variables_map[v]): return None # Check that the graph and pattern equation are equivalent if not graph_matcher_rules.are_equivalent(graph_eqn, eqn): return None # Sanity check assert len(eqn.invars) == len(graph_eqn.invars) if eqn.primitive.name in graph_matcher_rules.commutative_ops_names: # For commutative ops we search through all possible pair alignments. # This requires a recursive solution, on top of the iterative one. results = [] for permutation in itertools.permutations(range(len(eqn.invars))): pattern_vars = [eqn.invars[j] for j in permutation] # Check if this ordering is feasible if not add_vars_if_possible(pattern_vars, graph_eqn.invars): continue # Recursively continue by trying to match the remaining equations. candidate_map = match_equations( graph=graph, current_variables_map=current_variables_map, reversed_eqns_to_match=reversed_eqns_to_match[i + 1:], input_vars=input_vars, param_variables=param_variables, graph_matcher_rules=graph_matcher_rules, ) if candidate_map is not None: # Sanity check assert all(candidate_map[p] in graph.params_vars for p in param_variables) results.append(candidate_map) # Return appropriately if len(results) > 1: raise ValueError("Found multiple branch matches in pattern at " f"associative op {eqn.primitive.name}.") elif len(results) == 1: return results[0] else: return None elif not add_vars_if_possible(eqn.invars, graph_eqn.invars): # In the case where we can't update the current variables map directly # return return None return current_variables_map def match_pattern( graph: JaxprGraph, root_eqn: JaxprEqn, pattern: GraphPattern, graph_matcher_rules: GraphMatcherComparator, ) -> Optional[GraphMatch]: """Tries to match the ``pattern`` in the Jaxpr graph from the ``root_eqn``. Args: graph: The :class:`~JaxprGraph` on which we are searching for matching equations. root_eqn: The equation in the graph, which is assumed to match the output equation of the pattern. pattern: The pattern, which we are trying to match. graph_matcher_rules: A :class:`~GraphMatcherRules` instance, which is used for determining equivalence of individual Jax primitives. Returns: The variable to variable mapping between the pattern and graph variable, if the pattern can be matched to the root equation, otherwise ``None``. """ # Check the number of output variables match. 
if len(pattern.jaxpr.outvars) != len(root_eqn.outvars): return None # Set the current variables mapping to the output variables and the try to # check the match from there. match_variables_map = match_equations( graph=graph, current_variables_map=dict(zip(pattern.jaxpr.outvars, root_eqn.outvars)), reversed_eqns_to_match=tuple(reversed(pattern.jaxpr.eqns)), input_vars=pattern.jaxpr.invars, param_variables=pattern.param_vars, graph_matcher_rules=graph_matcher_rules, ) if match_variables_map is None: return None # Extract all the graph equations corresponding to the pattern. graph_eqns = [] for k, v in match_variables_map.items(): if (k not in pattern.graph.jaxpr.invars and not isinstance(v, jax.core.Literal)): creation_op = graph.var_to_creation_op[v] assert isinstance(creation_op, JaxprEqn) graph_eqns.append(creation_op) return GraphMatch( pattern=pattern, variables_map=match_variables_map, graph_eqns=graph_eqns, ) def find_layer_tags_and_patterns( graph: JaxprGraph, eqns_for_patterns: Sequence[JaxprEqn], graph_matcher_rules: GraphMatcherComparator, graph_patterns: Sequence[GraphPattern], ) -> Tuple[Tuple[tags.LayerTagEqn, ...], Dict[Var, GraphMatch]]: """Tries to automatically match ``patterns_to_match`` in the Jaxpr graph. The method returns a pair of ``(manual_registrations, matches)``, where ``manual_registrations`` is a tuple of all layer tags that are already present in the graph and ``matches`` contains all newly discovered matches of any pattern. Each entry has as a key the variable of the graph corresponding to the output of the pattern, while each value is a triple ``(pattern, match_map, eqns)`` where ``pattern`` is the :class:`~JaxprGraph` of the pattern that has been matched, ``match_map`` is mapping the pattern variables to the corresponding graph variables and ``eqns`` is the sequence of all graph equations corresponding to the pattern equations. Args: graph: The :class:`~JaxprGraph` on which we are searching for matching equations. eqns_for_patterns: All equation that should be considered for finding a pattern. graph_matcher_rules: A :class:`~GraphMatcherRules` instance, which is used for determining equivalence of individual Jax primitives. graph_patterns: A sequence of :class:`~GraphPattern` objects, which contain all patterns to use, in order of precedence, which to try to find in the graph before registering a parameter with a generic layer tag. Returns: The pair ``(manual_registrations, matches)``. """ # This list keeps track to any equations that are already in a pattern and # hence should not be part of any other. registered_equations = [] # First add any manual registrations to this. 
for eqn in graph.manual_registrations: assert isinstance(eqn.primitive, tags.LayerTag) outputs, inputs, params = eqn.primitive.split_all_inputs(eqn.invars) for manual_eqn in graph.sub_graph_eqns(inputs + params, outputs): registered_equations.append(manual_eqn) matches = {} # Loop through all equations in reverse and for each one check every pattern for eqn in reversed(eqns_for_patterns): if eqn in registered_equations or eqn.primitive.name in HIGHER_ORDER_NAMES: continue for pattern in graph_patterns: match = match_pattern( graph=graph, root_eqn=eqn, pattern=pattern, graph_matcher_rules=graph_matcher_rules, ) if match is not None: # Add all the match equations to the registered equations registered_equations.extend(match.graph_eqns) # Add the match to the mapping of graph matches matches[match.output_var] = match break return graph.manual_registrations, matches def read_env( env: Mapping[Var, Array], var: Union[jax.core.Literal, Vars], ) -> Union[float, Array, Sequence[Array]]: """Reads from the variable-to-array environment during tracing.""" if isinstance(var, (list, tuple)): return jax.tree_util.tree_map(lambda x: read_env(env, x), var) elif isinstance(var, jax.core.Literal): # Literals are values baked into the Jaxpr return var.val elif isinstance(var, Var): return env[var] else: raise NotImplementedError() def write_env( env: Dict[Var, Array], var: Union[Var, Vars], val: Union[Array, Sequence[Array]], ): """Writes to the variable-to-array environment during tracing.""" if isinstance(var, tuple): raise NotImplementedError() if isinstance(var, list): if not isinstance(val, list): val = [val] return jax.tree_util.tree_map(lambda x, y: write_env(env, x, y), var, val) elif isinstance(var, (jax.core.Literal, Var)): env[var] = val # pytype: disable=container-type-mismatch # numpy-scalars else: raise NotImplementedError() def to_closed_jaxpr(jaxpr: JaxprOrClosedJaxpr) -> ClosedJaxpr: if isinstance(jaxpr, Jaxpr): return ClosedJaxpr(jaxpr=jaxpr, consts=[]) else: return jaxpr def to_jaxpr_or_closed_jaxpr( closed_jaxpr: ClosedJaxpr, original: J, ) -> J: if isinstance(original, Jaxpr): return closed_jaxpr.jaxpr else: return closed_jaxpr def apply_to_higher_order_primitives(eqn, func, *args, **kwargs): """Applies `func` only to higher order Jax primitives.""" if eqn.primitive.name not in HIGHER_ORDER_NAMES: return eqn elif eqn.primitive.name == "cond": params = dict(**eqn.params) params["branches"] = tuple( func(branch, *args, **kwargs) for branch in params["branches"] ) return eqn.replace(params=params) elif eqn.primitive.name == "while": params = dict(**eqn.params) params["body_jaxpr"] = func(params["body_jaxpr"], *args, **kwargs) return eqn.replace(params=params) elif eqn.primitive.name == "scan": params = dict(**eqn.params) params["jaxpr"] = func(params["jaxpr"], *args, **kwargs) return eqn.replace(params=params) elif eqn.primitive.name in ("xla_call", "xla_pmap"): params = dict(**eqn.params) params["call_jaxpr"] = func(params["call_jaxpr"], *args, **kwargs) return eqn.replace(params=params) else: raise NotImplementedError() def clean_jaxpr(jaxpr: J, preserve_tags: bool = True) -> J: """Runs dead code elimination on a Jaxpr, retaining loss and layer tags.""" closed_jaxpr = to_closed_jaxpr(jaxpr) eqns = [] dependants = set(closed_jaxpr.jaxpr.outvars) for eqn in reversed(closed_jaxpr.jaxpr.eqns): eqn = apply_to_higher_order_primitives( eqn, clean_jaxpr, preserve_tags=preserve_tags) check = False for v in eqn.outvars: if v in dependants: dependants.remove(v) check = True if 
isinstance(eqn.primitive, (tags.LossTag, tags.LayerTag)): check = check or preserve_tags if check: eqns.append(eqn) new_dependants = set(v for v in eqn.invars if not isinstance(v, jax.core.Literal)) dependants = dependants.union(new_dependants) # Dependants should only be invars dependants = dependants - set(closed_jaxpr.jaxpr.invars + closed_jaxpr.jaxpr.constvars) if dependants: raise ValueError("Something went wrong with the dead code elimination.") closed_jaxpr = ClosedJaxpr( jaxpr=closed_jaxpr.jaxpr.replace(eqns=list(reversed(eqns))), consts=closed_jaxpr.consts, ) return to_jaxpr_or_closed_jaxpr(closed_jaxpr, jaxpr) def merge_broadcasts_jaxpr(jaxpr: J) -> J: """Merges consecutive broadcasts in the given Jaxpr.""" closed_jaxpr = clean_jaxpr(to_closed_jaxpr(jaxpr)) broadcasts_outputs = {} eqns = list() for eqn in closed_jaxpr.jaxpr.eqns: eqn = apply_to_higher_order_primitives(eqn, merge_broadcasts_jaxpr) # We ignore broadcasting of constants if (eqn.primitive.name == "broadcast_in_dim" and not all(isinstance(v, jax.core.Literal) for v in eqn.invars)): if eqn.invars[0] in broadcasts_outputs: # Construct a merged equation from the previous and current one prev_eqn = broadcasts_outputs[eqn.invars[0]] broadcasts_outputs[eqn.outvars[0]] = prev_eqn.replace( params={ "shape": eqn.params["shape"], "broadcast_dimensions": tuple( eqn.params["broadcast_dimensions"][d] for d in prev_eqn.params["broadcast_dimensions"] ), }, outvars=eqn.outvars, ) else: broadcasts_outputs[eqn.outvars[0]] = eqn if eqn.outvars[0] in closed_jaxpr.jaxpr.outvars: # We must preserve output equations eqns.append(broadcasts_outputs[eqn.outvars[0]]) else: for v in eqn.invars: if not isinstance(v, jax.core.Literal) and v in broadcasts_outputs: eqns.append(broadcasts_outputs[v]) eqns.append(eqn) closed_jaxpr = ClosedJaxpr( jaxpr=closed_jaxpr.jaxpr.replace(eqns=eqns), consts=closed_jaxpr.consts ) return to_jaxpr_or_closed_jaxpr(closed_jaxpr, jaxpr) # _____ _ _ _ _ # | __ \ (_) | | | | (_) # | |__) |___ __ _ _ ___| |_ _ __ __ _| |_ _ ___ _ __ ___ # | _ // _ \/ _` | / __| __| '__/ _` | __| |/ _ \| '_ \/ __| # | | \ \ __/ (_| | \__ \ |_| | | (_| | |_| | (_) | | | \__ \ # |_| \_\___|\__, |_|___/\__|_| \__,_|\__|_|\___/|_| |_|___/ # __/ | # |___/ def _dense(x: Array, params: Sequence[Array]) -> Array: """Example of a dense layer function.""" w, *opt_b = params y = jnp.matmul(x, w) return y if not opt_b else y + opt_b[0] def _dense_with_reshape(x: Array, params: Sequence[Array],) -> Array: w, b = params y = jnp.matmul(x, w) return y + b.reshape([1, b.size]) def _dense_parameter_extractor( eqns: Sequence[JaxprEqn], ) -> Mapping[str, Any]: """Extracts all parameters from the `dot_general` operator.""" for eqn in eqns: if eqn.primitive.name == "dot_general": return dict(**eqn.params) assert False def _make_dense_pattern( with_bias: bool, reshape: bool, in_dim: int = 13, out_dim: int = 7, ) -> GraphPattern: x_shape = [2, in_dim] p_shapes = ([[in_dim, out_dim], [out_dim]] if with_bias else [[in_dim, out_dim]]) return GraphPattern( name="dense_with_bias" if with_bias else "dense_no_bias", tag_primitive=tags.dense, compute_func=_dense_with_reshape if reshape else _dense, parameters_extractor_func=_dense_parameter_extractor, example_args=[np.zeros(x_shape), [np.zeros(s) for s in p_shapes]], ) def _conv2d(x: Array, params: Sequence[Array]) -> Array: """Example of a conv2d layer function.""" w = params[0] y = jax.lax.conv_general_dilated( x, w, window_strides=(2, 2), padding="SAME", dimension_numbers=("NHWC", "HWIO", "NHWC")) if len(params) == 
1: # No bias return y # Add bias return y + params[1][None, None, None] def _conv2d_parameter_extractor( eqns: Sequence[JaxprEqn], ) -> Mapping[str, Any]: """Extracts all parameters from the `conv_general_dilated` operator.""" for eqn in eqns: if eqn.primitive.name == "conv_general_dilated": return dict(**eqn.params) assert False def _make_conv2d_pattern( with_bias: bool, ) -> GraphPattern: x_shape = [2, 8, 8, 5] p_shapes = ([[3, 3, 5, 4], [4]] if with_bias else [[3, 3, 5, 4]]) return GraphPattern( name="conv2d_with_bias" if with_bias else "conv2d_no_bias", tag_primitive=tags.conv2d, compute_func=_conv2d, parameters_extractor_func=_conv2d_parameter_extractor, example_args=[np.zeros(x_shape), [np.zeros(s) for s in p_shapes]], ) def _scale_and_shift( x: Array, params: Sequence[Array], has_scale: bool, has_shift: bool, ) -> Array: """Example of a scale and shift function.""" if has_scale and has_shift: scale, shift = params return x * scale + shift elif has_scale: [scale] = params return x * scale elif has_shift: [shift] = params return x + shift else: raise ValueError("You must have either `has_scale` or `has_shift` set " "to True.") def _make_scale_and_shift_pattern( broadcast_ndim: int, has_scale: bool, has_shift: bool, p_dim: int = 13, ) -> GraphPattern: """Creates a scale and shift graph pattern.""" assert broadcast_ndim >= 0 assert has_scale or has_shift x_shape = [i + 2 for i in range(broadcast_ndim)] + [p_dim] p_shapes = [[p_dim], [p_dim]] if (has_scale and has_shift) else [[p_dim]] if has_scale and has_shift: name = f"scale_and_shift_broadcast_{broadcast_ndim}" elif has_scale: name = f"scale_only_broadcast_{broadcast_ndim}" elif has_shift: name = f"shift_only_broadcast_{broadcast_ndim}" else: raise ValueError("Unreachable.") return GraphPattern( name=name, tag_primitive=tags.scale_and_shift, compute_func=functools.partial( _scale_and_shift, has_scale=has_scale, has_shift=has_shift), parameters_extractor_func= lambda jaxpr: dict(has_scale=has_scale, has_shift=has_shift), example_args=[np.zeros(x_shape), [np.zeros(s) for s in p_shapes]], ) def _normalization_haiku( inputs: Sequence[Array], params: Sequence[Array], has_scale: bool, has_shift: bool, ) -> Array: """Example of normalization as is defined in Haiku.""" if len(params) not in (1, 2): raise ValueError("The inputs to the `normalization_haiku` computation must " f"have either 1 or 2 parameters, but got {len(params)}.") [inputs, rsqrt_var] = inputs inv = params[0] * rsqrt_var if has_scale else rsqrt_var outputs = inputs * inv return outputs + params[-1] if has_shift else outputs def _normalization_haiku_preprocessor( in_vars: Vars, make_var_func: MakeVarFunc, ) -> Tuple[Vars, JaxprEqns]: """Preprocesses the inputs to a Haiku normalization layer. The standard ``scale_and_shift`` represents the following canonical computation: y = x * scale + shift Normalization performs a similar computation, where the `normalized_x` below represents the standard ``x`` input to ``scale_and_shift``: normalized_x = (x - m) / sqrt(var(x) + eps) y = normalized_x * scale + shift Each ``layer_tag`` represents a specific computation and hence it expects its inputs to be in canonical form. For ``scale_and_shift`` the input must be the array that gets multiplied by the ``scale`` before the ``shift`` addition as shown above. 
However, Haiku performs normalization slightly out of order: y = [(x - m) * scale] / sqrt(var(x) + eps) + shift As a result, in the Jax computation graph the canonical input (normalized_x) does not exist, because of the ordering of the multiplication and division. To remedy this we have to add this additional function, which to be able to compute from the variables in the Haiku normalization computation, the canonical input to ``scale_and_shift`` tag. Args: in_vars: The input variables to the pattern. make_var_func: A function to create correctly new variables. Returns: The canonical input to ``scale_and_shift`` pattern. """ [in_var, rsqrt_var, *param_vars] = in_vars # The equation below corresponds to the computation: # normalized_inputs = inputs * rsqrt_var normalized_inputs_var = make_var_func(in_var.aval) normalized_inputs_eqn = jax.core.new_jaxpr_eqn( invars=[in_var, rsqrt_var], outvars=[normalized_inputs_var], primitive=jax.lax.mul_p, params=dict(), effects=set(), ) return (normalized_inputs_var, *param_vars), [normalized_inputs_eqn] def _make_normalization_haiku_pattern( broadcast_ndim: int, p_dim: int = 13, ): assert broadcast_ndim >= 0 x_shape = [i + 2 for i in range(broadcast_ndim)] + [p_dim] return GraphPattern( name=f"normalization_haiku_broadcast_{broadcast_ndim}", tag_primitive=tags.scale_and_shift, compute_func=functools.partial(_normalization_haiku, has_scale=True, has_shift=True), parameters_extractor_func= lambda jaxpr: dict(has_scale=True, has_shift=True), example_args=[[np.zeros(x_shape), np.zeros(x_shape)], [np.zeros([p_dim]), np.zeros([p_dim])]], in_values_preprocessor=_normalization_haiku_preprocessor ) DEFAULT_GRAPH_PATTERNS = ( _make_dense_pattern(True, False), _make_dense_pattern(True, True), _make_dense_pattern(False, False), _make_conv2d_pattern(True), _make_conv2d_pattern(False), _make_scale_and_shift_pattern(1, True, True), _make_scale_and_shift_pattern(0, True, True), _make_normalization_haiku_pattern(1), _make_normalization_haiku_pattern(0), _make_scale_and_shift_pattern(1, True, False), _make_scale_and_shift_pattern(0, True, False), _make_scale_and_shift_pattern(1, False, True), _make_scale_and_shift_pattern(0, False, True), ) class TagLocation: """Represents a tag location inside a function graph.""" def __init__( self, tag_eqn: JaxprEqn, base_name: str, parent_equations: Sequence[Tuple[JaxprEqn, int]] = (), ): # assert isinstance(tag_eqn.primitive, tags.LayerTag) self.tag_eqn = tag_eqn self.base_name = base_name self.parent_equations = list(parent_equations) @property def full_name(self) -> str: """The full name of the tag location.""" prefix = "" param_vars = self.bottom_level_parameters for eqn, n in reversed(self.parent_equations): assert eqn.primitive.name in HIGHER_ORDER_NAMES # Prefix for this higher order primitive prefix = prefix + f"{eqn.primitive.name}_{n}/" if eqn.primitive.name == "cond": raise NotImplementedError() elif eqn.primitive.name == "scan": p_indexes = [eqn.params["jaxpr"].jaxpr.invars.index(p) for p in param_vars] checks = [pi < eqn.params["num_consts"] for pi in p_indexes] if not (all(checks) or all(not ci for ci in checks)): raise ValueError("Parameters inside scan of the same tag are not both" " carry or const.") if all(checks): prefix = prefix + "const/" else: prefix = prefix + "carry/" elif eqn.primitive.name == "while": p_indexes = [eqn.params["body_jaxpr"].jaxpr.invars.index(p) for p in param_vars] elif eqn.primitive.name in ("xla_call", "xla_pmap"): p_indexes = [eqn.params["call_jaxpr"].invars.index(p) for p in param_vars] 
else: raise NotImplementedError() param_vars = [eqn.invars[pi] for pi in p_indexes] return prefix + self.base_name @property def bottom_level_parameters(self) -> Vars: """The bottom level variables of the tag location.""" return self.tag_eqn.primitive.split_all_inputs(self.tag_eqn.invars)[2] # pytype:disable=attribute-error @property def top_level_parameters(self) -> Vars: """The top level parameter variables of the tag location.""" param_vars = self.bottom_level_parameters for eqn, _ in reversed(self.parent_equations): assert eqn.primitive.name in HIGHER_ORDER_NAMES if eqn.primitive.name == "cond": raise NotImplementedError() elif eqn.primitive.name == "scan": invars = eqn.params["jaxpr"].jaxpr.invars elif eqn.primitive.name == "while": invars = eqn.params["body_jaxpr"].jaxpr.invars elif eqn.primitive.name in ("xla_call", "xla_pmap"): invars = eqn.params["call_jaxpr"].invars else: raise NotImplementedError() p_indexes = [invars.index(p) for p in param_vars] param_vars = [eqn.invars[pi] for pi in p_indexes] return param_vars def add_parent_eqn(self, eqn: JaxprEqn, counter: int): assert eqn.primitive.name in HIGHER_ORDER_NAMES self.parent_equations.append((eqn, counter)) class TaggedFunction: """Represents a function that has been processed and auto tagged.""" def __init__( self, func_graph: JaxprGraph, tag_locations: Sequence[TagLocation], ): self._func_graph = func_graph self._tag_locations = tag_locations self._flat_func = jax.core.jaxpr_as_fun(func_graph.closed_jaxpr) self._param_labels = self._compute_parameter_labels() def __call__(self, *args, **kwargs): flat_args = jax.tree_util.tree_leaves(args) flat_output = self._flat_func(*flat_args) return jax.tree_util.tree_unflatten(self._func_graph.out_tree, flat_output) def _compute_parameter_labels(self) -> Mapping[Var, Sequence[str]]: # Collect all registration for every tagged parameter tagged_params = {} for tag_l in self._tag_locations: for p in tag_l.top_level_parameters: assert p in self._func_graph.params_vars if p not in tagged_params: tagged_params[p] = [] tagged_params[p].append(tag_l.full_name) return tagged_params def print_parameter_tags(self): """Prints all the parameter registrations.""" # Print all tag parameter registrations labels = ["|".join(self._param_labels.get(p, ["Orphan"])) for p in self._func_graph.params_vars] logging.info("=" * 50) logging.info("Graph parameter registrations:") logging.info(pprint.pformat(jax.tree_util.tree_unflatten( self._func_graph.params_tree, labels))) logging.info("=" * 50) def check_multiple_registrations(self): for p in self._func_graph.params_vars: if len(self._param_labels[p]) > 1: raise ValueError(f"Parameter {p} has been registered to multiple tags: " f"{self._param_labels[p]}.") def _auto_register_tags( graph: JaxprGraph, graph_matcher_rules: GraphMatcherComparator, graph_patterns: Sequence[GraphPattern], register_orphans: bool, register_only_until_losses: bool, ) -> Tuple[JaxprGraph, Sequence[TagLocation]]: """Internal function for automatic registration of layer tags.""" higher_counters = { "cond": 0, "while": 0, "scan": 0, "xla_call": 0, "xla_pmap": 0, } # Extract the sub-graph that leads to losses if register_only_until_losses: eqns_for_registration = [] sub_graph_vars = set() for eqn in reversed(graph.jaxpr.eqns): if (eqn in graph.losses_eqns or any(v in sub_graph_vars for v in eqn.outvars)): eqns_for_registration.append(eqn) sub_graph_vars.update( v for v in eqn.invars if not isinstance(v, jax.core.Literal)) eqns_for_registration = eqns_for_registration[::-1] else: 
eqns_for_registration = graph.jaxpr.eqns # Process all higher order primitives eqns = [] tag_locations = [] for eqn in graph.jaxpr.eqns: if (eqn not in eqns_for_registration or eqn.primitive.name not in HIGHER_ORDER_NAMES): eqns.append(eqn) continue eqn_name = eqn.primitive.name if eqn_name == "cond": sub_jaxprs = eqn.params["branches"] elif eqn_name == "while": sub_jaxprs = [eqn.params["body_jaxpr"]] elif eqn_name == "scan": sub_jaxprs = [eqn.params["jaxpr"]] elif eqn_name in ("xla_call", "xla_pmap"): sub_jaxprs = [eqn.params["call_jaxpr"]] else: raise NotImplementedError() final_jaxprs = [] final_tag_locations = [] for original_jaxpr in sub_jaxprs: sub_jaxpr = to_closed_jaxpr(original_jaxpr) params_vars = [] for out_v, in_v in zip(eqn.invars, sub_jaxpr.jaxpr.invars): if out_v in graph.params_vars: params_vars.append(in_v) sub_graph, sub_tag_locations = _auto_register_tags( graph=JaxprGraph( name=graph.name + f"_{eqn_name}", closed_jaxpr=sub_jaxpr, params_tree=jax.tree_util.tree_structure(params_vars), params_vars=params_vars, out_tree=jax.tree_util.tree_structure(sub_jaxpr.jaxpr.outvars), tag_ctor=None, ), graph_matcher_rules=graph_matcher_rules, graph_patterns=graph_patterns, register_orphans=False, register_only_until_losses=False, ) final_jaxprs.append( to_jaxpr_or_closed_jaxpr(sub_graph.closed_jaxpr, original_jaxpr)) final_tag_locations.append(sub_tag_locations) if eqn_name == "cond": # TODO(botev): We need to check each branch has identical registrations raise NotImplementedError() else: # Extract the sub jaxpr parameter tag registrations and input vars [sub_tag_locations] = final_tag_locations # pylint:disable=unbalanced-tuple-unpacking # Update the jaxpr parameter in the equation eqn_params = dict(**eqn.params) if eqn_name == "cond": eqn_params["branches"] = final_jaxprs elif eqn_name == "while": [eqn_params["body_jaxpr"]] = final_jaxprs # pylint:disable=unbalanced-tuple-unpacking elif eqn_name == "scan": [eqn_params["jaxpr"]] = final_jaxprs # pylint:disable=unbalanced-tuple-unpacking elif eqn_name in ("xla_call", "xla_pmap"): [eqn_params["call_jaxpr"]] = final_jaxprs # pylint:disable=unbalanced-tuple-unpacking else: raise NotImplementedError() eqns.append(eqn.replace(params=eqn_params)) del sub_graph, final_jaxprs, final_tag_locations # Insert the sub-registrations into the tagged_params for tag_l in sub_tag_locations: tag_l.add_parent_eqn(eqns[-1], higher_counters[eqn_name]) higher_counters[eqn_name] = higher_counters[eqn_name] + 1 tag_locations.append(tag_l) # Make a new graph with the replaced higher order equations mid_graph = JaxprGraph( name=graph.name, closed_jaxpr=ClosedJaxpr( jaxpr=graph.jaxpr.replace(eqns=eqns), consts=graph.consts, ), params_tree=graph.params_tree, params_vars=graph.params_vars, out_tree=graph.out_tree, tag_ctor=None, ) del graph # Find matches manual_registrations, matches = find_layer_tags_and_patterns( graph=mid_graph, eqns_for_patterns=eqns_for_registration, graph_matcher_rules=graph_matcher_rules, graph_patterns=graph_patterns ) tagged_params = set() # Automatically detected registrations in higher order primitives for tag_l in tag_locations: for p in tag_l.top_level_parameters: tagged_params.add(p) # Manual registrations for manual_eqn in manual_registrations: assert isinstance(manual_eqn.primitive, tags.LayerTag) for p in manual_eqn.primitive.split_all_inputs(manual_eqn.invars)[2]: tagged_params.add(p) # Automatically detect registrations for match in matches.values(): for p in match.param_graph_variables: tagged_params.add(p) # Create the Jaxpr 
with all the tag registrations make_var_func = jax.core.gensym([mid_graph.jaxpr]) eqns = list() env = {} pattern_counters = {} if register_orphans: for param in mid_graph.params_vars: if param not in tagged_params: orphan_p = make_var_func(param.aval) eqns.append(jax.core.new_jaxpr_eqn( invars=[param], outvars=[orphan_p], primitive=tags.generic, params={}, effects=set(), )) env[param] = orphan_p tag_locations.append(TagLocation(eqns[-1], "Orphan")) for eqn in mid_graph.jaxpr.eqns: invars = [env.get(v, v) if isinstance(v, Var) else v for v in eqn.invars] eqns.append(eqn.replace(invars=invars)) if isinstance(eqn.primitive, tags.LayerTag): # Mark manual registrations tag_name = eqn.primitive.name n = pattern_counters.get(tag_name, 0) pattern_counters[tag_name] = n + 1 tag_locations.append(TagLocation(eqn, f"Manual[{tag_name}_{n}]")) for var in eqn.outvars: # Check if this is a match of a graph pattern match = matches.get(var) if match is not None: for additional_eqn in match.create_eqn(env, make_var_func): eqns.append(additional_eqn) # Mark automatic registration tag_name = eqns[-1].primitive.name n = pattern_counters.get(tag_name, 0) pattern_counters[tag_name] = n + 1 tag_locations.append(TagLocation(eqns[-1], f"Auto[{tag_name}_{n}]")) final_outvars = [env.get(v, v) if isinstance(v, Var) else v for v in mid_graph.jaxpr.outvars] final_graph = JaxprGraph( name=mid_graph.name, closed_jaxpr=ClosedJaxpr( jaxpr=mid_graph.jaxpr.replace(eqns=eqns, outvars=final_outvars), consts=mid_graph.closed_jaxpr.consts ), params_tree=mid_graph.params_tree, params_vars=mid_graph.params_vars, out_tree=mid_graph.out_tree, tag_ctor=None, ) return final_graph, tag_locations def auto_register_tags( func: utils.Func, func_args: utils.FuncArgs, params_index: int = 0, register_only_generic: bool = False, compute_only_loss_tags: bool = True, patterns_to_skip: Sequence[str] = (), allow_multiple_registrations: bool = False, graph_matcher_rules: GraphMatcherComparator = GraphMatcherComparator(), graph_patterns: Sequence[GraphPattern] = DEFAULT_GRAPH_PATTERNS, ) -> TaggedFunction: """Transforms the function by automatically registering layer tags. Args: func: The original function to transform. func_args: Example arguments to ``func`` which to be used for tracing it. params_index: Specifies, which inputs to the function are to be considered a parameter variable. Specifically - ``inputs[params_index]``. register_only_generic: If ``True`` registers all parameters not already in a layer tag with a generic tag, effectively ignoring ``graph_patterns``. compute_only_loss_tags: If set to ``True`` (default) the resulting function will only compute the loss tags in ``func``, not its full computation and actual output. patterns_to_skip: The names of any patterns from the provided list, which to be skipped/not used during the pattern matching. allow_multiple_registrations: Whether to raise an error if a parameter is registered with more than one layer tag. graph_matcher_rules: A :class:`~GraphMatcherRules` instance, which is used for determining equivalence of individual Jax primitives. graph_patterns: A sequence of :class:`~GraphPattern` objects, which contain all patterns to use, in order of precedence, which to try to find in the graph before registering a parameter with a generic layer tag. Returns: A transformed function as described above. 
""" graph = make_jax_graph( func=func, func_args=func_args, params_index=params_index, name="main", compute_only_loss_tags=compute_only_loss_tags, clean_broadcasts=True, ) patterns = () if register_only_generic else tuple( pattern for pattern in graph_patterns if pattern.name not in patterns_to_skip) func_graph, tagged_locations = _auto_register_tags( graph=graph, graph_matcher_rules=graph_matcher_rules, graph_patterns=patterns, register_orphans=True, register_only_until_losses=True ) func = TaggedFunction( func_graph=func_graph, tag_locations=tagged_locations, ) func.print_parameter_tags() if not allow_multiple_registrations: func.check_multiple_registrations() return func
kfac-jax-main
kfac_jax/_src/tag_graph_matcher.py
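A minimal usage sketch for the file above (not part of the dataset record): it traces a tiny softmax-classifier loss with `auto_register_tags` and lets it print the detected parameter registrations. The loss-registration helper `kfac_jax.register_softmax_cross_entropy_loss`, the toy model, and all shapes/names are assumptions made for this example.

import jax
import jax.numpy as jnp
import kfac_jax
from kfac_jax._src import tag_graph_matcher


def loss_fn(params, batch):
  x, y = batch
  logits = x @ params["w"] + params["b"]
  # A loss tag is required because `compute_only_loss_tags=True` by default.
  kfac_jax.register_softmax_cross_entropy_loss(logits, y)
  return jnp.mean(
      -jnp.sum(jax.nn.one_hot(y, 3) * jax.nn.log_softmax(logits), axis=-1))


params = {"w": jnp.zeros([8, 3]), "b": jnp.zeros([3])}
batch = (jnp.ones([4, 8]), jnp.zeros([4], dtype=jnp.int32))

# Returns a TaggedFunction; `print_parameter_tags` is called internally and
# shows how each parameter was registered (Auto/Manual/Orphan).
tagged_fn = tag_graph_matcher.auto_register_tags(
    func=loss_fn, func_args=(params, batch))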
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """K-FAC related utility classes and functions.""" import abc import dataclasses import functools import inspect from typing import Any, Callable, Iterator, Sequence, Type, Tuple, Union, Dict, TypeVar import jax import jax.numpy as jnp from kfac_jax._src.utils import types Array = types.Array Numeric = types.Numeric ArrayTree = types.ArrayTree TArrayTree = types.TArrayTree StateType = TypeVar("StateType") StateTree = types.PyTree["State"] STATE_CLASSES_SERIALIZATION_DICT = {} def fake_element_from_iterator( iterator: Iterator[TArrayTree], ) -> Tuple[TArrayTree, Iterator[TArrayTree]]: """Returns a zeroed-out initial element of the iterator "non-destructively". This function mutates the input iterator, hence after calling this function it will be advanced by one. An equivalent to the original iterator (e.g. not advanced by one) is returned as the second element of the returned pair. The advised usage of the function is: `fake_element, iterator = fake_element_from_iterator(iterator)` Args: iterator: A PyTree iterator. Must yield at least one element. Returns: A pair `(element, output_iterator)` where `element` is a zeroed-out version of the first element of the iterator, and `output_iterator` is an equivalent iterator to the input one. """ init_element = next(iterator) fake_element = jax.tree_util.tree_map(jnp.zeros_like, init_element) def equivalent_iterator() -> Iterator[ArrayTree]: yield init_element # For some reason unknown to us, "yield from" can fail in certain # circumstances while True: yield next(iterator) return fake_element, equivalent_iterator() def to_tuple_or_repeat( x: Union[Numeric, Sequence[Numeric]], length: int, ) -> Tuple[Numeric, ...]: """Converts `x` to a tuple of fixed length. If `x` is an array, it is split along its last axis to a tuple (assumed to have `x.shape[-1] == length`). If it is a scalar, the scalar is repeated `length` times into a tuple, and if it is a list or a tuple it is just verified that its length is the same. Args: x: The input array, scalar, list or tuple. length: The length of the returned tuple. Returns: A tuple constructed by either replicating or splitting `x`. 
""" if isinstance(x, jnp.ndarray) and x.size > 1: # pytype: disable=attribute-error assert x.shape[-1] == length # pytype: disable=attribute-error return tuple(x[..., i] for i in range(length)) elif isinstance(x, (list, tuple)): assert len(x) == length return tuple(x) elif isinstance(x, (int, float, jnp.ndarray)): return (x,) * length else: raise ValueError(f"Unrecognized type for `x` - {type(x)}.") def first_dim_is_size(size: int, *args: Array) -> bool: """Checks that each element of `args` has first axis size equal to `size`.""" return all(arg.shape[0] == size for arg in args) class State(abc.ABC): """Abstract class for state classes.""" @classmethod def field_names(cls) -> Tuple[str, ...]: return tuple(field.name for field in dataclasses.fields(cls)) # pytype: disable=wrong-arg-types @classmethod def field_types(cls) -> Dict[str, Type[Any]]: return {field.name: field.type for field in dataclasses.fields(cls)} # pytype: disable=wrong-arg-types @property def field_values(self) -> Tuple[ArrayTree, ...]: return tuple(getattr(self, name) for name in self.field_names()) def copy(self: StateType) -> StateType: """Returns a copy of the PyTree structure (but not the JAX arrays).""" (flattened, structure) = jax.tree_util.tree_flatten(self) return jax.tree_util.tree_unflatten(structure, flattened) def tree_flatten(self) -> Tuple[Tuple[ArrayTree, ...], Tuple[str, ...]]: return self.field_values, self.field_names() @classmethod def tree_unflatten( cls, aux_data: Tuple[str, ...], children: Tuple[ArrayTree, ...], ): return cls(**dict(zip(aux_data, children))) def __repr__(self) -> str: return (f"{self.__class__.__name__}(" + ",".join(f"{name}={v!r}" for name, v in self.field_values) + ")") def register_state_class(class_type: Type[Any]) -> Type[Any]: """Extended dataclass decorator, which also registers the class as a PyTree. The function is equivalent to `dataclasses.dataclass`, but additionally registers the `class_type` as a PyTree. This is done done by setting the PyTree nodes to all of the `dataclasses.fields` of the class. Args: class_type: The class type to transform. Returns: The transformed `class_type` which is now a dataclass and also registered as a PyTree. """ if not issubclass(class_type, State): raise ValueError( f"Class {class_type} is not a subclass of kfac_jax.utils.State." 
) class_type = dataclasses.dataclass(class_type) class_type = jax.tree_util.register_pytree_node_class(class_type) class_name = f"{class_type.__module__}.{class_type.__qualname__}" STATE_CLASSES_SERIALIZATION_DICT[class_name] = class_type return class_type def serialize_state_tree(instance: StateTree) -> ArrayTree: """Returns a recursively constructed dictionary of the state.""" if isinstance(instance, State): result_dict = {name: serialize_state_tree(getattr(instance, name)) for name in instance.field_names()} cls = instance.__class__ result_dict["__class__"] = f"{cls.__module__}.{cls.__qualname__}" return result_dict elif isinstance(instance, list): return [serialize_state_tree(v) for v in instance] elif isinstance(instance, tuple): return tuple(serialize_state_tree(v) for v in instance) elif isinstance(instance, set): return set(serialize_state_tree(v) for v in instance) elif isinstance(instance, dict): return {k: serialize_state_tree(v) for k, v in instance.items()} else: return instance def deserialize_state_tree(representation: ArrayTree) -> StateTree: """Returns the state class using a recursively constructed.""" if isinstance(representation, list): return [deserialize_state_tree(v) for v in representation] elif isinstance(representation, tuple): return tuple(deserialize_state_tree(v) for v in representation) elif isinstance(representation, set): return set(deserialize_state_tree(v) for v in representation) elif isinstance(representation, dict): if "__class__" not in representation: return {k: deserialize_state_tree(v) for k, v in representation.items()} class_name = representation.pop("__class__") if class_name not in STATE_CLASSES_SERIALIZATION_DICT: raise ValueError(f"Did not find how to reconstruct class {class_name}.") dict_rep = deserialize_state_tree(representation) return STATE_CLASSES_SERIALIZATION_DICT[class_name](**dict_rep) else: return representation class Finalizable(abc.ABC): """A mixin for classes that can "finalize" their attributes. The class provides the function `finalize` which freezes all attributes of the instance after its call. Any attributes assignment thereafter will raise an error. All subclasses must always call `super().__init__()` for the mixin to function properly, and they must set any attributes before any call to `finalize` has happened. """ def __init__( self, forbid_setting_attributes_after_finalize: bool = True, excluded_attribute_names: Sequence[str] = (), **parent_kwargs: Any, ): """Initializes the instance. Args: forbid_setting_attributes_after_finalize: If `True`, trying to set attributes (via direct obj.attr = ...) after `finalize` was called on the instance will raise an error. If `False`, this is not checked. excluded_attribute_names: When `forbid_setting_attributes_after_finalize` is set to `True` this specifies any attributes names that can still be set. **parent_kwargs: Any keyword arguments to be passed to any parent class. 
""" self._finalized = False self._forbid_setting_attributes = forbid_setting_attributes_after_finalize self._excluded_attribute_names = frozenset(excluded_attribute_names) super().__init__(**parent_kwargs) @property def finalized(self) -> bool: """Whether the object has already been finalized.""" return self._finalized # pytype: disable=attribute-error def finalize(self, *args: Any, **kwargs: Any): """Finalizes the object, after which no attributes can be set.""" if self.finalized: raise ValueError("Object has already been finalized.") self._finalize(*args, **kwargs) self._finalized = True def _finalize(self, *args: Any, **kwargs: Any): """Any logic that a child class needs to do during the finalization.""" def __setattr__(self, name: str, value: Any): if (not getattr(self, "_finalized", False) or not getattr(self, "_forbid_setting_attributes", True) or name in getattr(self, "_excluded_attribute_names", ())): super().__setattr__(name, value) else: raise AttributeError("Can't set attributes after finalization.") def auto_scope_method(method): """Wraps the method call to have automatically generated Jax name scope.""" @functools.wraps(method) def wrapped(instance, *args, **kwargs): class_name = type(instance).__name__ method_name = method.__name__ if method_name.startswith("_"): method_name = method_name[1:] with jax.named_scope(f"{class_name}_{method_name}"): return method(instance, *args, **kwargs) return wrapped def auto_scope_function(func): """Wraps the function call to have automatically generated Jax name scope.""" @functools.wraps(func) def wrapped(*args, **kwargs): with jax.named_scope(func.__name__): return func(*args, **kwargs) return wrapped def default_batch_size_extractor(batch: types.Batch) -> Numeric: """Computes the batch size as the size of axis `0` of the first element.""" return jax.tree_util.tree_leaves(batch)[0].shape[0] def replace_char(original: str, new_str: str, index: int) -> str: """Replaces the character at a given location.""" return original[:index] + new_str + original[index + 1 :] def call_func_with_conditional_kwargs( func: Callable[..., Any], *func_args: Any, **kwargs: Any) -> Any: sig = inspect.signature(func) func_kwargs = {k: v for k, v in kwargs.items() if k in sig.parameters} return func(*func_args, **func_kwargs)
kfac-jax-main
kfac_jax/_src/utils/misc.py
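A brief sketch (not part of the record above) of the `State`/`register_state_class` machinery and the (de)serialization helpers defined in this file; the class and field names are invented for illustration.

import jax
import jax.numpy as jnp
import kfac_jax


@kfac_jax.utils.register_state_class
class CounterState(kfac_jax.utils.State):
  count: jnp.ndarray
  total: jnp.ndarray


state = CounterState(count=jnp.zeros([]), total=jnp.zeros([3]))

# The decorator turns the class into a dataclass and registers it as a PyTree,
# so standard tree utilities work on it directly.
doubled = jax.tree_util.tree_map(lambda x: 2 * x, state)

# States round-trip through plain nested dictionaries.
as_dict = kfac_jax.utils.serialize_state_tree(state)
restored = kfac_jax.utils.deserialize_state_tree(as_dict)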
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """K-FAC related utility classes and functions.""" from kfac_jax._src.utils import accumulators from kfac_jax._src.utils import math from kfac_jax._src.utils import misc from kfac_jax._src.utils import parallel from kfac_jax._src.utils import staging from kfac_jax._src.utils import types # types Array = types.Array PRNGKey = types.PRNGKey Scalar = types.Scalar Numeric = types.Numeric Shape = types.Shape DType = types.DType PyTree = types.PyTree ArrayTree = types.ArrayTree TArrayTree = types.TArrayTree Params = types.Params Batch = types.Batch FuncState = types.FuncState FuncAux = types.FuncAux PyTreeDef = types.PyTreeDef FuncArgs = types.FuncArgs FuncOuts = types.FuncOuts Func = types.Func ValueFunc = types.ValueFunc ValueAndGradFunc = types.ValueAndGradFunc AssumedFuncOutput = types.AssumedFuncOutput tree_is_empty = types.tree_is_empty abstract_objects_equal = types.abstract_objects_equal get_float_dtype_and_check_consistency = ( types.get_float_dtype_and_check_consistency) del types # misc deserialize_state_tree = misc.deserialize_state_tree serialize_state_tree = misc.serialize_state_tree to_tuple_or_repeat = misc.to_tuple_or_repeat first_dim_is_size = misc.first_dim_is_size fake_element_from_iterator = misc.fake_element_from_iterator default_batch_size_extractor = misc.default_batch_size_extractor auto_scope_function = misc.auto_scope_function auto_scope_method = misc.auto_scope_method register_state_class = misc.register_state_class replace_char = misc.replace_char call_func_with_conditional_kwargs = misc.call_func_with_conditional_kwargs Finalizable = misc.Finalizable State = misc.State del misc # parallel in_pmap = parallel.in_pmap wrap_if_pmap = parallel.wrap_if_pmap pmean_if_pmap = parallel.pmean_if_pmap psum_if_pmap = parallel.psum_if_pmap compute_mean = parallel.compute_mean compute_sum = parallel.compute_sum index_if_not_scalar = parallel.index_if_not_scalar get_first = parallel.get_first get_mean = parallel.get_mean get_sum = parallel.get_sum broadcast_all_local_devices = parallel.broadcast_all_local_devices pmap_zeros_like = parallel.pmap_zeros_like jit_zeros_like = parallel.jit_zeros_like replicate_all_local_devices = parallel.replicate_all_local_devices make_different_rng_key_on_all_devices = ( parallel.make_different_rng_key_on_all_devices) p_split = parallel.p_split p_split_num = parallel.p_split_num host_sync = parallel.host_sync host_all_gather = parallel.host_all_gather host_mean = parallel.host_mean pmap_sync_and_divide_value = parallel.pmap_sync_and_divide_value jit_sync_and_divide_value = parallel.jit_sync_and_divide_value copy_array = parallel.copy_array copy_obj = parallel.copy_obj pmap_copy_obj = parallel.pmap_copy_obj distribute_thunks = parallel.distribute_thunks del parallel # math set_special_case_zero_inv = math.set_special_case_zero_inv get_special_case_zero_inv = math.get_special_case_zero_inv product = math.product outer_product = math.outer_product 
scalar_mul = math.scalar_mul scalar_div = math.scalar_div weighted_sum_of_objects = math.weighted_sum_of_objects inner_product = math.inner_product symmetric_matrix_inner_products = math.symmetric_matrix_inner_products matrix_of_inner_products = math.matrix_of_inner_products vector_of_inner_products = math.vector_of_inner_products block_permuted = math.block_permuted norm = math.norm per_parameter_norm = math.per_parameter_norm psd_inv_cholesky = math.psd_inv_cholesky pi_adjusted_kronecker_factors = math.pi_adjusted_kronecker_factors pi_adjusted_kronecker_inverse = math.pi_adjusted_kronecker_inverse kronecker_product_axis_mul_v = math.kronecker_product_axis_mul_v kronecker_eigen_basis_axis_mul_v = math.kronecker_eigen_basis_axis_mul_v kronecker_product_mul_v = math.kronecker_product_mul_v kronecker_eigen_basis_mul_v = math.kronecker_eigen_basis_mul_v safe_psd_eigh = math.safe_psd_eigh loop_and_parallelize_average = math.loop_and_parallelize_average psd_matrix_norm = math.psd_matrix_norm del math # accumulators WeightedMovingAverage = accumulators.WeightedMovingAverage MultiChunkAccumulator = accumulators.MultiChunkAccumulator del accumulators # staged staged = staging.staged WithStagedMethods = staging.WithStagedMethods del staging
kfac-jax-main
kfac_jax/_src/utils/__init__.py
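The aliases above are what user code normally imports. A tiny sketch (the example tree is made up):

import jax.numpy as jnp
import kfac_jax

params = {"w": jnp.ones([2, 2]), "b": jnp.zeros([2])}
print(kfac_jax.utils.norm(params))           # Euclidean norm over all leaves.
print(kfac_jax.utils.tree_is_empty(params))  # False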
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """K-FAC annotation types and general tree operations.""" from typing import Callable, TypeVar, Sequence, Mapping, Tuple, Union import jax import jax.numpy as jnp # Types for annotation T = TypeVar("T") Array = jax.Array PRNGKey = Array Scalar = Union[float, int] Numeric = Union[Array, Scalar] Shape = Tuple[int, ...] DType = jnp.dtype PyTree = Union[T, Sequence["PyTree[T]"], Mapping[str, "PyTree[T]"]] ArrayTree = PyTree[Array] TArrayTree = TypeVar("TArrayTree", bound=ArrayTree) Params = TypeVar("Params", bound=ArrayTree) Batch = TypeVar("Batch", bound=ArrayTree) FuncState = TypeVar("FuncState", bound=ArrayTree) FuncAux = TypeVar("FuncAux", bound=ArrayTree) PyTreeDef = jax.tree_util.PyTreeDef FuncArgs = Sequence[ArrayTree] FuncOuts = Union[Array, Tuple[Array, FuncAux]] Func = Callable[..., FuncOuts] ValueFunc = Callable[..., Array] ValueAndGradFunc = Callable[..., Tuple[Array, Params]] AssumedFuncOutput = Union[Array, Tuple[Array, FuncAux], Tuple[Array, Tuple[FuncState, FuncAux]]] SCALAR_TYPES = (float, int) def tree_is_empty(obj: ArrayTree) -> bool: """Returns whether the given PyTree is empty.""" return not jax.tree_util.tree_leaves(obj) def abstract_objects_equal( obj1: ArrayTree, obj2: ArrayTree, check_dtype: bool = True ) -> bool: """`True` if the objects have the same PyTree structure, shapes and dtypes.""" return (jax.tree_util.tree_structure(obj1) == jax.tree_util.tree_structure(obj2) and all(e1.shape == e2.shape and (e1.dtype == e2.dtype or not check_dtype) for e1, e2 in zip(jax.tree_util.tree_leaves(obj1), jax.tree_util.tree_leaves(obj2)))) def get_float_dtype_and_check_consistency(obj: ArrayTree) -> DType: """Checks that all leaves have the same float dtype, and returns this.""" leaves = jax.tree_util.tree_leaves(obj) dtype = None for leaf in leaves: if (leaf.dtype == jnp.float16 or leaf.dtype == jnp.bfloat16 or leaf.dtype == jnp.float32 or leaf.dtype == jnp.float64): if dtype is not None and leaf.dtype != dtype: raise ValueError("Inconsistent dtypes detected.") else: dtype = leaf.dtype else: raise ValueError("Non-float dtype detected.") return dtype
kfac-jax-main
kfac_jax/_src/utils/types.py
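A short sketch (not part of the record) of the helpers defined in types.py, on made-up example trees:

import jax.numpy as jnp
from kfac_jax._src.utils import types

tree1 = {"a": jnp.zeros([2, 3]), "b": jnp.ones([4])}
tree2 = {"a": jnp.ones([2, 3]), "b": jnp.zeros([4])}

print(types.tree_is_empty({}))                             # True
print(types.abstract_objects_equal(tree1, tree2))          # True: same structure, shapes, dtypes.
print(types.get_float_dtype_and_check_consistency(tree1))  # float32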
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """K-FAC utilities for various mathematical operations.""" import functools import string from typing import Callable, Optional, Sequence, Iterable, TypeVar, Tuple, Union import jax from jax import lax from jax.experimental.sparse import linalg as experimental_splinalg import jax.numpy as jnp from jax.scipy import linalg from kfac_jax._src.utils import types import numpy as np import optax import tree Array = types.Array Numeric = types.Numeric PRNGKey = types.PRNGKey ArrayTree = types.ArrayTree TArrayTree = types.TArrayTree TNumeric = TypeVar("TNumeric", bound=Numeric) _ALPHABET = string.ascii_lowercase # If true we use a special case formula for when a block has one or more zero # factors. _SPECIAL_CASE_ZERO_INV: bool = True def set_special_case_zero_inv(value: bool): """Sets whether `pi_adjusted_inverse` handles zero and nan matrices.""" global _SPECIAL_CASE_ZERO_INV _SPECIAL_CASE_ZERO_INV = value def get_special_case_zero_inv() -> bool: """Returns whether `pi_adjusted_inverse` handles zero and nan matrices.""" return _SPECIAL_CASE_ZERO_INV def product(iterable_object: Iterable[TNumeric]) -> TNumeric: """Computes the product of all elements in the iterable.""" x = 1 for element in iterable_object: x = x * element return x def outer_product(*arrays: Array) -> Array: """Computes the outer product of an arbitrary number of vectors.""" if not all(a.ndim == 1 for a in arrays): raise ValueError("All arrays must be vectors.") in_str = ",".join(_ALPHABET[:len(arrays)]) out_str = _ALPHABET[:len(arrays)] return jnp.einsum(f"{in_str}->{out_str}", *arrays) def scalar_mul(obj: TArrayTree, scalar: Numeric) -> TArrayTree: """Multiplies all PyTree leaves of the object by the provided scalar.""" # The check below is in its current form because of how `jax.jit` tracing # mechanism work. If we use `scalar == 1` and `scalar` is an array, inside a # `jit` context, jax will raise an error, since you are not allowed to use # abstract values in concrete boolean statements, like native python # if/while/for constructs. if isinstance(scalar, types.SCALAR_TYPES) and scalar == 1.0: return obj return jax.tree_util.tree_map(lambda x: x * scalar, obj) def scalar_div(obj: TArrayTree, scalar: Numeric) -> TArrayTree: """Divides all PyTree leaves of the object by the provided scalar.""" # The check below is in its current form because of how `jax.jit` tracing # mechanism work. If we use `scalar == 1` and `scalar` is an array, inside a # `jit` context, jax will raise an error, since you are not allowed to use # abstract values in concrete boolean statements, like native python # if/while/for constructs. if isinstance(scalar, types.SCALAR_TYPES) and scalar == 1.0: return obj return jax.tree_util.tree_map(lambda x: x / scalar, obj) def weighted_sum_of_objects( objects: Sequence[TArrayTree], coefficients: Sequence[Numeric], ) -> TArrayTree: """Computes a weighted sum of the objects'. 
The function computes `sum_i coefficients[i] * objects[i]`. All objects must have the same PyTree structure, and PyTree leaves in equivalent positions must have the same shape. Args: objects: The sequence of objects to be summed together. coefficients: The coefficients corresponding to each object instance. Returns: An object, representing the weighted sum, of the same type as the inputs. """ if len(objects) != len(coefficients): raise ValueError("The number of coefficients must equal the number of " "objects.") if not objects: raise ValueError("The objects' sequences can not be empty.") accumulator = scalar_mul(objects[0], coefficients[0]) for o_i, c_i in zip(objects[1:], coefficients[1:]): if not types.abstract_objects_equal(accumulator, o_i): raise ValueError("One or more objects do not have equivalent abstract " "structure.") accumulator = jax.tree_util.tree_map( jnp.add, accumulator, scalar_mul(o_i, c_i)) return accumulator def _inner_product_float64(obj1: ArrayTree, obj2: ArrayTree) -> Array: """Computes inner product explicitly in float64 precision.""" raise NotImplementedError() # This function isn't currently working due to a break in # jax.experimental.enable_x64. # def array_ip(x, y): # x = jnp.array(jnp.reshape(x, [-1]), dtype=jnp.float64) # y = jnp.array(jnp.reshape(y, [-1]), dtype=jnp.float64) # return jnp.dot(x, y, precision=lax.Precision.HIGHEST) # original_dtype = types.get_float_dtype_and_check_consistency((obj1, obj2)) # with jax.experimental.enable_x64(): # elements_inner_products = jax.tree_util.tree_map(array_ip, obj1, obj2) # flat_list = jax.tree_util.tree_leaves(elements_inner_products) # result = flat_List[0] # for element_ip in flat_List[1:]: # result = result + element_ip # return jnp.array(result, dtype=original_dtype) def inner_product( obj1: ArrayTree, obj2: ArrayTree, in_float64: bool = False ) -> Array: """Computes the inner product `<vec(obj1), vec(obj2)>`. To compute the inner product, each of the two input objects is assumed to represent a vector by flattening and concatenating all of their PyTree leaves. Objects `obj1` and `obj2` must have the same PyTree structure, and PyTree leaves in equivalent positions must have the same shape. Args: obj1: The first object representing a vector. obj2: The second object representing a vector. in_float64: Whether to compute the inner product explicitly in `float64` precision. If this is set to `True` the computation will be in double precision regardless of whether `float64` has been enabled in Jax. Returns: The scalar value of the inner product. """ if not types.abstract_objects_equal(obj1, obj2, check_dtype=False): raise ValueError("The objects do not have identical abstract structure.") if in_float64: return _inner_product_float64(obj1, obj2) elements_product = jax.tree_util.tree_map( lambda x, y: jnp.sum(x * y), obj1, obj2) return sum(jax.tree_util.tree_leaves(elements_product)) def symmetric_matrix_inner_products( vectors1: Sequence[ArrayTree], vectors2: Sequence[ArrayTree], ip_function: Callable[[ArrayTree, ArrayTree], Array] = inner_product, ) -> Array: """Computes a matrix of the inner products between the two sequences. Args: vectors1: A sequence of identically structured PyTrees, each one representing a single vector. vectors2: A sequence of identically structured PyTrees, each one representing a single vector. ip_function: A callable which computes the inner product between PyTrees. Defaults to the standard dot-product. 
Returns: A symmetric matrix `m` with elements `m[i, j] = <vectors[i], vectors2[j]>` for `i >= j`. """ if len(vectors1) != len(vectors2): raise ValueError("The two sequences should have the same length.") m = [[] for _ in vectors1] for i, v_i in enumerate(vectors1): for j, v_j in enumerate(vectors2): if j < i: m[i].append(m[j][i]) else: m[i].append(ip_function(v_i, v_j)) return jnp.asarray(m) def matrix_of_inner_products( vectors: Sequence[ArrayTree], ip_function: Callable[[ArrayTree, ArrayTree], Array] = inner_product, ) -> Array: """Computes the matrix of inner products of the sequence of vectors. Args: vectors: A sequence of identically structured PyTrees, each one representing a single vector. ip_function: A callable which computes the inner product between PyTrees. Defaults to the standard dot-product. Returns: A matrix `m` with elements `m[i, j] = <vectors[i], vectors[j]>`. """ return symmetric_matrix_inner_products(vectors, vectors, ip_function=ip_function) def vector_of_inner_products( base: ArrayTree, vectors: Sequence[ArrayTree], ip_function: Callable[[ArrayTree, ArrayTree], Array] = inner_product, ) -> Array: """Computes a vector of inner products with base. Args: base: A PyTree representing the base vector. vectors: A sequence of identically structured PyTrees, each one representing a single vector. ip_function: A callable which computes the inner product between PyTrees. Defaults to the standard dot-product. Returns: A vector `v` with elements `v[i] = <base, vectors[i]>`. """ v = [] for v_i in vectors: v.append(ip_function(v_i, base)) return jnp.asarray(v) def block_permuted( matrix: Array, block_sizes: Sequence[int], block_order: Sequence[int], ) -> Array: """Permutes whole blocks of the input matrix. Given a square matrix, this function splits it into blocks, each one having a size defined in `block_sizes` and permutes them, both in rows and columns. The permutation sends to the `i` slot the `block_order[i]` block of the input matrix. Example: matrix = [[A_0, B_0, C_0], [A_1, B_1, C_1], [A_2, B_2, C_2]] block_order = [2, 0, 1] => [[C_2, A_2, B_2], [C_0, A_0, B_0], [C_1, A_1, B_1]] Args: matrix: The matrix, whose blocks will be permuted. block_sizes: A sequences of each block's size. block_order: A sequence of the order of the blocks. Returns: The resulting matrix after permuting the blocks. 
""" if len(block_sizes) != len(block_order): raise ValueError( f"The length of `block_sizes` (=={len(block_sizes)} " f"and `block_order` (=={len(block_order)}) must be " "the same.") if all(i == j for i, j in enumerate(block_order)): return matrix indices = np.cumsum(block_sizes)[:-1] blocks = [jnp.split(row, indices, 1) for row in jnp.split(matrix, indices, 0)] reordered_blocks = [[blocks[i][j] for j in block_order] for i in block_order] return jnp.block(reordered_blocks) def norm(obj: ArrayTree) -> Array: """Computes the Euclidean norm of the provided PyTree object.""" elements_squared_norm = jax.tree_util.tree_map( lambda x: jnp.sum(jnp.square(x)), obj) return jnp.sqrt(sum(jax.tree_util.tree_leaves(elements_squared_norm))) def per_parameter_norm(obj: ArrayTree, key_prefix: str) -> ArrayTree: per_param_norm = jax.tree_util.tree_map(jnp.linalg.norm, obj) per_param_norm = tree.flatten_with_path(per_param_norm) return { key_prefix + "(" + "/".join(k) + ")": v for k, v in per_param_norm } def psd_inv_cholesky(matrix: Array) -> Array: """Computes the inverse of `matrix`, with matrix assumed PSD.""" if matrix.shape[:1] != matrix.shape[1:]: raise ValueError(f"Expected square matrix, but got shape {matrix.shape}.") identity = jnp.eye(matrix.shape[0], dtype=matrix.dtype) return linalg.solve(matrix, identity, assume_a="pos") def psd_matrix_norm( matrix: Array, norm_type: str = "avg_trace", method_2norm: str = "lobpcg", rng_key: Optional[PRNGKey] = None ) -> Array: """Computes one of several different matrix norms for PSD matrices. Args: matrix: a square matrix represented as a 2D array, a 1D vector giving the diagonal, or a 0D scalar (which gets interpreted as a 1x1 matrix). Must be positive semi-definite (PSD). norm_type: a string specifying the type of matrix norm. Can be "2_norm" for the matrix 2-norm aka the spectral norm, "avg_trace" for the average of diagonal entries, "1_norm" for the matrix 1-norm, or "avg_fro" for the Frobenius norm divided by the square root of the number of rows. method_2norm: a string specifying the method used to compute 2-norms. Can be "lobpcg" (recommended) or "power_iteration". rng_key: an optional JAX PRNGKey key to used initialize the lobpcg method for computing the 2-norm. Returns: A 0D scalar giving the requested norm. 
""" if norm_type == "2_norm": if matrix.ndim == 0: return matrix elif matrix.ndim == 1: return jnp.max(matrix) elif matrix.ndim == 2 and matrix.shape[0] == matrix.shape[1]: if method_2norm == "lobpcg": if rng_key is None: rng_key = jax.random.PRNGKey(123) v = jax.random.normal(rng_key, shape=[matrix.shape[0], 1]) return experimental_splinalg.lobpcg_standard( matrix, v, m=300, tol=1e-8)[0][0] elif method_2norm == "power_iteration": return optax.power_iteration( matrix, num_iters=300, error_tolerance=1e-7)[1] else: raise ValueError(f"Unrecognized method string: '{norm_type}'") else: raise ValueError(f"Unsupported shape for factor array: {matrix.shape}") elif norm_type == "avg_trace": if matrix.ndim == 0: return matrix elif matrix.ndim == 1: return jnp.sum(matrix) / matrix.shape[0] elif matrix.ndim == 2 and matrix.shape[0] == matrix.shape[1]: return jnp.trace(matrix) / matrix.shape[0] else: raise ValueError(f"Unsupported shape for factor array: {matrix.shape}") elif norm_type == "1_norm": if matrix.ndim == 0: return matrix elif matrix.ndim == 1: return jnp.max(matrix) elif matrix.ndim == 2 and matrix.shape[0] == matrix.shape[1]: return jnp.linalg.norm(matrix, ord=1) else: raise ValueError(f"Unsupported shape for factor array: {matrix.shape}") elif norm_type == "avg_fro": if matrix.ndim == 0: return matrix elif matrix.ndim == 1: return jnp.linalg.norm(matrix) / jnp.sqrt(matrix.shape[0]) elif matrix.ndim == 2 and matrix.shape[0] == matrix.shape[1]: return jnp.linalg.norm(matrix) / jnp.sqrt(matrix.shape[0]) else: raise ValueError(f"Unsupported shape for factor array: {matrix.shape}") else: raise ValueError(f"Unrecognized norm type: '{norm_type}'") def pi_adjusted_kronecker_factors( *factors: Array, damping: Numeric ) -> Tuple[Array, ...]: """Computes Kronecker factors with pi-adjusted factored damping. The `f1 kron f2 kron ... kron fn + damping * I` is not a Kronecker product in general, because of the added identity. [1] proposed a pi-adjusted factored damping approach to approximate it as a Kronecker product. [2] generalized this approach from two to tree factors, and [3] generalized it to arbitrary numbers of factors. This function implements the generalized approach. [1] - https://arxiv.org/abs/1503.05671 [2] - https://openreview.net/forum?id=SkkTMpjex [3] - https://ui.adsabs.harvard.edu/abs/2021arXiv210602925R/abstract Args: *factors: A list of factors represented as 2D arrays, vectors (which are interpreted as representing the diagonal of a matrix) or scalars (which are interpreted as being a 1x1 matrix). All factors must be PSD. damping: The weight of the identity added to the Kronecker product. Returns: A list of factors with the same length as `factors`, and with the same corresponding representations, whose Kronecker product approximates `(f1 kron f2 kron ... kron fn) + damping * I` according to the pi-adjusted factored-damping approach. """ # The implementation writes each single factor as `c_i u_i`, where the matrix # `u_i` is such that `trace(u_i) / dim(u_i) = 1`. We then factor out all the # scalar factors `c_i` into a single overall scaling coefficient and # distribute the damping to each single non-scalar factor `u_i` equally. 
norm_type = "avg_trace" norms = [psd_matrix_norm(f, norm_type=norm_type) for f in factors] # Compute the normalized factors `u_i`, such that Trace(u_i) / dim(u_i) = 1 us = [fi / ni for fi, ni in zip(factors, norms)] # kron(arrays) = c * kron(us) c = jnp.prod(jnp.array(norms)) damping = damping.astype(c.dtype) # pytype: disable=attribute-error # numpy-scalars def regular_case() -> Tuple[Array, ...]: non_scalars = sum(1 if f.size != 1 else 0 for f in factors) # We distribute the overall scale over each factor, including scalars if non_scalars == 0: # In the case where all factors are scalar we need to add the damping c_k = jnp.power(c + damping, 1.0 / len(factors)) else: c_k = jnp.power(c, 1.0 / len(factors)) # We distribute the damping only inside the non-scalar factors d_hat = jnp.power(damping / c, 1.0 / non_scalars) u_hats = [] for u in us: if u.size == 1: u_hat = jnp.ones_like(u) # damping not used in the scalar factors elif u.ndim == 2: u_hat = u + d_hat * jnp.eye(u.shape[0], dtype=u.dtype) else: # diagonal case assert u.ndim == 1 u_hat = u + d_hat u_hats.append(u_hat * c_k) return tuple(u_hats) def zero_case() -> Tuple[Array, ...]: # In the special case where for some reason one of the factors is zero, then # the we write each factor as `damping^(1/k) * I`. c_k = jnp.power(damping, 1.0 / len(factors)) u_hats = [] for u in us: if u.ndim == 2: u_hat = jnp.eye(u.shape[0], dtype=u.dtype) else: u_hat = jnp.ones_like(u) u_hats.append(u_hat * c_k) return tuple(u_hats) if get_special_case_zero_inv(): return lax.cond( jnp.greater(c, 0.0), regular_case, zero_case) else: return regular_case() def invert_psd_matrices( matrices: ArrayTree ) -> ArrayTree: """Inverts a PyTree of matrices. Args: matrices: A PyTree of 2D arrays, vectors (which are interpreted as representing the diagonal of a matrix) or scalars (which are interpreted as being a 1x1 matrix) representing the matrices to be inverted. All matrices must be PSD. Returns: A PyTree of matrices giving the inverses of the corresponding matrices passed as arguments (with the same respective representations). """ def invert_psd_matrix(m): if m.ndim == 2: return psd_inv_cholesky(m) assert m.ndim <= 1 return 1.0 / m return jax.tree_map(invert_psd_matrix, matrices) def pi_adjusted_kronecker_inverse( *factors: Array, damping: Numeric, ) -> Tuple[Array, ...]: """Computes pi-adjusted factored damping inverses. The inverse of `(f1 kron f2 kron ... kron fn) + damping * I` is not Kronecker factored in general, because of the added identity. [1] proposed a pi-adjusted factored damping approach to approximate the inverse as a Kronecker product. [2] generalized this approach from two to tree factors, and [3] generalized it to arbitrary numbers of factors. This function implements the generalized approach. [1] - https://arxiv.org/abs/1503.05671 [2] - https://openreview.net/forum?id=SkkTMpjex [3] - https://ui.adsabs.harvard.edu/abs/2021arXiv210602925R/abstract Args: *factors: A list of factors represented as 2D arrays, vectors (which are interpreted as representing the diagonal of a matrix) or scalars (which are interpreted as being a 1x1 matrix). All factors must be PSD. damping: The weight of the identity added to the Kronecker product. Returns: A list of factors with the same length as `factors`, and with the same corresponding representations, whose Kronecker product approximates the inverse of `(f1 kron f2 kron ... kron fn) + damping * I` according to the pi-adjusted factored-damping approach. 
""" return invert_psd_matrices( pi_adjusted_kronecker_factors(*factors, damping=damping)) # pytype: disable=bad-return-type def kronecker_product_axis_mul_v( factors: Sequence[Array], v: Array, axis_groups: Optional[Sequence[Sequence[int]]] = None, transpose: Union[bool, Sequence[bool]] = False, ): """Computes ``kron(*factors) rvec(v)`` where ``rvec`` is row-wise vectorization. Args: factors: The sequence of factors forming the Kronecker product. Must be square 2D arrays. v: A tensor whose vectorization will be multiplied by the Kronecker product. axis_groups: A list whose i-th element is a sequence of consecutive integers specifying the axes of the input tensor ``v`` that correspond to the i-th Kronecker factor. Passing ``None`` is equivalent to passing ``[[0],[1],[2],...]``. transpose: A single boolean or a sequence of booleans. If it is a sequence, each element specifies if the corresponding factor should be transposed. If it is a single boolean, specifies if all factors should be transposed. Returns: The result, shaped as a tensor, of multiplying the vectorization of the input tensor by the Kronecker-factored matrix. """ if axis_groups is None: axis_groups = tuple((i,) for i in range(v.ndim)) else: axis_groups = tuple(tuple(group) for group in axis_groups) # Sanity checks if sum(axis_groups, ()) != tuple(range(v.ndim)): raise ValueError(f"The `axis_groups={axis_groups}` are either not in " f"consecutive order or do not cover exactly the axis of " f"the input `v`..") if len(factors) != len(axis_groups): raise ValueError("The number of factors provided must be equal to the " "number of axis groups provided.") if isinstance(transpose, bool): transpose = [transpose] * len(factors) elif len(transpose) != len(factors): raise ValueError("The length of the transpose sequence must match the " "number of factors.") factor_strs = ["yz" if t else "zy" for t in transpose] general_str = _ALPHABET[:v.ndim] result = v for group, factor, f_str in zip(axis_groups, factors, factor_strs): # This flattens all axis in `group` of `result` into a single one. shape = v.shape[:min(group)] + (-1,) + v.shape[max(group) + 1:] vector = result.reshape(shape) # This contracts `result` with `factor` along the single axis. vector_str = general_str[:min(group)] + "y" + general_str[max(group) + 1:] result_str = vector_str.replace("y", "z") einsum_str = f"{f_str},{vector_str}->{result_str}" r_next = jnp.einsum(einsum_str, factor, vector) # This reshapes back to the original shape. result = r_next.reshape(v.shape) return result def kronecker_eigen_basis_axis_mul_v( q_factors: Sequence[Array], eigenvalues: Array, v: Array, axis_groups: Optional[Sequence[Sequence[int]]] = None, ): """Computes a matrix-vector product in a Kronecker product eigen-basis. The function computes: ``kron(*q_factors) diag(eigenvalues) kron(*q_factors)^T rvec(v)`` where all variables are appropriately sized matrices and ``rvec`` is row-wise vectorization. The computation is related to the usual Kronecker product ``kron(*factors) rvec(v)``, if ``factors`` are all symmetric PSD matrices and ``q_factors`` are the matrices of eigenvectors of ``factors`` and ``eigenvalues`` is the kronecker product of the eigenvalues of ``factors``. However, the function does not assume that its inputs are of this form. Args: q_factors: A sequence of the orthonormal basis of eigenvectors of each Kronecker factor. eigenvalues: A tensor containing the eigenvalues (e.g. the Kronecker product of eigenvalues of all factors). v: The input vector as a tensor. 
axis_groups: A list whose i-th element is a sequence of consecutive integers specifying the axes of the input tensor ``v`` that correspond to the i-th Kronecker factor. Passing ``None`` is equivalent to passing ``[[0],[1],[2],...]``. Returns: The result of multiplying the input vector by the Kronecker product of the factors, shaped as a tensor. """ q_proj_v = kronecker_product_axis_mul_v(q_factors, v, axis_groups, True) if eigenvalues.shape != q_proj_v.shape: raise ValueError("The eigenvalues array should have the same shape as the " "projection of `v` onto `kron(*factors)`.") eig_weighted_v = eigenvalues * q_proj_v return kronecker_product_axis_mul_v(q_factors, eig_weighted_v, axis_groups) def kronecker_product_mul_v( a: Array, b: Array, v: Array, a_is_symmetric: bool, ) -> Array: """Computes `unvec[(a kron b) vec(v)]` for correctly sized input matrices.""" del a_is_symmetric # not used return kronecker_product_axis_mul_v([b, a], v) def kronecker_eigen_basis_mul_v( q_a: Array, q_b: Array, eigenvalues: Array, v: Array, ) -> Array: """Computes a matrix-vector product in a Kronecker product eigen-basis. The function computes: `(q_a kron q_b) diagonal(eigenvalues) (q_a kron q_b)^T vec(v)` where all variables are appropriately sized matrices. The computation is related to the usual Kronecker product `(a kron b) vec(v)`, if `a` and `b` are symmetric matrices and `q_a` and `q_b` are the matrices of eigenvectors of `a` and `b` and `eigenvalues` is the outer product of the eigenvalues of `a` and `b`. However, the function does not assume anything about the `eigenvalues` and allows for any dense matrix. Args: q_a: An orthonormal basis for eigenvectors of the first Kronecker factor. q_b: An orthonormal basis for eigenvectors of the second Kronecker factor. eigenvalues: A matrix containing the eigenvalues (e.g. the product of eigenvalues of both factors). v: The input vector as a matrix. Returns: The result of the matrix-vector product. """ return kronecker_eigen_basis_axis_mul_v([q_b, q_a], eigenvalues, v) def _host_eigh(x: Array, *_) -> Tuple[Array, Array]: """This calls the CPU numpy function for eigh.""" shape_s = jax.ShapeDtypeStruct(x.shape[:-1], x.dtype) shape_q = jax.ShapeDtypeStruct(x.shape, x.dtype) return jax.pure_callback(np.linalg.eigh, (shape_s, shape_q), x) def _eigh( x: Array, force_on_host: bool = False, ) -> Tuple[Array, Array]: """Computes eigenvectors and eigenvalues, with optionally offloading to cpu.""" if force_on_host: return _host_eigh(x) s, q = jnp.linalg.eigh(x) # Recently with CUDA 11.7 there is a bug in cuSOLVER which makes the eigh # implementation unstable sometimes on GPUs. return jax.lax.cond( jnp.any(jnp.isnan(s)), _host_eigh, lambda *args: args[1:], x, s, q ) def safe_psd_eigh( x: Array, force_on_host: bool = False, ) -> Tuple[Array, Array]: """Computes the eigenvalue decomposition for a PSD matrix. The function is similar to `jax.numpy.linalg.eigh`, but it clips the returned eigenvalues to always be non-negative, which we know mathematically holds for PSD matrices, but due to numerical errors `jax.numpy.linalg.eigh` could return negative values. Args: x: The input matrix, assumed to be PSD. force_on_host: If `True` will perform the computation on the host CPU. Returns: A pair of (eigenvalues, eigenvectors) arrays. """ d = x.shape[0] # Here we are handling the case of NaNs separately, because in some versions # of cuda and cudablas they can cause a runtime error. 
s, q = lax.cond( jnp.any(jnp.isnan(x)), lambda _: (jnp.full([d], jnp.nan, dtype=x.dtype), # pylint: disable=g-long-lambda jnp.full([d, d], jnp.nan, dtype=x.dtype)), functools.partial(_eigh, force_on_host=force_on_host), x, ) # The matrix is PSD by construction, but numerical inaccuracies can produce # slightly negative eigenvalues. Hence, clip at zero. return jnp.clip(s, a_min=0.0), q def loop_and_parallelize_average( func: Callable[..., ArrayTree], max_parallel_size: int, ) -> Callable[..., ArrayTree]: """Returns a function that computes the average of `func` over any arguments. The returned function is mathematically equivalent to jnp.mean(jax.vmap(func)(*args), axis=0). However, naively using the above code could lead to prohibitively large memory usage, as it scales linearly with the leading axis size of `args`, because of `jax.vmap`. To amortize the memory cost, if the leading axis has size larger than `max_parallel_size`, we call multiple times `vmap` in a loop via `scan` by splitting the arguments to multiple chunks. This allows to trade off memory usage for the cost of compute time. Args: func: A function that computes a singleton output. max_parallel_size: The maximum number of elements that are allowed to be part of a single call to `jax.vmap`. Returns: A function that computes the averaged output of `func` over the leading axis of its arguments. """ vmap_fn = jax.vmap(func) @functools.wraps(func) def average_func(*args) -> ArrayTree: lead_axis_sizes = set(x.shape[0] for x in jax.tree_util.tree_leaves(args)) if not lead_axis_sizes: raise ValueError("You must pass in at least one argument with a PyTree " "leaf node.") elif len(lead_axis_sizes) != 1: raise ValueError(f"Inconsistent leading axis sizes seen: " f"{lead_axis_sizes!r}.") leading_size = next(iter(lead_axis_sizes)) singleton_args = jax.tree_util.tree_map(lambda _x: _x[0], args) _, output_tree = jax.make_jaxpr(func, return_shape=True)(*singleton_args) singleton_size = sum(x.size for x in jax.tree_util.tree_leaves(output_tree)) output_size = singleton_size * leading_size # Compute the loop size and any remainder size if max_parallel_size is None or output_size <= max_parallel_size: parallel_size = leading_size else: parallel_size = max( min(max_parallel_size // singleton_size, leading_size), 1) # The arguments have to be split into chunks along their leading axis, # however since `jax.scan` does not support inputs with different size, # if the leading axis is not divisible by the parallel_size, we need to # separately compute the values for the last remaining arguments chunks. 
num_parallel_chunks = leading_size // parallel_size remainder_size = leading_size % parallel_size all_chunks_size = leading_size - remainder_size # Index to get the loop arguments loop_args = jax.tree_util.tree_map(lambda x: x[:all_chunks_size], args) if num_parallel_chunks == 1: averaged_value = jnp.mean(vmap_fn(*loop_args), axis=0) else: def scan_fn(accumulator, args_): vmap_value = vmap_fn(*args_) avg_value = jax.tree_util.tree_map( lambda x: jnp.mean(x, axis=0), vmap_value) return jax.tree_util.tree_map(jnp.add, accumulator, avg_value), None loop_shape = (num_parallel_chunks, parallel_size) loop_args = jax.tree_util.tree_map( lambda x: x.reshape(loop_shape + x.shape[1:]), loop_args) summed_value, _ = jax.lax.scan( scan_fn, init=jax.tree_util.tree_map( jnp.zeros_like, output_tree), xs=loop_args) averaged_value = scalar_div(summed_value, num_parallel_chunks) if remainder_size == 0: return averaged_value # Index to get the remainder arguments remainder_args = jax.tree_util.tree_map(lambda x: x[all_chunks_size:], args) remainder_value = jnp.mean(vmap_fn(*remainder_args), axis=0) avg_weight = all_chunks_size / leading_size remainder_weight = remainder_size / leading_size return weighted_sum_of_objects( [averaged_value, remainder_value], [avg_weight, remainder_weight]) return average_func
kfac-jax-main
kfac_jax/_src/utils/math.py
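A minimal sketch (not part of the record) exercising two of the helpers above on small, made-up PSD factors:

import jax.numpy as jnp
from kfac_jax._src.utils import math

a = jnp.array([[2.0, 0.5], [0.5, 1.0]])  # 2D PSD factor.
d = jnp.array([0.1, 0.2, 0.3])           # Diagonal factor stored as a vector.

print(math.psd_matrix_norm(a, norm_type="avg_trace"))  # trace(a) / 2

# Kronecker factors approximating (a kron diag(d)) + damping * I using the
# pi-adjusted factored-damping rule.
a_hat, d_hat = math.pi_adjusted_kronecker_factors(
    a, d, damping=jnp.asarray(1e-3))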
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """K-FAC for accumulating statistics.""" from typing import Any, Optional, Generic import jax import jax.numpy as jnp from kfac_jax._src.utils import misc from kfac_jax._src.utils import parallel from kfac_jax._src.utils import types Array = types.Array Numeric = types.Numeric Shape = types.Shape DType = types.DType ArrayTree = types.ArrayTree TArrayTree = types.TArrayTree @misc.register_state_class class WeightedMovingAverage(Generic[TArrayTree], misc.State): """A wrapped class for an arbitrary weighted moving average.""" weight: Numeric raw_value: Optional[TArrayTree] @property def value(self) -> TArrayTree: """The value of the underlying arrays data structure.""" return jax.tree_util.tree_map(lambda x: x / self.weight, self.raw_value) def update( self, value: TArrayTree, old_weight_multiplier: Numeric, new_weight: Numeric, ): """Updates the underlying array and weight accordingly.""" if self.raw_value is None: self.raw_value = value self.weight = jnp.asarray(new_weight).astype(self.weight.dtype) else: self.weight = self.weight * old_weight_multiplier + new_weight self.raw_value = jax.tree_util.tree_map( lambda x, y: x * old_weight_multiplier + y * new_weight, self.raw_value, value, ) def sync(self, pmap_axis_name: Optional[str]): """Syncs the underlying array across devices.""" if self.raw_value is None: raise ValueError("`raw_value` has not been set yet.") self.raw_value = parallel.pmean_if_pmap(self.raw_value, pmap_axis_name) def clear(self, value_to_none: bool = False): """Resets the weighted average.""" self.weight = jnp.zeros_like(self.weight) self.raw_value = None if value_to_none else jnp.zeros_like(self.raw_value) def value_and_clear(self) -> TArrayTree: """Retrieves the value of the weighted average and clears it.""" value = self.value self.clear() return value @classmethod def zeros_array( cls, shape: Shape, dtype: Optional[DType] = None, ) -> "WeightedMovingAverage[Array]": """Initializes a `WeightedMovingAverage` with a single array of zeros.""" return cls( # pytype: disable=wrong-keyword-args weight=jnp.zeros([], dtype=dtype), raw_value=jnp.zeros(shape, dtype=dtype), ) @classmethod def zeros_like(cls, value: TArrayTree) -> "WeightedMovingAverage[TArrayTree]": """Initializes a `WeightedMovingAverage` with zeros structure like `value`.""" return cls( # pytype: disable=wrong-keyword-args weight=jnp.array( 0.0, dtype=types.get_float_dtype_and_check_consistency(value) ), raw_value=jax.tree_util.tree_map(jnp.zeros_like, value), ) @classmethod def empty(cls, dtype: Optional[DType] = None) -> "WeightedMovingAverage[Any]": """Returns an empty moving average instance.""" weight = jnp.zeros([]) if dtype is None else jnp.zeros([], dtype=dtype) return WeightedMovingAverage(weight=weight, raw_value=None) class MultiChunkAccumulator(Generic[TArrayTree]): """Statistics accumulation, abstracted over multiple chunks.""" def __init__( self, init_obj_value: Optional[TArrayTree], weight: 
Numeric, multi_device: bool, ): """Initializes an accumulator instance with the provided object and counter. Args: init_obj_value: The initial value of the accumulator. weight: The initial weight, which specifies how many samples are assumed to have been already counted in the initial value of the accumulator. multi_device: Whether the objects that are accumulated are outputs of a multi-device computation (e.g. `jax.pmap`). """ self._accumulator = init_obj_value self._weight = weight self._multi_device = multi_device @property def accumulator(self) -> TArrayTree: """The current value of the underlying not-normalized accumulator.""" return self._accumulator @property def weight(self) -> Numeric: """The current normalization weight of the underlying accumulator.""" return self._weight @property def multi_device(self) -> bool: """Whether the accumulator is the output of a multi-device computation.""" return self._multi_device @property def value(self) -> TArrayTree: """The current normalized value of the accumulator.""" if types.tree_is_empty(self.accumulator): return self.accumulator if self._multi_device: return parallel.pmap_sync_and_divide_value(self.accumulator, self.weight) else: return parallel.jit_sync_and_divide_value(self.accumulator, self.weight) def clear(self) -> None: """Sets the underlying accumulator and weight to `None`.""" self._accumulator = None self._weight = None def value_and_clear(self) -> TArrayTree: """Retrieves the normalized value of the accumulator and clears it.""" value = self.value self.clear() return value def add(self, value_obj: TArrayTree, weight: Numeric = 1): """Adds an element to the moving average and the max. The exact update equation for the statistics are: raw_value_t = raw_value_{t-1} + value_obj * weight weight_t = weight_{t-1} + weight Args: value_obj: The value of the object, which scaled by `weight` will be added to the accumulator. weight: The relative weight of the `value_obj`. 
""" value_obj = jax.tree_util.tree_map(lambda x: x * weight, value_obj) if self._accumulator is None: self._accumulator = value_obj if isinstance(weight, types.SCALAR_TYPES): self._weight = jnp.full_like(self._weight, weight) elif not isinstance(weight, jax.Array): raise ValueError("`weight` should be an instance of float, int or " "jax.Array.") elif self._weight.shape != weight.shape: # pytype: disable=attribute-error # numpy-scalars raise ValueError("If `weight` is an `jnp.ndarray` then should have the " "same shape as the weight of the accumulator.") else: self._weight = weight return if not types.tree_is_empty(self._accumulator): if types.tree_is_empty(value_obj): raise ValueError("The provided `value_obj` has an empty PyTree " "structure, but the accumulator has been initialized " "with a non-empty PyTree object.") self._accumulator = jax.tree_util.tree_map( jnp.add, self._accumulator, value_obj) elif not types.tree_is_empty(value_obj): raise ValueError("The provided `value_obj` has a non-empty PyTree " "structure, but the accumulator has been initialized " "with an empty PyTree object.") self._weight = self._weight + weight @classmethod def zeros_like( cls, obj: TArrayTree, multi_device: bool ) -> "MultiChunkAccumulator[TArrayTree]": """Creates a zero initialized accumulator as `obj`.""" if multi_device: value = (parallel.pmap_zeros_like(obj) if not types.tree_is_empty(obj) else obj) weight = parallel.replicate_all_local_devices( jnp.zeros([], dtype=jnp.int32)) else: value = (parallel.jit_zeros_like(obj) if not types.tree_is_empty(obj) else obj) weight = jnp.zeros([], dtype=jnp.int32) return cls(value, weight, multi_device) @classmethod def empty(cls, multi_device: bool) -> "MultiChunkAccumulator[Any]": """Creates an empty accumulator.""" weight = jnp.zeros([], dtype=jnp.int32) if multi_device: weight = parallel.replicate_all_local_devices(weight) return cls(None, weight, multi_device) def __repr__(self): return (f"{self.__class__.__name__}({self._accumulator!r}, " f"{self._weight!r}, {self._multi_device})") def copy(self): """Returns a copy of the PyTree structure (but not the JAX arrays).""" (flattened, structure) = jax.tree_util.tree_flatten(self) return jax.tree_util.tree_unflatten(structure, flattened) jax.tree_util.register_pytree_node( MultiChunkAccumulator, lambda x: ((x.accumulator, x.weight), (x.multi_device,)), lambda fixed, arrays: MultiChunkAccumulator(*arrays, *fixed) )
kfac-jax-main
kfac_jax/_src/utils/accumulators.py
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """K-FAC utilities for multi-device execution.""" import functools from typing import Callable, Optional, Sequence import jax from jax import core from jax import lax import jax.numpy as jnp from kfac_jax._src.utils import types Array = types.Array Numeric = types.Numeric PRNGKey = types.PRNGKey TArrayTree = types.TArrayTree # TODO(jamesmartens,botev): add a test for this function? def in_pmap(axis_name: Optional[str]) -> bool: """Returns whether we are in a pmap with the given axis name.""" if axis_name is None: return False try: # The only way to know if we are under `jax.pmap` is to check if the # function call below raises a `NameError` or not. core.axis_frame(axis_name) return True except NameError: return False def wrap_if_pmap( p_func: Callable[[TArrayTree, str], TArrayTree], ) -> Callable[[TArrayTree, Optional[str]], TArrayTree]: """Wraps `p_func` to be executed only when inside a `jax.pmap` context.""" @functools.wraps(p_func) def p_func_if_pmap(obj: TArrayTree, axis_name: Optional[str]) -> TArrayTree: return p_func(obj, axis_name) if in_pmap(axis_name) else obj return p_func_if_pmap pmean_if_pmap = wrap_if_pmap(lax.pmean) psum_if_pmap = wrap_if_pmap(lax.psum) compute_mean = jax.pmap(lambda x: lax.pmean(x, "i"), axis_name="i") compute_sum = jax.pmap(lambda x: lax.psum(x, "i"), axis_name="i") def index_if_not_scalar(value: Numeric, index: int = 0) -> Numeric: """Index `value` at axis 0 if it is not a scalar, otherwise return it.""" if isinstance(value, Array): if value.ndim > 0: # pytype: disable=attribute-error # numpy-scalars return value[index] else: return value elif isinstance(value, (float, int)): return value else: raise ValueError("The input should be an instance of `Numeric`.") def get_first(obj: TArrayTree) -> TArrayTree: """Index the PyTree leaves `x` of `obj` by `x[0]` if they are not scalars.""" return jax.tree_util.tree_map(index_if_not_scalar, obj) def get_mean(obj: TArrayTree) -> TArrayTree: """Returns the average of `obj` over different devices.""" return get_first(compute_mean(obj)) def get_sum(obj: TArrayTree) -> TArrayTree: """Returns the sum of `obj` over different devices.""" return get_first(compute_sum(obj)) broadcast_all_local_devices = jax.pmap(lambda x: x) pmap_zeros_like = jax.pmap(lambda x: jax.tree_util.tree_map(jnp.zeros_like, x)) jit_zeros_like = jax.jit(lambda x: jax.tree_util.tree_map(jnp.zeros_like, x)) def replicate_all_local_devices(obj: TArrayTree) -> TArrayTree: """Replicates `obj` to all local Jax devices.""" if types.tree_is_empty(obj): return obj return jax.device_put_replicated(obj, devices=jax.local_devices()) def make_different_rng_key_on_all_devices(rng: PRNGKey) -> PRNGKey: """Makes a different PRNG for all Jax devices and processes.""" rng = jax.random.fold_in(rng, jax.process_index()) rng = jax.random.split(rng, jax.local_device_count()) return broadcast_all_local_devices(rng) p_split = jax.pmap(lambda key: 
tuple(jax.random.split(key))) p_split_num = jax.pmap(lambda key, num: tuple(jax.random.split(key, num)), static_broadcasted_argnums=1) default_device_sync = None def host_sync( obj: TArrayTree, sync_op: Callable[[TArrayTree, str], TArrayTree], ) -> TArrayTree: """Syncs `obj` across multiple hosts with the operation `sync_op`.""" # The implementation here is to use the pmap syncing mechanisms but with only # the default device of each host. Technically we could do this with all # the devices on each host, but that would possibly be wasteful. if jax.process_count() > 1: # We set default_device_sync here because calling jax.local_devices during # the library import stage will break JAX. global default_device_sync if default_device_sync is None: default_devices = [jax.local_devices(process_index=p_idx)[0] for p_idx in range(jax.process_count())] default_device_sync = jax.pmap(lambda x, sync_op: sync_op(x, "i"), devices=default_devices, axis_name="i", static_broadcasted_argnums=1) obj = jax.tree_util.tree_map(lambda x: jnp.expand_dims(x, axis=0), obj) return get_first(default_device_sync(obj, sync_op)) return obj def host_all_gather(x: TArrayTree) -> TArrayTree: """Gathers on every host the values of the PyTree leaves `x`.""" return host_sync(x, lax.all_gather) def host_mean(x: TArrayTree) -> TArrayTree: """Computes the mean of the PyTree leaves of `x` over multiple hosts.""" return host_sync(x, lax.pmean) def sync_and_divide_value( value: TArrayTree, counter: Numeric, axis_name: Optional[str] = None, ) -> TArrayTree: """Computes the mean of `value` over all hosts and divides it by `counter`.""" value = jax.tree_util.tree_map(lambda x: x / counter, value) return pmean_if_pmap(value, axis_name) jit_sync_and_divide_value = jax.jit(sync_and_divide_value) pmap_sync_and_divide_value = jax.pmap( functools.partial(sync_and_divide_value, axis_name="i"), axis_name="i", ) # We might be able to change this to "return jnp.array(x)" in newer JAX # versions. Or maybe we can use jnp.copy now? def copy_array(x: Array) -> Array: """Copies a Jax array so that it can be donated freely.""" return x + jnp.zeros_like(x) copy_obj = jax.jit(lambda x: jax.tree_util.tree_map(copy_array, x)) _pmap_copy_obj = jax.pmap(copy_obj) def pmap_copy_obj(x: Optional[TArrayTree]) -> Optional[TArrayTree]: # pmap will fail to work if passed a totally empty tree if x is None: return None if types.tree_is_empty(x): # this does a shallow copy of the tree similar to .copy(): (flattened, structure) = jax.tree_util.tree_flatten(x) return jax.tree_util.tree_unflatten(structure, flattened) return _pmap_copy_obj(x) def distribute_thunks( thunks: Sequence[Callable[[], TArrayTree]], pmap_axis_name: str, ) -> TArrayTree: """Distributes the computation of a list of thunks over the pmapped devices. Given a list of thunks, this function distributes their computation over the devices of the current pmap in a round-robin fashion, syncronizes the results across devices, and then returns them as a sequence of PyTrees. Note that this function is meant to be used in a compiled context, and may call ``thunk[i]()`` several times for each i, with all but one call getting "optimized away" by XLA. Args: thunks: A sequence of callables performing the desired computations. Each callable must take zero arguments and return a PyTree of JAX arrays. As with callables passed to (most) standard JAX API functions, these need to be stateless and free of side effects. The output of each callable must be the same regardless of the device it is executed on. 
pmap_axis_name: The name of the pmap axis to use. Returns: A sequence of PyTrees that are the output of the corresponding element of ``thunks``. """ # The strategy here is to make a callable for each device which executes only # the thunks i such that i % total_devices == device_index, and returns a tree # of zeros for the remaining thunks. We then do a lax.switch over these based # on device_index, and return psum over these. Note that the more obvious way # of doing this, which is to perform a psum over the output of a sequence of # lax.cond calls (with one for each thunk), won't work in general. This is # because in order to save memory, XLA will sometimes elect to execute these # conds sequentially instead of in parallel. if not in_pmap(pmap_axis_name): raise ValueError(f"Provided pmap_axis_name {pmap_axis_name} is not a valid " "pmap axis in current pmap (or this function was not " "called in a pmap).") assert pmap_axis_name is not None total_devices = lax.psum(1, axis_name=pmap_axis_name) # returns a constant current_device_index = lax.axis_index(pmap_axis_name) # This should get optimized away by XLA since we don't use the values: dummy_output_trees = tuple(thunk() for thunk in thunks) def make_branch(device_index): def branch(): """Execute only thunks i such that i % total_devices == device_index.""" outs = [] for i in range(len(thunks)): if i % total_devices == device_index: outs.append(thunks[i]()) else: outs.append( jax.tree_util.tree_map(jnp.zeros_like, dummy_output_trees[i])) return tuple(outs) return branch branches = tuple(make_branch(device_index) for device_index in range(total_devices)) output_trees = jax.lax.switch(current_device_index, branches) return jax.lax.psum(output_trees, axis_name=pmap_axis_name)
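# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor-added; not part of the kfac-jax source).
# Distributes two device-independent computations across the devices of a
# pmap with `distribute_thunks`; the thunks and the axis name "i" are chosen
# arbitrarily for this example.
if __name__ == "__main__":
  def _work(x):
    # Each thunk returns the same value on every device, as required.
    thunks = (lambda: jnp.ones([3]), lambda: 2.0 * jnp.ones([3]))
    out_a, out_b = distribute_thunks(thunks, pmap_axis_name="i")
    return x + out_a + out_b

  xs = jnp.zeros([jax.local_device_count(), 3])
  print(jax.pmap(_work, axis_name="i")(xs))  # every row equals [3., 3., 3.]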
kfac-jax-main
kfac_jax/_src/utils/parallel.py
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """K-FAC utilities for classes with staged methods.""" import functools import numbers import operator from typing import Any, Callable, Optional, Sequence, Tuple, Union import jax import jax.numpy as jnp from kfac_jax._src.utils import misc from kfac_jax._src.utils import parallel from kfac_jax._src.utils import types TArrayTree = types.TArrayTree class WithStagedMethods(misc.Finalizable): """An mixin for classes which can have staged/compiled methods.""" class StagingContext: """A context manager for handling methods that are staged/compiled.""" def __init__(self, wsm_instance: "WithStagedMethods"): """Initializes the context manager. Args: wsm_instance: The corresponding `WithStagedMethods` instance. """ self._wsm_instance = wsm_instance def __enter__(self): """Enters the staging context.""" if self._wsm_instance._in_staging: raise RuntimeError("Cannot enter staging context while already in " "staging context.") self._wsm_instance._in_staging = True def __exit__(self, *_): """Exits the staging context.""" assert self._wsm_instance._in_staging, "Exiting while not in staging." self._wsm_instance._in_staging = False def __init__( self, multi_device: bool = False, pmap_axis_name: Optional[str] = None, debug: bool = False, **parent_kwargs: Any, ): """Initializes the instance. Args: multi_device: Whether any of decorated staged methods are to be run on a single or multiple devices. If this is set to `True` than any call would internally be delegated to `jax.pmap` and otherwise to `jax.jit`. pmap_axis_name: The name of the pmap axis to use when running on multiple devices. This is required if `multi_device=True`. debug: If this is set `True` than any call to a stage method would directly call the method and would not stage/compile it. **parent_kwargs: Any additional keyword arguments for the parent class. 
""" if "excluded_attribute_names" in parent_kwargs: parent_kwargs["excluded_attribute_names"] = ( ("_in_staging",) + tuple(parent_kwargs["excluded_attribute_names"])) else: parent_kwargs["excluded_attribute_names"] = ("_in_staging",) super().__init__(**parent_kwargs) if multi_device and not isinstance(pmap_axis_name, str): raise ValueError("When `multi_device=True` you must pass in a string for " "`pmap_axis_name`.") self._multi_device = multi_device self._pmap_axis_name = pmap_axis_name self._debug = debug self._in_staging = False @property def multi_device(self) -> bool: """Indicates whether staged method will be run across multiple devices.""" return self._multi_device @property def pmap_axis_name(self) -> Optional[str]: """The name of the `jax.pmap` axis to use for staged methods.""" return self._pmap_axis_name @property def debug(self) -> bool: """Whether staged methods would be run in 'debug' mode.""" return self._debug @property def in_staging(self) -> bool: """Whether we are in a staging context while compiling staged methods.""" return self._in_staging def staging_context(self) -> "StagingContext": """Returns a staging context manager, linked to this instance.""" return self.StagingContext(self) def get_first(self, obj: TArrayTree) -> TArrayTree: """Indexes the `obj` PyTree leaves over leading axis if `multi_device`.""" return parallel.get_first(obj) if self.multi_device else obj def copy_obj(self, obj: Optional[TArrayTree]) -> Optional[TArrayTree]: """Copies the object.""" if self.multi_device: return parallel.pmap_copy_obj(obj) else: return parallel.copy_obj(obj) def replicate(self, obj: TArrayTree) -> TArrayTree: """Replicates the object to all local devices if `multi_device`.""" if self.multi_device: return parallel.replicate_all_local_devices(obj) else: return obj def staged( method: Callable[..., TArrayTree], static_argnums: Optional[Union[int, Sequence[int]]] = None, donate_argnums: Optional[Union[int, Sequence[int]]] = None, ) -> Callable[..., TArrayTree]: """Makes the instance method staged. This decorator **should** only be applied to instance methods of classes that inherit from the `WithStagedMethods` class. The decorator makes the decorated method staged, which is equivalent to `jax.jit` if `instance.multi_device` is `False` and to `jax.pmap` otherwise. When specifying static and donated argunms, the `self` reference **must not** be counted. Example: @functools.partial(staged, donate_argunms=0) def try(self, x): ... then `instance.try(x)` is equivalent to `jax.jit(instance.try, donate_argnums=0)(x)` if `instance.multi_device` is `False` and to `jax.pmap(instance.try, donate_argnums=0)(x)` otherwise. Args: method: The method to be transformed into a staged method. static_argnums: The static argument numbers, as defined in `jax.jit/pmap`. donate_argnums: The donated argument numbers, as defined in `jax.jit/pmap`. Returns: The transformed method, which will now be a staged function. """ if isinstance(static_argnums, int): static_argnums = (static_argnums,) # This is needed because of b/147015762 if donate_argnums is None: donate_argnums = () if isinstance(donate_argnums, int): donate_argnums = (donate_argnums,) else: donate_argnums: Tuple[int, ...] 
= tuple(donate_argnums) bcast_argnums = static_argnums or () # shift static_argnums by 1 and include instance (self) static_argnums = (0,) + tuple(i + 1 for i in (static_argnums or ())) # shift donate_argnums by 1 and include state donate_argnums = tuple(i + 1 for i in donate_argnums) pmap_funcs = {} jitted_func = jax.jit(method, static_argnums=static_argnums, donate_argnums=donate_argnums) @functools.wraps(method) def decorated(instance: "WithStagedMethods", *args: Any) -> TArrayTree: if instance.in_staging: return method(instance, *args) with instance.staging_context(): if instance.multi_device and instance.debug: # In this case we want to call `method` once for each device index. # Note that this might not always produce sensible behavior, and will # depend on the details of the method and if it has side effects on the # state of the class. outs = [] non_bcast_args = [args[i] if i not in bcast_argnums else None for i in range(len(args))] for i in range(jax.local_device_count()): non_bcast_args_i = jax.tree_util.tree_map( operator.itemgetter(i), non_bcast_args) args_i = [ non_bcast_args_i[j] if j not in bcast_argnums else args[j] for j in range(len(args)) ] outs.append(method(instance, *args_i)) outs = jax.tree_util.tree_map(lambda *args_: jnp.stack(args_), *outs) elif instance.debug: outs = method(instance, *args) elif instance.multi_device: # Compute in_axes so we broadcast any argument that is a scalar in_axes = [None] for i in range(len(args)): if (isinstance(args[i], numbers.Number) or (isinstance(args[i], jax.Array) and not args[i].shape)): # Single scalar in_axes.append(None) else: in_axes.append(0) in_axes = tuple(in_axes) key = (instance.pmap_axis_name, in_axes) func = pmap_funcs.get(key) if func is None: func = jax.pmap( method, static_broadcasted_argnums=static_argnums, donate_argnums=donate_argnums, axis_name=instance.pmap_axis_name, in_axes=in_axes, ) pmap_funcs[key] = func outs = func(instance, *args) else: outs = jitted_func(instance, *args) return outs return decorated
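# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor-added; not part of the kfac-jax source).
# Shows the intended single-device pattern for `WithStagedMethods` together
# with the `staged` decorator. The toy class below is hypothetical, and this
# assumes the `Finalizable` parent permits direct instantiation like this.
if __name__ == "__main__":
  class _Doubler(WithStagedMethods):

    @staged
    def double(self, x):
      return jax.tree_util.tree_map(lambda v: 2 * v, x)

  d = _Doubler(multi_device=False)
  print(d.double(jnp.arange(4.0)))  # staged via jax.jit on the first call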
kfac-jax-main
kfac_jax/_src/utils/staging.py
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module with models used for testing.""" import functools from typing import Any, Callable, List, Mapping, Optional, Sequence, Tuple, Union import distrax import haiku as hk import jax import jax.numpy as jnp import kfac_jax tags = kfac_jax.layers_and_loss_tags loss_functions = kfac_jax.loss_functions utils = kfac_jax.utils Array = kfac_jax.utils.Array PRNGKey = kfac_jax.utils.PRNGKey LayerValues = List[Tuple[Array, Array]] LayerInputs = Tuple[Array, LayerValues, Optional[Tuple[Array, ...]]] LossOutputs = Union[ List[List[Array]], List[Array], Tuple[List[Array], LayerValues] ] def _extract_params( instance: hk.Module, names: Sequence[str], ) -> Tuple[Array, Optional[Array]]: """Extracts the weights and bias parameters or `None` if don't exists.""" params = [None] * len(names) for name, v in instance.params_dict().items(): found = False for i, k in enumerate(names): # In the tests, parameter names are guaranteed to have the form # 'layer_name/parameter_name'. if "/" + k in name: params[i] = v found = True break if not found: raise ValueError(f"Did not match parameter {name}.") assert len(params) == 2 and params[0] is not None return tuple(params) class _Linear(hk.Linear): """A linear layer which also can register and return intermediate values.""" def __init__( self, *args: Any, explicit_tagging: bool = False, **kwargs: Any, ): """Initializes the instance. Args: *args: Arguments to pass to the `hk.Linear` constructor. explicit_tagging: Whether to explicitly tag the computation of the layer with a `dense_tag`. **kwargs: Keywords arguments to pass to the `hk.Conv2D` constructor. """ self._explicit_tagging = explicit_tagging super().__init__(*args, **kwargs) def __call__(self, inputs: LayerInputs, *_) -> LayerInputs: # pytype: disable=signature-mismatch # overriding-parameter-name-checks jax_version = tuple(map(int, jax.__version__.split(".")[:3])) x, layer_values, aux = inputs y = super().__call__(x, precision=jax.lax.Precision.HIGHEST) if aux is not None: y, aux = y + aux[0], aux[1:] if self._explicit_tagging: params = _extract_params(self, ("w", "b")) if jax_version < (0, 4, 14): preferred_element_type = None else: assert all(p.dtype == y.dtype for p in params if p is not None) preferred_element_type = y.dtype y = tags.register_dense( y, x, *params, dimension_numbers=(((1,), (0,)), ((), ())), precision=(jax.lax.Precision.HIGHEST, jax.lax.Precision.HIGHEST), preferred_element_type=preferred_element_type, ) layer_values.append((x, y)) return y, layer_values, aux class _Conv2D(hk.Conv2D): """A conv2d layer which also can register and return intermediate values.""" def __init__( self, *args: Any, explicit_tagging: bool = False, **kwargs: Any, ): """Initializes the instance. Args: *args: Arguments to pass to the `hk.Conv2D` constructor. explicit_tagging: Whether to explicitly tag the computation of the layer with a `dense_tag`. 
**kwargs: Keywords arguments to pass to the `hk.Conv2D` constructor. """ self._explicit_tagging = explicit_tagging super().__init__(*args, **kwargs) def __call__(self, inputs: LayerInputs, *_) -> LayerInputs: x, layer_values, aux = inputs y = super().__call__(x, precision=jax.lax.Precision.HIGHEST) if aux is not None: y, aux = y + aux[0], aux[1:] layer_values.append((x, y)) if self._explicit_tagging: params = _extract_params(self, ("w", "b")) y = tags.register_conv2d( y, x, *params, batch_group_count=1, dimension_numbers=jax.lax.ConvDimensionNumbers( lhs_spec=(0, 3, 1, 2), rhs_spec=(3, 2, 0, 1), out_spec=(0, 3, 1, 2), ), feature_group_count=1, lhs_dilation=(1, 1), padding=((0, 1), (0, 1)), precision=(jax.lax.Precision.HIGHEST, jax.lax.Precision.HIGHEST), preferred_element_type=None, rhs_dilation=(1, 1), window_strides=self.stride, ) return y, layer_values, aux class _LayerNorm(hk.LayerNorm): """A layer norm layer which can register and return intermediate values.""" def __init__( self, *args: Any, explicit_tagging: bool = False, **kwargs: Any, ): """Initializes the instance. Args: *args: Arguments to pass to the `hk.LayerNorm` constructor. explicit_tagging: Whether to explicitly tag the computation of the layer with a `dense_tag`. **kwargs: Keywords arguments to pass to the `hk.Conv2D` constructor. """ self._explicit_tagging = explicit_tagging super().__init__(*args, create_scale=True, create_offset=True, **kwargs) def __call__(self, inputs: LayerInputs, *_) -> LayerInputs: # pytype: disable=signature-mismatch # jax-ndarray x, layer_values, aux = inputs mean = jnp.mean(x, axis=self.axis, keepdims=True) variance = jnp.var(x, axis=self.axis, keepdims=True) param_shape = x.shape[-1:] scale = hk.get_parameter("scale", param_shape, x.dtype, init=self.scale_init) offset = hk.get_parameter("offset", param_shape, x.dtype, init=self.offset_init) scale = jnp.broadcast_to(scale, x.shape) offset = jnp.broadcast_to(offset, x.shape) mean = jnp.broadcast_to(mean, x.shape) rsqrt = jax.lax.rsqrt(variance + self.eps) # This is specially implemented to preserve correct ordering in the jaxpr multiplier = scale * rsqrt diff = x - mean y = multiplier * diff + offset normalized_inputs = diff * rsqrt if aux is not None: y, aux = y + aux[0], aux[1:] layer_values.append((normalized_inputs, y)) if self._explicit_tagging: params = _extract_params(self, ("scale", "offset")) y = tags.register_scale_and_shift(y, normalized_inputs, *params) return y, layer_values, aux class _VanillaRNN(hk.VanillaRNN): """Modified Vanilla RNN to handle layer values and auxiliary inputs.""" def __init__( self, hidden_size: int, activation: Callable[[LayerInputs], LayerInputs], explicit_tagging: bool = False, double_bias: bool = True, name: Optional[str] = None ): super().__init__(hidden_size, double_bias, name=name) self.activation = activation self.explicit_tagging = explicit_tagging def __call__( self, inputs: LayerInputs, prev_state: Array, *_, ) -> Tuple[Tuple[Array, LayerValues], Array]: x, layer_values, aux = inputs input_to_hidden = _Linear( self.hidden_size, explicit_tagging=self.explicit_tagging) hidden_to_hidden = _Linear( self.hidden_size, explicit_tagging=self.explicit_tagging, with_bias=self.double_bias) ih, layer_values, aux = input_to_hidden((x, layer_values, aux)) hh, layer_values, aux = hidden_to_hidden((x, layer_values, aux)) out, layer_values, aux = self.activation((ih + hh, layer_values, aux)) assert aux is None or not aux return (out, layer_values), out def _modify_func( func: Callable[[Array], Array] ) -> 
Callable[[LayerInputs], LayerInputs]: """Functorially maps f: x -> y to f2: (x, p, q) -> (f(x), p, q).""" def func2(inputs: LayerInputs) -> LayerInputs: """Applies `func` only to the first argument of `inputs`.""" if not (isinstance(inputs, tuple) and len(inputs) == 3): raise ValueError("Transformed activations take a tuple of length 3 as an " "argument.") return func(inputs[0]), inputs[1], inputs[2] return func2 _special_tanh = _modify_func(jax.nn.tanh) _special_relu = _modify_func(jax.nn.relu) _special_flatten = _modify_func(lambda x: x.reshape([x.shape[0], -1])) _special_identity = _modify_func(lambda x: x) class _DeterministicBernoulli(distrax.Bernoulli): """A fake deterministic bernoulli distribution, to make KFAC deterministic.""" def _sample_n(self, key: PRNGKey, n: int) -> Array: del key # not used return jnp.repeat(jnp.round(self.probs)[None], n, axis=0) class _DeterministicBernoulliNegativeLogProbLoss( loss_functions.MultiBernoulliNegativeLogProbLoss): """A negative log-likelihood loss using the `DeterministicBernoulli`.""" @property def dist(self): return _DeterministicBernoulli(logits=self._logits, dtype=jnp.int32) _DeterministicBernoulliNegativeLogProbLoss_tag = loss_functions.tags.LossTag( _DeterministicBernoulliNegativeLogProbLoss, parameter_dependants=["logits"], parameter_independants=["targets", "weight"], ) def _register_deterministic_bernoulli( logits: Array, targets: Array, weight=1.0 ): """Registers a deterministic bernoulli loss.""" if targets is None: args = [logits, weight] args_names = ["logits", "weight"] else: args = [logits, targets, weight] args_names = ["logits", "targets", "weight"] _DeterministicBernoulliNegativeLogProbLoss_tag.bind(*args, args_names=args_names) class _DeterministicCategorical(distrax.Categorical): """A fake deterministic bernoulli distribution, to make KFAC deterministic.""" def _sample_n(self, key: PRNGKey, n: int) -> Array: del key # not used return jnp.repeat(jnp.round(self.probs)[None], n, axis=0) class _DeterministicCategoricalNegativeLogProbLoss( loss_functions.CategoricalLogitsNegativeLogProbLoss): """A negative log-likelihood loss using the `DeterministicBernoulli`.""" @property def dist(self) -> _DeterministicCategorical: return _DeterministicCategorical(logits=self._logits, dtype=jnp.int32) _DeterministicCategoricalNegativeLogProbLoss_tag = loss_functions.tags.LossTag( _DeterministicCategoricalNegativeLogProbLoss, parameter_dependants=["logits"], parameter_independants=["targets", "weight"], ) def _register_deterministic_categorical( logits: Array, targets: Array, weight=1.0 ) -> Array: """Registers a deterministic categorical loss.""" if targets is None: args = [logits, weight] args_names = ["logits", "weight"] else: args = [logits, targets, weight] args_names = ["logits", "targets", "weight"] return _DeterministicCategoricalNegativeLogProbLoss_tag.bind( *args, args_names=args_names)[0] def squared_error_loss( params: utils.Params, batch: utils.Batch, model_func: Callable[..., hk.Transformed], l2_reg: float = 0.0, explicit_tagging: bool = False, return_losses_outputs: bool = False, return_layer_values: bool = False, ) -> LossOutputs: """A squared error loss computed for the given model function.""" x, y = batch["images"], batch["targets"] y_hat, layer_values = model_func( explicit_tagging=explicit_tagging, output_dim=y.shape[-1], # pytype: disable=attribute-error # numpy-scalars ).apply(params, x) assert y_hat.shape == y.shape # pytype: disable=attribute-error # numpy-scalars y = y.reshape((-1, y.shape[-1])) # pytype: 
disable=attribute-error # numpy-scalars y_hat = y_hat.reshape((-1, y_hat.shape[-1])) loss_functions.register_squared_error_loss(y_hat, y, weight=0.5) if return_losses_outputs: return [[y_hat]] loss = jnp.mean(jnp.sum((y_hat - y) ** 2, axis=-1)) / 2 loss = loss + l2_reg * utils.norm(params) if return_layer_values: return [loss], layer_values else: return [loss] def autoencoder( layer_widths: Sequence[int], output_dim: int, explicit_tagging: bool = False, activation: Callable[[LayerInputs], LayerInputs] = _special_tanh, ) -> hk.Transformed: """Constructs a Haiku transformed object of the autoencoder network.""" def func( batch: Union[Array, Mapping[str, Array]], aux: Optional[Tuple[Array, ...]] = None, ) -> Tuple[Array, LayerValues]: images = batch["images"] if isinstance(batch, Mapping) else batch images = images.reshape([images.shape[0], -1]) layers = [] for width in layer_widths: layers.append(_Linear(output_size=width, explicit_tagging=explicit_tagging)) layers.append(activation) layers.append(_Linear(output_size=output_dim, explicit_tagging=explicit_tagging)) model = hk.Sequential(layers) output, layer_values, aux = model((images, list(), aux)) assert aux is None or not aux return output, layer_values return hk.without_apply_rng(hk.transform(func)) def linear_squared_error_autoencoder_loss( params: utils.Params, batch: utils.Batch, layer_widths: Sequence[int], l2_reg: float = 0.0, explicit_tagging: bool = False, return_losses_outputs: bool = False, return_layer_values: bool = False, ) -> LossOutputs: """A linear autoencoder with squared error.""" batch["images"] = batch["images"].reshape(batch["images"].shape[0], -1) # type: ignore # numpy-scalars batch["targets"] = batch["images"] # pytype: disable=unsupported-operands # numpy-scalars model_func = functools.partial( autoencoder, layer_widths=layer_widths, activation=_special_identity) return squared_error_loss( params=params, batch=batch, model_func=model_func, l2_reg=l2_reg, explicit_tagging=explicit_tagging, return_losses_outputs=return_losses_outputs, return_layer_values=return_layer_values, ) def autoencoder_deterministic_loss( params: utils.Params, batch: utils.Batch, layer_widths: Sequence[int], l2_reg: float = 0.0, explicit_tagging: bool = False, activation: Callable[[LayerInputs], LayerInputs] = _special_tanh, ) -> Array: """Evaluate the autoencoder with a deterministic loss.""" x = batch["images"].reshape((batch["images"].shape[0], -1)) # pytype: disable=attribute-error # numpy-scalars logits, _ = autoencoder( layer_widths, x.shape[-1], explicit_tagging, activation=activation, ).apply(params, x) _register_deterministic_bernoulli(logits, x) loss = - distrax.Bernoulli(logits=logits).log_prob(x) loss = jnp.mean(jnp.sum(loss, axis=-1)).astype(logits.dtype) return loss + l2_reg * utils.norm(params) def autoencoder_with_two_losses( params: utils.Params, batch: utils.Batch, layer_widths: Sequence[int], aux: Optional[Tuple[Array, ...]] = None, explicit_tagging: bool = False, return_losses_outputs: bool = False, return_layer_values: bool = False, activation: Callable[[LayerInputs], LayerInputs] = _special_tanh, ) -> LossOutputs: """Evaluate the autoencoder with two losses.""" x = batch["images"].reshape((batch["images"].shape[0], -1)) # pytype: disable=attribute-error # numpy-scalars logits, layer_values = autoencoder( layer_widths, x.shape[-1], explicit_tagging, activation=activation, ).apply(params, x, aux) # Register both losses in KFAC loss_functions.register_multi_bernoulli_predictive_distribution( logits, x) 
loss_functions.register_normal_predictive_distribution( logits, x, weight=0.1) if return_losses_outputs: return [[logits], [logits]] loss_1: Array = - distrax.Bernoulli(logits=logits).log_prob(x) # pytype: disable=annotation-type-mismatch scale_diag = jnp.ones_like(logits) * jnp.sqrt(0.5) loss_2: Array = - distrax.MultivariateNormalDiag( # pytype: disable=annotation-type-mismatch loc=logits, scale_diag=scale_diag).log_prob(x) if return_layer_values: return [loss_1, 0.1 * loss_2], layer_values else: return [loss_1, 0.1 * loss_2] def conv_classifier( num_classes: int, layer_channels: Sequence[int], explicit_tagging: bool = False, kernel_size: int = 3, stride: int = 2, activation: Callable[[LayerInputs], LayerInputs] = _special_tanh, ) -> hk.Transformed: """Constructs a Haiku transformed object of a convolutional classifier.""" def func( batch: Union[Array, Mapping[str, Array]], aux: Optional[Tuple[Array, ...]] = None, ) -> Tuple[Array, LayerValues]: images = batch["images"] if isinstance(batch, Mapping) else batch layers = [] # Conv channels for num_channels in layer_channels[:-1]: layers.append(_Conv2D( output_channels=num_channels, kernel_shape=kernel_size, stride=stride, explicit_tagging=explicit_tagging)) layers.append(activation) # Last conv has layer norm layers.append(_Conv2D( output_channels=layer_channels[-1], kernel_shape=kernel_size, stride=stride, with_bias=False, explicit_tagging=explicit_tagging)) layers.append(_LayerNorm( axis=-1, explicit_tagging=explicit_tagging)) layers.append(activation) # Flatten layers.append(_special_flatten) # One Linear layer with activation and final layer layers.append(_Linear(output_size=layer_channels[-1], explicit_tagging=explicit_tagging)) layers.append(activation) layers.append(_Linear(output_size=num_classes, explicit_tagging=explicit_tagging)) model = hk.Sequential(layers) output, layer_values, aux = model((images, list(), aux)) assert aux is None or not aux return output, layer_values return hk.without_apply_rng(hk.transform(func)) def conv_classifier_deterministic_loss( params: utils.Params, batch: utils.Batch, num_classes: int, layer_channels: Sequence[int], l2_reg: float = 0.0, explicit_tagging: bool = False, activation: Callable[[LayerInputs], LayerInputs] = _special_tanh, ) -> Array: """Evaluate the convolutional classifier with a deterministic loss.""" logits, _ = conv_classifier( num_classes, layer_channels, explicit_tagging, activation=activation ).apply(params, batch["images"]) _register_deterministic_categorical(logits, batch["labels"]) loss = - distrax.Categorical(logits=logits).log_prob(batch["labels"]) loss = jnp.mean(jnp.sum(loss, axis=-1)).astype(logits.dtype) return loss + l2_reg * utils.norm(params) def conv_classifier_loss( params: utils.Params, batch: utils.Batch, num_classes: int, layer_channels: Sequence[int], aux: Optional[Tuple[Array, ...]] = None, l2_reg: float = 0.0, explicit_tagging: bool = False, return_losses_outputs: bool = False, return_layer_values: bool = False, activation: Callable[[LayerInputs], LayerInputs] = _special_tanh, ) -> LossOutputs: """Evaluates the convolutional classifier loss.""" logits, layer_values = conv_classifier( num_classes, layer_channels, explicit_tagging, activation=activation ).apply(params, batch["images"], aux=aux) loss_functions.register_categorical_predictive_distribution( logits, batch["labels"]) if return_losses_outputs: return [[logits]] loss = - distrax.Categorical(logits=logits).log_prob(batch["labels"]) loss = loss + l2_reg * utils.norm(params) if return_layer_values: return 
[loss], layer_values else: return [loss] def layer_stack_with_scan_mlp( layer_widths: Sequence[int], output_dim: int, explicit_tagging: bool = False, activation: Callable[[LayerInputs], LayerInputs] = _special_tanh, ) -> hk.Transformed: """A model that uses ``hk.experimental.layer_stack`` with scan.""" def scan_fn( x: Array, aux: Optional[Tuple[Array, ...]] = None, ) -> Tuple[Array, LayerValues]: layers = [] for w in layer_widths: layers.append(_Linear(w, explicit_tagging=explicit_tagging)) layers.append(activation) layers.append(_Linear(x.shape[-1], explicit_tagging=explicit_tagging)) model = hk.Sequential(layers) output, layer_values, aux = model((x, list(), aux)) assert aux is None or not aux return output, layer_values def func( batch: Union[Array, Mapping[str, Array]], aux: Optional[Tuple[Array, ...]] = None, ) -> Tuple[Array, LayerValues]: x = batch["images"] if isinstance(batch, Mapping) else batch stack = hk.experimental.layer_stack(2, with_per_layer_inputs=True)(scan_fn) if aux is None: aux = None x, layer_values = stack(x) else: aux_scan, aux = aux x, layer_values = stack(scan_fn)(x, aux_scan) y_hat, layer_values, aux = _Linear( output_dim, explicit_tagging=explicit_tagging)((x, layer_values, aux)) assert aux is None or not aux return y_hat, layer_values return hk.without_apply_rng(hk.transform(func)) def layer_stack_mlp_loss( params: utils.Params, batch: utils.Batch, layer_widths: Sequence[int], l2_reg: float = 0.0, explicit_tagging: bool = False, return_losses_outputs: bool = False, return_layer_values: bool = False, activation: Callable[[LayerInputs], LayerInputs] = _special_tanh, ) -> LossOutputs: """A layer stack mlp loss.""" return squared_error_loss( params=params, batch=batch, model_func=functools.partial( layer_stack_with_scan_mlp, layer_widths=layer_widths, activation=activation, ), l2_reg=l2_reg, explicit_tagging=explicit_tagging, return_losses_outputs=return_losses_outputs, return_layer_values=return_layer_values, ) def vanilla_rnn_with_scan( hidden_size: int, output_dim: int, explicit_tagging: bool = False, activation: Callable[[LayerInputs], LayerInputs] = _special_tanh, ) -> hk.Transformed: """A model that uses an RNN with scan.""" def func( batch: Union[Array, Mapping[str, Array]], aux: Optional[Tuple[Array, ...]] = None, ) -> Tuple[Array, LayerValues]: x = batch["images"] if isinstance(batch, Mapping) else batch core = _VanillaRNN( hidden_size, activation=activation, explicit_tagging=explicit_tagging) init_state = core.initial_state(x.shape[1]) if aux is None: aux = None unroll_in = (x, list(), None) (x, layer_values), _ = hk.dynamic_unroll(core, unroll_in, init_state) else: aux_rnn, aux = aux unroll_in = (x, list(), aux_rnn) (x, layer_values), _ = hk.dynamic_unroll(core, unroll_in, init_state) layer_values = list() # We need this in order the dense tag to recognize things correctly x_r = x.reshape((-1, x.shape[-1])) y_hat, layer_values, aux = _Linear( output_dim, explicit_tagging=explicit_tagging)((x_r, layer_values, aux)) y_hat = y_hat.reshape(x.shape[:2] + (output_dim,)) assert aux is None or not aux return y_hat, layer_values return hk.without_apply_rng(hk.transform(func)) def vanilla_rnn_with_scan_loss( params: utils.Params, batch: utils.Batch, hidden_size: int, l2_reg: float = 0.0, explicit_tagging: bool = False, return_losses_outputs: bool = False, return_layer_values: bool = False, activation: Callable[[LayerInputs], LayerInputs] = _special_tanh, ) -> LossOutputs: """A layer stack mlp loss.""" return squared_error_loss( params=params, batch=batch, 
model_func=functools.partial( vanilla_rnn_with_scan, hidden_size=hidden_size, activation=activation, ), l2_reg=l2_reg, explicit_tagging=explicit_tagging, return_losses_outputs=return_losses_outputs, return_layer_values=return_layer_values, ) NON_LINEAR_MODELS = [ ( autoencoder([20, 10, 20], output_dim=8).init, functools.partial( autoencoder_with_two_losses, layer_widths=[20, 10, 20]), dict(images=(8,)), 1231987, ), ( conv_classifier( num_classes=10, layer_channels=[8, 16] ).init, functools.partial( conv_classifier_loss, num_classes=10, layer_channels=[8, 16]), dict(images=(8, 8, 3), labels=(10,)), 354649831, ), ] LINEAR_MODELS = [ ( autoencoder([20, 10, 20], output_dim=8).init, functools.partial( linear_squared_error_autoencoder_loss, layer_widths=[20, 10, 20]), dict(images=(8,)), 1240982837, ), ] PIECEWISE_LINEAR_MODELS = [ ( autoencoder([20, 10, 20], output_dim=8).init, functools.partial( autoencoder_with_two_losses, layer_widths=[20, 10, 20], activation=_special_relu, ), dict(images=(8,)), 1231987, ), ] SCAN_MODELS = [ ( layer_stack_with_scan_mlp([20, 15, 10], output_dim=2).init, functools.partial( layer_stack_mlp_loss, layer_widths=[20, 15, 10], activation=_special_tanh, ), dict(images=(13,), targets=(2,)), 9812386123, ), ( vanilla_rnn_with_scan(20, output_dim=2).init, functools.partial( vanilla_rnn_with_scan_loss, hidden_size=20, activation=_special_tanh, ), dict(images=(7, 13), targets=(7, 2)), 650981239, ), ]
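# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor-added; not part of the kfac-jax source).
# Builds the small test autoencoder from NON_LINEAR_MODELS on random data.
# The resulting `params` and `loss_fn` are exactly what the test files below
# pass to the kfac_jax estimators and the graph matcher.
if __name__ == "__main__":
  init_key, data_key = jax.random.split(jax.random.PRNGKey(0))
  batch = {"images": jax.random.uniform(data_key, (4, 8))}
  params = autoencoder([20, 10, 20], output_dim=8).init(init_key, batch)
  loss_fn = functools.partial(
      autoencoder_with_two_losses, layer_widths=[20, 10, 20])
  # `loss_fn(params, batch)` is the callable that the estimators tag and
  # differentiate in the tests that follow.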
kfac-jax-main
tests/models.py
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing the functionality of the graph matcher.""" import functools from typing import Callable, Mapping from absl.testing import absltest from absl.testing import parameterized import jax import jax.numpy as jnp import kfac_jax from tests import models Array = kfac_jax.utils.Array Shape = kfac_jax.utils.Shape class TestGraphMatcher(parameterized.TestCase): """Test class for the functions in `tag_graph_matcher.py`.""" def check_equation_match(self, eqn1, vars_to_vars, vars_to_eqn): """Checks that equation is matched in the other graph.""" eqn1_out_vars = [v for v in eqn1.outvars if not isinstance(v, jax.core.DropVar)] eqn2_out_vars = [vars_to_vars[v] for v in eqn1_out_vars] eqns = [vars_to_eqn[v] for v in eqn2_out_vars] self.assertTrue(all(e == eqns[0] for e in eqns[1:])) eqn2 = eqns[0] self.assertEqual(eqn1.primitive, eqn2.primitive) if eqn1.primitive.name == "conv2d_tag": # params removed in https://github.com/google/jax/pull/14211 skip_params = ["lhs_shape", "rhs_shape"] else: skip_params = [] if eqn1.primitive.name == "cond": raise NotImplementedError() elif eqn1.primitive.name == "while": exclude_param = "body_jaxpr" elif eqn1.primitive.name == "scan": exclude_param = "jaxpr" elif eqn1.primitive.name in ("xla_call", "xla_pmap"): exclude_param = "call_jaxpr" else: exclude_param = "" # Check all eqn parameters for k in eqn1.params: if k != exclude_param and k not in skip_params: self.assertEqual(eqn1.params[k], eqn2.params[k]) # For higher order primitive check the jaxpr match if exclude_param: j1 = eqn1.params[exclude_param] j2 = eqn2.params[exclude_param] if isinstance(j1, jax.core.ClosedJaxpr): assert isinstance(j2, jax.core.ClosedJaxpr) self.assertEqual(len(j1.consts), len(j2.consts)) j1 = j1.jaxpr j2 = j2.jaxpr self.check_jaxpr_equal(j1, j2, True) # Check variables for v1, v2 in zip(eqn1.invars, eqn2.invars): if isinstance(v1, jax.core.Literal): self.assertIsInstance(v2, jax.core.Literal) self.assertEqual(v1.aval, v2.aval) else: self.assertEqual(v1.aval.shape, v2.aval.shape) self.assertEqual(v1.aval.dtype, v2.aval.dtype) vars_to_vars[v1] = v2 return vars_to_vars def check_jaxpr_equal(self, jaxpr_1, jaxpr_2, map_output_vars: bool): """Checks that the two jaxpr match.""" self.assertEqual(len(jaxpr_1.invars), len(jaxpr_2.invars)) self.assertEqual(len(jaxpr_1.constvars), len(jaxpr_2.constvars)) self.assertEqual(len(jaxpr_1.outvars), len(jaxpr_2.outvars)) self.assertEqual(len(jaxpr_1.eqns), len(jaxpr_2.eqns)) # Extract all loss tags from both jax expressions l1_eqns = [] for eqn in jaxpr_1.eqns: if isinstance(eqn.primitive, kfac_jax.layers_and_loss_tags.LossTag): l1_eqns.append(eqn) l2_eqns = [] vars_to_eqn = {} for eqn in jaxpr_2.eqns: if isinstance(eqn.primitive, kfac_jax.layers_and_loss_tags.LossTag): l2_eqns.append(eqn) for v in eqn.outvars: vars_to_eqn[v] = eqn self.assertEqual(len(l1_eqns), len(l2_eqns)) # Match all losses output variables if map_output_vars: vars_to_vars = 
dict(zip(jaxpr_1.outvars, jaxpr_2.outvars)) else: vars_to_vars = {} for eqn1, eqn2 in zip(l1_eqns, l2_eqns): self.assertEqual(len(eqn1.outvars), len(eqn2.outvars)) for v1, v2 in zip(eqn1.outvars, eqn2.outvars): if isinstance(v1, jax.core.DropVar): self.assertIsInstance(v2, jax.core.DropVar) elif isinstance(v1, jax.core.Literal): self.assertIsInstance(v2, jax.core.Literal) self.assertEqual(v1.aval, v2.aval) else: self.assertEqual(v1.aval.shape, v2.aval.shape) self.assertEqual(v1.aval.dtype, v2.aval.dtype) vars_to_vars[v1] = v2 # Match all others equations for eqn in reversed(jaxpr_1.eqns): vars_to_vars = self.check_equation_match(eqn, vars_to_vars, vars_to_eqn) for v1, v2 in zip(jaxpr_1.invars, jaxpr_2.invars): if v1 in vars_to_vars: self.assertEqual(v2, vars_to_vars[v1]) self.assertEqual(v1.aval.shape, v2.aval.shape) self.assertEqual(v1.aval.dtype, v2.aval.dtype) self.assertEqual(v1.count, v2.count) @parameterized.parameters(models.NON_LINEAR_MODELS + models.SCAN_MODELS) def test_auto_register_tags_jaxpr( self, init_func: Callable[..., models.hk.Params], model_func: Callable[..., Array], data_point_shapes: Mapping[str, Shape], seed: int, data_size: int = 4, ): """Tests the tags auto registration.""" rng = jax.random.PRNGKey(seed) init_key, data_key = jax.random.split(rng) data = {} for name, shape in data_point_shapes.items(): data_key, key = jax.random.split(data_key) data[name] = jax.random.uniform(key, (data_size,) + shape) if name == "labels": data[name] = jnp.argmax(data[name], axis=-1) params = init_func(init_key, data) func = kfac_jax.tag_graph_matcher.auto_register_tags( model_func, (params, data)) jaxpr = jax.make_jaxpr(func)(params, data).jaxpr tagged_func = functools.partial( model_func, explicit_tagging=True, return_losses_outputs=True, ) tagged_jaxpr = jax.make_jaxpr(tagged_func)(params, data).jaxpr self.check_jaxpr_equal(jaxpr, tagged_jaxpr, False) if __name__ == "__main__": absltest.main()
kfac-jax-main
tests/test_graph_matcher.py
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing functionalities of the curvature estimation.""" import functools from typing import Callable, Mapping from absl.testing import absltest from absl.testing import parameterized import jax import jax.numpy as jnp import kfac_jax from tests import models import numpy as np Array = kfac_jax.utils.Array PRNGKey = kfac_jax.utils.PRNGKey Shape = kfac_jax.utils.Shape StateType = kfac_jax.curvature_estimator.StateType NON_LINEAR_MODELS_AND_CURVATURE_TYPE = [ model + ("ggn",) for model in models.NON_LINEAR_MODELS ] + [ model + ("fisher",) for model in models.NON_LINEAR_MODELS ] LINEAR_MODELS_AND_CURVATURE_TYPE = [ model + ("ggn",) for model in models.LINEAR_MODELS ] + [ model + ("fisher",) for model in models.LINEAR_MODELS ] PIECEWISE_LINEAR_MODELS_AND_CURVATURE = [ model + ("ggn",) for model in models.PIECEWISE_LINEAR_MODELS ] + [ model + ("fisher",) for model in models.PIECEWISE_LINEAR_MODELS ] @functools.partial(jax.jit, static_argnums=(0, 3, 4)) def compute_exact_approx_curvature( estimator: kfac_jax.CurvatureEstimator[StateType], rng: PRNGKey, func_args: kfac_jax.utils.FuncArgs, batch_size: int, curvature_type: str, ) -> StateType: """Computes the full Fisher matrix approximation for the estimator.""" state = estimator.init( rng=rng, func_args=func_args, exact_powers_to_cache=None, approx_powers_to_cache=None, cache_eigenvalues=False, ) state = estimator.update_curvature_matrix_estimate( state=state, ema_old=0.0, ema_new=1.0, batch_size=batch_size, rng=rng, func_args=func_args, estimation_mode=f"{curvature_type}_exact", ) estimator.sync(state, pmap_axis_name="i") return state class TestEstimator(parameterized.TestCase): """Testing of different curvature estimators.""" def assertAllClose( self, x: kfac_jax.utils.PyTree, y: kfac_jax.utils.PyTree, check_dtypes: bool = True, atol: float = 1e-6, rtol: float = 1e-6, ): """Asserts that the two PyTrees are close up to the provided tolerances.""" x_v, x_tree = jax.tree_util.tree_flatten(x) y_v, y_tree = jax.tree_util.tree_flatten(y) self.assertEqual(x_tree, y_tree) for xi, yi in zip(x_v, y_v): self.assertEqual(xi.shape, yi.shape) if check_dtypes: self.assertEqual(xi.dtype, yi.dtype) np.testing.assert_allclose(xi, yi, rtol=rtol, atol=atol) @parameterized.parameters(NON_LINEAR_MODELS_AND_CURVATURE_TYPE) def test_explicit_exact_full( self, init_func: Callable[..., models.hk.Params], model_func: Callable[..., Array], data_point_shapes: Mapping[str, Shape], seed: int, curvature_type: str, data_size: int = 4, ): """Tests the explicit exact estimator matches the implicit one.""" rng_key = jax.random.PRNGKey(seed) init_key, data_key, estimator_key = jax.random.split(rng_key, 3) # Generate data data = {} for name, shape in data_point_shapes.items(): data_key, key = jax.random.split(data_key) data[name] = jax.random.uniform(key, (data_size, *shape)) if name == "labels": data[name] = jnp.argmax(data[name], axis=-1) params = init_func(init_key, data) 
func_args = (params, data) # Compute curvature matrix using the explicit exact curvature explicit_estimator = kfac_jax.ExplicitExactCurvature(model_func) state = compute_exact_approx_curvature( explicit_estimator, estimator_key, func_args, data_size, curvature_type, ) explicit_exact_matrix = explicit_estimator.to_dense_matrix(state) # Compute exact curvature matrix using the implicit curvature implicit = kfac_jax.ImplicitExactCurvature(model_func) zeros_vector = jnp.zeros([explicit_estimator.dim]) @jax.jit def mul_e_i(index, *_): flat_v_e = zeros_vector.at[index].set(1.0) v_e_leaves = [] i = 0 for p in jax.tree_util.tree_leaves(params): v_e_leaves.append(flat_v_e[i: i + p.size].reshape(p.shape)) i += p.size v_e = jax.tree_util.tree_unflatten( jax.tree_util.tree_structure(params), v_e_leaves) if curvature_type == "fisher": r_e = implicit.multiply_fisher(func_args, v_e) elif curvature_type == "ggn": r_e = implicit.multiply_ggn(func_args, v_e) else: raise ValueError(f"Unrecognized curvature_type={curvature_type}.") flat_r_e = jax.tree_util.tree_leaves( jax.tree_util.tree_map(lambda x: x.flatten(), r_e)) return index + 1, jnp.concatenate(flat_r_e, axis=0) _, matrix = jax.lax.scan(mul_e_i, 0, None, length=explicit_estimator.dim) # Compare self.assertAllClose(matrix, explicit_exact_matrix) @parameterized.parameters(NON_LINEAR_MODELS_AND_CURVATURE_TYPE) def test_block_diagonal_full( self, init_func: Callable[..., models.hk.Params], model_func: Callable[..., Array], data_point_shapes: Mapping[str, Shape], seed: int, curvature_type: str, data_size: int = 4, ): """Tests that the block diagonal full is equal to the explicit curvature.""" rng_key = jax.random.PRNGKey(seed) init_key, data_key, estimator_key = jax.random.split(rng_key, 3) # Generate data data = {} for name, shape in data_point_shapes.items(): data_key, key = jax.random.split(data_key) data[name] = jax.random.uniform(key, (data_size, *shape)) if name == "labels": data[name] = jnp.argmax(data[name], axis=-1) params = init_func(init_key, data) func_args = (params, data) # Compute curvature matrix using the block diagonal full estimator block_estimator = kfac_jax.BlockDiagonalCurvature( model_func, layer_tag_to_block_ctor=dict( dense_tag=kfac_jax.DenseFull, conv2d_tag=kfac_jax.Conv2DFull, scale_and_shift_tag=kfac_jax.ScaleAndShiftFull, ) ) block_state = compute_exact_approx_curvature( block_estimator, estimator_key, func_args, data_size, curvature_type, ) blocks = block_estimator.to_diagonal_block_dense_matrix(block_state) # Compute curvature matrix using the explicit exact curvature full_estimator = kfac_jax.ExplicitExactCurvature( model_func, default_estimation_mode="fisher_exact", ) state = compute_exact_approx_curvature( full_estimator, estimator_key, func_args, data_size, curvature_type, ) full_matrix = full_estimator.to_dense_matrix(state) # Compare blocks d = 0 for block in blocks: s = slice(d, d + block.shape[0]) self.assertAllClose(block, full_matrix[s, s]) d = d + block.shape[0] self.assertEqual(d, full_matrix.shape[0]) @parameterized.parameters(PIECEWISE_LINEAR_MODELS_AND_CURVATURE) def test_block_diagonal_full_to_hessian( self, init_func: Callable[..., models.hk.Params], model_func: Callable[..., Array], data_point_shapes: Mapping[str, Shape], seed: int, curvature_type: str, data_size: int = 4, ): """Tests for piecewise linear models that block equal to the Hessian.""" rng_key = jax.random.PRNGKey(seed) init_key, data_key, estimator_key = jax.random.split(rng_key, 3) # Generate data data = {} for name, shape in 
data_point_shapes.items(): data_key, key = jax.random.split(data_key) data[name] = jax.random.uniform(key, (data_size, *shape)) if name == "labels": data[name] = jnp.argmax(data[name], axis=-1) params = init_func(init_key, data) func_args = (params, data) block_estimator = kfac_jax.BlockDiagonalCurvature( model_func, layer_tag_to_block_ctor=dict( dense_tag=kfac_jax.DenseFull, conv2d_tag=kfac_jax.Conv2DFull, scale_and_shift_tag=kfac_jax.ScaleAndShiftFull, ) ) block_state = compute_exact_approx_curvature( block_estimator, estimator_key, func_args, data_size, curvature_type, ) blocks = (block_estimator.to_diagonal_block_dense_matrix(block_state)) # Compute exact curvature matrix using the implicit curvature implicit = kfac_jax.ImplicitExactCurvature(model_func) zeros_vector = jnp.zeros([block_estimator.dim]) @jax.jit def mul_e_i(index, *_): flat_v_e = zeros_vector.at[index].set(1.0) v_e_leaves = [] i = 0 for p in jax.tree_util.tree_leaves(params): v_e_leaves.append(flat_v_e[i: i + p.size].reshape(p.shape)) i += p.size v_e = jax.tree_util.tree_unflatten( jax.tree_util.tree_structure(params), v_e_leaves) r_e = implicit.multiply_hessian(func_args, v_e) flat_r_e = jax.tree_util.tree_leaves( jax.tree_map(lambda x: x.flatten(), r_e)) return index + 1, jnp.concatenate(flat_r_e, axis=0) _, hessian = jax.lax.scan(mul_e_i, 0, None, length=block_estimator.dim) # Compare blocks d = 0 for block in blocks: s = slice(d, d + block.shape[0]) self.assertAllClose(block, hessian[s, s]) d = d + block.shape[0] self.assertEqual(d, hessian.shape[0]) @parameterized.parameters(NON_LINEAR_MODELS_AND_CURVATURE_TYPE) def test_diagonal( self, init_func: Callable[..., models.hk.Params], model_func: Callable[..., Array], data_point_shapes: Mapping[str, Shape], seed: int, curvature_type: str, data_size: int = 4, ): """Tests that the diagonal estimation is the diagonal of the full.""" rng_key = jax.random.PRNGKey(seed) init_key, data_key, estimator_key = jax.random.split(rng_key, 3) # Generate data data = {} for name, shape in data_point_shapes.items(): data_key, key = jax.random.split(data_key) data[name] = jax.random.uniform(key, (data_size, *shape)) if name == "labels": data[name] = jnp.argmax(data[name], axis=-1) params = init_func(init_key, data) func_args = (params, data) # Compute curvature matrix using the block diagonal diagonal estimator diagonal_estimator = kfac_jax.BlockDiagonalCurvature( model_func, layer_tag_to_block_ctor=dict( dense_tag=kfac_jax.DenseDiagonal, conv2d_tag=kfac_jax.Conv2DDiagonal, scale_and_shift_tag=kfac_jax.ScaleAndShiftDiagonal, ) ) diag_state = compute_exact_approx_curvature( diagonal_estimator, estimator_key, func_args, data_size, curvature_type, ) diagonals = diagonal_estimator.to_diagonal_block_dense_matrix(diag_state) # Compute curvature matrix using the block diagonal full estimator block_estimator = kfac_jax.BlockDiagonalCurvature( model_func, layer_tag_to_block_ctor=dict( dense_tag=kfac_jax.DenseFull, conv2d_tag=kfac_jax.Conv2DFull, scale_and_shift_tag=kfac_jax.ScaleAndShiftFull, ) ) block_state = compute_exact_approx_curvature( block_estimator, estimator_key, func_args, data_size, curvature_type, ) blocks = block_estimator.to_diagonal_block_dense_matrix(block_state) # Compare diagonals self.assertEqual(len(diagonals), len(blocks)) for diagonal, block in zip(diagonals, blocks): self.assertAllClose(diagonal, jnp.diag(jnp.diag(block))) @parameterized.parameters(LINEAR_MODELS_AND_CURVATURE_TYPE) def test_kronecker_factored( self, init_func: Callable[..., models.hk.Params], model_func: 
Callable[..., Array], data_point_shapes: Mapping[str, Shape], seed: int, curvature_type: str = "fisher", data_size: int = 4, ): """Test for linear network if the KF blocks match the full.""" rng_key = jax.random.PRNGKey(seed) init_key, data_key, estimator_key = jax.random.split(rng_key, 3) # Generate data data = {} for name, shape in data_point_shapes.items(): data_key, key = jax.random.split(data_key) data[name] = jax.random.uniform(key, (data_size, *shape)) if name == "labels": data[name] = jnp.argmax(data[name], axis=-1) params = init_func(init_key, data) func_args = (params, data) # Compute curvature matrix using the kronecker factored blocks # Note that this identity does not hold for conv layers, as there the # KF approximation assumes independence between locations as well. kf_estimator = kfac_jax.BlockDiagonalCurvature( model_func, layer_tag_to_block_ctor=dict( dense_tag=kfac_jax.DenseTwoKroneckerFactored, conv2d_tag=None, scale_and_shift_tag=kfac_jax.ScaleAndShiftFull, ) ) kf_state = compute_exact_approx_curvature( kf_estimator, estimator_key, func_args, data_size, curvature_type, ) kf_blocks = kf_estimator.to_diagonal_block_dense_matrix(kf_state) # Compute curvature matrix using the block diagonal full estimator full_estimator = kfac_jax.BlockDiagonalCurvature( model_func, layer_tag_to_block_ctor=dict( dense_tag=kfac_jax.DenseFull, conv2d_tag=kfac_jax.Conv2DFull, scale_and_shift_tag=kfac_jax.ScaleAndShiftFull, ) ) full_state = compute_exact_approx_curvature( full_estimator, estimator_key, func_args, data_size, curvature_type, ) blocks = full_estimator.to_diagonal_block_dense_matrix(full_state) # Compare diagonals self.assertEqual(len(kf_blocks), len(blocks)) for kf, block in zip(kf_blocks, blocks): self.assertAllClose(kf, block) @parameterized.parameters([ ( dict(images=(32, 32, 3), labels=(10,)), 1230971, "ggn", ), ( dict(images=(32, 32, 3), labels=(10,)), 1230971, "fisher", ), ]) def test_eigenvalues( self, data_point_shapes: Mapping[str, Shape], seed: int, curvature_type: str = "fisher", data_size: int = 4, ): """Test for linear network if the KF blocks match the full.""" num_classes = data_point_shapes["labels"][0] init_func = models.conv_classifier( num_classes=num_classes, layer_channels=[8, 16, 32]).init model_func = functools.partial( models.conv_classifier_loss, num_classes=num_classes, layer_channels=[8, 16, 32]) rng_key = jax.random.PRNGKey(seed) init_key, data_key, estimator_key = jax.random.split(rng_key, 3) # Generate data data = {} for name, shape in data_point_shapes.items(): data_key, key = jax.random.split(data_key) data[name] = jax.random.uniform(key, (data_size, *shape)) if name == "labels": data[name] = jnp.argmax(data[name], axis=-1) params = init_func(init_key, data) func_args = (params, data) estimator = kfac_jax.BlockDiagonalCurvature( model_func, index_to_block_ctor={ (0, 1): kfac_jax.Conv2DTwoKroneckerFactored, (3, 2): kfac_jax.Conv2DDiagonal, (4,): kfac_jax.ScaledIdentity, (6, 5): kfac_jax.ScaleAndShiftDiagonal, (8, 7): kfac_jax.DenseTwoKroneckerFactored, (10, 9): kfac_jax.DenseFull, } ) state = compute_exact_approx_curvature( estimator, estimator_key, func_args, data_size, curvature_type, ) cached_state = estimator.update_cache( state, identity_weight=1e-2, exact_powers=-1, approx_powers=None, eigenvalues=True, pmap_axis_name=None, ) block_eigenvalues = estimator.block_eigenvalues(cached_state, True) scales = [block.fixed_scale() for block in estimator.blocks] self.assertLen(block_eigenvalues, estimator.num_blocks) for block_state, eigs, scale in zip( 
cached_state.blocks_states, block_eigenvalues, scales): if isinstance(block_state, kfac_jax.TwoKroneckerFactored.State): in_eigs, _ = kfac_jax.utils.safe_psd_eigh( block_state.factors[1].value) out_eigs, _ = kfac_jax.utils.safe_psd_eigh( block_state.factors[0].value) self.assertAllClose(scale * jnp.outer(out_eigs, in_eigs), eigs) elif isinstance(block_state, kfac_jax.Diagonal.State): diag_eigs = jnp.concatenate([factor.value.flatten() for factor in block_state.diagonal_factors]) self.assertAllClose(diag_eigs, eigs) elif isinstance(block_state, kfac_jax.Full.State): matrix_eigs, _ = kfac_jax.utils.safe_psd_eigh(block_state.matrix.value) self.assertAllClose(matrix_eigs, eigs) elif isinstance(block_state, kfac_jax.CurvatureBlock.State): # ScaledIdentity self.assertAllClose(jnp.ones_like(eigs), eigs) else: raise NotImplementedError() @parameterized.parameters([ ( dict(images=(32, 32, 3), labels=(10,)), 1230971, "ggn", ), ( dict(images=(32, 32, 3), labels=(10,)), 1230971, "fisher", ), ]) def test_matmul( self, data_point_shapes: Mapping[str, Shape], seed: int, curvature_type: str, data_size: int = 4, e: float = 1.0, ): """Test for linear network if the KF blocks match the full.""" num_classes = data_point_shapes["labels"][0] init_func = models.conv_classifier( num_classes=num_classes, layer_channels=[8, 16, 32]).init model_func = functools.partial( models.conv_classifier_loss, num_classes=num_classes, layer_channels=[8, 16, 32]) rng_key = jax.random.PRNGKey(seed) init_key1, init_key2, data_key, estimator_key = jax.random.split(rng_key, 4) # Generate data data = {} for name, shape in data_point_shapes.items(): data_key, key = jax.random.split(data_key) data[name] = jax.random.uniform(key, (data_size, *shape)) if name == "labels": data[name] = jnp.argmax(data[name], axis=-1) params = init_func(init_key1, data) func_args = (params, data) estimator = kfac_jax.BlockDiagonalCurvature( model_func, index_to_block_ctor={ (1, 0): kfac_jax.Conv2DTwoKroneckerFactored, (3, 2): kfac_jax.Conv2DDiagonal, (4,): kfac_jax.ScaledIdentity, (6, 5): kfac_jax.ScaleAndShiftDiagonal, (8, 7): kfac_jax.DenseTwoKroneckerFactored, (10, 9): kfac_jax.DenseFull, } ) state = compute_exact_approx_curvature( estimator, estimator_key, func_args, data_size, curvature_type, ) cached_state = estimator.update_cache( state, identity_weight=e, exact_powers=-1, approx_powers=None, eigenvalues=True, pmap_axis_name=None, ) v = init_func(init_key2, data) m_v = estimator.multiply(state, v, e, True, True, None) m_inv_v = estimator.multiply_inverse(cached_state, v, e, True, True, None) # Check cached and non-cached are the same m_inv_v2 = estimator.multiply_inverse(state, v, e, True, False, None) self.assertAllClose(m_inv_v, m_inv_v2, atol=1e-5, rtol=1e-4) block_vectors = estimator.params_vector_to_blocks_vectors(v) results = estimator.params_vector_to_blocks_vectors(m_v) results_inv = estimator.params_vector_to_blocks_vectors(m_inv_v) block_matrices = estimator.to_diagonal_block_dense_matrix(state) for i in range(estimator.num_blocks): # In all modules the parameters are in reverse canonical order v_i_flat = jnp.concatenate([x.flatten() for x in block_vectors[i][::-1]]) r_i_flat = jnp.concatenate([x.flatten() for x in results[i][::-1]]) r2_i_flat = jnp.concatenate([x.flatten() for x in results_inv[i][::-1]]) # Matrix multiplication computed = block_matrices[i] @ v_i_flat + e * v_i_flat self.assertAllClose(computed, r_i_flat) # Matrix inverse multiplication m_i_plus_eye = block_matrices[i] + e * jnp.eye(block_matrices[i].shape[0]) computed2 = 
jnp.linalg.solve(m_i_plus_eye, v_i_flat) self.assertAllClose(computed2, r2_i_flat, atol=1e-5, rtol=1e-4) @parameterized.parameters([ ( dict(images=(32, 32, 3), labels=(10,)), 1230971, "ggn", ), ( dict(images=(32, 32, 3), labels=(10,)), 1230971, "fisher", ), ]) def test_implicit_factor_products( self, data_point_shapes: Mapping[str, Shape], seed: int, curvature_type: str, data_size: int = 4, ): """Tests that the products of the curvature factors are correct.""" num_classes = data_point_shapes["labels"][0] init_func = models.conv_classifier( num_classes=num_classes, layer_channels=[8, 16, 32]).init model_func = functools.partial( models.conv_classifier_loss, num_classes=num_classes, layer_channels=[8, 16, 32]) rng_key = jax.random.PRNGKey(seed) init_key1, init_key2, data_key = jax.random.split(rng_key, 3) # Generate data data = {} for name, shape in data_point_shapes.items(): data_key, key = jax.random.split(data_key) data[name] = jax.random.uniform(key, (data_size, *shape)) if name == "labels": data[name] = jnp.argmax(data[name], axis=-1) params = init_func(init_key1, data) func_args = (params, data) estimator = kfac_jax.ImplicitExactCurvature(model_func) v = init_func(init_key2, data) if curvature_type == "fisher": c_factor_v = estimator.multiply_fisher_factor_transpose(func_args, v) c_v_1 = estimator.multiply_fisher_factor(func_args, c_factor_v) c_v_2 = estimator.multiply_fisher(func_args, v) elif curvature_type == "ggn": c_factor_v = estimator.multiply_ggn_factor_transpose(func_args, v) c_v_1 = estimator.multiply_ggn_factor(func_args, c_factor_v) c_v_2 = estimator.multiply_ggn(func_args, v) else: raise NotImplementedError() self.assertAllClose(c_v_1, c_v_2, atol=1e-6, rtol=1e-6) if __name__ == "__main__": absltest.main()
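# Illustrative sketch (not part of the original test suite): the eigenvalue comparison in
# `test_eigenvalues` above rests on the identity eig(A kron B) = outer(eig(A), eig(B)) for
# symmetric PSD Kronecker factors. The helper below checks that identity directly on small
# random factors; the function name and matrix sizes are arbitrary choices for illustration.
def _kron_eigenvalue_identity_sketch(seed: int = 0, m: int = 3, n: int = 4) -> None:
  key_a, key_b = jax.random.split(jax.random.PRNGKey(seed))
  a_root = jax.random.normal(key_a, (m, m))
  b_root = jax.random.normal(key_b, (n, n))
  a = a_root @ a_root.T  # symmetric PSD "output" factor
  b = b_root @ b_root.T  # symmetric PSD "input" factor
  eigs_a = jnp.linalg.eigvalsh(a)
  eigs_b = jnp.linalg.eigvalsh(b)
  kron_eigs = jnp.linalg.eigvalsh(jnp.kron(a, b))
  outer_eigs = jnp.sort(jnp.outer(eigs_a, eigs_b).flatten())
  # The sorted outer product of the factor spectra matches the Kronecker product spectrum.
  assert jnp.allclose(kron_eigs, outer_eigs, rtol=1e-3, atol=1e-3)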
kfac-jax-main
tests/test_estimator.py
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing all the tracing mechanisms from tracer.py.""" from typing import Any, Callable, Mapping, Tuple from absl.testing import absltest from absl.testing import parameterized import jax import jax.numpy as jnp import kfac_jax from tests import models import numpy as np tracer = kfac_jax.tracer utils = kfac_jax.utils Array = utils.Array PRNGKey = utils.PRNGKey Shape = utils.Shape class TestTracer(parameterized.TestCase): """Test class for the functions in `tracer.py`.""" def assertAllClose( self, x: utils.PyTree, y: utils.PyTree, check_dtypes: bool = True, atol: float = 5e-6, rtol: float = 1e-6, ): """Asserts that the two PyTrees are close up to the provided tolerances.""" x_v, x_tree = jax.tree_util.tree_flatten(x) y_v, y_tree = jax.tree_util.tree_flatten(y) self.assertEqual(x_tree, y_tree) for xi, yi in zip(x_v, y_v): self.assertEqual(xi.shape, yi.shape) if check_dtypes: self.assertEqual(xi.dtype, yi.dtype) np.testing.assert_allclose(xi, yi, rtol=rtol, atol=atol) @staticmethod def generate_data( init_func: Callable[..., models.hk.Params], model_func: Callable[..., Array], data_point_shapes: Mapping[str, Shape], rng: PRNGKey, data_size: int = 4, ) -> Tuple[ models.hk.Params, Mapping[str, Array], models.hk.Params, Tuple[Tuple[Array, ...], ...] 
]: """Generates random data for any testing.""" data = {} for name, shape in data_point_shapes.items(): rng, key = jax.random.split(rng) data[name] = jax.random.uniform(key, (data_size, *shape)) if name == "labels": data[name] = jnp.argmax(data[name], axis=-1) rng, key = jax.random.split(rng) params = init_func(key, data) rng, key = jax.random.split(rng) p_tangents = init_func(key, data) loss_values, layers_values = model_func( params, data, return_layer_values=True) last_layer_output = layers_values[-1][1] keys = tuple(jax.random.split(key, len(loss_values))) output_tangents = tuple( (jax.random.normal(key, last_layer_output.shape),) for key in keys) return params, data, p_tangents, output_tangents def compare_multi_batch( self, func: Callable[[Any], Any], data: Any, data_size: int, combine: str, atol: float = 1e-6, rtol: float = 1e-6, ): """Compares `func` with a single large batch and multiple small one.""" # Single batch computation single_output = func(data) # Different batch computation data1 = jax.tree_util.tree_map(lambda x: x[:data_size // 2], data) data2 = jax.tree_util.tree_map(lambda x: x[data_size // 2:], data) outputs = list() for d in (data1, data2): outputs.append(func(d)) if combine == "concatenate": outputs = jax.tree_util.tree_map( lambda x, y: jnp.concatenate([x, y], axis=0), *outputs) elif combine == "sum": outputs = jax.tree_util.tree_map(lambda x, y: x + y, *outputs) else: raise NotImplementedError() self.assertAllClose(single_output, outputs, atol=atol, rtol=rtol) @parameterized.parameters(models.NON_LINEAR_MODELS) def test_loss_tags_jvp( self, init_func: Callable[..., models.hk.Params], model_func: Callable[..., Array], data_point_shape: Mapping[str, Shape], seed: int, dataset_size: int = 4, ): """Tests `tracer.py:loss_tags.jvp`.""" # Model and data setup rng = jax.random.PRNGKey(seed) params, data, p_tangents, _ = self.generate_data( init_func, model_func, data_point_shape, rng, dataset_size, ) # True computation (primals_out, tangents_out) = jax.jvp( lambda p: model_func(p, data, return_layer_values=True), [params], [p_tangents]) loss_values, _ = primals_out _, layers_tangents = tangents_out last_layer_output_tangents = layers_tangents[-1][1] loss_tangents = ((last_layer_output_tangents,),) * len(loss_values) # Tracer computation tracer_jvp = tracer.loss_tags_jvp(model_func) tracer_losses, tracer_loss_tangents = tracer_jvp((params, data), p_tangents) # pytype: disable=attribute-error # always-use-return-annotations tracer_losses_values = [loss.evaluate() for loss in tracer_losses] self.assertAllClose(loss_values, tracer_losses_values) self.assertAllClose(loss_tangents, tracer_loss_tangents) @parameterized.parameters(models.NON_LINEAR_MODELS) def test_loss_tags_jvp_diff_batch_size( self, init_func: Callable[..., models.hk.Params], model_func: Callable[..., Array], data_point_shape: Mapping[str, Shape], seed: int, data_size: int = 4, ): """Tests `tracer.py:loss_tags.jvp` for multiple batches.""" # Model and data setup rng = jax.random.PRNGKey(seed) params, data, p_tangents, _ = self.generate_data( init_func, model_func, data_point_shape, rng, data_size, ) jvp = tracer.loss_tags_jvp(model_func) def func(data_): losses, loss_tangents = jvp((params, data_), p_tangents) # pytype: disable=attribute-error # always-use-return-annotations losses = [loss.evaluate() for loss in losses] return losses, loss_tangents self.compare_multi_batch(func, data, data_size, "concatenate") @parameterized.parameters(models.NON_LINEAR_MODELS) def test_loss_tags_vjp( self, init_func: 
Callable[..., models.hk.Params], model_func: Callable[..., Array], data_point_shape: Mapping[str, Shape], seed: int, dataset_size: int = 4, ): """Tests `tracer.py:loss_tags_vjp`.""" # Model and data setup rng = jax.random.PRNGKey(seed) params, data, _, output_tangents = self.generate_data( init_func, model_func, data_point_shape, rng, dataset_size, ) def no_data_func(p): losses, layers_values = model_func(p, data, return_layer_values=True) last_layer_output = layers_values[-1][1] return losses, last_layer_output # True computation (loss_values, _), vjp_func = jax.vjp(no_data_func, params) loss_tangents = jax.tree_util.tree_map(jnp.zeros_like, loss_values) summed_output_tangents = sum(jax.tree_util.tree_leaves(output_tangents)) p_tangents, = vjp_func((loss_tangents, summed_output_tangents)) # Tracer computation trace_vjp = tracer.loss_tags_vjp(model_func) tracer_losses, tracer_vjp_func = trace_vjp((params, data)) # pytype: disable=attribute-error # always-use-return-annotations tracer_losses = [loss.evaluate() for loss in tracer_losses] tracer_p_tangents = tracer_vjp_func(output_tangents) # Comparison self.assertAllClose(loss_values, tracer_losses) self.assertAllClose(p_tangents, tracer_p_tangents) @parameterized.parameters(models.NON_LINEAR_MODELS) def test_loss_tags_vjp_diff_batch_size( self, init_func: Callable[..., models.hk.Params], model_func: Callable[..., Array], data_point_shape: Mapping[str, Shape], seed: int, data_size: int = 4, ): """Tests `tracer.py:loss_tags_vjp` for multiple batches.""" # Model and data setup rng = jax.random.PRNGKey(seed) params, data, _, output_tangents = self.generate_data( init_func, model_func, data_point_shape, rng, data_size, ) # Tracer computation vjp = tracer.loss_tags_vjp(model_func) def func1(data_): losses, _ = vjp((params, data_)) # pytype: disable=attribute-error # always-use-return-annotations return [loss.evaluate() for loss in losses] self.compare_multi_batch(func1, data, data_size, "concatenate") def func2(data_and_output_tangents): data_, output_tangents_ = data_and_output_tangents _, vjp_func = vjp((params, data_)) # pytype: disable=attribute-error # always-use-return-annotations return vjp_func(output_tangents_) self.compare_multi_batch(func2, (data, output_tangents), data_size, "sum") @parameterized.parameters(models.NON_LINEAR_MODELS) def test_loss_tags_hvp( self, init_func: Callable[..., models.hk.Params], model_func: Callable[..., Array], data_point_shape: Mapping[str, Shape], seed: int, dataset_size: int = 4, ): """Tests `tracer.py:loss_tags_hvp`.""" # Model and data setup rng = jax.random.PRNGKey(seed) params, data, p_tangents, _ = self.generate_data( init_func, model_func, data_point_shape, rng, dataset_size, ) def no_data_func(p): return sum(jax.tree_util.tree_map(jnp.sum, model_func(p, data))) # True computation grad_func = jax.grad(no_data_func) def grad_time_tangents(args): return utils.inner_product(grad_func(args), p_tangents) hvp = jax.grad(grad_time_tangents) hvp_vectors = hvp(params) # Tracer computation tracer_hvp = tracer.loss_tags_hvp(model_func) tracer_hvp_vectors, _ = tracer_hvp((params, data), p_tangents) # pytype: disable=attribute-error # always-use-return-annotations # Comparison self.assertAllClose(hvp_vectors, tracer_hvp_vectors, atol=5e-6) @parameterized.parameters(models.NON_LINEAR_MODELS) def test_loss_tags_hvp_diff_batch_size( self, init_func: Callable[..., models.hk.Params], model_func: Callable[..., Array], data_point_shape: Mapping[str, Shape], seed: int, data_size: int = 4, ): """Tests 
`tracer.py:loss_tags_hvp` for multiple batches.""" # Model and data setup rng = jax.random.PRNGKey(seed) params, data, p_tangents, _ = self.generate_data( init_func, model_func, data_point_shape, rng, data_size ) hvp = tracer.loss_tags_hvp(model_func) def func(data_): return hvp((params, data_), p_tangents)[0] self.compare_multi_batch(func, data, data_size, "sum", rtol=1e-4) @parameterized.parameters(models.NON_LINEAR_MODELS) def test_layer_tags_vjp( self, init_func: Callable[..., models.hk.Params], model_func: Callable[..., Array], data_point_shape: Mapping[str, Shape], seed: int, dataset_size: int = 4, ): """Tests `tracer.py:layer_tags_vjp`.""" # Model and data setup rng = jax.random.PRNGKey(seed) params, data, _, output_tangents = self.generate_data( init_func, model_func, data_point_shape, rng, dataset_size, ) def aux_no_data_func(aux, p): _, layers_values = model_func( p, data, aux=aux, return_layer_values=True) last_layer_output = layers_values[-1][1] return last_layer_output # True computation loss_values, layer_values = model_func( params, data, return_layer_values=True) layer_outputs = tuple(v[1] for v in layer_values) aux_values = jax.tree_util.tree_map(jnp.zeros_like, layer_outputs) _, vjp = jax.vjp(aux_no_data_func, aux_values, params) summed_output_tangents = sum(jax.tree_util.tree_leaves(output_tangents)) aux_tangents, p_tangents = vjp(summed_output_tangents) self.assertEqual(len(layer_values), len(params)) self.assertEqual(len(aux_tangents), len(p_tangents)) layers_info = list() for (x, y), aux_t, param, param_tangent in zip( layer_values, aux_tangents, list(params.values()), list(p_tangents.values()) ): info = dict() info["inputs"] = (x,) info["outputs"] = (y,) info["outputs_tangent"] = (aux_t,) general_names = ("w", "b") if "w" in param else ("scale", "offset") p_names = tuple(name for name in general_names if name in param) self.assertLessEqual(len(p_names), len(param)) info["params"] = tuple(param[name] for name in p_names) info["params_tangent"] = tuple(param_tangent[name] for name in p_names) layers_info.append(info) layers_info = tuple(layers_info) # Tracer computation tracer_losses, tracer_vjp_func = tracer.layer_tags_vjp(model_func)( # pytype: disable=attribute-error # always-use-return-annotations (params, data)) tracer_losses = [loss.evaluate() for loss in tracer_losses] tracer_info = tracer_vjp_func(output_tangents) # We don't support testing of inputs_tangent currently for info in tracer_info: info.pop("inputs_tangent") # Comparison self.assertAllClose(loss_values, tracer_losses) self.assertAllClose(layers_info, tracer_info) @parameterized.parameters(models.NON_LINEAR_MODELS) def test_layer_tags_vjp_diff_batch_size( self, init_func: Callable[..., models.hk.Params], model_func: Callable[..., Array], data_point_shape: Mapping[str, Shape], seed: int, data_size: int = 4, ): """Tests `tracer.py:layer_tags_vjp` for multiple batches.""" # Model and data setup rng = jax.random.PRNGKey(seed) params, data, _, output_tangents = self.generate_data( init_func, model_func, data_point_shape, rng, data_size, ) vjp = tracer.layer_tags_vjp(model_func) def func(data_and_output_tangents): data_, output_tangents_ = data_and_output_tangents losses, vjp_func = vjp((params, data_)) # pytype: disable=attribute-error # always-use-return-annotations losses = [loss.evaluate() for loss in losses] layers_info = vjp_func(output_tangents_) for info in layers_info: # These quantities are not per-batch, but averaged, so we skip them info.pop("params") info.pop("params_tangent") return losses, 
layers_info self.compare_multi_batch( func, (data, output_tangents), data_size, "concatenate") if __name__ == "__main__": absltest.main()
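# Illustrative sketch (not part of the original tests): the jvp/vjp comparisons above
# implicitly rely on the standard adjoint identity <J v, u> == <v, J^T u> between
# forward-mode and reverse-mode products. The helper below verifies that identity for a
# toy function; the function, names and shapes are arbitrary choices for illustration.
def _jvp_vjp_adjoint_identity_sketch(seed: int = 0) -> None:
  key_x, key_v, key_u = jax.random.split(jax.random.PRNGKey(seed), 3)
  f = lambda x: jnp.tanh(x @ x.T).sum(axis=1)  # arbitrary smooth map from (3, 3) to (3,)
  x = jax.random.normal(key_x, (3, 3))
  v = jax.random.normal(key_v, (3, 3))  # input-space tangent
  u = jax.random.normal(key_u, (3,))    # output-space cotangent
  _, jv = jax.jvp(f, (x,), (v,))
  _, vjp_fn = jax.vjp(f, x)
  jt_u, = vjp_fn(u)
  np.testing.assert_allclose(
      jnp.vdot(jv, u), jnp.vdot(v, jt_u), rtol=1e-4, atol=1e-4)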
kfac-jax-main
tests/test_tracer.py
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing the functionality of the patches second moment computation.""" import itertools from typing import Tuple, Union from absl.testing import absltest from absl.testing import parameterized import jax from jax import lax import jax.numpy as jnp import kfac_jax import numpy as np psm = kfac_jax.patches_second_moment utils = kfac_jax.utils class TestPatchesMoments(parameterized.TestCase): """Test class for the patches second moment functions.""" def assertAllClose( self, x: utils.PyTree, y: utils.PyTree, check_dtypes: bool = True, atol: float = 1e-6, rtol: float = 1e-6, ): """Asserts that the two PyTrees are close up to the provided tolerances.""" x_v, x_tree = jax.tree_util.tree_flatten(x) y_v, y_tree = jax.tree_util.tree_flatten(y) self.assertEqual(x_tree, y_tree) for xi, yi in zip(x_v, y_v): self.assertEqual(xi.shape, yi.shape) if check_dtypes: self.assertEqual(xi.dtype, yi.dtype) np.testing.assert_allclose(xi, yi, rtol=rtol, atol=atol) @parameterized.parameters(list(itertools.product( [8, 16], # h [(1, 2), (3, 3), (4, 5)], # kernel_shape [(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)], # strides ["VALID", "SAME"], # padding )) + [ (9, (2, 2), (2, 2), ((0, 0), (2, 3))), # custom padding (8, (2, 2), (1, 3), ((0, 1), (2, 3))), # custom padding ]) def test_num_locations( self, h_and_w: int, kernel_shape: Tuple[int, int], strides: Tuple[int, int], padding: Union[str, Tuple[Tuple[int, int], Tuple[int, int]]], ): """Tests calculation of the number of convolutional locations.""" spatial_shape = (h_and_w, h_and_w) patches = lax.conv_general_dilated_patches( jnp.zeros((1,) + spatial_shape + (3,)), filter_shape=kernel_shape, window_strides=strides, padding=padding, dimension_numbers=("NHWC", "IOHW", "NHWC"), ) num_locations = patches.size // patches.shape[-1] num_location_fast = psm.num_conv_locations( spatial_shape, kernel_spatial_shape=kernel_shape, spatial_strides=strides, spatial_padding=padding) self.assertEqual(num_locations, num_location_fast) @parameterized.parameters(list(itertools.product( [3], # c [8, 16], # h [(1, 2), (3, 3), (4, 5)], # kernel_shape [(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)], # strides ["VALID", "SAME"], # padding ["NHWC", "NCHW"], # data_format [False, True], # per_channel )) + [ (3, 9, (2, 2), (2, 2), ((0, 0), (2, 3)), "NHWC", False), # custom padding (3, 9, (2, 2), (2, 2), ((0, 0), (2, 3)), "NHWC", True), # custom padding (3, 8, (2, 2), (1, 3), ((0, 1), (2, 3)), "NHWC", False), # custom padding (3, 8, (2, 2), (1, 3), ((0, 1), (2, 3)), "NHWC", True), # custom padding ]) def test_patches_moments_2d( self, c: int, h_and_w: int, kernel_spatial_shape: Tuple[int, int], strides: Tuple[int, int], padding: Union[str, Tuple[Tuple[int, int], Tuple[int, int]]], data_format: str, per_channel: bool, ): """Tests the patches second moment calculation for 2D convolution.""" rng = jax.random.PRNGKey(1214321) n = 5 axis = data_format.index("C") if data_format == "NHWC": 
shape = (n, h_and_w, h_and_w, c) else: shape = (n, c, h_and_w, h_and_w) w_shape = (*kernel_spatial_shape, c, c + 1) feature_group_count = c if per_channel else 1 num_locations = psm.num_conv_locations( (h_and_w, h_and_w), kernel_spatial_shape=kernel_spatial_shape, spatial_strides=strides, spatial_padding=padding) normalizer = n * num_locations ones_inputs = jnp.ones(shape) key, rng = jax.random.split(rng) random_inputs = jax.random.normal(key, shape) random_inputs = jnp.asarray(random_inputs.astype(ones_inputs.dtype)) random_w = jax.random.uniform(rng, w_shape, dtype=ones_inputs.dtype) random_out = jax.lax.conv_general_dilated( lhs=random_inputs, rhs=random_w, window_strides=strides, padding=padding, dimension_numbers=(data_format, "HWIO", data_format) ) random_out = jnp.sum(jnp.square(random_out), axis=axis) weighting_array = 1.0 + jax.random.uniform(rng, shape=random_out.shape) for inputs in (ones_inputs, random_inputs): matrix, vector = psm.patches_moments_explicit( inputs, kernel_spatial_shape=kernel_spatial_shape, strides=strides, padding=padding, data_format=data_format, feature_group_count=feature_group_count, unroll_loop=True, precision=jax.lax.Precision.HIGHEST, weighting_array=weighting_array, ) matrix_fast, vector_fast = psm.patches_moments( inputs, kernel_spatial_shape=kernel_spatial_shape, strides=strides, padding=padding, data_format=data_format, feature_group_count=feature_group_count, precision=jax.lax.Precision.HIGHEST, weighting_array=weighting_array, ) # For accurate results we compare the mean over the batch and locations matrix, vector, matrix_fast, vector_fast = jax.tree_util.tree_map( lambda x: x / normalizer, (matrix, vector, matrix_fast, vector_fast) # pylint: disable=cell-var-from-loop ) self.assertAllClose(matrix, matrix_fast) self.assertAllClose(vector, vector_fast) if __name__ == "__main__": absltest.main()
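# Illustrative sketch (not part of the original tests): for "VALID" padding the location
# count checked in `test_num_locations` is just the product of the output spatial sizes,
# floor((H - K_h) / S_h) + 1 per dimension. The helper below spells out that arithmetic
# for one concrete configuration; all of the numbers are arbitrary choices for illustration.
def _valid_padding_location_count_sketch() -> None:
  h = w = 8
  k_h, k_w = 3, 3
  s_h, s_w = 2, 2
  out_h = (h - k_h) // s_h + 1  # = 3
  out_w = (w - k_w) // s_w + 1  # = 3
  expected = out_h * out_w      # = 9 locations
  computed = psm.num_conv_locations(
      (h, w),
      kernel_spatial_shape=(k_h, k_w),
      spatial_strides=(s_h, s_w),
      spatial_padding="VALID")
  assert computed == expected, (computed, expected)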
kfac-jax-main
tests/test_patches_second_moment.py
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Configuration file for the Sphinx documentation builder.""" # This file only contains a selection of the most common options. For a full # list see the documentation: # http://www.sphinx-doc.org/en/master/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # pylint: disable=g-bad-import-order # pylint: disable=g-import-not-at-top import inspect import os import sys import typing def _add_annotations_import(path): """Appends a future annotations import to the file at the given path.""" with open(path) as f: contents = f.read() if contents.startswith('from __future__ import annotations'): # If we run sphinx multiple times then we will append the future import # multiple times too. return assert contents.startswith('#'), (path, contents.split('\n')[0]) with open(path, 'w') as f: # NOTE: This is subtle and not unit tested, we're prefixing the first line # in each Python file with this future import. It is important to prefix # not insert a newline such that source code locations are accurate (we link # to GitHub). The assertion above ensures that the first line in the file is # a comment so it is safe to prefix it. f.write('from __future__ import annotations ') f.write(contents) def _recursive_add_annotations_import(): for path, _, files in os.walk('../kfac_jax/'): for file in files: if file.endswith('.py'): _add_annotations_import(os.path.abspath(os.path.join(path, file))) if 'READTHEDOCS' in os.environ: _recursive_add_annotations_import() # TODO(b/254461517) Remove the annotation filtering when we drop Python 3.8 # support. # We remove `None` type annotations as this breaks Sphinx under Python 3.7 and # 3.8 with error `AssertionError: Invalid annotation [...] None is not a class.` filter_nones = lambda x: dict((k, v) for k, v in x.items() if v is not None) typing.get_type_hints = lambda obj, *unused: filter_nones(obj.__annotations__) sys.path.insert(0, os.path.abspath('../')) sys.path.append(os.path.abspath('ext')) import kfac_jax import sphinxcontrib.katex as katex # -- Project information ----------------------------------------------------- project = 'KFAC-JAX' copyright = '2022, DeepMind' # pylint: disable=redefined-builtin author = 'KFAC-JAX Contributors' # -- General configuration --------------------------------------------------- master_doc = 'index' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.doctest', 'sphinx.ext.inheritance_diagram', 'sphinx.ext.intersphinx', 'sphinx.ext.linkcode', 'sphinx.ext.napoleon', 'sphinxcontrib.bibtex', 'sphinxcontrib.katex', 'sphinx_autodoc_typehints', 'sphinx_rtd_theme', 'coverage_check', 'myst_nb', # This is used for the .ipynb notebooks ] bibtex_bibfiles = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # -- Options for autodoc ----------------------------------------------------- autodoc_default_options = { 'member-order': 'bysource', 'special-members': True, 'exclude-members': '__repr__, __str__, __weakref__, __eq__, __hash__' } # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = [] # html_favicon = '_static/favicon.ico' # -- Options for myst ------------------------------------------------------- jupyter_execute_notebooks = 'force' execution_allow_errors = False # -- Options for katex ------------------------------------------------------ # See: https://sphinxcontrib-katex.readthedocs.io/en/0.4.1/macros.html latex_macros = r""" \def \d #1{\operatorname{#1}} """ # Translate LaTeX macros to KaTeX and add to options for HTML builder katex_macros = katex.latex_defs_to_katex_macros(latex_macros) katex_options = 'macros: {' + katex_macros + '}' # Add LaTeX macros for LATEX builder latex_elements = {'preamble': latex_macros} # -- Source code links ------------------------------------------------------- def linkcode_resolve(domain, info): """Resolve a GitHub URL corresponding to Python object.""" if domain != 'py': return None try: mod = sys.modules[info['module']] except ImportError: return None obj = mod try: for attr in info['fullname'].split('.'): obj = getattr(obj, attr) except AttributeError: return None else: obj = inspect.unwrap(obj) try: filename = inspect.getsourcefile(obj) except TypeError: return None try: source, lineno = inspect.getsourcelines(obj) except OSError: return None prefix = 'https://github.com/google-deepmind/kfac-jax/tree/master/kfac_jax/' version = '%s#L%d#L%d' % ( os.path.relpath(filename, start=os.path.dirname( kfac_jax.__file__)), lineno, lineno + len(source) - 1) return prefix + version # -- Intersphinx configuration ----------------------------------------------- intersphinx_mapping = { 'jax': ('https://jax.readthedocs.io/en/latest/', None), } source_suffix = ['.rst', '.md', '.ipynb']
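# Illustrative sketch (not part of the original configuration): `linkcode_resolve` above
# emits GitHub links of the form <prefix><path relative to the kfac_jax package>#L<first
# line>#L<last line>. The values below are hypothetical and only demonstrate the string
# produced for an object spanning 15 source lines starting at line 10 of optimizer.py.
_example_relative_path = 'optimizer.py'
_example_lineno, _example_num_lines = 10, 15
_example_url = ('https://github.com/google-deepmind/kfac-jax/tree/master/kfac_jax/'
                + '%s#L%d#L%d' % (_example_relative_path, _example_lineno,
                                  _example_lineno + _example_num_lines - 1))
# -> https://github.com/google-deepmind/kfac-jax/tree/master/kfac_jax/optimizer.py#L10#L24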
kfac-jax-main
docs/conf.py
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Asserts all public symbols are covered in the docs.""" import inspect import types from typing import Any, Mapping, Set, Sequence, Tuple import kfac_jax from sphinx import application from sphinx import builders from sphinx import errors def get_public_functions( root_module: types.ModuleType) -> Sequence[Tuple[str, types.FunctionType]]: """Returns `(function_name, function)` for all functions of `root_module`.""" fns = [] for name in dir(root_module): o = getattr(root_module, name) if inspect.isfunction(o): fns.append((name, o)) return fns def get_public_symbols( root_module: types.ModuleType) -> Sequence[Tuple[str, types.FunctionType]]: """Returns `(symbol_name, symbol)` for all symbols of `root_module`.""" fns = [] for name in getattr(root_module, "__all__"): o = getattr(root_module, name) fns.append((name, o)) return fns class CoverageCheck(builders.Builder): """Builder that checks all public symbols are included.""" name = "coverage_check" def get_outdated_docs(self) -> str: return "coverage_check" def write(self, *ignored: Any) -> None: pass def finish(self) -> None: def public_symbols() -> Set[str]: symbols = set() for symbol_name, _ in get_public_symbols(kfac_jax): symbols.add("kfac_jax." + symbol_name) return symbols documented_objects = frozenset(self.env.domaindata["py"]["objects"]) undocumented_objects = public_symbols() - documented_objects if undocumented_objects: undocumented_objects = tuple(sorted(undocumented_objects)) raise errors.SphinxError( "All public symbols must be included in our documentation, did you " "forget to add an entry to `api.rst`?\n" f"Undocumented symbols: {undocumented_objects}.") def setup(app: application.Sphinx) -> Mapping[str, Any]: app.add_builder(CoverageCheck) return dict(version=kfac_jax.__version__, parallel_read_safe=True)
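# Illustrative sketch (not part of the original extension): the core of
# `CoverageCheck.finish` is a set difference between the package's `__all__`-derived names
# and whatever Sphinx recorded in its "py" domain. The standalone helper below performs the
# same comparison given an arbitrary set of documented names; `documented` is a hypothetical
# stand-in for `self.env.domaindata["py"]["objects"]`.
def _undocumented_symbols_sketch(documented: Set[str]) -> Sequence[str]:
  public = {"kfac_jax." + name for name, _ in get_public_symbols(kfac_jax)}
  return tuple(sorted(public - documented))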
kfac-jax-main
docs/ext/coverage_check.py
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities and loading pipelines for datasets used in the examples. """ import types from typing import Callable, Dict, Iterator, Optional, Tuple import jax import jax.numpy as jnp import kfac_jax import numpy as np import tensorflow.compat.v2 as tf import tensorflow_datasets tfds = tensorflow_datasets # Types for annotation Array = jax.Array Shape = Tuple[int, ...] Batch = Dict[str, Array] TfBatch = Dict[str, tf.Tensor] # Special global variables _IMAGENET_MEAN_RGB = (0.485, 0.456, 0.406) _IMAGENET_STDDEV_RGB = (0.229, 0.224, 0.225) def iterator_on_device(iterator: Iterator[Batch]) -> Iterator[Batch]: for batch in iterator: yield kfac_jax.utils.broadcast_all_local_devices(batch) def mnist_dataset( split: str, has_labels: bool, flatten_images: bool, device_batch_size: int, repeat: bool, shuffle: bool, drop_remainder: bool, seed: Optional[int] = None, multi_device: bool = True, reshuffle_each_iteration: bool = True, dtype: str = "float32", ) -> Iterator[Batch]: """Standard MNIST dataset pipeline. Args: split: Which data split to load. has_labels: Whether to return the labels or only the images. flatten_images: Whether to flatten the images to a vector. device_batch_size: The per-device batch size to use. repeat: Whether to repeat the dataset. shuffle: Whether to shuffle the dataset. drop_remainder: Whether to drop the remainder of the dataset if the number of data points is not divisible by the total batch size. seed: Any seed to use for random pre-processing. multi_device: If the returned batch should take into account the number of devices present, in which case it will return an array with shape `(num_device, device_batch_size, ...)`. reshuffle_each_iteration: Whether to reshuffle the dataset in a new order after each iteration. dtype: The returned data type of the images. Returns: The MNIST dataset as a tensorflow dataset. 
""" # Set for multi devices vs single device num_devices = jax.device_count() if multi_device else 1 num_local_devices = jax.local_device_count() if multi_device else 1 if multi_device: host_batch_shape = [num_local_devices, device_batch_size] else: host_batch_shape = [device_batch_size] host_batch_size = num_local_devices * device_batch_size num_examples = tfds.builder("mnist").info.splits[split].num_examples if num_examples % num_devices != 0: raise ValueError("The number of examples should be divisible by the number " "of devices.") def preprocess_batch( images: tf.Tensor, labels: tf.Tensor ) -> Dict[str, tf.Tensor]: """Standard reshaping of the images to (28, 28).""" images = tf.image.convert_image_dtype(images, dtype) single_example_shape = [784] if flatten_images else [28, 28] images = tf.reshape(images, host_batch_shape + single_example_shape) labels = tf.reshape(labels, host_batch_shape) if has_labels: return dict(images=images, labels=labels) else: return dict(images=images) ds = tfds.load(name="mnist", split=split, as_supervised=True) ds = ds.shard(jax.process_count(), jax.process_index()) ds = ds.cache() if host_batch_size < num_examples and shuffle: ds = ds.shuffle(buffer_size=(num_examples // jax.process_count()), seed=seed, reshuffle_each_iteration=reshuffle_each_iteration) if repeat: ds = ds.repeat() ds = ds.batch(host_batch_size, drop_remainder=drop_remainder) ds = ds.map(preprocess_batch, num_parallel_calls=tf.data.experimental.AUTOTUNE) ds = ds.prefetch(tf.data.experimental.AUTOTUNE) return iterator_on_device(iter(tensorflow_datasets.as_numpy(ds))) def imagenet_num_examples_and_split( split: str ) -> Tuple[int, tensorflow_datasets.Split]: """Returns the number of examples in the given split of Imagenet.""" if split == "train": return 1271167, tensorflow_datasets.Split.TRAIN elif split == "valid": return 10000, tensorflow_datasets.Split.TRAIN elif split == "train_and_valid": return 1281167, tensorflow_datasets.Split.TRAIN elif split == "train_eval": return 200000, tensorflow_datasets.Split.TRAIN elif split == "test": return 50000, tensorflow_datasets.Split.VALIDATION else: raise NotImplementedError() def imagenet_dataset( split: str, is_training: bool, batch_dims: Shape, seed: int = 123, shuffle_files: bool = True, buffer_size_factor: int = 10, shuffle: bool = False, cache: bool = False, dtype: jnp.dtype = jnp.float32, image_size: Shape = (224, 224), data_dir: Optional[str] = None, extra_preprocessing_func: Optional[ Callable[[Array, Array], Tuple[Array, Array]]] = None, ) -> Iterator[Batch]: """Standard ImageNet dataset pipeline. Args: split: Which data split to load. is_training: Whether this is on the training or evaluator worker. batch_dims: The shape of the batch dimensions. seed: Any seed to use for random pre-processing, shuffling, and file shuffling. shuffle_files: Whether to shuffle the ImageNet files. buffer_size_factor: Batch size factor for computing cache size. shuffle: Whether to shuffle the cache. cache: Whether to cache the whole dataset. dtype: The returned data type of the images. image_size: The image sizes. data_dir: If specified, will use this directory to load the dataset from. extra_preprocessing_func: A callable to perform addition data preprocessing if desired. Should take arguments `image` and `label` consisting of the image and its label (without batch dimension), and return a tuple consisting of the processed version of these two. Returns: The ImageNet dataset as a tensorflow dataset. 
""" preprocess_seed = seed shuffle_seed = seed + 1 file_shuffle_seed = seed + 2 del seed num_examples, tfds_split = imagenet_num_examples_and_split(split) shard_range = np.array_split(np.arange(num_examples), jax.process_count())[jax.process_index()] start, end = shard_range[0], shard_range[-1] + 1 if split == "train": # Note that our TRAIN=TFDS_TRAIN[10000:] and VALID=TFDS_TRAIN[:10000]. offset = 10000 start += offset end += offset total_batch_size = int(np.prod(batch_dims)) tfds_split = tfds.core.ReadInstruction( tfds_split, from_=start, to=end, unit="abs") read_config = tfds.ReadConfig(shuffle_seed=file_shuffle_seed) read_config.options.threading.private_threadpool_size = 48 read_config.options.threading.max_intra_op_parallelism = 1 read_config.options.deterministic = True ds = tfds.load( name="imagenet2012:5.*.*", shuffle_files=shuffle_files, split=tfds_split, decoders={"image": tfds.decode.SkipDecoding()}, data_dir=data_dir, read_config=read_config, ) if is_training: if cache: ds = ds.cache() ds = ds.repeat() if shuffle: ds = ds.shuffle(buffer_size=buffer_size_factor * total_batch_size, seed=shuffle_seed) elif num_examples % total_batch_size != 0: # If the dataset is not divisible by the batch size then just randomize if shuffle: ds = ds.shuffle(buffer_size=buffer_size_factor * total_batch_size, seed=shuffle_seed) if is_training: rng = jax.random.PRNGKey(preprocess_seed) tf_seed = tf.convert_to_tensor(rng, dtype=tf.int32) # When training we generate a stateless pipeline, at test we don't need it def scan_fn( seed_: tf.Tensor, data: TfBatch, ) -> Tuple[tf.Tensor, Tuple[TfBatch, tf.Tensor]]: new_seeds = tf.random.experimental.stateless_split(seed_, num=2) return new_seeds[0], (data, new_seeds[1]) # create a sequence of seeds across cases by repeated splitting ds = ds.scan(tf_seed, scan_fn) def preprocess( example: Dict[str, tf.Tensor], seed_: Optional[tf.Tensor] = None ) -> Dict[str, tf.Tensor]: image = _imagenet_preprocess_image( image_bytes=example["image"], seed=seed_, is_training=is_training, image_size=image_size ) label = tf.cast(example["label"], tf.int32) if extra_preprocessing_func is not None: image, label = extra_preprocessing_func(image, label) return {"images": image, "labels": label} ds = ds.map(preprocess, num_parallel_calls=tf.data.experimental.AUTOTUNE) def cast_fn(batch_): tf_dtype = (tf.bfloat16 if dtype == jnp.bfloat16 else tf.dtypes.as_dtype(dtype)) batch_ = dict(**batch_) batch_["images"] = tf.cast(batch_["images"], tf_dtype) return batch_ for i, batch_size in enumerate(reversed(batch_dims)): ds = ds.batch(batch_size, drop_remainder=not is_training) if i == 0: # NOTE: You may be tempted to move the casting earlier on in the pipeline, # but for bf16 some operations will end up silently placed on the TPU and # this causes stalls while TF and JAX battle for the accelerator. 
ds = ds.map(cast_fn) ds = ds.prefetch(tf.data.experimental.AUTOTUNE) return iterator_on_device(iter(tensorflow_datasets.as_numpy(ds))) def _imagenet_preprocess_image( image_bytes: tf.Tensor, seed: tf.Tensor, is_training: bool, image_size: Shape, ) -> tf.Tensor: """Returns processed and resized images for Imagenet.""" if is_training: seeds = tf.random.experimental.stateless_split(seed, num=2) # Random cropping of the image image = _decode_and_random_crop( image_bytes, seed=seeds[0], image_size=image_size) # Random left-right flipping image = tf.image.stateless_random_flip_left_right(image, seed=seeds[1]) else: image = _decode_and_center_crop(image_bytes, image_size=image_size) assert image.dtype == tf.uint8 # NOTE: Bicubic resize (1) casts uint8 to float32 and (2) resizes without # clamping overshoots. This means values returned will be outside the range # [0.0, 255.0] (e.g. we have observed outputs in the range [-51.1, 336.6]). image = tf.image.resize(image, image_size, tf.image.ResizeMethod.BICUBIC) # Normalize image mean = tf.constant(_IMAGENET_MEAN_RGB, shape=[1, 1, 3], dtype=image.dtype) std = tf.constant(_IMAGENET_STDDEV_RGB, shape=[1, 1, 3], dtype=image.dtype) return (image - mean * 255) / (std * 255) def _distorted_bounding_box_crop( image_bytes: tf.Tensor, jpeg_shape: tf.Tensor, bbox: tf.Tensor, seed: tf.Tensor, min_object_covered: float, aspect_ratio_range: Tuple[float, float], area_range: Tuple[float, float], max_attempts: int, ) -> tf.Tensor: """Generates cropped_image using one of the bboxes randomly distorted for Imagenet.""" bbox_begin, bbox_size, _ = tf.image.stateless_sample_distorted_bounding_box( image_size=jpeg_shape, bounding_boxes=bbox, seed=seed, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=True ) # Crop the image to the specified bounding box. offset_y, offset_x, _ = tf.unstack(bbox_begin) target_height, target_width, _ = tf.unstack(bbox_size) crop_window = tf.stack([offset_y, offset_x, target_height, target_width]) return tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3) def _decode_and_random_crop( image_bytes: tf.Tensor, seed: tf.Tensor, image_size: Shape = (224, 224), ) -> tf.Tensor: """Make a random crop of 224 for Imagenet.""" jpeg_shape = tf.image.extract_jpeg_shape(image_bytes) bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) image = _distorted_bounding_box_crop( image_bytes=image_bytes, jpeg_shape=jpeg_shape, bbox=bbox, seed=seed, min_object_covered=0.1, aspect_ratio_range=(3 / 4, 4 / 3), area_range=(0.08, 1.0), max_attempts=10) if tf.reduce_all(tf.equal(jpeg_shape, tf.shape(image))): # If the random crop failed fall back to center crop. image = _decode_and_center_crop(image_bytes, jpeg_shape, image_size) return image def _decode_and_center_crop( image_bytes: tf.Tensor, jpeg_shape: Optional[tf.Tensor] = None, image_size: Shape = (224, 224), ) -> tf.Tensor: """Crops to center of image with padding then scales for Imagenet.""" if jpeg_shape is None: jpeg_shape = tf.image.extract_jpeg_shape(image_bytes) image_height = jpeg_shape[0] image_width = jpeg_shape[1] # Pad the image with at least 32px on the short edge and take a # crop that maintains aspect ratio. 
scale = tf.minimum(tf.cast(image_height, tf.float32) / (image_size[0] + 32), tf.cast(image_width, tf.float32) / (image_size[1] + 32)) padded_center_crop_height = tf.cast(scale * image_size[0], tf.int32) padded_center_crop_width = tf.cast(scale * image_size[1], tf.int32) offset_height = ((image_height - padded_center_crop_height) + 1) // 2 offset_width = ((image_width - padded_center_crop_width) + 1) // 2 crop_window = tf.stack([offset_height, offset_width, padded_center_crop_height, padded_center_crop_width]) return tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3) def _imagenet_distort_color( image: tf.Tensor, seed: tf.Tensor, color_ordering: int = 0, ) -> tf.Tensor: """Randomly distorts colors for Imagenet.""" seeds = tf.random.experimental.stateless_split(seed, num=4) if color_ordering == 0: image = tf.image.stateless_random_brightness( image, max_delta=32. / 255., seed=seeds[0]) image = tf.image.stateless_random_saturation( image, lower=0.5, upper=1.5, seed=seeds[1]) image = tf.image.stateless_random_hue( image, max_delta=0.2, seed=seeds[2]) image = tf.image.stateless_random_contrast( image, lower=0.5, upper=1.5, seed=seeds[3]) elif color_ordering == 1: image = tf.image.stateless_random_brightness( image, max_delta=32. / 255., seed=seeds[0]) image = tf.image.stateless_random_contrast( image, lower=0.5, upper=1.5, seed=seeds[1]) image = tf.image.stateless_random_saturation( image, lower=0.5, upper=1.5, seed=seeds[2]) image = tf.image.stateless_random_hue( image, max_delta=0.2, seed=seeds[3]) else: raise ValueError("color_ordering must be in {0, 1}") # The random_* ops do not necessarily clamp. return tf.clip_by_value(image, 0.0, 1.0)
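# Illustrative usage sketch (not part of the original module, and assuming TFDS can fetch
# MNIST in the current environment): the pipelines above are consumed as plain Python
# iterators whose batches are dictionaries of arrays already placed on the local devices.
def _mnist_usage_sketch() -> None:
  it = mnist_dataset(
      split="train",
      has_labels=True,
      flatten_images=True,
      device_batch_size=32,
      repeat=True,
      shuffle=True,
      drop_remainder=True,
      seed=0,
  )
  batch = next(it)
  # Each batch maps "images" and "labels" to arrays whose leading dimensions reflect the
  # local device count and the per-device batch size.
  print(jax.tree_util.tree_map(lambda x: x.shape, batch))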
kfac-jax-main
examples/datasets.py
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities for setting up different optimizers.""" import functools from typing import Any, Callable, Dict, Iterator, Mapping, NamedTuple, Optional, Sequence, Tuple, Type, Union from absl import logging import jax from jax import lax import jax.numpy as jnp import kfac_jax from ml_collections import config_dict import optax Array = kfac_jax.utils.Array Numeric = kfac_jax.utils.Numeric PRNGKey = kfac_jax.utils.PRNGKey Params = kfac_jax.utils.Params Batch = kfac_jax.utils.Batch FuncState = kfac_jax.utils.FuncState OptaxState = kfac_jax.utils.ArrayTree ValueFunc = kfac_jax.optimizer.ValueFunc FuncArgsVariants = kfac_jax.optimizer.FuncArgsVariants ScheduleType = kfac_jax.optimizer.ScheduleType OptaxCtor = Callable[[ScheduleType], optax.GradientTransformation] EstimatorState = kfac_jax.curvature_estimator.BlockDiagonalCurvature.State EmptyState = optax.EmptyState class PreconditionState(NamedTuple): count: Array estimator_state: EstimatorState class Preconditioner: """An Optax-compatible K-FAC preconditioner.""" def __init__( self, value_func: ValueFunc, l2_reg: Numeric = 0.0, damping: Optional[float] = None, damping_schedule: Optional[ScheduleType] = None, norm_constraint: Optional[Numeric] = None, estimation_mode: str = "fisher_gradients", curvature_ema: Numeric = 0.95, curvature_update_period: int = 1, inverse_update_period: int = 5, use_exact_inverses: bool = False, register_only_generic: bool = False, patterns_to_skip: Sequence[str] = (), auto_register_kwargs: Optional[Dict[str, Any]] = None, layer_tag_to_block_ctor: Optional[ Dict[str, kfac_jax.curvature_estimator.CurvatureBlockCtor] ] = None, pmap_axis_name: str = "kfac_axis", batch_size_extractor: Callable[ [Batch], Numeric ] = kfac_jax.utils.default_batch_size_extractor, distributed_inverses: bool = True, distributed_precon_apply: bool = True, num_samples: int = 1, should_vmap_samples: bool = False, ): """Initializes the curvature estimator and preconditioner. Args: value_func: Callable. The function should return the value of the loss to be optimized. l2_reg: Scalar. Set this value to tell the optimizer what L2 regularization coefficient you are using (if any). Note the coefficient appears in the regularizer as ``coeff / 2 * sum(param**2)``. This adds an additional diagonal term to the curvature and hence will affect the quadratic model when using adaptive damping. Note that the user is still responsible for adding regularization to the loss. (Default: ``0.``) damping: Scalar. The fixed damping that will be used throughput the lifespan of Preconditioner. (Default: ``None``) damping_schedule: Callable. A schedule for the damping. This should take as input the current step number and return a single array that represents the learning rate. (Default: ``None``) norm_constraint: Scalar. If specified, the update is scaled down so that its approximate squared Fisher norm ``v^T F v`` is at most the specified value. 
(Note that here ``F`` is the approximate curvature matrix, not the exact.) (Default: ``None``) estimation_mode: String. The type of estimator to use for the curvature matrix. See the documentation for :class:`~CurvatureEstimator` for a detailed description of the possible options. (Default: ``fisher_gradients``). curvature_ema: The decay factor used when calculating the covariance estimate moving averages. (Default: ``0.95``) curvature_update_period: Int. The number of steps in between updating the the curvature estimates. (Default: ``1``) inverse_update_period: Int. The number of steps in between updating the the computation of the inverse curvature approximation. (Default: ``5``) use_exact_inverses: Bool. If ``True``, preconditioner inverses are computed "exactly" without the pi-adjusted factored damping approach. Note that this involves the use of eigendecompositions, which can sometimes be much more expensive. (Default: ``False``) register_only_generic: Boolean. Whether when running the auto-tagger to register only generic parameters, or allow it to use the graph matcher to automatically pick up any kind of layer tags. (Default: ``False``) patterns_to_skip: Tuple. A list of any patterns that should be skipped by the graph matcher when auto-tagging. (Default: ``()``) auto_register_kwargs: Any additional kwargs to be passed down to :func:`~auto_register_tags`, which is called by the curvature estimator. (Default: ``None``) layer_tag_to_block_ctor: Dictionary. A mapping from layer tags to block classes which to override the default choices of block approximation for that specific tag. See the documentation for :class:`~CurvatureEstimator` for a more detailed description. (Default: ``None``) pmap_axis_name: String. The name of the pmap axis to use when ``multi_device`` is set to True. (Default: ``kfac_axis``) batch_size_extractor: A function that takes as input the function arguments and returns the batch size for a single device. (Default: ``kfac.utils.default_batch_size_extractor``) distributed_inverses: Boolean. Whether to distribute the inverse computations (required to compute the preconditioner) across the different devices in a layer-wise fashion. If False, each device will (redundantly) perform the required computations for all of the layers. (Default: True) distributed_precon_apply: Boolean. Whether to distribute the application of the preconditioner across the different devices in a layer-wise fashion. If False, each device will (redundantly) perform the required operations for all of the layers. (Default: True) num_samples: Number of samples (per case) to use when computing stochastic curvature matrix estimates. This option is only used when ``estimation_mode == 'fisher_gradients'`` or ``estimation_mode == '[fisher,ggn]_curvature_prop'``. (Default: 1) should_vmap_samples: Whether to use ``jax.vmap`` to compute samples when ``num_samples > 1``. (Default: False) """ self._l2_reg = l2_reg self._damping = damping self._damping_schedule = damping_schedule if (self._damping_schedule is None) == (self._damping is None): raise ValueError( "Only one of `damping_schedule` or `damping` has to be specified." 
) self._norm_constraint = norm_constraint self._curvature_ema = curvature_ema self._curvature_update_period = curvature_update_period self._inverse_update_period = inverse_update_period self._pmap_axis_name = pmap_axis_name self._batch_size_extractor = batch_size_extractor self._use_cached_inverses = self._inverse_update_period != 1 self._use_exact_inverses = use_exact_inverses # Curvature estimator self._estimator = kfac_jax.curvature_estimator.BlockDiagonalCurvature( func=value_func, default_estimation_mode=estimation_mode, params_index=0, layer_tag_to_block_ctor=layer_tag_to_block_ctor, register_only_generic=register_only_generic, patterns_to_skip=patterns_to_skip, distributed_multiplies=distributed_precon_apply, distributed_cache_updates=distributed_inverses, num_samples=num_samples, should_vmap_samples=should_vmap_samples, **(auto_register_kwargs or {}), ) def init( self, func_args: FuncArgsVariants, rng: PRNGKey, ) -> PreconditionState: """Initializes the preconditioner and returns the state.""" return PreconditionState( count=jnp.array(0, dtype=jnp.int32), estimator_state=self.estimator.init( rng=rng, func_args=func_args, exact_powers_to_cache=self._exact_powers_to_cache, approx_powers_to_cache=self._approx_powers_to_cache, cache_eigenvalues=False, ), ) @property def _exact_powers_to_cache(self) -> Optional[Union[int, Sequence[int]]]: if self._use_exact_inverses and self._use_cached_inverses: return -1 else: return None @property def _approx_powers_to_cache(self) -> Optional[Union[int, Sequence[int]]]: if not self._use_exact_inverses and self._use_cached_inverses: return -1 else: return None @property def estimator(self) -> kfac_jax.curvature_estimator.BlockDiagonalCurvature: """The underlying curvature estimator used by the preconditioner.""" return self._estimator @property def pmap_axis_name(self): return self._pmap_axis_name def get_identity_weight( self, state: PreconditionState ) -> Union[Array, float]: damping = self._damping if damping is None: damping = self._damping_schedule(state.count) return damping + self._l2_reg def sync_estimator_state( self, state: PreconditionState, ) -> PreconditionState: """Syncs the estimator state.""" return PreconditionState( count=state.count, estimator_state=self.estimator.sync( state.estimator_state, pmap_axis_name=self.pmap_axis_name), ) def should_update_estimate_curvature( self, state: PreconditionState ) -> Union[Array, bool]: """Whether at the current step the preconditioner should update the curvature estimates.""" if self._curvature_update_period == 1: return True return state.count % self._curvature_update_period == 0 def should_sync_estimate_curvature( self, state: PreconditionState ) -> Union[Array, bool]: """Whether at the current step the preconditioner should synchronize (pmean) the curvature estimates.""" # sync only before inverses are calculated (either for updating the # cache or for preconditioning). if not self._use_cached_inverses: return True return self.should_update_inverse_cache(state) def should_update_inverse_cache( self, state: PreconditionState ) -> Union[Array, bool]: """Whether at the current step the preconditioner should update the inverse cache.""" if not self._use_cached_inverses: return False return state.count % self._inverse_update_period == 0 def maybe_update( self, state: PreconditionState, func_args: FuncArgsVariants, rng: PRNGKey, ) -> PreconditionState: """Updates the estimates if it is the right iteration.""" # NOTE: This maybe update curvatures and inverses at an iteration. 
But # if curvatures should be accumulated for multiple iterations # before updating inverses (for micro-batching), call # `maybe_update_estimator_curvature` and `maybe_update_inverse_cache` # separately, instead of calling this method. state = self.maybe_update_estimator_curvature( state=state, func_args=func_args, rng=rng, sync=self.should_sync_estimate_curvature(state), ) state = self.maybe_update_inverse_cache(state) return PreconditionState(state.count, state.estimator_state) def _update_estimator_curvature( self, estimator_state: EstimatorState, func_args: FuncArgsVariants, rng: PRNGKey, ema_old: Numeric, ema_new: Numeric, sync: Union[Array, bool] = True ) -> EstimatorState: """Updates the curvature estimator state.""" state = self.estimator.update_curvature_matrix_estimate( state=estimator_state, ema_old=ema_old, ema_new=ema_new, # Note that the batch is always the last entry of FuncArgsVariantsdef batch_size=self._batch_size_extractor(func_args[-1]), rng=rng, func_args=func_args, ) return jax.lax.cond( sync, functools.partial(self.estimator.sync, pmap_axis_name=self.pmap_axis_name), lambda state_: state_, state, ) def maybe_update_estimator_curvature( self, state: PreconditionState, func_args: FuncArgsVariants, rng: PRNGKey, decay_old_ema: Union[Array, bool] = True, sync: Union[Array, bool] = True, ) -> PreconditionState: """Updates the curvature estimates if it is the right iteration.""" ema_old = decay_old_ema * self._curvature_ema + (1.0 - decay_old_ema) * 1.0 return self._maybe_update_estimator_state( state, self.should_update_estimate_curvature(state), self._update_estimator_curvature, func_args=func_args, rng=rng, ema_old=ema_old, ema_new=1.0, sync=sync, ) def maybe_update_inverse_cache( self, state: PreconditionState, ) -> PreconditionState: """Updates the estimator state cache if it is the right iteration.""" if state.count is None: raise ValueError( "PreconditionState is not initialized. Call" " `maybe_update_estimator_curvature` first." 
) return self._maybe_update_estimator_state( state, self.should_update_inverse_cache(state), self.estimator.update_cache, identity_weight=self.get_identity_weight(state), exact_powers=self._exact_powers_to_cache, approx_powers=self._approx_powers_to_cache, eigenvalues=False, pmap_axis_name=self.pmap_axis_name, ) def _maybe_update_estimator_state( self, state: PreconditionState, should_update: Union[Array, bool], update_func: Callable[..., EstimatorState], **update_func_kwargs, ) -> PreconditionState: """Updates the estimator state if it should update.""" estimator_state = lax.cond( should_update, functools.partial(update_func, **update_func_kwargs), lambda s: s, state.estimator_state, ) return PreconditionState(state.count, estimator_state) def apply( self, updates: optax.Updates, state: PreconditionState, ) -> optax.Updates: """Preconditions (= multiplies the inverse curvature estimation matrix to) updates.""" new_updates = self.estimator.multiply_inverse( state=state.estimator_state, parameter_structured_vector=updates, identity_weight=self.get_identity_weight(state), exact_power=self._use_exact_inverses, use_cached=self._use_cached_inverses, pmap_axis_name=self.pmap_axis_name, ) if self._norm_constraint is not None: sq_norm_grads = kfac_jax.utils.inner_product(new_updates, updates) del updates max_coefficient = jnp.sqrt(self._norm_constraint / sq_norm_grads) coeff = jnp.minimum(max_coefficient, 1) new_updates = kfac_jax.utils.scalar_mul(new_updates, coeff) else: del updates return new_updates def multiply_curvature( self, updates: optax.Updates, state: PreconditionState, ) -> optax.Updates: """Multiplies the (non-inverse) curvature estimation matrix to updates.""" # NOTE: Currently, `exact_power` and `use_cached` arguments are not used # in `self.estimator.multiply()`, and the exact power (of 1) is always used. # Therefore, the way `identity_weight` (damping) is used with # `estimator.multiply()` is different from how it's used in # `estimator.multiply_inverse()` (in `Preconditioner.apply()`) when # `use_exact_inverses == False` (default). In particular, the former uses # non-factored damping while the latter uses factored one, and the two are # NOT the exact inverses of each other. updates = self.estimator.multiply( state=state.estimator_state, parameter_structured_vector=updates, identity_weight=self.get_identity_weight(state), exact_power=self._use_exact_inverses, # this argument will not be used. use_cached=self._use_cached_inverses, # this argument will not be used. 
pmap_axis_name=self.pmap_axis_name, ) return updates def as_gradient_transform( self, use_inverse: bool = True ) -> optax.GradientTransformationExtraArgs: """Multiplies the inverse or non-inverse curvature estimation matrix to updates.""" def init_fn(params): del params return EmptyState() multiply_fn = self.apply if use_inverse else self.multiply_curvature def update_fn( updates, state, params=None, *, precond_state: PreconditionState, **extra_args, ): del params, extra_args updates = multiply_fn(updates, precond_state) return updates, state return optax.GradientTransformationExtraArgs(init_fn, update_fn) def increment_count(self, state: PreconditionState): count_inc = optax.safe_int32_increment(state.count) return PreconditionState(count_inc, state.estimator_state) class OptaxAndPreconditionState(NamedTuple): optax_state: OptaxState precond_state: Optional[PreconditionState] = None class OptaxWrapper: """Wrapper class for Optax optimizers to have the same interface as KFAC.""" def __init__( self, value_and_grad_func: kfac_jax.optimizer.ValueAndGradFunc, value_func_has_aux: bool, value_func_has_state: bool, value_func_has_rng: bool, learning_rate: ScheduleType, optax_optimizer_ctor: OptaxCtor, batch_process_func: Callable[[Batch], Batch] = lambda x: x, preconditioner: Optional[Preconditioner] = None, include_norms_in_stats: bool = False, include_per_param_norms_in_stats: bool = False, ): """Initializes the Optax wrapper. Args: value_and_grad_func: Python callable. The function should return the value of the loss to be optimized and its gradients. If the argument `value_func_has_aux` is `False` then the interface should be: loss, loss_grads = value_and_grad_func(params, batch) If `value_func_has_aux` is `True` then the interface should be: (loss, aux), loss_grads = value_and_grad_func(params, batch) value_func_has_aux: Boolean. Specifies whether the provided callable `value_and_grad_func` returns the loss value only, or also some auxiliary data. (Default: `False`) value_func_has_state: Boolean. Specifies whether the provided callable `value_and_grad_func` has a persistent state that is inputted and it also outputs an update version of it. (Default: `False`) value_func_has_rng: Boolean. Specifies whether the provided callable `value_and_grad_func` additionally takes as input an rng key. (Default: `False`) learning_rate: The learning rate or learning rate schedule. optax_optimizer_ctor: A callable that takes the learning rate schedule as an input and returns the optax optimizer. batch_process_func: Callable. A function which to be called on each batch before feeding to the KFAC on device. This could be useful for specific device input optimizations. (Default: `lambda x: x`) preconditioner: The optax-compatible K-FAC preconditioner. include_norms_in_stats: Boolean. It True, the vector norms of the gradient, preconditioned gradient, and parameter update are included in the statistics returned by the step function. (Default: ``False``) include_per_param_norms_in_stats: Boolean. It True, the per-parameter vector norms of the gradient, preconditioned gradient, and parameter update are included in the statistics returned by the step function. 
(Default: ``False``) """ self._value_and_grad_func = value_and_grad_func self._value_func_has_aux = value_func_has_aux self._value_func_has_state = value_func_has_state self._value_func_has_rng = value_func_has_rng if not callable(learning_rate): self._learning_rate = lambda _: learning_rate else: self._learning_rate = learning_rate # Wraps the optax optimizer (gradient transformation), so that it ignores # extra args (i.e. `precond_state` for preconditioner) if not needed. self._optax_optimizer = optax.with_extra_args_support( optax_optimizer_ctor(self._learning_rate) ) self._preconditioner = preconditioner self._include_norms_in_stats = include_norms_in_stats self._include_per_param_norms_in_stats = include_per_param_norms_in_stats self._batch_process_func = batch_process_func or (lambda x: x) self.pmap_axis_name = ( "optax_axis" if self._preconditioner is None else self._preconditioner.pmap_axis_name ) self._pmap_step = jax.pmap( self._step, axis_name=self.pmap_axis_name, donate_argnums=list(range(5)), in_axes=(0,) * 5 + (None,), ) self._pmap_init = jax.pmap( lambda p, *_: OptaxAndPreconditionState(self._optax_optimizer.init(p)), axis_name=self.pmap_axis_name, ) if self._preconditioner is not None: if not isinstance(self._preconditioner, Preconditioner): raise ValueError( "preconditioner must be a {}, but {} is given.".format( Preconditioner, type(self._preconditioner) ) ) preconditioner: Preconditioner = self._preconditioner def _init_preconditioner( params: Params, rng: PRNGKey, batch: Batch, func_state: Optional[FuncState] = None, ) -> PreconditionState: """Maybe initializes the PreconditionState.""" batch = self._batch_process_func(batch) func_args = kfac_jax.optimizer.make_func_args( params, func_state, rng, batch, has_state=self._value_func_has_state, has_rng=self._value_func_has_rng, ) return preconditioner.init(func_args, rng) self._pmap_init_preconditioner = jax.pmap( _init_preconditioner, axis_name=self.pmap_axis_name, ) def init( self, params: Params, rng: PRNGKey, batch: Batch, func_state: Optional[FuncState] = None, ) -> OptaxAndPreconditionState: """Initializes the optimizer and returns the appropriate optimizer state.""" return self._pmap_init(params, rng, batch, func_state) def _step( self, params: Params, state: OptaxAndPreconditionState, rng: PRNGKey, batch: Batch, func_state: Optional[FuncState] = None, global_step_int: Optional[int] = None, ) -> Union[ Tuple[Params, OptaxAndPreconditionState, FuncState, Mapping[str, Array]], Tuple[Params, OptaxAndPreconditionState, Mapping[str, Array]], ]: """A single step of optax.""" batch = self._batch_process_func(batch) func_args = kfac_jax.optimizer.make_func_args( params, func_state, rng, batch, has_state=self._value_func_has_state, has_rng=self._value_func_has_rng ) optax_state, precond_state = state.optax_state, state.precond_state if self._preconditioner is not None: precond_state = self._preconditioner.maybe_update( precond_state, func_args, rng, ) precond_state = self._preconditioner.increment_count(precond_state) out, grads = self._value_and_grad_func(*func_args) loss, new_func_state, stats = kfac_jax.optimizer.extract_func_outputs( out, has_aux=self._value_func_has_aux, has_state=self._value_func_has_state, ) loss, stats, grads = kfac_jax.utils.pmean_if_pmap( # pytype: disable=wrong-keyword-args (loss, stats, grads), axis_name=self.pmap_axis_name ) stats = stats or {} stats["loss"] = loss # Compute and apply updates via our optimizer. 
updates, new_optax_state = self._optax_optimizer.update( grads, optax_state, params, precond_state=precond_state, ) new_state = OptaxAndPreconditionState(new_optax_state, precond_state) new_params = optax.apply_updates(params, updates) # Add step and batch size batch_size = jax.tree_util.tree_leaves(batch)[0].shape[0] stats["step"] = global_step_int + 1 stats["batch_size"] = batch_size * jax.device_count() stats["data_seen"] = stats["step"] * stats["batch_size"] stats["learning_rate"] = self._learning_rate(global_step_int) if self._include_norms_in_stats: stats["grad_norm"] = kfac_jax.utils.norm(grads) stats["update_norm"] = kfac_jax.utils.norm(updates) stats["param_norm"] = kfac_jax.utils.norm(params) stats["rel_grad_norm"] = stats["grad_norm"] / stats["param_norm"] stats["rel_update_norm"] = stats["update_norm"] / stats["param_norm"] if self._include_per_param_norms_in_stats: stats.update(kfac_jax.utils.per_parameter_norm(grads, "grad_norm")) stats.update(kfac_jax.utils.per_parameter_norm(updates, "update_norm")) param_norms = kfac_jax.utils.per_parameter_norm(params, "param_norm") for key in param_norms: norm = param_norms[key] stats[key] = norm grad_key = key.replace("param", "grad") stats["rel_" + grad_key] = stats[grad_key] / norm upd_key = key.replace("param", "update") stats["rel_" + upd_key] = stats[upd_key] / norm if self._value_func_has_state: return new_params, new_state, new_func_state, stats else: return new_params, new_state, stats def step( self, params: Params, state: OptaxAndPreconditionState, rng: PRNGKey, data_iterator: Iterator[Batch], func_state: Optional[FuncState] = None, global_step_int: Optional[int] = None, ) -> Union[ Tuple[Params, Any, FuncState, Mapping[str, Array]], Tuple[Params, Any, Mapping[str, Array]], ]: """A step with similar interface to KFAC.""" batch = next(data_iterator) if self._preconditioner is not None and state.precond_state is None: precond_state = self._pmap_init_preconditioner( params, rng, batch, func_state ) state = OptaxAndPreconditionState(state.optax_state, precond_state) return self._pmap_step( params, state, rng, batch, func_state, global_step_int, ) def tf1_rmsprop( learning_rate_fn: Callable[[Numeric], Numeric], decay: float = .9, momentum: float = 0., epsilon: float = 1e-8 ) -> optax.GradientTransformation: """RMSProp update equivalent to tf.compat.v1.train.RMSPropOptimizer.""" def tf1_scale_by_rms(decay_=0.9, epsilon_=1e-8): """Same as optax.scale_by_rms, but initializes second moment to one.""" def init_fn(params): nu = jax.tree_util.tree_map(jnp.ones_like, params) # second moment return optax.ScaleByRmsState(nu=nu) def _update_moment(updates, moments, decay, order): return jax.tree_util.tree_map( lambda g, t: (1 - decay) * (g ** order) + decay * t, updates, moments) def update_fn(updates, state, params=None): del params nu = _update_moment(updates, state.nu, decay_, 2) updates = jax.tree_util.tree_map( lambda g, n: g / (jnp.sqrt(n + epsilon_)), updates, nu) return updates, optax.ScaleByRmsState(nu=nu) return optax.GradientTransformation(init_fn, update_fn) return optax.chain( tf1_scale_by_rms(decay_=decay, epsilon_=epsilon), optax.trace(decay=momentum, nesterov=False), optax.scale_by_schedule(learning_rate_fn), optax.scale(-1.)) def linear_interpolation( x: Numeric, interpolation_points: Tuple[Tuple[float, float], ...] 
) -> Array: """Performs linear interpolation between the interpolation points.""" xs, ys = zip(*interpolation_points) masks = [x < ci for ci in xs[1:]] min_iter = jnp.zeros_like(x) max_iter = jnp.zeros_like(x) max_val = jnp.zeros_like(x) min_val = jnp.zeros_like(x) p = jnp.ones_like(x) for i in range(len(masks) - 1): pi = p * masks[i] min_iter = pi * xs[i] + (1 - pi) * min_iter max_iter = pi * xs[i + 1] + (1 - pi) * max_iter max_val = pi * ys[i] + (1 - pi) * max_val min_val = pi * ys[i + 1] + (1 - pi) * min_val p = p * (1 - masks[i]) min_iter = p * xs[-2] + (1 - p) * min_iter max_iter = p * xs[-1] + (1 - p) * max_iter max_val = p * ys[-2] + (1 - p) * max_val min_val = p * ys[-1] + (1 - p) * min_val diff = (min_val - max_val) progress = (x - min_iter) / (max_iter - min_iter - 1) return max_val + diff * jnp.minimum(progress, 1.0) def imagenet_sgd_schedule( global_step: Numeric, dataset_size: int, train_total_batch_size: Optional[int], **_: Any, ) -> Array: """Standard linear scaling schedule for ImageNet.""" if train_total_batch_size is None: raise ValueError("Batch size must be known.") # Can be found in Section 5.1 of https://arxiv.org/pdf/1706.02677.pdf steps_per_epoch = dataset_size / train_total_batch_size current_epoch = global_step / steps_per_epoch lr = (0.1 * train_total_batch_size) / 256 lr_linear_till = 5 boundaries = jnp.array((30, 60, 80)) * steps_per_epoch values = jnp.array([1., 0.1, 0.01, 0.001]) * lr index = jnp.sum(boundaries < global_step) lr = jnp.take(values, index) return lr * jnp.minimum(1., current_epoch / lr_linear_till) def fixed_schedule( global_step: Numeric, value: Numeric, **_: Any, ) -> Array: """Fixed/constant schedule.""" return jnp.ones_like(global_step) * value def kfac_resnet50_schedule( global_step: Numeric, **_: Any, ) -> Array: """Custom schedule for KFAC.""" return jnp.power(10.0, linear_interpolation( x=global_step, interpolation_points=( (0, -6), (50, -3.1), (5000, -3.1), (11000, -3.23), (20000, -5.0), (200000, -5.7), (1000001, -6)) )) # TODO(jamesmartens,kazukiosawa,botev): Some possible future improvements to # the schedules code: # - Put the logic to calculate "warmup_data" (or "warmup_steps") and # "total_data" (or "total_steps") in a place so that we can apply warmup to # an arbitrary schedule. # - Use existing `optax.schedule` operations (e.g. `exponential_decay`, # `piecewise_constant_schedule`) as much as possible to make the kfac_jax # codebase simple and compact. # - Optax's `warmup_cosine_decay_schedule` and # `warmup_exponential_decay_schedule` are implemented by simply combining # `linear_schedule` and the corresponding schedule. So we can prepare a # general warmup scheduler factory that returns a combination of `linear_ # schedule` and the given base scheduler based on the arguments e.g. warmup_ # steps. # TODO(jamesmartens,kazukiosawa,botev): change these argument names to be not be # specific to learning rates. 
def cosine_schedule( global_step: Numeric, dataset_size: int, train_total_batch_size: Optional[int], total_steps: Optional[int], total_epochs: Optional[float], peak_learning_rate: float, initial_learning_rate: float = 1e-7, end_learning_rate: float = 0.0, warmup_epochs: Optional[float] = None, warmup_steps: Optional[int] = None, warmup_fraction: Optional[float] = None, data_seen: Optional[Numeric] = None, **_: Any, ) -> Numeric: """A cosine schedule described in the TAT paper.""" if (total_steps is None) == (total_epochs is None): raise ValueError("Exactly one of `total_steps` and `total_epochs` must be " "set.") n = sum(x is not None for x in [warmup_epochs, warmup_steps, warmup_fraction]) if n != 1: raise ValueError(f"Exactly one of warmup_steps={warmup_steps}, " f"warmup_epochs={warmup_epochs} and warmup_fraction=" f"{warmup_fraction} must be set.") if warmup_epochs is not None or total_epochs is not None: if data_seen is None: if train_total_batch_size is not None: data_seen = global_step * train_total_batch_size else: raise ValueError("One of 'train_total_batch_size' or 'data_seen' must " "passed when 'total_epochs' or 'warmup_epochs' are " "passed.") if ((warmup_epochs is None or total_epochs is None) and train_total_batch_size is None): raise ValueError("'train_total_batch_size' must be passed if only one of " "'total_epochs' or 'warmup_epochs' are passed.") if warmup_epochs is not None: warmup_data = warmup_epochs * dataset_size elif warmup_fraction is not None: warmup_data = warmup_fraction * total_steps * train_total_batch_size else: warmup_data = warmup_steps * train_total_batch_size if total_epochs is not None: total_data = total_epochs * dataset_size else: total_data = total_steps * train_total_batch_size # Optax uses chex which has an inconsistent definition of "Numeric" from # what we use here. return optax.warmup_cosine_decay_schedule( # pytype: disable=bad-return-type init_value=initial_learning_rate, peak_value=peak_learning_rate, end_value=end_learning_rate, warmup_steps=warmup_data, decay_steps=total_data, )(data_seen) else: if warmup_fraction is not None: warmup_steps = warmup_fraction * total_steps # Optax uses chex which has an inconsistent definition of "Numeric" from # what we use here. return optax.warmup_cosine_decay_schedule( # pytype: disable=bad-return-type init_value=initial_learning_rate, peak_value=peak_learning_rate, end_value=end_learning_rate, warmup_steps=warmup_steps, decay_steps=total_steps, )(global_step) # TODO(jamesmartens,kazukiosawa,botev): change these argument names to be not be # specific to learning rates. Also, initial_learning_rate is misnamed since this # is value is never actually used, but is just a "base" multiplying for the # decay factors. 
def stepwise_schedule( global_step: Numeric, dataset_size: int, train_total_batch_size: Optional[int], lr_decay_factors: Sequence[float], initial_learning_rate: float, epoch_boundaries: Optional[Sequence[float]] = None, warmup_epochs: Optional[float] = None, step_boundaries: Optional[Sequence[float]] = None, warmup_steps: Optional[int] = None, data_seen: Optional[Numeric] = None, **_: Any, ) -> Numeric: """A basic stepwise schedule.""" if (epoch_boundaries is None) == (step_boundaries is None): raise ValueError("Exactly one of `epoch_boundaries` and `step_boundaries` " "can must be passed.") if (warmup_epochs is None) == (warmup_steps is None): raise ValueError("Exactly one of `warmup_epochs` and `warmup_steps` must " "be set.") values = jnp.array(lr_decay_factors) * initial_learning_rate if warmup_epochs is not None or epoch_boundaries is not None: if data_seen is None: if train_total_batch_size is not None: data_seen = global_step * train_total_batch_size else: raise ValueError("One of 'train_total_batch_size' or 'data_seen' must " "passed when 'epoch_boundaries' or 'warmup_epochs' " "are passed.") if ((warmup_epochs is None or epoch_boundaries is None) and train_total_batch_size is None): raise ValueError("'train_total_batch_size' must be passed if only one of " "'epoch_boundaries' or 'warmup_epochs' are passed.") if warmup_epochs is not None: warmup_data = warmup_epochs * dataset_size else: warmup_data = warmup_steps * train_total_batch_size if epoch_boundaries is not None: data_boundaries = jnp.array(epoch_boundaries) * dataset_size else: data_boundaries = jnp.array(step_boundaries) * train_total_batch_size index = jnp.sum(data_boundaries <= data_seen) value = jnp.take(values, index) return value * jnp.minimum(1., data_seen / warmup_data) else: step_boundaries = jnp.array(step_boundaries) index = jnp.sum(step_boundaries <= global_step) value = jnp.take(values, index) return value * jnp.minimum(1., global_step / warmup_steps) def exponential_decay_schedule( global_step: int, dataset_size: int, train_total_batch_size: Optional[int], total_steps: Optional[int], total_epochs: Optional[float], init_value: float, end_value: float, decay_epochs: Optional[float] = None, decay_steps: Optional[int] = None, decay_fraction: Optional[float] = None, **_: Any, ): """Exponential decay schedule.""" if (total_steps is None) == (total_epochs is None): raise ValueError("Only one of `steps` and `epochs` can be set.") n = sum(x is not None for x in [decay_epochs, decay_steps, decay_fraction]) if n != 1: raise ValueError( f"Exactly one of warmup_steps={decay_steps}, " f"warmup_epochs={decay_epochs} and warmpu_fraction=" f"{decay_fraction} must be set." ) if ( decay_epochs is not None or total_epochs is not None ) and train_total_batch_size is None: raise ValueError( "Batch size must be known when passing epochs or warmup_epochs." 
) if decay_epochs is not None: decay_steps = decay_epochs * dataset_size / train_total_batch_size elif decay_fraction is not None: decay_steps = decay_fraction * total_steps return optax.exponential_decay( init_value=init_value, end_value=end_value, decay_rate=end_value / init_value, transition_steps=decay_steps, )(global_step) def construct_schedule( name: str, **kwargs, ) -> Callable[[Numeric], Array]: """Constructs the actual schedule from its name and extra kwargs.""" if name == "fixed": return functools.partial(fixed_schedule, **kwargs) elif name == "imagenet_sgd": return functools.partial(imagenet_sgd_schedule, **kwargs) elif name == "kfac_resnet50": return functools.partial(kfac_resnet50_schedule, **kwargs) elif name == "cosine": return functools.partial(cosine_schedule, **kwargs) elif name == "stepwise": return functools.partial(stepwise_schedule, **kwargs) elif name == "exponential_decay": return functools.partial(exponential_decay_schedule, **kwargs) else: raise NotImplementedError(name) def kfac_bn_registration_kwargs(bn_registration: str) -> Mapping[ str, Union[Tuple[str, ...], Mapping[str, Type[kfac_jax.CurvatureBlock]]] ]: """Constructs KFAC kwargs for the given batch-norm registration strategy.""" if bn_registration == "generic": return dict(patterns_to_skip=("scale_and_shift", "scale_only")) elif bn_registration == "full": return dict( layer_tag_to_block_cls=dict( scale_and_shift_tag=kfac_jax.ScaleAndShiftFull, ) ) elif bn_registration != "diag": raise ValueError(f"Unknown batch_norm_registration={bn_registration}.") return {} def create_optimizer( name: str, config: config_dict.ConfigDict, train_model_func: kfac_jax.optimizer.ValueFunc, l2_reg: Numeric, has_aux: bool, has_func_state: bool, has_rng: bool, dataset_size: int, train_total_batch_size: int, total_steps: Optional[int], total_epochs: Optional[float], ) -> Union[OptaxWrapper, kfac_jax.Optimizer]: """Creates an optimizer from the provided configuration.""" value_and_grad_func = jax.value_and_grad(train_model_func, has_aux=has_aux) kwargs = dict(**config[name]) logging.info("Using %s optimizer.", name) if "kfac" in name: # Update kwargs regarding batch norm registration extra_kwargs = kfac_bn_registration_kwargs( kwargs.pop("batch_norm_registration", "diag")) kwargs.update(extra_kwargs) if name == "kfac": for sched_name in ["learning_rate_schedule", "momentum_schedule", "damping_schedule"]: if kwargs.get(sched_name) is not None: kwargs[sched_name] = construct_schedule( dataset_size=dataset_size, train_total_batch_size=train_total_batch_size, total_steps=total_steps, total_epochs=total_epochs, **kwargs[sched_name] ) return kfac_jax.Optimizer( value_and_grad_func=value_and_grad_func, l2_reg=l2_reg, value_func_has_aux=has_aux, value_func_has_state=has_func_state, value_func_has_rng=has_rng, multi_device=True, **kwargs, ) elif hasattr(optax, name): learning_rate_schedule = construct_schedule( dataset_size=dataset_size, train_total_batch_size=train_total_batch_size, total_steps=total_steps, total_epochs=total_epochs, **kwargs.pop("learning_rate_schedule") ) optax_ctor = lambda lr: (getattr(optax, name)(learning_rate=lr, **kwargs)) return OptaxWrapper( value_and_grad_func=value_and_grad_func, value_func_has_aux=has_aux, value_func_has_rng=has_rng, value_func_has_state=has_func_state, learning_rate=learning_rate_schedule, optax_optimizer_ctor=optax_ctor, ) else: raise NotImplementedError()
kfac-jax-main
examples/optimizers.py
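# --- Illustrative usage sketch (not part of the repository file above) ---
# `construct_schedule` builds a learning-rate schedule from a name plus keyword
# arguments; the "cosine" branch forwards them to `cosine_schedule`. The
# dataset size, batch size and learning-rate values below are made-up numbers
# for illustration only.
from examples import optimizers

lr_schedule = optimizers.construct_schedule(
    name="cosine",
    dataset_size=60_000,            # assumed toy dataset size
    train_total_batch_size=512,     # assumed total batch size
    total_steps=None,
    total_epochs=10.0,
    peak_learning_rate=1e-1,
    initial_learning_rate=1e-5,
    end_learning_rate=0.0,
    warmup_epochs=1.0,
)
lr_at_step_100 = lr_schedule(100)   # learning rate after 100 optimizer steps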
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility functions for computing and automatically registering losses.""" from typing import Optional, Sequence, Tuple, Dict import haiku as hk import jax from jax import lax import jax.numpy as jnp from jax.scipy import special import kfac_jax Array = kfac_jax.utils.Array Numeric = kfac_jax.utils.Numeric Params = kfac_jax.utils.Params def l2_regularizer( params: Params, haiku_exclude_batch_norm: bool, haiku_exclude_biases: bool, ) -> Array: """Computes an L2 regularizer.""" if haiku_exclude_batch_norm: params = hk.data_structures.filter( # pytype: disable=wrong-arg-types lambda m, n, p: "batchnorm" not in m, params) if haiku_exclude_biases: params = hk.data_structures.filter( # pytype: disable=wrong-arg-types lambda m, n, p: n != "b", params ) return 0.5 * kfac_jax.utils.inner_product(params, params) def sigmoid_cross_entropy( logits: Array, labels: Array, weight: float = 1.0, register_loss: bool = True, ) -> Array: """Sigmoid cross-entropy loss.""" if register_loss: kfac_jax.register_sigmoid_cross_entropy_loss(logits, labels, weight) # Code is copied from Tensorflow. zeros = jnp.zeros_like(logits) relu_logits = jnp.where(logits >= zeros, logits, zeros) neg_abs_logits = jnp.where(logits >= zeros, -logits, logits) log_1p = jnp.log1p(jnp.exp(neg_abs_logits)) return weight * jnp.add(relu_logits - logits * labels, log_1p) def softmax_cross_entropy( logits: Array, labels: Array, weight: Numeric = 1.0, register_loss: bool = True, mask: Optional[Array] = None, ) -> Array: """Softmax cross entropy loss.""" if register_loss: if not isinstance(weight, float): raise NotImplementedError("Non-constant loss weights are not currently " "supported.") # Currently the registration functions only support 2D array inputs values # for `logits`, and so we need the reshapes below. kfac_jax.register_softmax_cross_entropy_loss( logits.reshape([-1, logits.shape[-1]]), targets=labels.reshape([-1]), mask=mask.reshape([-1]) if mask is not None else None, weight=weight) max_logits = jnp.max(logits, keepdims=True, axis=-1) # It's unclear whether this stop_gradient is a good idea. 
# See https://github.com/google/jax/issues/13529 max_logits = lax.stop_gradient(max_logits) logits = logits - max_logits log_z = special.logsumexp(logits, axis=-1) if logits.shape == labels.shape: # Labels are encoded as 1-hot vectors loss = -jnp.sum(logits * labels, axis=-1) + log_z elif logits.ndim == labels.ndim + 1: # Labels are encoded as integers # Taken from Optax's softmax_cross_entropy_with_integer_labels: label_logits = jnp.take_along_axis( logits, labels[..., None], axis=-1)[..., 0] loss = -label_logits + log_z else: raise ValueError(f"The provided labels must have the same rank as the " f"logits - {logits.ndim}, or one less, but got " f"{labels.ndim}.") if mask is not None: loss = loss * mask loss = weight * loss # sum over all but the batch dimension loss = jnp.sum(loss, axis=range(1, loss.ndim)) return loss def squared_error( prediction: Array, targets: Array, weight: float = 1.0, register_loss: bool = True, ) -> Array: """Squared error loss.""" if prediction.shape != targets.shape: raise ValueError("prediction and targets should have the same shape.") if register_loss: kfac_jax.register_squared_error_loss(prediction, targets, weight) return weight * jnp.sum(jnp.square(prediction - targets), axis=-1) def top_k_accuracy( logits_or_probs: Array, labels: Array, k: int = 1, ) -> Array: """Top-k accuracy.""" if labels.ndim == logits_or_probs.ndim: # One hot labels labels = jnp.argmax(labels, axis=-1) elif labels.ndim + 1 != logits_or_probs.ndim: raise ValueError(f"The provided labels must have the same rank as the " f"logits_or_probs - {logits_or_probs.ndim}, or one less, " f"{labels.ndim}.") if k == 1: indices = jnp.argmax(logits_or_probs, axis=-1) correct = jnp.equal(indices, labels) else: _, indices = lax.top_k(logits_or_probs, k=k) correct = jnp.equal(indices, labels[..., None]) correct = jnp.sum(correct, axis=-1) return jnp.mean(correct.astype(logits_or_probs.dtype)) def add_label_smoothing( labels: Array, label_smoothing: float, num_classes: int, labels_are_one_hot: bool = False, ) -> Array: """Adds label smoothing to the labels.""" if label_smoothing < 0. or label_smoothing > 1.: raise ValueError(f"label_smoothing is {label_smoothing} but should be in " f"[0, 1].") if label_smoothing > 0: if not labels_are_one_hot: labels = jax.nn.one_hot(labels, num_classes) assert labels.shape[-1] == num_classes smooth_positives = 1. - label_smoothing smooth_negatives = label_smoothing / num_classes labels = smooth_positives * labels + smooth_negatives return labels def classifier_loss_and_stats( logits: Array, labels_as_int: Array, params: Params, l2_reg: Numeric, haiku_exclude_batch_norm: bool, haiku_exclude_biases: bool, label_smoothing: float = 0.0, top_k_stats: Sequence[int] = (1, 5), average_loss: bool = True, register_loss: bool = True, mask: Optional[Array] = None, normalization_mode: str = "batch_size_only", ) -> Tuple[Array, Dict[str, Array]]: """Softmax cross-entropy with regularizer and accuracy statistics.""" batch_size = logits.shape[0] if labels_as_int.shape[0] != batch_size: raise ValueError(f"Size of first dimension of logits ({batch_size}) " f"(i.e. batch size) doesn't match that of labels " f"({labels_as_int.shape[0]})") if mask is not None and mask.shape[0] != batch_size: raise ValueError(f"Size of first dimension of logits ({batch_size}) " f"(i.e. 
batch size) doesn't match that of mask " f"({mask.shape[0]})") if normalization_mode == "batch_size_only": weight = 1.0 elif normalization_mode == "all_dims": weight = 1.0 / kfac_jax.utils.product(logits.shape[1:-1]) elif normalization_mode == "all_dims_nonmasked": assert mask is not None weight = batch_size / jnp.sum(mask) else: raise ValueError(f"Unrecognized value for normalization_mode: " f"{normalization_mode}") labels = add_label_smoothing(labels_as_int, label_smoothing, logits.shape[-1]) softmax_loss = softmax_cross_entropy( logits, labels, weight=weight, register_loss=register_loss, mask=mask) averaged_raw_loss = jnp.sum(softmax_loss, axis=0) / batch_size loss = averaged_raw_loss if average_loss else softmax_loss l2_reg_val = l2_regularizer( params, haiku_exclude_batch_norm, haiku_exclude_biases) regularized_loss = loss + l2_reg * l2_reg_val stats = dict( raw_loss=averaged_raw_loss, l2_reg_val=l2_reg_val, ) for k in top_k_stats: stats[f"top_{k}_accuracy"] = top_k_accuracy(logits, labels_as_int, k) return regularized_loss, stats
kfac-jax-main
examples/losses.py
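# --- Illustrative usage sketch (not part of the repository file above) ---
# The loss helpers can be called directly on toy data; passing
# `register_loss=False` skips the K-FAC loss registration, which is only
# needed when the loss is built inside the model function that the optimizer
# traces and differentiates.
import jax
import jax.numpy as jnp

from examples import losses

logits = jax.random.normal(jax.random.PRNGKey(0), (8, 10))
labels = jnp.arange(8) % 10  # integer class labels

per_example_loss = losses.softmax_cross_entropy(
    logits, labels, register_loss=False)               # shape (8,)
top1 = losses.top_k_accuracy(logits, labels, k=1)      # scalar accuracy in [0, 1]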
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Jaxline experiment classes and utilities.""" import abc import collections import copy import functools import os import time from typing import Any, Callable, Dict, Iterator, Optional, Tuple, Union from absl import logging import jax import jax.numpy as jnp from jaxline import experiment from jaxline import utils as pipe_utils import kfac_jax from examples import datasets from examples import optimizers import ml_collections # Types for annotation Array = kfac_jax.utils.Array Numeric = kfac_jax.utils.Numeric PRNGKey = kfac_jax.utils.PRNGKey Params = kfac_jax.utils.Params Batch = kfac_jax.utils.Batch FuncState = kfac_jax.utils.FuncState InitFunc = Callable[[PRNGKey, Batch], Params] BatchSizeCalculatorCtor = Callable[[int, int, str], "BatchSizeCalculator"] ExperimentBatchSizes = collections.namedtuple( "ExperimentBatchSizes", ["train", "eval"] ) def is_exactly_one_not_none(*args): return sum(a is not None for a in args) == 1 class BatchSizeCalculator: """A class for computing the batch size in different ways.""" def __init__(self, total: int, per_device: int, mode: str): if total == -1: total = None if per_device == -1: per_device = None if not is_exactly_one_not_none(total, per_device): raise ValueError( "Exactly one of the ``total`` and ``per_device`` arguments must " "be set to a value and the other one must be ``None``." ) self._mode = mode self._total = total self._per_device = per_device @property @functools.lru_cache(maxsize=1) def num_local_devices(self) -> int: """The number of local devices.""" return jax.local_device_count() @property @functools.lru_cache(maxsize=1) def num_devices(self) -> int: """The total number of devices.""" return jax.device_count() @property @functools.lru_cache(maxsize=1) def per_device(self) -> int: """The per-device batch size.""" if self._per_device is not None: return self._per_device if self._total % self.num_devices != 0: raise ValueError( "The total batch size must be divisible by the number of devices." ) return self._total // self.num_devices @property @functools.lru_cache(maxsize=1) def per_host(self) -> int: """The per-host batch size.""" return self.per_device * self.num_local_devices @property @functools.lru_cache(maxsize=1) def total(self) -> int: """The total batch size.""" return self.per_device * self.num_devices def log_machines_setup(self): """Logs the machine setup and batch sizes.""" logging.info("Worker with mode %s", self._mode) logging.info( "Number of hosts[%d]: %d", jax.process_index(), jax.process_count() ) logging.info( "Number of devices[%d]: %d/%d", jax.process_index(), self.num_local_devices, self.num_devices, ) logging.info( "Device batch size[%d]: (%d x %d)/%d", jax.process_index(), self.num_devices, self.per_device, self.total, ) class SupervisedExperiment(abc.ABC): """Abstract supervised experiment. Attributes: mode: Either 'train' or 'eval' specifying whether to run training or evaluation of the experiment. 
init_rng: The Jax PRNG key that is used to seed the initialization of the model parameters. seed_rng: An RNG used fo seeding the dataset iterators. config: The experiment config. has_aux: Whether the model function returns any auxiliary data. has_rng: Whether the model function needs an PRNG key. has_func_state: Whether the model function has a state. eval_splits: Evaluation splits of the evaluation dataset loader. batch_size: An instance of `ExperimentBatchSizes`. init_parameters_func: A function that initializes the parameters and optionally the state of the model if it has one. params_init: A function that initializes the model parameters. model_loss_func: A function that computes the loss for the model. train_model_func: The `model_loss_func` with `is_training` set to `True`. eval_model_func: The `model_loss_func` with `is_training` set to `False`. eval_batch: A pmapped version of `self._evaluate_single_batch`. optimizer: The optimizer instance used for training. """ def __init__( self, mode: str, init_rng: PRNGKey, config: ml_collections.ConfigDict, init_parameters_func: InitFunc, model_loss_func: kfac_jax.optimizer.ValueFunc, has_aux: bool, has_rng: bool, has_func_state: bool, eval_splits: Tuple[str, ...] = ("train", "test"), batch_size_calculator_ctor: BatchSizeCalculatorCtor = BatchSizeCalculator, ): """Initializes experiment. Args: mode: Either 'train' or 'eval' specifying whether to run training or evaluation of the experiment. init_rng: The Jax PRNG key that is used to seed any randomness of the experiment. config: The experiment config. init_parameters_func: A function that initializes the parameters and optionally the state of the model if it has one. model_loss_func: A function that computes the loss for the model. has_aux: Whether the model function returns auxiliary data. has_rng: Whether the model function requires an RNG. has_func_state: Whether the model function has a state. eval_splits: Evaluation splits of the evaluation dataset loader. batch_size_calculator_ctor: A constructor function to create a batch size calculator. 
""" self.mode = mode self.init_rng, seed_rng = jax.random.split(init_rng) self.seed_rng = jax.random.fold_in(seed_rng, jax.process_index()) self.config = config self.has_aux = has_aux self.has_rng = has_rng self.has_func_state = has_func_state self.eval_splits = eval_splits self.batch_size = ExperimentBatchSizes( train=batch_size_calculator_ctor( # pytype: disable=wrong-keyword-args mode="train", **self.config.batch_size.train ), eval=batch_size_calculator_ctor( # pytype: disable=wrong-keyword-args mode="eval", **self.config.batch_size.eval ), ) self.params_init = jax.pmap(init_parameters_func) self.model_loss_func = model_loss_func self.train_model_func = functools.partial( self.model_loss_func, is_training=True ) self.eval_model_func = functools.partial( self.model_loss_func, is_training=False ) self.eval_batch = jax.pmap( self._evaluate_single_batch, axis_name="eval_axis" ) # Log some useful information getattr(self.batch_size, self.mode).log_machines_setup() # Create the optimizer self.optimizer = self.create_optimizer() # Initialize the state self._train_input, self._eval_input, self._init_batch = None, None, None self._params, self._state, self._opt_state = None, None, None self._python_step = 0 self._num_tensors = 0 self._num_parameters = 0 self._optimizer_state_size = 0 @property @abc.abstractmethod def dataset_size(self) -> int: """The number of data points in the training set.""" @property def train_input(self) -> Iterator[Batch]: """Returns the current training iterator.""" if self._train_input is None: logging.info("Initializing training data iterator.") seed_rng = jax.random.fold_in(self.seed_rng, self._python_step) self._train_input = pipe_utils.py_prefetch( functools.partial( self._build_train_input, split="train", seed=int(seed_rng[0]), device_batch_size=self.batch_size.train.per_device, ) ) return self._train_input @property def train_inputs( self, ) -> Union[Iterator[Batch], Tuple[Iterator[Batch], Iterator[Batch]]]: """The training data iterator.""" return self.train_input @property def eval_input(self) -> Dict[str, Callable[[], Iterator[Batch]]]: """Returns all evaluation iterators constructors.""" if self._eval_input is None: logging.info("Initializing evaluation data iterator.") seed_rng = jax.random.fold_in(self.seed_rng, self._python_step) self._eval_input = {} for split in self.eval_splits: self._eval_input[split] = functools.partial( self._build_eval_input, split=split, seed=int(seed_rng[1]), device_batch_size=self.batch_size.eval.per_device, ) return self._eval_input @property def init_batch(self) -> Batch: """A fake batch size used to initialize the model parameters and state.""" if self._init_batch is None: if self.mode == "train": self._init_batch, iterator = kfac_jax.utils.fake_element_from_iterator( self.train_input ) self._train_input = iterator else: self._init_batch = next(self.eval_input["train"]()) return self._init_batch def progress( self, global_step: Numeric, ) -> Numeric: """Computes the current progress of the training as a number in [0,1].""" if self.config.training.steps is not None: return global_step / self.config.training.steps else: data_seen = self.batch_size.train.total * global_step total_data = self.dataset_size * self.config.training.epochs return data_seen / total_data def terminate_training( self, global_step: int, config: ml_collections.ConfigDict, ) -> bool: del config # not used return int(self.progress(global_step)) >= 1 def create_optimizer( self, ) -> Union[optimizers.OptaxWrapper, kfac_jax.Optimizer]: """Creates the optimizer 
specified in the experiment's config.""" optimizer_config = copy.deepcopy(self.config.optimizer) return optimizers.create_optimizer( name=self.config.optimizer.name, config=optimizer_config, train_model_func=self.train_model_func, l2_reg=self.config.l2_reg, has_aux=self.has_aux, has_func_state=self.has_func_state, has_rng=self.has_rng, dataset_size=self.dataset_size, train_total_batch_size=self.batch_size.train.total, total_steps=self.config.training.steps, total_epochs=self.config.training.epochs, ) def maybe_initialize_state(self): """Initializes all the experiment's state variables.""" if self._params is not None: logging.info("Loaded from checkpoint, not initializing parameters.") return init_rng = kfac_jax.utils.replicate_all_local_devices(self.init_rng) # Initialize parameters and optional state params_rng, optimizer_rng = kfac_jax.utils.p_split(init_rng) logging.info("Initializing parameters.") if self.has_func_state: self._params, self._state = self.params_init(params_rng, self.init_batch) else: self._params = self.params_init(params_rng, self.init_batch) # Initialize optimizer state logging.info("Initializing optimizer state.") self._opt_state = self.optimizer.init( self._params, optimizer_rng, self.init_batch, self._state ) if not self.has_func_state: # Needed for checkpointing self._state = () # Log parameters def format_path_entry(entry: Any) -> str: if isinstance(entry, jax.tree_util.DictKey): return str(entry.key) elif isinstance(entry, jax.tree_util.SequenceKey): return str(entry.idx) else: return str(entry) self._num_tensors = 0 self._num_parameters = 0 logging.info("%s %s %s", "=" * 20, "Parameters", "=" * 20) for path, var in jax.tree_util.tree_flatten_with_path(self._params)[0]: # Because of pmap var = var[0] logging.info( "%s - %s, %s", "-".join(format_path_entry(p) for p in path), var.shape, var.dtype, ) self._num_parameters = self._num_parameters + var.size self._num_tensors = self._num_tensors + 1 logging.info("Total parameters: %s", f"{self._num_parameters:,}") # Log optimizer state self._optimizer_state_size = 0 logging.info("%s %s %s", "=" * 20, "Optimizer State", "=" * 20) easy_state = kfac_jax.utils.serialize_state_tree(self._opt_state) for path, var in jax.tree_util.tree_flatten_with_path(easy_state)[0]: if isinstance(var, str): # For __class__ entries continue # Because of pmap var = var[0] logging.info( "%s - %s, %s", "/".join(format_path_entry(p) for p in path), var.shape, var.dtype, ) self._optimizer_state_size = self._optimizer_state_size + var.size logging.info("Total optimizer state: %s", f"{self._optimizer_state_size:,}") # _ _ # | |_ _ __ __ _(_)_ __ # | __| "__/ _` | | "_ \ # | |_| | | (_| | | | | | # \__|_| \__,_|_|_| |_| # # @abc.abstractmethod def _build_train_input( self, split: str, seed: int, device_batch_size: int, **_: Any, ) -> Iterator[Batch]: """Constructs the training dataset.""" def train_step(self, global_step: Array, rng: PRNGKey) -> Dict[str, Numeric]: """Performs a single training step.""" del global_step # Unused # Perform optimizer step result = self.optimizer.step( params=self._params, state=self._opt_state, rng=rng, data_iterator=self.train_inputs, func_state=self._state if self.has_func_state else None, global_step_int=self._python_step, ) # Unpack result if self.has_func_state: self._params, self._opt_state, self._state, stats = result else: self._params, self._opt_state, stats = result if "aux" in stats: # Average everything in aux and then put it in stats stats.update(stats.pop("aux", {})) stats["progress"] = 
self.progress(self._python_step) self._python_step += 1 for name in self.config.get("per_device_stats_to_log", []): gathered_stat = jnp.reshape( kfac_jax.utils.host_all_gather(stats[name]), [-1] ) for i in range(gathered_stat.shape[0]): stats[f"{name}_{i}"] = jnp.array([gathered_stat[i]]) return kfac_jax.utils.get_first(stats) # _ # _____ ____ _| | # / _ \ \ / / _` | | # | __/\ V / (_| | | # \___| \_/ \__,_|_| # @abc.abstractmethod def _build_eval_input( self, split: str, seed: int, device_batch_size: int, **_: Any, ) -> Iterator[Batch]: """Constructs the evaluation dataset.""" def _evaluate_single_batch( self, global_step: Array, params: Params, func_state: FuncState, opt_state: Union[kfac_jax.Optimizer.State, optimizers.OptaxState], rng: PRNGKey, batch: Batch, ) -> Dict[str, Array]: """Evaluates a single batch.""" del global_step # This might be used in subclasses func_args = kfac_jax.optimizer.make_func_args( params=params, func_state=func_state, rng=rng, batch=batch, has_state=self.has_func_state, has_rng=self.has_rng, ) loss, stats = self.eval_model_func(*func_args) stats["loss"] = loss if hasattr(opt_state, "data_seen"): stats["data_seen"] = opt_state.data_seen return stats def run_evaluation( self, global_step: Array, rng: PRNGKey, ) -> Dict[str, Numeric]: """Runs the evaluation of the currently loaded model parameters.""" all_stats = dict() # Evaluates both the train and eval split metrics for name, dataset_iter_thunk in self.eval_input.items(): # pytype: disable=attribute-error logging.info("Running evaluation for %s", name) averaged_stats = kfac_jax.utils.MultiChunkAccumulator.empty(True) for batch in dataset_iter_thunk(): key, rng = kfac_jax.utils.p_split(rng) stats = self.eval_batch( global_step, self._params, self._state, self._opt_state, key, batch ) averaged_stats.add(stats, 1) # Extract all stats for k, v in averaged_stats.value.items(): # pytype: disable=attribute-error all_stats[f"{name}_{k}"] = kfac_jax.utils.get_first(v) logging.info( "Evaluation for %s is completed with %d number of batches.", name, int(averaged_stats.weight[0]), ) all_stats["progress"] = self.progress(self._python_step) return all_stats # pytype: disable=bad-return-type class JaxlineExperiment(SupervisedExperiment, experiment.AbstractExperiment): """A Jaxline supervised experiment.""" CHECKPOINT_ATTRS = { "_params": "params", "_state": "state", "_opt_state": "opt_state", } NON_BROADCAST_CHECKPOINT_ATTRS = {"_python_step": "python_step"} def should_run_step( self, global_step: int, config: ml_collections.ConfigDict, ) -> bool: return not self.terminate_training(global_step, config) def step( # pytype: disable=signature-mismatch self, global_step: Array, rng: PRNGKey, **unused_kwargs, ) -> Dict[str, Numeric]: self.maybe_initialize_state() return self.train_step(global_step, rng) def evaluate( # pytype: disable=signature-mismatch self, global_step: Array, rng: PRNGKey, **unused_kwargs, ) -> Dict[str, Numeric]: return self.run_evaluation(global_step, rng) def train_standalone_supervised( random_seed: int, full_config: ml_collections.ConfigDict, experiment_ctor: Callable[ [str, PRNGKey, ml_collections.ConfigDict], JaxlineExperiment ], storage_folder: Optional[str], ) -> Dict[str, Array]: """Run an experiment without the Jaxline runtime.""" rng = jax.random.PRNGKey(random_seed) rng, init_rng = jax.random.split(rng) experiment_instance = experiment_ctor( "train", init_rng, full_config.experiment_kwargs.config, ) if storage_folder is not None: os.makedirs(storage_folder, exist_ok=True) rng = 
jax.random.fold_in(rng, jax.process_index()) rng = jax.random.split(rng, jax.local_device_count()) rng = kfac_jax.utils.broadcast_all_local_devices(rng) global_step = jnp.zeros([], dtype=jnp.int32) global_step = kfac_jax.utils.replicate_all_local_devices(global_step) stats = {} start_time = time.time() i = 0 while experiment_instance.should_run_step(i, full_config): if ( i % full_config.save_checkpoint_interval == 0 and storage_folder is not None ): # Optional save to file jnp.savez( f"{storage_folder}/snapshot_{i}.npz", *jax.tree_util.tree_leaves(experiment_instance.snapshot_state()), ) rng, step_rng = kfac_jax.utils.p_split(rng) # Run a step scalars = experiment_instance.step(global_step, step_rng) elapsed_time = jnp.asarray(time.time() - start_time) stats["time"] = stats.get("time", []) + [elapsed_time] for k in sorted(scalars): stats.setdefault(k, []).append(jnp.asarray(scalars[k])) # Logging if i % full_config.log_tensors_interval == 0: for k, v in stats.items(): if jnp.issubdtype(v[-1].dtype, jnp.integer): logging.info("%s: %d", k, v[-1]) else: logging.info("%s: %.3f", k, v[-1]) logging.info("-" * 20) i += 1 stats = {k: jnp.stack(v) for k, v in stats.items()} if storage_folder is not None: jnp.savez( f"{storage_folder}/snapshot_final.npz", *jax.tree_util.tree_leaves(experiment_instance.snapshot_state()), ) jnp.savez(f"{storage_folder}/stats.npz", **stats) return stats class MnistExperiment(JaxlineExperiment): """An experiment using the MNIST dataset.""" def __init__( self, supervised: bool, flatten_images: bool, mode: str, init_rng: PRNGKey, config: ml_collections.ConfigDict, init_parameters_func: InitFunc, model_loss_func: kfac_jax.optimizer.ValueFunc, has_aux: bool, has_rng: bool, has_func_state: bool, **kwargs, ): self._supervised = supervised self._flatten_images = flatten_images super().__init__( mode=mode, init_rng=init_rng, config=config, has_aux=has_aux, has_rng=has_rng, has_func_state=has_func_state, init_parameters_func=init_parameters_func, model_loss_func=model_loss_func, ) @property def dataset_size(self) -> int: return 60_000 def _build_train_input( self, split: str, seed: int, device_batch_size: int, **_: Any, ) -> Iterator[Batch]: assert split == "train" return datasets.mnist_dataset( split=split, has_labels=self._supervised, flatten_images=self._flatten_images, device_batch_size=device_batch_size, repeat=True, shuffle=True, drop_remainder=True, seed=seed, reshuffle_each_iteration=True, ) def _build_eval_input( self, split: str, seed: int, device_batch_size: int, **_: Any, ) -> Iterator[Batch]: assert split in self.eval_splits return datasets.mnist_dataset( split=split, has_labels=self._supervised, flatten_images=self._flatten_images, device_batch_size=device_batch_size, repeat=False, shuffle=False, drop_remainder=False, seed=seed, ) class ImageNetExperiment(JaxlineExperiment): """An experiment using the ImageNet dataset.""" def __init__( self, mode: str, init_rng: PRNGKey, config: ml_collections.ConfigDict, init_parameters_func: InitFunc, model_loss_func: kfac_jax.optimizer.ValueFunc, has_aux: bool, has_rng: bool, has_func_state: bool, ): super().__init__( mode=mode, init_rng=init_rng, config=config, init_parameters_func=init_parameters_func, model_loss_func=model_loss_func, has_aux=has_aux, has_rng=has_rng, has_func_state=has_func_state, ) @property @functools.lru_cache(maxsize=1) def dataset_size(self) -> int: return datasets.imagenet_num_examples_and_split("train_and_valid")[0] def _build_train_input( self, split: str, seed: int, device_batch_size: int, **_: Any, 
) -> datasets.tf.data.Dataset: assert split == "train" return datasets.imagenet_dataset( split="train_and_valid", seed=seed, is_training=True, batch_dims=(jax.local_device_count(), device_batch_size), data_dir=None, ) def _build_eval_input( self, split: str, seed: int, device_batch_size: int, **_: Any, ) -> datasets.tf.data.Dataset: assert split in ("train", "test") return datasets.imagenet_dataset( split="train_eval" if split == "train" else "test", seed=seed, is_training=False, batch_dims=(jax.local_device_count(), device_batch_size), )
kfac-jax-main
examples/training.py
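# --- Illustrative usage sketch (not part of the repository file above) ---
# `BatchSizeCalculator` resolves exactly one of a total or a per-device batch
# size (the other passed as -1) into per-device, per-host and total sizes for
# the current JAX device topology. The value 1024 is an arbitrary example.
from examples import training

batch_sizes = training.BatchSizeCalculator(total=1024, per_device=-1, mode="train")
print(batch_sizes.per_device, batch_sizes.per_host, batch_sizes.total)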
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Haiku implementation of a small convolutional classifier for MNIST.""" import functools from typing import Mapping, Tuple, Union, Dict import haiku as hk import jax import jax.numpy as jnp import kfac_jax from examples import losses from examples import training Array = kfac_jax.utils.Array Numeric = kfac_jax.utils.Numeric PRNGKey = kfac_jax.utils.PRNGKey def convolutional_classifier() -> hk.Transformed: """Constructs a Haiku transformed object of the classifier network.""" def func(batch: Union[Array, Mapping[str, Array]]) -> Array: """Evaluates the classifier.""" if isinstance(batch, Mapping): batch = batch["images"] if batch.ndim == 3: # Add extra channel dimension batch = jnp.expand_dims(batch, axis=-1) model = hk.Sequential([ hk.Conv2D(2, kernel_shape=(5, 5)), jax.nn.relu, hk.MaxPool((2, 2), strides=(2, 2), padding="SAME"), hk.Conv2D(4, kernel_shape=(5, 5)), jax.nn.relu, hk.MaxPool((2, 2), strides=(2, 2), padding="SAME"), hk.Flatten(), hk.Linear(32), jax.nn.relu, hk.Linear(10) ]) return model(batch) return hk.without_apply_rng(hk.transform(func)) def classifier_loss( params: hk.Params, batch: Mapping[str, Array], l2_reg: Numeric, is_training: bool, average_loss: bool = True, ) -> Tuple[Array, Dict[str, Array]]: """Evaluates the loss of the classifier network.""" logits = convolutional_classifier().apply(params, batch["images"]) loss, stats = losses.classifier_loss_and_stats( logits=logits, labels_as_int=batch["labels"], params=params, l2_reg=l2_reg if is_training else 0.0, haiku_exclude_batch_norm=False, haiku_exclude_biases=False, average_loss=average_loss, top_k_stats=(1,), ) return loss, stats class ClassifierMnistExperiment(training.MnistExperiment): """Jaxline experiment class for running the MNIST classifier.""" def __init__(self, mode: str, init_rng: PRNGKey, config): super().__init__( supervised=True, flatten_images=False, mode=mode, init_rng=init_rng, config=config, init_parameters_func=convolutional_classifier().init, model_loss_func=functools.partial( classifier_loss, l2_reg=config.l2_reg), has_aux=True, has_rng=False, has_func_state=False, )
kfac-jax-main
examples/classifier_mnist/experiment.py
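# --- Illustrative usage sketch (not part of the repository file above) ---
# The Haiku-transformed classifier can be initialized and applied directly on a
# dummy MNIST-shaped batch; the batch shape below is an assumption for
# illustration.
import jax
import jax.numpy as jnp

from examples.classifier_mnist import experiment

net = experiment.convolutional_classifier()
images = jnp.zeros((4, 28, 28, 1))                 # dummy batch of 4 images
params = net.init(jax.random.PRNGKey(0), images)
logits = net.apply(params, images)                 # shape (4, 10)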
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Training the MNIST convolutional classifier with Jaxline.""" import functools from absl import app from absl import flags from jaxline import base_config from jaxline import platform from examples.classifier_mnist import experiment from ml_collections import config_dict Experiment = experiment.ClassifierMnistExperiment def get_config() -> config_dict.ConfigDict: """Creates the config for the experiment.""" config = base_config.get_base_config() config.random_seed = 190843501 config.training_steps = None config.interval_type = None config.logging_interval_type = "steps" config.log_train_data_interval = 10 config.log_tensors_interval = 1 config.checkpoint_interval_type = "steps" config.save_checkpoint_interval = 100 config.checkpoint_dir = "/tmp/kfac_jax_jaxline/" config.train_checkpoint_all_hosts = False # Experiment config. config.experiment_kwargs = config_dict.ConfigDict( dict( config=dict( l2_reg=1e-5, training=dict( steps=5_000, epochs=None, ), batch_size=dict( train=dict( total=60_000, # the entire dataset per_device=-1, ), eval=dict( total=10_000, per_device=-1, ), ), optimizer=dict( name="kfac", kfac=dict( inverse_update_period=5, damping_adaptation_interval=5, num_burnin_steps=5, curvature_ema=0.95, # As mentioned in examples/README.md, we do NOT recommend # using these adaptive options for stochastic # optimization: use_adaptive_damping=True, use_adaptive_learning_rate=True, use_adaptive_momentum=True, damping_adaptation_decay=0.95, initial_damping=150.0, min_damping=1e-5, max_damping=1000.0, ), ) ), ) ) config.lock() return config if __name__ == "__main__": flags.mark_flag_as_required("config") app.run(functools.partial(platform.main, Experiment))
kfac-jax-main
examples/classifier_mnist/pipeline.py
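Because the pipeline marks the `config` flag as required and defers to `jaxline.platform.main`, a run is normally launched by pointing that flag back at this file. The invocation below assumes the repository layout shown in the file path and is only a sketch:

python examples/classifier_mnist/pipeline.py --config=examples/classifier_mnist/pipeline.py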
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Vanilla network (derived from a ResNet) with LReLU from the TAT paper.""" import functools from typing import Any, Callable, Dict, Mapping, Optional, Sequence, Tuple, Union import haiku as hk from jax import nn import jax.numpy as jnp import kfac_jax from examples import losses from examples import training import ml_collections import numpy as np Array = kfac_jax.utils.Array Numeric = kfac_jax.utils.Numeric PRNGKey = kfac_jax.utils.PRNGKey Shape = kfac_jax.utils.Shape DType = kfac_jax.utils.DType FloatStrOrBool = Union[str, float, bool] class ScaledUniformOrthogonal(hk.initializers.Initializer): """SUO (+ Delta) initializer for fully-connected and convolutional layers.""" def __init__(self, scale: float = 1.0, axis: int = -1): """Construct a Haiku initializer which uses the SUO distribution. Args: scale: A float giving an additional scale factor applied on top of the standard rescaling used in the SUO distribution. This should be left at its default value when using DKS/TAT. (Default: 1.0) axis: An int giving the axis corresponding to the "output dimension" of the parameter tensor. (Default: -1) """ if axis != -1: raise ValueError("Invalid axis value for Delta initializations. " "Must be -1.") self.scale = scale self.axis = axis def __call__(self, shape: Shape, dtype: DType) -> Array: # pytype: disable=signature-mismatch # numpy-scalars # This has essentially copied from https://github.com/deepmind/dks if self.axis != -1: raise ValueError("Invalid axis value for Delta initializations. 
" "Must be -1.") if len(shape) != 2: # We assume 'weights' is a filter bank when len(shape) != 2 # In JAX, conv filter banks have the shape # [loc_dim_1, loc_dim_2, in_dim, out_dim] in_dim = shape[-2] out_dim = shape[-1] rescale_factor = np.maximum(np.sqrt(out_dim / in_dim), 1.0) nonzero_part = hk.initializers.Orthogonal( scale=(rescale_factor * self.scale), axis=-1)(shape[-2:], dtype) if any(s % 2 != 1 for s in shape[:-2]): raise ValueError("All spatial axes must have odd length for Delta " "initializations.") midpoints = tuple((s - 1) // 2 for s in shape[:-2]) return jnp.zeros(shape, dtype).at[midpoints].set(nonzero_part) else: in_dim = np.prod(np.delete(shape, self.axis)) out_dim = shape[self.axis] rescale_factor = np.maximum(np.sqrt(out_dim / in_dim), 1.0) return hk.initializers.Orthogonal( scale=(rescale_factor * self.scale), axis=self.axis)(shape, dtype) class BlockV2(hk.Module): """ResNet V2 block without batch norm or residual connections.""" def __init__( self, channels: int, stride: Union[int, Sequence[int]], bottleneck: bool, activation: Callable[[jnp.ndarray], jnp.ndarray], w_init: Optional[Any], name: Optional[str] = None, ): """Initializes the module instance.""" super().__init__(name=name) channel_div = 4 if bottleneck else 1 conv_0 = hk.Conv2D( output_channels=channels // channel_div, kernel_shape=1 if bottleneck else 3, stride=1 if bottleneck else stride, w_init=w_init, with_bias=True, padding="SAME", name="conv_0") conv_1 = hk.Conv2D( output_channels=channels // channel_div, kernel_shape=3, stride=stride if bottleneck else 1, w_init=w_init, with_bias=True, padding="SAME", name="conv_1") layers = (conv_0, conv_1) if bottleneck: conv_2 = hk.Conv2D( output_channels=channels, kernel_shape=1, stride=1, w_init=w_init, with_bias=True, padding="SAME", name="conv_2") layers = layers + (conv_2,) self.layers = layers self.activation = activation def __call__(self, inputs: Array, **_: Any) -> Array: out = inputs for conv_i in self.layers: out = self.activation(out) out = conv_i(out) return out class BlockGroup(hk.Module): """Higher level block for network implementation.""" def __init__( self, channels: int, num_blocks: int, stride: Union[int, Sequence[int]], bottleneck: bool, activation: Callable[[jnp.ndarray], jnp.ndarray], w_init: Optional[Any], name: Optional[str] = None, ): """Initializes the block group.""" super().__init__(name=name) self.blocks = [] for i in range(num_blocks): self.blocks.append(BlockV2( channels=channels, stride=(1 if i else stride), bottleneck=bottleneck, activation=activation, w_init=w_init, name=f"block_{i}" )) def __call__(self, inputs: Array, **kwargs: Any) -> Array: out = inputs for block in self.blocks: out = block(out, **kwargs) return out def _check_length(length: int, value: Sequence[int], name: str): """Verifies the length of the model.""" if len(value) != length: raise ValueError(f"`{name}` must be of length 4 not {len(value)}") # The values below are generated using the TAT method with parameter eta=0.9 _ACTIVATIONS_DICT = { 50: lambda x: nn.leaky_relu(x, 0.4259071946144104) * 1.301119175166785, 101: lambda x: nn.leaky_relu(x, 0.5704395323991776) * 1.2284042441106242, 152: lambda x: nn.leaky_relu(x, 0.6386479139328003) * 1.1918827706862754, } class LReLUNet(hk.Module): """Vanilla network (derived from a ResNet) with LReLU from the TAT paper.""" CONFIGS = { 50: { "blocks_per_group": (3, 4, 6, 3), "bottleneck": True, "channels_per_group": (256, 512, 1024, 2048), }, 101: { "blocks_per_group": (3, 4, 23, 3), "bottleneck": True, "channels_per_group": 
(256, 512, 1024, 2048), }, 152: { "blocks_per_group": (3, 8, 36, 3), "bottleneck": True, "channels_per_group": (256, 512, 1024, 2048), }, } def __init__( self, num_classes: int, depth: int, w_init: Optional[Any] = ScaledUniformOrthogonal(), logits_config: Optional[Mapping[str, Any]] = None, initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None, dropout_rate: float = 0.0, name: Optional[str] = None, ): """Initializes the network module. The model has been used in ... It mimics a ResNet, but has all batch normalization and residual connections removed. Args: num_classes: The number of classes to classify the inputs into. depth: The number of layers. w_init: Haiku initializer used to initialize the weights. logits_config: A dictionary of keyword arguments for the logits layer. initial_conv_config: Keyword arguments passed to the constructor of the initial :class:`~haiku.Conv2D` module. dropout_rate: A float giving the dropout rate for penultimate layer of the network (i.e. right before the layer which produces the class logits). (Default: 0.0) name: Name of the Sonnet module. """ if depth not in _ACTIVATIONS_DICT: raise ValueError(f"Depth {depth} not supported.") super().__init__(name=name) self.depth = depth self.dropout_rate = dropout_rate blocks_per_group = LReLUNet.CONFIGS[depth]["blocks_per_group"] channels_per_group = LReLUNet.CONFIGS[depth]["channels_per_group"] bottleneck = LReLUNet.CONFIGS[depth]["bottleneck"] logits_config = dict(logits_config or {}) logits_config.setdefault("w_init", w_init) logits_config.setdefault("name", "logits") # Number of blocks in each group. _check_length(4, blocks_per_group, "blocks_per_group") _check_length(4, channels_per_group, "channels_per_group") initial_conv_config = dict(initial_conv_config or {}) initial_conv_config.setdefault("output_channels", 64) initial_conv_config.setdefault("kernel_shape", 7) initial_conv_config.setdefault("stride", 2) initial_conv_config.setdefault("with_bias", True) initial_conv_config.setdefault("padding", "SAME") initial_conv_config.setdefault("name", "initial_conv") initial_conv_config.setdefault("w_init", w_init) self.activation = _ACTIVATIONS_DICT[depth] self.initial_conv = hk.Conv2D(**initial_conv_config) self.block_groups = [] strides = (1, 2, 2, 2) for i in range(4): self.block_groups.append(BlockGroup( channels=channels_per_group[i], num_blocks=blocks_per_group[i], stride=strides[i], bottleneck=bottleneck, activation=self.activation, w_init=w_init, name=f"block_group_{i}", )) self.logits = hk.Linear(num_classes, **logits_config) def __call__( self, inputs: Array, is_training: bool, **kwargs: Any ) -> Array: out = inputs out = self.initial_conv(out) out = hk.max_pool( out, window_shape=(1, 3, 3, 1), strides=(1, 2, 2, 1), padding="SAME") for block_group in self.block_groups: out = block_group(out, is_training=is_training, **kwargs) out = self.activation(out) out = jnp.mean(out, axis=(1, 2)) if self.dropout_rate > 0.0 and is_training: out = hk.dropout(hk.next_rng_key(), self.dropout_rate, out) return self.logits(out) def lrelunet( num_classes: int = 1000, depth: int = 101, **kwargs: Any, ) -> hk.Transformed: """Constructs a Haiku transformed object of the LReLUNet101 network.""" def func( batch: Union[Array, Mapping[str, Array]], is_training: bool ) -> Array: """Evaluates the network.""" if isinstance(batch, dict): batch = batch["images"] model = LReLUNet(num_classes=num_classes, depth=depth, **kwargs) return model(batch, is_training=is_training) return hk.transform(func) def lrelunet_loss( params: 
hk.Params, rng: PRNGKey, batch: Mapping[str, Array], is_training: bool, l2_reg: Numeric, label_smoothing: float = 0.1, average_loss: bool = True, num_classes: int = 1000, depth: int = 101, **kwargs: Any, ) -> Tuple[ Array, Union[Dict[str, Array], Tuple[hk.State, Dict[str, Array]]] ]: """Evaluates the loss of the LReLUNet model.""" logits = lrelunet(num_classes=num_classes, depth=depth, **kwargs).apply( params, rng, batch["images"], is_training) return losses.classifier_loss_and_stats( logits=logits, labels_as_int=batch["labels"], params=params, l2_reg=l2_reg if is_training else 0.0, haiku_exclude_batch_norm=True, haiku_exclude_biases=True, label_smoothing=label_smoothing if is_training else 0.0, average_loss=average_loss, ) class LReLUNetImageNetExperiment(training.ImageNetExperiment): """Jaxline experiment class for running the LReLUNet on ImageNet.""" def __init__( self, mode: str, init_rng: PRNGKey, config: ml_collections.ConfigDict, ): """Initializes the network instance.""" super().__init__( mode=mode, init_rng=init_rng, config=config, init_parameters_func=functools.partial( lrelunet(num_classes=1000, **config.model_kwargs).init, is_training=True, ), model_loss_func=functools.partial( lrelunet_loss, l2_reg=config.l2_reg, num_classes=1000, **config.model_kwargs, **config.loss_kwargs, ), has_aux=True, has_rng=True, has_func_state=False, )
kfac-jax-main
examples/lrelunet101_imagenet/experiment.py
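The `ScaledUniformOrthogonal` initializer above implements the SUO + Delta scheme for convolutional filters: every spatial tap except the centre is zero, and the centre holds a rescaled orthogonal matrix over the channel dimensions. A small check of that property is sketched below; since Haiku initializers draw randomness from `hk.next_rng_key()`, the call is wrapped in `hk.transform`. The filter shape is an illustrative assumption.

# Sketch: verify the Delta property of ScaledUniformOrthogonal (defined above)
# on an assumed 3x3x16x32 filter bank. Only the spatial midpoint is nonzero.
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np

def _make_filter():
  # Haiku initializers call hk.next_rng_key(), so this must run inside a
  # transformed function.
  return hk.get_parameter(
      "w", (3, 3, 16, 32), jnp.float32, init=ScaledUniformOrthogonal())

params = hk.transform(_make_filter).init(jax.random.PRNGKey(0))
w = np.asarray(jax.tree_util.tree_leaves(params)[0])
mask = np.ones((3, 3), dtype=bool)
mask[1, 1] = False                      # everything except the spatial centre
assert np.all(w[mask] == 0.0)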
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Training the LReLUNet101 on ImageNet with Jaxline.""" import functools from absl import app from absl import flags from jaxline import base_config from jaxline import platform from examples.lrelunet101_imagenet import experiment from ml_collections import config_dict Experiment = experiment.LReLUNetImageNetExperiment def get_config() -> config_dict.ConfigDict: """Creates the config for the experiment.""" config = base_config.get_base_config() config.random_seed = 12310912 config.training_steps = None config.interval_type = None config.logging_interval_type = "steps" config.log_train_data_interval = 100 config.log_tensors_interval = 100 config.checkpoint_interval_type = "steps" config.save_checkpoint_interval = 1000 config.checkpoint_dir = "/tmp/kfac_jax_jaxline/" config.train_checkpoint_all_hosts = False # Experiment config. config.experiment_kwargs = config_dict.ConfigDict( dict( config=dict( l2_reg=0.0, training=dict( steps=None, epochs=90, ), model_kwargs=dict( depth=101, dropout_rate=0.0, ), loss_kwargs=dict( label_smoothing=0.1, ), batch_size=dict( train=dict( total=1024, per_device=-1, ), eval=dict( total=4000, per_device=-1, ), ), optimizer=dict( name="kfac", kfac=dict( inverse_update_period=50, min_damping=1e-6, max_damping=1000.0, num_burnin_steps=5, curvature_ema=0.99, norm_constraint=0.001, use_adaptive_learning_rate=False, use_adaptive_momentum=False, use_adaptive_damping=False, learning_rate_schedule=dict( peak_learning_rate=3e-4, warmup_epochs=5, name="cosine", ), momentum_schedule=dict( name="fixed", value=0.9, ), damping_schedule=dict( name="fixed", value=0.001, ), ), sgd=dict( decay=0.9, nesterov=True, learning_rate_schedule=dict( peak_learning_rate=0.1, warmup_epochs=5, name="cosine", ), ), ) ) ) ) config.lock() return config if __name__ == "__main__": flags.mark_flag_as_required("config") app.run(functools.partial(platform.main, Experiment))
kfac-jax-main
examples/lrelunet101_imagenet/pipeline.py
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Using Haiku's Resnet50 v2 implementation for ImageNet.""" import functools from typing import Any, Dict, Mapping, Tuple, Union import chex import haiku as hk from examples import losses from examples import training from ml_collections import config_dict def resnet50( bn_decay_rate: float, batch_norm_synced: bool = False, zero_init: bool = True, num_classes: int = 1000, **kwargs: Any, ) -> hk.TransformedWithState: """Constructs a Haiku transformed object of the ResNet50 v2 network.""" bn_config = dict(decay_rate=bn_decay_rate) if batch_norm_synced: bn_config["cross_replica_axis"] = "kfac_axis" def func( batch: Union[chex.Array, Mapping[str, chex.Array]], is_training: bool ) -> chex.Array: """Evaluates the network.""" if isinstance(batch, dict): batch = batch["images"] model = hk.nets.ResNet50( num_classes, resnet_v2=True, bn_config=bn_config, logits_config=dict() if zero_init else dict(w_init=None), **kwargs, ) return model(batch, is_training=is_training) return hk.without_apply_rng(hk.transform_with_state(func)) def resnet50_loss( params: hk.Params, state: hk.State, batch: Mapping[str, chex.Array], is_training: bool, l2_reg: chex.Numeric, label_smoothing: float = 0.1, average_loss: bool = True, num_classes: int = 1000, bn_decay_rate: float = 0.9, batch_norm_synced: bool = False, **kwargs: Any, ) -> Tuple[ chex.Array, Union[Dict[str, chex.Array], Tuple[hk.State, Dict[str, chex.Array]]] ]: """Evaluates the loss of the Resnet50 model.""" logits, state = resnet50( bn_decay_rate=bn_decay_rate, batch_norm_synced=batch_norm_synced, num_classes=num_classes, **kwargs, ).apply(params, state, batch["images"], is_training=is_training) loss, stats = losses.classifier_loss_and_stats( logits=logits, labels_as_int=batch["labels"], params=params, l2_reg=l2_reg if is_training else 0.0, haiku_exclude_batch_norm=True, haiku_exclude_biases=True, label_smoothing=label_smoothing if is_training else 0.0, average_loss=average_loss, ) if is_training: return loss, (state, stats) else: return loss, stats class Resnet50ImageNetExperiment(training.ImageNetExperiment): """Jaxline experiment class for running the Resnet50 v2 on ImageNet.""" def __init__( self, mode: str, init_rng: chex.PRNGKey, config: config_dict.ConfigDict ): """Initializes the experiment.""" super().__init__( mode=mode, init_rng=init_rng, config=config, init_parameters_func=functools.partial( resnet50(num_classes=1000, **config.model_kwargs).init, is_training=True, ), model_loss_func=functools.partial( resnet50_loss, l2_reg=config.l2_reg, num_classes=1000, **config.model_kwargs, **config.loss_kwargs, ), has_aux=True, has_rng=False, has_func_state=True, )
kfac-jax-main
examples/resnet50_imagenet/experiment.py
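Unlike the purely functional models elsewhere in these examples, `resnet50` above is built with `hk.transform_with_state` because batch norm carries state; the sketch below shows how params and state are threaded through `init` and `apply`. The input resolution and batch size are assumptions for illustration.

# Usage sketch for the stateful resnet50() defined above; the 224x224 batch
# is a placeholder, not a value taken from the experiment config.
import jax
import jax.numpy as jnp

model = resnet50(bn_decay_rate=0.9)
images = jnp.zeros([2, 224, 224, 3])
params, state = model.init(jax.random.PRNGKey(0), images, is_training=True)
logits, state = model.apply(params, state, images, is_training=True)
# logits has shape [2, 1000]; `state` holds the updated batch-norm statistics.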
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Training the Resnet50 v2 on ImageNet with Jaxline.""" import functools from absl import app from absl import flags from jaxline import base_config from jaxline import platform from examples.resnet50_imagenet import experiment from ml_collections import config_dict Experiment = experiment.Resnet50ImageNetExperiment def get_config() -> config_dict.ConfigDict: """Creates the config for the experiment.""" config = base_config.get_base_config() config.random_seed = 21387715 config.training_steps = None config.interval_type = None config.logging_interval_type = "steps" config.log_train_data_interval = 100 config.log_tensors_interval = 100 config.checkpoint_interval_type = "steps" config.save_checkpoint_interval = 1000 config.checkpoint_dir = "/tmp/kfac_jax_jaxline/" config.train_checkpoint_all_hosts = False # Experiment config. config.experiment_kwargs = config_dict.ConfigDict( dict( config=dict( l2_reg=1e-5, training=dict( steps=200_000, epochs=None, ), model_kwargs=dict( zero_init=False, bn_decay_rate=0.9, batch_norm_synced=False, ), loss_kwargs=dict( label_smoothing=0.0, ), batch_size=dict( train=dict( total=1024, per_device=-1, ), eval=dict( total=4000, per_device=-1, ), ), optimizer=dict( name="kfac", kfac=dict( damping_adaptation_interval=50, damping_adaptation_decay=0.99963, inverse_update_period=50, norm_constraint=0.01, use_adaptive_learning_rate=False, use_adaptive_momentum=False, initial_damping=0.001, learning_rate_schedule=dict(name="kfac_resnet50"), momentum_schedule=dict(name="fixed", value=0.9), use_adaptive_damping=True, min_damping=1e-6, max_damping=1000.0, num_burnin_steps=5, curvature_ema=0.99, batch_norm_registration="diag", ), sgd=dict( decay=0.9, nesterov=True, learning_rate_schedule=dict(name="imagenet_sgd") ), ) ) ) ) config.lock() return config if __name__ == "__main__": flags.mark_flag_as_required("config") app.run(functools.partial(platform.main, Experiment))
kfac-jax-main
examples/resnet50_imagenet/pipeline.py
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Haiku implementation of the standard MNIST Autoencoder.""" import functools from typing import Mapping, Tuple, Union, Dict import haiku as hk import jax from jax import nn import jax.numpy as jnp import kfac_jax from examples import losses from examples import training Array = kfac_jax.utils.Array Numeric = kfac_jax.utils.Numeric PRNGKey = kfac_jax.utils.PRNGKey def autoencoder() -> hk.Transformed: """Constructs a Haiku transformed object of the autoencoder.""" def func(batch: Union[Array, Mapping[str, Array]]) -> Array: """Evaluates the autoencoder.""" if isinstance(batch, Mapping): batch = batch["images"] batch = batch.reshape([batch.shape[0], -1]) model = hk.Sequential([ hk.Linear(1000), jax.nn.tanh, hk.Linear(500), jax.nn.tanh, hk.Linear(250), jax.nn.tanh, hk.Linear(30), hk.Linear(250), jax.nn.tanh, hk.Linear(500), jax.nn.tanh, hk.Linear(1000), jax.nn.tanh, hk.Linear(batch.shape[-1]), ]) return model(batch) return hk.without_apply_rng(hk.transform(func)) def autoencoder_loss( params: hk.Params, batch: Union[Array, Mapping[str, Array]], l2_reg: Numeric, is_training: bool, average_loss: bool = True, ) -> Tuple[Array, Dict[str, Array]]: """Evaluates the loss of the autoencoder.""" if isinstance(batch, Mapping): batch = batch["images"] logits = autoencoder().apply(params, batch) cross_entropy = jnp.sum(losses.sigmoid_cross_entropy(logits, batch), axis=-1) averaged_cross_entropy = jnp.mean(cross_entropy) loss: Array = averaged_cross_entropy if average_loss else cross_entropy l2_reg_val = losses.l2_regularizer(params, False, False) if is_training: loss = loss + l2_reg * l2_reg_val error = nn.sigmoid(logits) - batch.reshape([batch.shape[0], -1]) mean_squared_error = jnp.mean(jnp.sum(error * error, axis=1), axis=0) return loss, dict( cross_entropy=averaged_cross_entropy, l2_reg_val=l2_reg_val, mean_squared_error=mean_squared_error, ) class AutoencoderMnistExperiment(training.MnistExperiment): """Jaxline experiment class for running the MNIST Autoencoder.""" def __init__(self, mode: str, init_rng: PRNGKey, config): super().__init__( supervised=False, flatten_images=True, mode=mode, init_rng=init_rng, config=config, init_parameters_func=autoencoder().init, model_loss_func=functools.partial( autoencoder_loss, l2_reg=config.l2_reg), has_aux=True, has_rng=False, has_func_state=False, )
kfac-jax-main
examples/autoencoder_mnist/experiment.py
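For orientation, a sketch of how the pieces above fit together: the transformed autoencoder is initialized on a batch, and `autoencoder_loss` then returns a scalar loss plus the statistics dictionary assembled in the file. The dummy batch shape is an illustrative assumption.

# Sketch: evaluating autoencoder_loss (defined above) on a dummy MNIST-like
# batch of zeros; shapes are assumptions for illustration only.
import jax
import jax.numpy as jnp

batch = {"images": jnp.zeros([4, 28, 28])}
params = autoencoder().init(jax.random.PRNGKey(0), batch)
loss, stats = autoencoder_loss(params, batch, l2_reg=1e-5, is_training=True)
# `loss` is a scalar; `stats` contains cross_entropy, l2_reg_val and
# mean_squared_error, exactly as assembled in autoencoder_loss above.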
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Training the MNIST Autoencoder with Jaxline.""" import functools from absl import app from absl import flags from jaxline import base_config from jaxline import platform from examples.autoencoder_mnist import experiment from ml_collections import config_dict Experiment = experiment.AutoencoderMnistExperiment def get_config() -> config_dict.ConfigDict: """Creates the config for the experiment.""" config = base_config.get_base_config() config.random_seed = 123109801 config.training_steps = None config.interval_type = None config.logging_interval_type = "steps" config.log_train_data_interval = 10 config.log_tensors_interval = 1 config.checkpoint_interval_type = "steps" config.save_checkpoint_interval = 100 config.checkpoint_dir = "/tmp/kfac_jax_jaxline/" config.train_checkpoint_all_hosts = False # Experiment config. config.experiment_kwargs = config_dict.ConfigDict( dict( config=dict( l2_reg=1e-5, training=dict( steps=5_000, epochs=None, ), batch_size=dict( train=dict( total=60_000, # the entire dataset per_device=-1, ), eval=dict( total=10_000, per_device=-1, ), ), optimizer=dict( name="kfac", kfac=dict( inverse_update_period=5, damping_adaptation_interval=5, num_burnin_steps=5, curvature_ema=0.95, # As mentioned in examples/README.md, we do NOT recommend # using these adaptive options for stochastic # optimization: use_adaptive_damping=True, use_adaptive_learning_rate=True, use_adaptive_momentum=True, damping_adaptation_decay=0.95, initial_damping=150.0, min_damping=1e-5, max_damping=1000.0, ), ) ) ) ) config.lock() return config if __name__ == "__main__": flags.mark_flag_as_required("config") app.run(functools.partial(platform.main, Experiment))
kfac-jax-main
examples/autoencoder_mnist/pipeline.py
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Setup for pip package."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from setuptools import find_packages
from setuptools import setup

REQUIRED_PACKAGES = ['six', 'absl-py', 'numpy', 'wrapt', 'dm-tree']
EXTRA_PACKAGES = {
    'tensorflow': [
        'tensorflow>=1.15',
        'tensorflow-probability>=0.8'
    ],
    'tensorflow with gpu': [
        'tensorflow-gpu>=1.15',
        'tensorflow-probability>=0.8'
    ],
}

setup(
    name='trfl',
    version='1.2.0',
    description=('trfl is a library of building blocks for '
                 'reinforcement learning algorithms.'),
    long_description='',
    url='http://www.github.com/deepmind/trfl/',
    author='DeepMind',
    author_email='[email protected]',
    # Contained modules and scripts.
    packages=find_packages(),
    install_requires=REQUIRED_PACKAGES,
    extras_require=EXTRA_PACKAGES,
    # Add in any packaged data.
    include_package_data=True,
    zip_safe=False,
    # PyPI package information.
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Operating System :: Unix',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Software Development :: Libraries',
    ],
    license='Apache 2.0',
    keywords='trfl truffle tensorflow tensor machine reinforcement learning',
    test_suite='nose.collector',
    tests_require=['nose'],
)
trfl-master
setup.py
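Given the metadata above, the package installs in the usual setuptools way: `pip install trfl` pulls in `REQUIRED_PACKAGES`, and `pip install "trfl[tensorflow]"` additionally installs the TensorFlow dependencies declared under the `tensorflow` extra. These are the standard pip forms, shown here as a sketch rather than repository-documented commands.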
# coding=utf8 # Copyright 2018 The trfl Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for retrace_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow.compat.v1 as tf from trfl import retrace_ops class RetraceOpsTest(tf.test.TestCase): """Tests for `Retrace` ops.""" def setUp(self): """Defines example data for, and an expected result of, Retrace operations. The example data comprises a minibatch of two sequences of four consecutive timesteps, allowing the data to be interpreted by Retrace as three successive transitions. """ super(RetraceOpsTest, self).setUp() ### Example input data: self.lambda_ = 0.9 self.qs = [ [[2.2, 3.2, 4.2], [5.2, 6.2, 7.2]], [[7.2, 6.2, 5.2], [4.2, 3.2, 2.2]], [[3.2, 5.2, 7.2], [4.2, 6.2, 9.2]], [[2.2, 8.2, 4.2], [9.2, 1.2, 8.2]]] self.targnet_qs = [ [[2., 3., 4.], [5., 6., 7.]], [[7., 6., 5.], [4., 3., 2.]], [[3., 5., 7.], [4., 6., 9.]], [[2., 8., 4.], [9., 1., 8.]]] self.actions = [ [2, 0], [1, 2], [0, 1], [2, 0]] self.rewards = [ [1.9, 2.9], [3.9, 4.9], [5.9, 6.9], [np.nan, # nan marks entries we should never use. np.nan]] self.pcontinues = [ [0.8, 0.9], [0.7, 0.8], [0.6, 0.5], [np.nan, np.nan]] self.target_policy_probs = [ [[np.nan] * 3, [np.nan] * 3], [[0.41, 0.28, 0.31], [0.19, 0.77, 0.04]], [[0.22, 0.44, 0.34], [0.14, 0.25, 0.61]], [[0.16, 0.72, 0.12], [0.33, 0.30, 0.37]]] self.behaviour_policy_probs = [ [np.nan, np.nan], [0.85, 0.86], [0.87, 0.88], [0.89, 0.84]] ### Expected results of Retrace as applied to the above: # NOTE: To keep the test code compact, we don't use the example data when # manually computing our expected results, but instead duplicate their # values explictly in those calculations. Some patterns in the values can # help you track who's who: for example, note that target network Q values # are integers, whilst learning network Q values all end in 0.2. # In a purely theoretical setting, we would compute the quantity we call # the "trace" using this recurrence relation: # # ΔQ_tm1 = δ_tm1 + λγπ(a_t | s_t)/μ(a_t | s_t) ⋅ ΔQ_t # δ_tm1 = r_t + γ𝔼_π[Q(s_t, .)] - Q(s_tm1, a_tm1) # # In a target network setting, you might rewrite ΔQ_t as ΔQ'_t, indicating # that this value is the next-timestep trace as computed when all # Q(s_tm1, a_tm1) terms (in δ_t, δ_t+1, ...) come from the target network, # not the learning network. # # To generate our collection of expected outputs, we'll first compute # "ΔQ'_tm1" (the "target network trace") at all timesteps. # # We start at the end of the sequence and work backward, like the # implementation does. 
targ_trace = np.zeros((3, 2)) targ_trace[2, 0] = (5.9 + 0.6*(0.16*2 + 0.72*8 + 0.12*4) - 3) # δ_tm1[2,0] targ_trace[2, 1] = (6.9 + 0.5*(0.33*9 + 0.30*1 + 0.37*8) - 6) # δ_tm1[2,1] targ_trace[1, 0] = (3.9 + 0.7*(0.22*3 + 0.44*5 + 0.34*7) - 6 + # δ_tm1[1,0] 0.9*0.7*0.22/0.87 * targ_trace[2, 0]) targ_trace[1, 1] = (4.9 + 0.8*(0.14*4 + 0.25*6 + 0.61*9) - 2 + # δ_tm1[1,1] 0.9*0.8*0.25/0.88 * targ_trace[2, 1]) targ_trace[0, 0] = (1.9 + 0.8*(0.41*7 + 0.28*6 + 0.31*5) - 4 + # δ_tm1[0,0] 0.9*0.8*0.28/0.85 * targ_trace[1, 0]) targ_trace[0, 1] = (2.9 + 0.9*(0.19*4 + 0.77*3 + 0.04*2) - 5 + # δ_tm1[0,1] 0.9*0.9*0.04/0.86 * targ_trace[1, 1]) # We can evaluate target Q values by adding targ_trace to single step # returns. target_q = np.zeros((3, 2)) target_q[2, 0] = (5.9 + 0.6*(0.16*2 + 0.72*8 + 0.12*4)) target_q[2, 1] = (6.9 + 0.5*(0.33*9 + 0.30*1 + 0.37*8)) target_q[1, 0] = (3.9 + 0.7*(0.22*3 + 0.44*5 + 0.34*7) + 0.9*0.7*0.22/0.87 * targ_trace[2, 0]) target_q[1, 1] = (4.9 + 0.8*(0.14*4 + 0.25*6 + 0.61*9) + 0.9*0.8*0.25/0.88 * targ_trace[2, 1]) target_q[0, 0] = (1.9 + 0.8*(0.41*7 + 0.28*6 + 0.31*5) + 0.9*0.8*0.28/0.85 * targ_trace[1, 0]) target_q[0, 1] = (2.9 + 0.9*(0.19*4 + 0.77*3 + 0.04*2) + 0.9*0.9*0.04/0.86 * targ_trace[1, 1]) # Now we can compute the "official" trace (ΔQ_tm1), which involves the # learning network. The only difference from the "target network trace" # calculations is the Q(s_tm1, a_tm1) terms we use: trace = np.zeros((3, 2)) # ↓ Q(s_tm1, a_tm1) trace[2, 0] = target_q[2, 0] - 3.2 # δ_tm1[2,0] trace[2, 1] = target_q[2, 1] - 6.2 # δ_tm1[2,1] trace[1, 0] = target_q[1, 0] - 6.2 # δ_tm1[1,0] trace[1, 1] = target_q[1, 1] - 2.2 # δ_tm1[1,1] trace[0, 0] = target_q[0, 0] - 4.2 # δ_tm1[0,0] trace[0, 1] = target_q[0, 1] - 5.2 # δ_tm1[0,0] self.expected_result = 0.5 * np.square(trace) self.target_q = target_q def testRetraceThreeTimeSteps(self): """Subject Retrace to a two-sequence, three-timestep minibatch.""" retrace = retrace_ops.retrace( self.lambda_, self.qs, self.targnet_qs, self.actions, self.rewards, self.pcontinues, self.target_policy_probs, self.behaviour_policy_probs) with self.test_session() as sess: self.assertAllClose(sess.run(retrace.loss), self.expected_result) def _get_retrace_core(self): """Constructs a tf subgraph from `retrace_core` op. A retrace core namedtuple is built from a two-sequence, three-timestep input minibatch. Returns: Tuple of size 3 containing non-differentiable inputs, differentiable inputs and retrace_core namedtuple. """ # Here we essentially replicate the preprocessing that `retrace` does # as it constructs the inputs to `retrace_core`. # These ops must be Tensors so that we can use them in the # `testNoOtherGradients` unit test. 
TensorFlow can only compute gradients # with respect to other parts of the graph lambda_ = tf.constant(self.lambda_) q_tm1 = tf.constant(self.qs[:3]) a_tm1 = tf.constant(self.actions[:3]) r_t = tf.constant(self.rewards[:3]) pcont_t = tf.constant(self.pcontinues[:3]) target_policy_t = tf.constant(self.target_policy_probs[1:4]) behaviour_policy_t = tf.constant(self.behaviour_policy_probs[1:4]) targnet_q_t = tf.constant(self.targnet_qs[1:4]) a_t = tf.constant(self.actions[1:4]) static_args = [lambda_, a_tm1, r_t, pcont_t, target_policy_t, behaviour_policy_t, targnet_q_t, a_t] diff_args = [q_tm1] return (static_args, diff_args, retrace_ops.retrace_core(lambda_, q_tm1, a_tm1, r_t, pcont_t, target_policy_t, behaviour_policy_t, targnet_q_t, a_t)) def testRetraceCoreTargetQThreeTimeSteps(self): """Tests whether retrace_core evaluates correct targets for regression.""" _, _, retrace = self._get_retrace_core() with self.test_session() as sess: self.assertAllClose(sess.run(retrace.extra.target), self.target_q) def testRetraceCoreLossThreeTimeSteps(self): """Tests whether retrace_core evaluates correct losses.""" _, _, retrace = self._get_retrace_core() with self.test_session() as sess: self.assertAllClose(sess.run(retrace.loss), self.expected_result) def testNoOtherGradients(self): """Tests no gradient propagates through things other than q_tm1.""" static_args, _, retrace = self._get_retrace_core() gradients = tf.gradients([retrace.loss], static_args) self.assertEqual(gradients, [None] * len(gradients)) def testMovingNetworkGradientIsEvaluated(self): """Tests that gradients are evaluated w.r.t. q_tm1.""" _, diff_args, retrace = self._get_retrace_core() gradients = tf.gradients([retrace.loss], diff_args) for gradient in gradients: self.assertNotEqual(gradient, None) def testRetraceHatesBadlyRankedInputs(self): """Ensure Retrace notices inputs with the wrong rank.""" # No problems if we create a Retrace using correctly-ranked arguments. proper_args = [self.lambda_, self.qs, self.targnet_qs, self.actions, self.rewards, self.pcontinues, self.target_policy_probs, self.behaviour_policy_probs] retrace_ops.retrace(*proper_args) # Now make a local copy of the args and try modifying each element to have # an inappropriate rank. We should get an error each time. for i in xrange(len(proper_args)): bad_args = list(proper_args) bad_args[i] = [bad_args[i]] with self.assertRaises(ValueError): retrace_ops.retrace(*bad_args) if __name__ == '__main__': tf.test.main()
trfl-master
trfl/retrace_ops_test.py
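The fixture above exercises the op with hand-computed targets; outside the test, a call looks like the sketch below. Tensor contents here are random placeholders and only the shapes ([T, B, num_actions] for Q-values and policy probabilities, [T, B] otherwise) follow the fixture; the argument order matches the positional call in `testRetraceThreeTimeSteps`.

# Usage sketch for the retrace op tested above; all values are placeholders.
import numpy as np
import tensorflow.compat.v1 as tf
from trfl import retrace_ops

T, B, A = 4, 2, 3                       # timesteps, batch, actions (assumed)
rng = np.random.RandomState(0)
q_values = tf.constant(rng.randn(T, B, A), dtype=tf.float32)
targnet_q_values = tf.constant(rng.randn(T, B, A), dtype=tf.float32)
actions = tf.constant(rng.randint(0, A, size=(T, B)), dtype=tf.int32)
rewards = tf.constant(rng.randn(T, B), dtype=tf.float32)
pcontinues = tf.constant(np.full((T, B), 0.9), dtype=tf.float32)
target_probs = tf.constant(np.full((T, B, A), 1.0 / A), dtype=tf.float32)
behaviour_probs = tf.constant(np.full((T, B), 1.0 / A), dtype=tf.float32)

retrace = retrace_ops.retrace(
    0.9,                 # lambda
    q_values,            # learning-network Q(s, .)
    targnet_q_values,    # target-network Q(s, .)
    actions, rewards, pcontinues,
    target_probs, behaviour_probs)
loss = retrace.loss      # [T-1, B], 0.5 * squared trace, as in expected_result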
# Copyright 2018 The trfl Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for periodic_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports import tensorflow.compat.v1 as tf from trfl import periodic_ops class PeriodicallyTest(tf.test.TestCase): """Tests function periodically.""" def testPeriodically(self): """Tests that a function is called exactly every `period` steps.""" target = tf.Variable(0) period = 3 periodic_update = periodic_ops.periodically( body=lambda: target.assign_add(1).op, period=period) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) desired_values = [1, 1, 1, 2, 2, 2, 3, 3, 3, 4] for desired_value in desired_values: sess.run(periodic_update) result = sess.run(target) self.assertEqual(desired_value, result) def testPeriodOne(self): """Tests that the function is called every time if period == 1.""" target = tf.Variable(0) periodic_update = periodic_ops.periodically( body=lambda: target.assign_add(1).op, period=1) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) for desired_value in range(1, 11): _, result = sess.run([periodic_update, target]) self.assertEqual(desired_value, result) def testPeriodNone(self): """Tests that the function is never called if period == None.""" target = tf.Variable(0) periodic_update = periodic_ops.periodically( body=lambda: target.assign_add(1).op, period=None) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) desired_value = 0 for _ in range(1, 11): _, result = sess.run([periodic_update, target]) self.assertEqual(desired_value, result) def testFunctionNotCallable(self): """Tests value error when argument fn is not a callable.""" self.assertRaises( TypeError, periodic_ops.periodically, body=1, period=2) if __name__ == '__main__': tf.test.main()
trfl-master
trfl/periodic_ops_test.py
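The typical use of the op tested above is to refresh a target network only every `period` training steps; a minimal sketch follows (variable names are illustrative).

# Sketch: run the copy op only on every 100th call, as periodically() ensures.
import tensorflow.compat.v1 as tf
from trfl import periodic_ops

online_var = tf.Variable(1.0)
target_var = tf.Variable(0.0)
update_target = periodic_ops.periodically(
    body=lambda: target_var.assign(online_var).op, period=100)
# sess.run(update_target) is then executed once per training step; the actual
# assignment only happens on every 100th run, mirroring the tests above.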
# Copyright 2018 The trfl Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for distribution_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools # Dependency imports from absl.testing import parameterized import tensorflow.compat.v1 as tf import tensorflow_probability as tfp from trfl import distribution_ops l2_project = distribution_ops.l2_project _MULTIVARIATE_GAUSSIAN_TYPES = [ tfp.distributions.MultivariateNormalDiagPlusLowRank, tfp.distributions.MultivariateNormalDiag, tfp.distributions.MultivariateNormalTriL, tfp.distributions.MultivariateNormalFullCovariance ] class FactorisedKLGaussianTest(tf.test.TestCase, parameterized.TestCase): def _create_gaussian(self, gaussian_type): mu = tf.random_normal([3]) if gaussian_type == tfp.distributions.MultivariateNormalDiag: scale_diag = tf.random_normal([3]) dist = tfp.distributions.MultivariateNormalDiag(mu, scale_diag) if gaussian_type == tfp.distributions.MultivariateNormalDiagPlusLowRank: scale_diag = tf.random_normal([3]) perturb_factor = tf.random_normal([3, 2]) scale_perturb_diag = tf.random_normal([2]) dist = tfp.distributions.MultivariateNormalDiagPlusLowRank( mu, scale_diag, scale_perturb_factor=perturb_factor, scale_perturb_diag=scale_perturb_diag) if gaussian_type == tfp.distributions.MultivariateNormalTriL: cov = tf.random_uniform([3, 3], minval=0, maxval=1.0) # Create a PSD matrix. cov = 0.5 * (cov + tf.transpose(cov)) + 3 * tf.eye(3) scale = tf.cholesky(cov) dist = tfp.distributions.MultivariateNormalTriL(mu, scale) if gaussian_type == tfp.distributions.MultivariateNormalFullCovariance: cov = tf.random_uniform([3, 3], minval=0, maxval=1.0) # Create a PSD matrix. 
cov = 0.5 * (cov + tf.transpose(cov)) + 3 * tf.eye(3) dist = tfp.distributions.MultivariateNormalFullCovariance(mu, cov) return (dist, mu, dist.covariance()) @parameterized.parameters( itertools.product(_MULTIVARIATE_GAUSSIAN_TYPES, _MULTIVARIATE_GAUSSIAN_TYPES)) def testFactorisedKLGaussian(self, dist1_type, dist2_type): """Tests that the factorised KL terms sum up to the true KL.""" dist1, dist1_mean, dist1_cov = self._create_gaussian(dist1_type) dist2, dist2_mean, dist2_cov = self._create_gaussian(dist2_type) both_diagonal = _is_diagonal(dist1.scale) and _is_diagonal(dist2.scale) if both_diagonal: dist1_cov = dist1.parameters['scale_diag'] dist2_cov = dist2.parameters['scale_diag'] kl = tfp.distributions.kl_divergence(dist1, dist2) kl_mean, kl_cov = distribution_ops.factorised_kl_gaussian( dist1_mean, dist1_cov, dist2_mean, dist2_cov, both_diagonal=both_diagonal) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) actual_kl, kl_mean_np, kl_cov_np = sess.run([kl, kl_mean, kl_cov]) self.assertAllClose(actual_kl, kl_mean_np + kl_cov_np, rtol=1e-4) def testShapeAssertion(self): dist_type = tfp.distributions.MultivariateNormalDiag _, dist1_mean, dist1_cov = self._create_gaussian(dist_type) _, dist2_mean, dist2_cov = self._create_gaussian(dist_type) shape_error_regexp = 'Shape (.*) must have rank [0-9]+' with self.assertRaisesRegexp(ValueError, shape_error_regexp): distribution_ops.factorised_kl_gaussian( dist1_mean, dist1_cov, dist2_mean, dist2_cov, both_diagonal=True) def testConsistentGradientsBothDiagonal(self): dist_type = tfp.distributions.MultivariateNormalDiag dist1, dist1_mean, _ = self._create_gaussian(dist_type) dist2, dist2_mean, _ = self._create_gaussian(dist_type) kl = tfp.distributions.kl_divergence(dist1, dist2) dist1_scale = dist1.parameters['scale_diag'] dist2_scale = dist2.parameters['scale_diag'] kl_mean, kl_cov = distribution_ops.factorised_kl_gaussian( dist1_mean, dist1_scale, dist2_mean, dist2_scale, both_diagonal=True) dist_params = [dist1_mean, dist2_mean, dist1_scale, dist2_scale] actual_kl_gradients = tf.gradients(kl, dist_params) factorised_kl_gradients = tf.gradients(kl_mean + kl_cov, dist_params) # Check that no gradients flow into the mean terms from `kl_cov` and # vice-versa. gradients = tf.gradients(kl_mean, [dist1_scale]) self.assertListEqual(gradients, [None]) gradients = tf.gradients(kl_cov, [dist1_mean, dist2_mean]) self.assertListEqual(gradients, [None, None]) with self.test_session() as sess: np_actual_kl, np_factorised_kl = sess.run( [actual_kl_gradients, factorised_kl_gradients]) self.assertAllClose(np_actual_kl, np_factorised_kl) def testConsistentGradientsFullCovariance(self): dist_type = tfp.distributions.MultivariateNormalFullCovariance dist1, dist1_mean, dist1_cov = self._create_gaussian(dist_type) dist2, dist2_mean, dist2_cov = self._create_gaussian(dist_type) kl = tfp.distributions.kl_divergence(dist1, dist2) kl_mean, kl_cov = distribution_ops.factorised_kl_gaussian( dist1_mean, dist1_cov, dist2_mean, dist2_cov, both_diagonal=False) dist1_cov = dist1.parameters['covariance_matrix'] dist2_cov = dist2.parameters['covariance_matrix'] dist_params = [ dist1_mean, dist2_mean, dist1_cov, dist2_cov, ] actual_kl_gradients = tf.gradients(kl, dist_params) factorised_kl_gradients = tf.gradients(kl_mean + kl_cov, dist_params) # Check that no gradients flow into the mean terms from `kl_cov` and # vice-versa. 
gradients = tf.gradients(kl_mean, [dist1_cov]) self.assertListEqual(gradients, [None]) gradients = tf.gradients(kl_cov, [dist1_mean, dist2_mean]) self.assertListEqual(gradients, [None, None]) with self.test_session() as sess: np_actual_kl, np_factorised_kl = sess.run( [actual_kl_gradients, factorised_kl_gradients]) self.assertAllClose(np_actual_kl, np_factorised_kl) # Check for diagonal Gaussian distributions. Based on the definition in # tensorflow_probability/python/distributions/mvn_linear_operator.py def _is_diagonal(x): """Helper to identify if `LinearOperator` has only a diagonal component.""" return (isinstance(x, tf.linalg.LinearOperatorIdentity) or isinstance(x, tf.linalg.LinearOperatorScaledIdentity) or isinstance(x, tf.linalg.LinearOperatorDiag)) if __name__ == '__main__': tf.test.main()
trfl-master
trfl/distribution_ops_test.py
# Copyright 2018 The trfl Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for continuous_retrace_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports import numpy as np import six from six.moves import range import tensorflow.compat.v1 as tf from trfl import continuous_retrace_ops def _shaped_arange(*shape): """Runs np.arange, converts to float and reshapes.""" return np.arange(np.prod(shape), dtype=np.float32).reshape(*shape) def _ground_truth_calculation(discounts, log_rhos, rewards, q_values, values, bootstrap_value, lambda_): """Calculates the ground truth for Retrace in python/numpy.""" qs = [] seq_len = len(discounts) rhos = np.exp(log_rhos) cs = np.minimum(rhos, 1.0) cs *= lambda_ # This is a very inefficient way to calculate the Retrace ground truth. values_t_plus_1 = np.concatenate([values, bootstrap_value[None, :]], axis=0) for s in range(seq_len): q_s = np.copy(q_values[s]) # Very important copy... delta = rewards[s] + discounts[s] * values_t_plus_1[s + 1] - q_values[s] q_s += delta for t in range(s + 1, seq_len): q_s += ( np.prod(discounts[s:t], axis=0) * np.prod(cs[s + 1:t + 1], axis=0) * (rewards[t] + discounts[t] * values_t_plus_1[t + 1] - q_values[t])) qs.append(q_s) qs = np.stack(qs, axis=0) return qs class ContinuousRetraceTest(tf.test.TestCase): def testSingleElem(self): """Tests Retrace with a single element batch and lambda set to 1.0.""" batch_size = 1 lambda_ = 1.0 self._main_test(batch_size, lambda_) def testLargerBatch(self): """Tests Retrace with a larger batch.""" batch_size = 2 lambda_ = 1.0 self._main_test(batch_size, lambda_) def testLowerLambda(self): """Tests Retrace with a lower lambda.""" batch_size = 2 lambda_ = 0.5 self._main_test(batch_size, lambda_) def _main_test(self, batch_size, lambda_): """Tests Retrace against ground truth data calculated in python.""" seq_len = 5 # Create log_rhos such that rho will span from near-zero to above the # clipping thresholds. In particular, calculate log_rhos in [-2.5, 2.5), # so that rho is in approx [0.08, 12.2). log_rhos = _shaped_arange(seq_len, batch_size) / (batch_size * seq_len) log_rhos = 5 * (log_rhos - 0.5) # [0.0, 1.0) -> [-2.5, 2.5). 
values = { "discounts": np.array( # T, B where B_i: [0.9 / (i+1)] * T [[0.9 / (b + 1) for b in range(batch_size)] for _ in range(seq_len)]), "rewards": _shaped_arange(seq_len, batch_size), "q_values": _shaped_arange(seq_len, batch_size) / batch_size, "values": _shaped_arange(seq_len, batch_size) / batch_size, "bootstrap_value": _shaped_arange(batch_size) + 1.0, # B "log_rhos": log_rhos } placeholders = { key: tf.placeholder(tf.float32, shape=val.shape) for key, val in six.iteritems(values) } placeholders = { k: tf.placeholder(dtype=p.dtype, shape=[None] * len(p.shape)) for k, p in placeholders.items() } retrace_returns = continuous_retrace_ops.retrace_from_importance_weights( lambda_=lambda_, **placeholders) feed_dict = {placeholders[k]: v for k, v in values.items()} with self.test_session() as sess: retrace_outputvalues = sess.run(retrace_returns, feed_dict=feed_dict) ground_truth_data = _ground_truth_calculation(lambda_=lambda_, **values) self.assertAllClose(ground_truth_data, retrace_outputvalues.qs) if __name__ == "__main__": tf.test.main()
trfl-master
trfl/continuous_retrace_ops_test.py
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Ops to implement gradient clipping."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow.compat.v1 as tf


def huber_loss(input_tensor, quadratic_linear_boundary, name=None):
  """Calculates huber loss of `input_tensor`.

  For each value x in `input_tensor`, the following is calculated:

  ```
  0.5 * x^2                  if |x| <= d
  0.5 * d^2 + d * (|x| - d)  if |x| > d
  ```

  where d is `quadratic_linear_boundary`.

  When `input_tensor` is a loss this results in a form of gradient clipping.
  This is, for instance, how gradients are clipped in DQN and its variants.

  Args:
    input_tensor: `Tensor`, input values to calculate the huber loss on.
    quadratic_linear_boundary: `float`, the point where the huber loss
      function changes from a quadratic to linear.
    name: `string`, name for the operation (optional).

  Returns:
    `Tensor` of the same shape as `input_tensor`, containing values calculated
    in the manner described above.

  Raises:
    ValueError: if quadratic_linear_boundary <= 0.
  """
  if quadratic_linear_boundary <= 0:
    raise ValueError("quadratic_linear_boundary must be > 0.")

  with tf.name_scope(
      name, default_name="huber_loss",
      values=[input_tensor, quadratic_linear_boundary]):
    abs_x = tf.abs(input_tensor)
    delta = quadratic_linear_boundary
    quad = tf.minimum(abs_x, delta)
    # The following expression is the same in value as
    # tf.maximum(abs_x - delta, 0), but importantly the gradient for the
    # expression when abs_x == delta is 0 (for tf.maximum it would be 1). This
    # is necessary to avoid doubling the gradient, since there is already a
    # non-zero contribution to the gradient from the quadratic term.
    lin = (abs_x - quad)
    return 0.5 * quad**2 + delta * lin
trfl-master
trfl/clipping_ops.py
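A quick numerical check of the piecewise definition in the docstring above, with boundary d = 1.0 (the input values are arbitrary illustrations):

# Sketch: huber_loss (above) is quadratic inside [-d, d] and linear outside.
import tensorflow.compat.v1 as tf
from trfl import clipping_ops

x = tf.constant([-2.0, -0.5, 0.0, 0.5, 2.0])
loss = clipping_ops.huber_loss(x, quadratic_linear_boundary=1.0)
with tf.Session() as sess:
  print(sess.run(loss))   # [1.5, 0.125, 0., 0.125, 1.5]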
# Copyright 2018 The trfl Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tensorflow ops for multistep return evaluation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports import tensorflow.compat.v1 as tf def _reverse_seq(sequence, sequence_lengths=None): """Reverse sequence along dim 0. Args: sequence: Tensor of shape [T, B, ...]. sequence_lengths: (optional) tensor of shape [B]. If `None`, only reverse along dim 0. Returns: Tensor of same shape as sequence with dim 0 reversed up to sequence_lengths. """ if sequence_lengths is None: return tf.reverse(sequence, [0]) sequence_lengths = tf.convert_to_tensor(sequence_lengths) with tf.control_dependencies( [tf.assert_equal(sequence.shape[1], sequence_lengths.shape[0])]): return tf.reverse_sequence( sequence, sequence_lengths, seq_axis=0, batch_axis=1) def scan_discounted_sum(sequence, decay, initial_value, reverse=False, sequence_lengths=None, back_prop=True, name="scan_discounted_sum"): """Evaluates a cumulative discounted sum along dimension 0. ```python if reverse = False: result[1] = sequence[1] + decay[1] * initial_value result[k] = sequence[k] + decay[k] * result[k - 1] if reverse = True: result[last] = sequence[last] + decay[last] * initial_value result[k] = sequence[k] + decay[k] * result[k + 1] ``` Respective dimensions T, B and ... have to be the same for all input tensors. T: temporal dimension of the sequence; B: batch dimension of the sequence. if sequence_lengths is set then x1 and x2 below are equivalent: ```python x1 = zero_pad_to_length( scan_discounted_sum( sequence[:length], decays[:length], **kwargs), length=T) x2 = scan_discounted_sum(sequence, decays, sequence_lengths=[length], **kwargs) ``` Args: sequence: Tensor of shape `[T, B, ...]` containing values to be summed. decay: Tensor of shape `[T, B, ...]` containing decays/discounts. initial_value: Tensor of shape `[B, ...]` containing initial value. reverse: Whether to process the sum in a reverse order. sequence_lengths: Tensor of shape `[B]` containing sequence lengths to be (reversed and then) summed. back_prop: Whether to backpropagate. name: Sets the name_scope for this op. Returns: Cumulative sum with discount. Same shape and type as `sequence`. """ # Note this can be implemented in terms of cumprod and cumsum, # approximately as (ignoring boundary issues and initial_value): # # cumsum(decay_prods * sequence) / decay_prods # where decay_prods = reverse_cumprod(decay) # # One reason this hasn't been done is that multiplying then dividing again by # products of decays isn't ideal numerically, in particular if any of the # decays are zero it results in NaNs. with tf.name_scope(name, values=[sequence, decay, initial_value]): if sequence_lengths is not None: # Zero out sequence and decay beyond sequence_lengths. 
      with tf.control_dependencies(
          [tf.assert_equal(sequence.shape[0], decay.shape[0])]):
        mask = tf.sequence_mask(sequence_lengths, maxlen=sequence.shape[0],
                                dtype=sequence.dtype)
        mask = tf.transpose(mask)

      # Adding trailing dimensions to mask to allow for broadcasting.
      to_seq = mask.shape.dims + [1] * (sequence.shape.ndims - mask.shape.ndims)
      sequence *= tf.reshape(mask, to_seq)
      to_decay = mask.shape.dims + [1] * (decay.shape.ndims - mask.shape.ndims)
      decay *= tf.reshape(mask, to_decay)

    sequences = [sequence, decay]

    if reverse:
      sequences = [_reverse_seq(s, sequence_lengths) for s in sequences]

    summed = tf.scan(lambda a, x: x[0] + x[1] * a,
                     sequences,
                     initializer=tf.convert_to_tensor(initial_value),
                     parallel_iterations=1,
                     back_prop=back_prop)
    if not back_prop:
      summed = tf.stop_gradient(summed)

    if reverse:
      summed = _reverse_seq(summed, sequence_lengths)
    return summed


def multistep_forward_view(rewards, pcontinues, state_values, lambda_,
                           back_prop=True, sequence_lengths=None,
                           name="multistep_forward_view_op"):
  """Evaluates complex backups (forward view of eligibility traces).

  ```python
  result[t] = rewards[t] +
      pcontinues[t]*(lambda_[t]*result[t+1] + (1-lambda_[t])*state_values[t])
  result[last] = rewards[last] + pcontinues[last]*state_values[last]
  ```

  This operation evaluates multistep returns where the lambda_ parameter
  controls mixing between full returns and bootstrapping. It is the user's
  responsibility to provide state_values. Depending on how state_values are
  evaluated, this function can evaluate targets for Q(lambda), Sarsa(lambda)
  or some other multistep bootstrapping algorithm.

  More information about a forward view is given here:
    http://incompleteideas.net/sutton/book/ebook/node74.html

  Please note that instead of evaluating traces and then explicitly summing
  them, we instead evaluate mixed returns in reverse temporal order by using
  the recurrent relationship given above.

  The parameter lambda_ can either be a constant value (e.g. for Peng's
  Q(lambda) and Sarsa(lambda)) or a tensor containing arbitrary values
  (Watkins' Q(lambda), Munos' Retrace, etc.).

  The result of evaluating this recurrence relation is a weighted sum of
  n-step returns, as depicted in the diagram below. One strategy to prove this
  equivalence notes that many of the terms in adjacent n-step returns
  "telescope", or cancel out, when the returns are summed.

  Below, L3 is lambda at time step 3 (important: this diagram is 1-indexed,
  not 0-indexed like Python). If lambda is scalar then L1=L2=...=Ln.
  g1,...,gn are discounts.

  ```
  Weights:  (1-L1)        (1-L2)*L1      (1-L3)*L1*L2  ...  L1*L2*...*L{n-1}
  Returns:  |r1*(g1)+     |r1*(g1)+      |r1*(g1)+          |r1*(g1)+
            v1*(g1)       |r2*(g1*g2)+   |r2*(g1*g2)+       |r2*(g1*g2)+
                          v2*(g1*g2)     |r3*(g1*g2*g3)+    |r3*(g1*g2*g3)+
                                         v3*(g1*g2*g3)      ...
                                                            |rn*(g1*...*gn)+
                                                            vn*(g1*...*gn)
  ```

  Args:
    rewards: Tensor of shape `[T, B]` containing rewards.
    pcontinues: Tensor of shape `[T, B]` containing discounts.
    state_values: Tensor of shape `[T, B]` containing state values.
    lambda_: Mixing parameter lambda. The parameter can either be a scalar or
      a Tensor of shape `[T, B]` if mixing is a function of state.
    back_prop: Whether to backpropagate.
    sequence_lengths: Tensor of shape `[B]` containing sequence lengths to be
      (reversed and then) summed, same as in `scan_discounted_sum`.
    name: Sets the name_scope for this op.

  Returns:
    Tensor of shape `[T, B]` containing multistep returns.
  """
  with tf.name_scope(name, values=[rewards, pcontinues, state_values]):
    # Regroup:
    #   result[t] = (rewards[t] + pcontinues[t]*(1-lambda_)*state_values[t]) +
    #               pcontinues[t]*lambda_*result[t + 1]
    # Define:
    #   sequence[t] = rewards[t] + pcontinues[t]*(1-lambda_)*state_values[t]
    #   discount[t] = pcontinues[t]*lambda_
    # Substitute:
    #   result[t] = sequence[t] + discount[t]*result[t + 1]
    # Boundary condition:
    #   result[last] = rewards[last] + pcontinues[last]*state_values[last]
    # Add and subtract the same quantity at BC:
    #   state_values[last] =
    #       lambda_*state_values[last] + (1-lambda_)*state_values[last]
    # This makes:
    #   result[last] =
    #       (rewards[last] + pcontinues[last]*(1-lambda_)*state_values[last]) +
    #       pcontinues[last]*lambda_*state_values[last]
    # Substitute in definitions for sequence and discount:
    #   result[last] = sequence[last] + discount[last]*state_values[last]
    # Define:
    #   initial_value=state_values[last]
    # We get the following recurrent relationship:
    #   result[last] = sequence[last] + decay[last]*initial_value
    #   result[k] = sequence[k] + decay[k] * result[k + 1]
    # This matches the form of scan_discounted_sum:
    #   result = scan_discounted_sum(sequence, discount,
    #                                initial_value=state_values[last])
    sequence = rewards + pcontinues * state_values * (1 - lambda_)
    discount = pcontinues * lambda_
    return scan_discounted_sum(sequence, discount, state_values[-1],
                               reverse=True, sequence_lengths=sequence_lengths,
                               back_prop=back_prop)
trfl-master
trfl/sequence_ops.py
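# Editor's note: a minimal usage sketch for scan_discounted_sum, not part of
# the trfl sources above. It assumes the trfl package is installed and that
# TF1-style graph mode is used (tensorflow.compat.v1 with eager execution
# disabled); the reward and discount numbers are made up for illustration.
# With reverse=True it applies result[k] = sequence[k] + decay[k]*result[k+1],
# the recurrence used for computing bootstrapped returns.
import tensorflow.compat.v1 as tf

from trfl import sequence_ops

tf.disable_eager_execution()

# T=3 timesteps, B=1 batch element; sequence and decay are shaped [T, B].
rewards = tf.constant([[1.0], [2.0], [3.0]])
discounts = tf.constant([[0.9], [0.9], [0.9]])
bootstrap = tf.constant([10.0])  # initial_value, shape [B].

# Discounted sum computed backwards in time:
#   g[2] = 3 + 0.9 * 10   = 12.0
#   g[1] = 2 + 0.9 * 12.0 = 12.8
#   g[0] = 1 + 0.9 * 12.8 = 12.52
returns = sequence_ops.scan_discounted_sum(
    rewards, discounts, bootstrap, reverse=True)

with tf.Session() as sess:
  print(sess.run(returns))  # approximately [[12.52], [12.8], [12.0]]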
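# Editor's note: a hedged sketch (not part of trfl) of multistep_forward_view,
# which mixes n-step returns into a lambda-return target, e.g. for
# Q(lambda)/Sarsa(lambda)-style bootstrapping. The rewards, discounts and
# state values below are arbitrary illustrative numbers; TF1 graph mode and an
# installed trfl package are assumed.
import tensorflow.compat.v1 as tf

from trfl import sequence_ops

tf.disable_eager_execution()

# T=3 timesteps, B=2 batch elements; all inputs are shaped [T, B].
rewards = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
pcontinues = tf.constant([[0.99, 0.99], [0.99, 0.0], [0.99, 0.99]])
state_values = tf.constant([[0.5, 0.4], [0.6, 0.3], [0.7, 0.2]])

# lambda_=0.0 gives one-step TD targets; lambda_=1.0 gives full returns
# bootstrapped only at the final step; values in between mix n-step returns.
targets = sequence_ops.multistep_forward_view(
    rewards, pcontinues, state_values, lambda_=0.9)

with tf.Session() as sess:
  print(sess.run(targets))  # shape [3, 2] of mixed multistep return targets.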
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for Reinforcement Learning ops."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
# Dependency imports
from six.moves import zip
import tensorflow.compat.v1 as tf

LossOutput = collections.namedtuple("loss_output", ["loss", "extra"])


def best_effort_shape(tensor, with_rank=None):
  """Extract as much static shape information from a tensor as possible.

  Args:
    tensor: A `Tensor`. If `with_rank` is None, must have statically-known
      number of dimensions.
    with_rank: Optional, an integer number of dimensions to force the shape to
      have. Useful for tensors with no static shape information that must be
      of a particular rank. Default is None (number of dimensions must be
      statically known).

  Returns:
    An iterable with length equal to the number of dimensions in `tensor`,
    containing integers for the dimensions with statically-known size, and
    scalar `Tensor`s for dimensions with size only known at run-time.

  Raises:
    ValueError: If `with_rank` is None and `tensor` does not have
      statically-known number of dimensions.
  """
  tensor_shape = tf.TensorShape(tensor.shape)
  if with_rank:
    tensor_shape = tensor_shape.with_rank(with_rank)
  if tensor_shape.ndims is None:
    raise ValueError(
        "`tensor` does not have statically-known number of dimensions.")
  shape_list = tensor_shape.as_list()
  for idx, dim in enumerate(shape_list):
    if not dim:
      shape_list[idx] = tf.shape(tensor)[idx]
  return shape_list


def assert_rank_and_shape_compatibility(tensors, rank):
  """Asserts that the tensors have the correct rank and compatible shapes.

  Shapes (of equal rank) are compatible if corresponding dimensions are all
  equal or unspecified. E.g. `[2, 3]` is compatible with all of `[2, 3]`,
  `[None, 3]`, `[2, None]` and `[None, None]`.

  Args:
    tensors: List of tensors.
    rank: A scalar specifying the rank that the tensors passed need to have.

  Raises:
    ValueError: If the list of tensors is empty or if the tensors fail the
      rank and mutual-compatibility asserts.
  """
  if not tensors:
    raise ValueError("List of tensors should be non-empty.")

  union_of_shapes = tf.TensorShape(None)
  for tensor in tensors:
    tensor_shape = tf.TensorShape(tensor.shape)
    tensor_shape.assert_has_rank(rank)
    union_of_shapes = union_of_shapes.merge_with(tensor_shape)


def wrap_rank_shape_assert(tensors_list, expected_ranks, op_name):
  try:
    for tensors, rank in zip(tensors_list, expected_ranks):
      assert_rank_and_shape_compatibility(tensors, rank)
  except ValueError as e:
    error_message = ("{}: Error in rank and/or "
                     "compatibility check, {}".format(op_name, e))
    tf.logging.error(error_message)
    raise ValueError(error_message)


def assert_arg_bounded(value, min_value, max_value, op_name, arg_name):
  if not min_value <= value <= max_value:
    raise ValueError(
        (op_name + ": " + arg_name + " has to lie in " +
         "[" + str(min_value) + ", " + str(max_value) + "]."))
trfl-master
trfl/base_ops.py
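# Editor's note: an illustrative sketch (not part of trfl) of the base_ops
# shape helpers above. best_effort_shape returns Python ints for static
# dimensions and scalar Tensors for dynamic ones, while
# assert_rank_and_shape_compatibility raises ValueError when ranks differ or
# shapes clash. TF1 graph mode (placeholders) and an installed trfl package
# are assumed.
import tensorflow.compat.v1 as tf

from trfl import base_ops

tf.disable_eager_execution()

x = tf.placeholder(tf.float32, shape=[None, 5])

# First dimension is dynamic, second is static: e.g. [<tf.Tensor ...>, 5].
print(base_ops.best_effort_shape(x))

# Compatible: [None, 5] merges with [2, 5] at rank 2, so no exception raised.
base_ops.assert_rank_and_shape_compatibility([x, tf.zeros([2, 5])], rank=2)

try:
  # Incompatible rank: a rank-1 tensor fails the rank-2 check.
  base_ops.assert_rank_and_shape_compatibility([tf.zeros([5])], rank=2)
except ValueError as e:
  print("shape check failed as expected:", e)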
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Indexing ops.

These ops support indexing the 2D tensors representing batches of values
(shape: [B, dim]) or 3D tensors representing batches of sequences of values
(shape: [T, B, dim]). `T` is the length of the rollout, `B` is the batch size,
and `dim` the size of the dimension that must be indexed.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# Dependency imports
import tensorflow.compat.v1 as tf


def assert_compatible_shapes(value_shape, index_shape):
  """Check shapes of the indices and the tensor to be indexed.

  If all input shapes are known statically, obtain shapes of arguments and
  perform compatibility checks. Otherwise, print a warning. The only check we
  cannot perform statically (and do not attempt elsewhere) is making sure that
  each action index in actions is in [0, num_actions).

  Args:
    value_shape: static shape of the values.
    index_shape: static shape of the indices.
  """
  # note: rank-0 "[]" TensorShape is still True.
  if value_shape and index_shape:
    try:
      msg = ("Shapes of \"values\" and \"indices\" do not correspond to "
             "minibatch (2-D) or sequence-minibatch (3-D) indexing")
      assert (value_shape.ndims, index_shape.ndims) in [(2, 1), (3, 2)], msg
      msg = ("\"values\" and \"indices\" have incompatible shapes of {} "
             "and {}, respectively").format(value_shape, index_shape)
      assert value_shape[:-1].is_compatible_with(index_shape), msg
    except AssertionError as e:
      raise ValueError(e)  # Convert AssertionError to ValueError.
  else:
    # No shape information is known ahead of time.
    tf.logging.warning(
        "indexing function cannot get shapes for tensors \"values\" and "
        "\"indices\" at construction time, and so can't check that their "
        "shapes are valid or compatible. Incorrect indexing may occur at "
        "runtime without error!")


def batched_index(values, indices, keepdims=None):
  """Equivalent to `values[:, indices]`.

  Performs indexing on batches and sequence-batches by reducing over
  zero-masked values. Compared to indexing with `tf.gather` this approach is
  more general and TPU-friendly, but may be less efficient if `num_values`
  is large.

  It works with tensors whose shapes are unspecified or partially-specified,
  but this op will only do shape checking on shape information available at
  graph construction time. When complete shape information is absent, certain
  shape incompatibilities may not be detected at runtime! See
  `indexing_ops_test` for detailed examples.

  Args:
    values: tensor of shape `[B, num_values]` or `[T, B, num_values]`
    indices: tensor of shape `[B]` or `[T, B]` containing indices.
    keepdims: If `True`, the returned tensor will have an added 1 dimension at
      the end (e.g. `[B, 1]` or `[T, B, 1]`).

  Returns:
    Tensor of shape `[B]` or `[T, B]` containing values for the given indices.

  Raises:
    ValueError if values and indices have sizes that are known statically (i.e.
during graph construction), and those sizes are not compatible (see shape descriptions in Args list above). """ with tf.name_scope("batch_indexing", values=[values, indices]): values = tf.convert_to_tensor(values) indices = tf.convert_to_tensor(indices) assert_compatible_shapes(values.shape, indices.shape) one_hot_indices = tf.one_hot( indices, tf.shape(values)[-1], dtype=values.dtype) return tf.reduce_sum(values * one_hot_indices, axis=-1, keepdims=keepdims)
trfl-master
trfl/indexing_ops.py
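# Editor's note: a small usage sketch for batched_index, not part of the trfl
# sources above. It gathers one value per batch element, e.g. the Q-values of
# the actions actually taken; the numbers are made up for illustration, and
# TF1 graph mode plus an installed trfl package are assumed.
import tensorflow.compat.v1 as tf

from trfl import indexing_ops

tf.disable_eager_execution()

# A batch of B=3 rows of state-action values over num_values=4 actions.
q_values = tf.constant([[1.0, 2.0, 3.0, 4.0],
                        [5.0, 6.0, 7.0, 8.0],
                        [9.0, 10.0, 11.0, 12.0]])
actions = tf.constant([0, 3, 1])  # One action index per batch element.

# Selects q_values[b, actions[b]] for each b via a one-hot mask + reduce_sum.
chosen_q = indexing_ops.batched_index(q_values, actions)

with tf.Session() as sess:
  print(sess.run(chosen_q))  # [1., 8., 10.]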