# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Component common to all architecture streams(vector, visual and units)."""
from typing import Iterable, Optional
from alphastar import types
from alphastar.architectures import modular
from alphastar.architectures.components import util
from alphastar.commons import sample
import dm_env
from dm_env import specs
import jax.numpy as jnp
class ActionFromBehaviourFeatures(modular.BatchedComponent):
"""Copies the action from the behaviour features (for training)."""
def __init__(self,
argument_name: types.ArgumentName,
max_action_value: Optional[int] = None,
name: Optional[str] = None):
"""Initializes ActionFromBehaviourFeatures module.
Args:
argument_name: The name of the action argument to use.
max_action_value: An optional clipping of the max value for the action.
name: The name of this component.
"""
super().__init__(name=name)
self._argument_name = argument_name
self._max_action_value = max_action_value
@property
def input_spec(self) -> types.SpecDict:
"""Generates the specifications for the input."""
return types.SpecDict({
("behaviour_features", "action", self._argument_name): specs.Array(
(), jnp.int32)})
@property
def output_spec(self) -> types.SpecDict:
"""Generates the specifications for the output."""
return types.SpecDict({
("action", self._argument_name): specs.Array((), jnp.int32)})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
x = inputs["behaviour_features", "action", self._argument_name]
if self._max_action_value is not None:
x = jnp.minimum(x, self._max_action_value)
outputs = types.StreamDict({("action", self._argument_name): x})
return outputs, {}
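# Hedged usage sketch (added for illustration, not part of the original
# component): the core of ActionFromBehaviourFeatures._forward is a copy of
# the behaviour action plus an optional clip. Values below are hypothetical.
def _demo_action_clipping():
  behaviour_action = jnp.asarray(7, jnp.int32)
  max_action_value = 4  # corresponds to the optional `max_action_value`
  return jnp.minimum(behaviour_action, max_action_value)  # -> 4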
class Sample(modular.BatchedComponent):
"""Samples the logits into an action (for inference)."""
def __init__(self,
argument_name: types.ArgumentName,
num_logits: int,
sample_fn: sample.SampleFn,
name: Optional[str] = None):
"""Initializes Sample module.
Args:
argument_name: The name of the action argument to use.
num_logits: The size of the logits 1d vector.
sample_fn: The function to sample the logits, taking a float32 1d logits
vector as input and returning a int32 0d action.
name: The name of this component.
"""
super().__init__(name=name)
self._argument_name = argument_name
self._num_logits = num_logits
self._sample_fn = sample_fn
@property
def input_spec(self) -> types.SpecDict:
"""Generates the specifications for the input."""
return types.SpecDict({("logits", self._argument_name): specs.Array(
(self._num_logits,), jnp.float32)})
@property
def output_spec(self) -> types.SpecDict:
"""Generates the specifications for the output."""
return types.SpecDict({
("action", self._argument_name): specs.Array((), jnp.int32)})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
action = self._sample_fn(inputs["logits", self._argument_name])
outputs = types.StreamDict({("action", self._argument_name): action})
return outputs, {}
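# Hedged sketch (illustration only): sample.SampleFn maps a float32 [N]
# logits vector to an int32 scalar action. The greedy stand-in below is an
# assumption used to make the contract concrete, not the library's sampler.
def _demo_greedy_sample_fn():
  logits = jnp.asarray([0.1, 2.0, -1.0], jnp.float32)
  greedy_sample_fn = lambda l: jnp.argmax(l).astype(jnp.int32)
  return greedy_sample_fn(logits)  # -> 1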
class ArgumentMasks(modular.BatchedComponent):
"""Compute the argument masks from the function argument."""
def __init__(self,
action_spec: types.ActionSpec,
input_name: types.StreamType = ("action", "function"),
output_name: types.StreamType = "argument_masks",
name: Optional[str] = None):
"""Initializes ArgumentMasks object.
Args:
action_spec: The action spec.
input_name: The name of the input to use, of shape [] and dtype int32.
output_name: The prefix of the name to give to the output. There is one
output per argument name in action_spec, each one of shape [] and
dtype bool.
name: The name of this component.
"""
super().__init__(name=name)
self._action_spec = action_spec
self._input_name = input_name
self._output_name = output_name
@property
def input_spec(self) -> types.SpecDict:
"""Generates the specifications for the input."""
return types.SpecDict({
self._input_name: specs.Array((), jnp.int32),
"step_type": specs.Array((), jnp.int32)})
@property
def output_spec(self) -> types.SpecDict:
"""Generates the specifications for the output."""
return types.SpecDict({(self._output_name, arg): specs.Array((), jnp.bool_)
for arg in self._action_spec})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
function_arg = inputs[self._input_name]
full_argument_masks = util.get_full_argument_masks(self._action_spec)
first_step_mask = jnp.not_equal(
inputs["step_type"], int(dm_env.StepType.FIRST))
outputs = types.StreamDict({
(self._output_name, arg): jnp.logical_and(
full_argument_masks[arg][function_arg], first_step_mask)
for arg in self._action_spec})
return outputs, {}
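# Hedged sketch (illustration only): argument masking is a per-argument table
# lookup indexed by the function id, AND-ed with a "not the first step" mask.
# The 3-function table below is hypothetical; the real tables come from
# util.get_full_argument_masks(action_spec).
def _demo_argument_mask_lookup():
  queued_mask_per_function = jnp.asarray([True, False, True])
  function_arg = jnp.asarray(2, jnp.int32)
  step_type = jnp.asarray(int(dm_env.StepType.MID), jnp.int32)
  not_first_step = jnp.not_equal(step_type, int(dm_env.StepType.FIRST))
  return jnp.logical_and(queued_mask_per_function[function_arg],
                         not_first_step)  # -> True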
class FeatureFromPrevState(modular.Component):
"""Copies a feature from the prev_state.
During training, the feature must be in behaviour_features.
"""
def __init__(self,
input_name: types.StreamType,
output_name: types.StreamType,
is_training: bool,
stream_shape: Iterable[int],
stream_dtype: jnp.dtype = jnp.float32,
name: Optional[str] = None):
"""Initializes FeatureFromPrevState module.
Args:
input_name: The name of the input to use, of shape `stream_shape` and
dtype `stream_dtype`.
output_name: The name to give to the output, of shape `stream_shape` and
dtype `stream_dtype`.
is_training: A boolean specifying whether this is training or inference.
During training, `behaviour_features` must be in the inputs to provide
the values used by the behaviour model (or the recorded episode for
offline learning).
stream_shape: The shape of the input and output.
stream_dtype: The dtype of the input and output.
name: The name of this component.
"""
super().__init__(name=name)
self._input_name = input_name
self._output_name = output_name
self._is_training = is_training
self._stream_spec = specs.Array(stream_shape, stream_dtype)
@property
def input_spec(self) -> types.SpecDict:
"""Generates the specifications for the input."""
spec = types.SpecDict()
if self._is_training:
spec["behaviour_features", self._input_name] = self._stream_spec
return spec
@property
def prev_state_spec(self) -> types.SpecDict:
"""Generates the specifications for the previous state."""
return types.SpecDict({self._input_name: self._stream_spec})
@property
def output_spec(self) -> types.SpecDict:
"""Generates the specifications for the output."""
return types.SpecDict({self._output_name: self._stream_spec})
@property
def next_state_spec(self) -> types.SpecDict:
"""Generates the specifications for the next state."""
return types.SpecDict()
def _unroll(self,
inputs: types.StreamDict,
prev_state: types.StreamDict) -> modular.UnrollOutputType:
prev_state_input = prev_state[self._input_name][jnp.newaxis]
if self._is_training:
behaviour_inputs = inputs.get("behaviour_features")[self._input_name]
outputs = types.StreamDict({
self._output_name: jnp.concatenate(
[prev_state_input, behaviour_inputs[:-1]], axis=0)})
else:
outputs = types.StreamDict({self._output_name: prev_state_input})
return outputs, types.StreamDict(), {}
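# Hedged sketch (illustration only): during training the output is the
# behaviour feature shifted one step back in time, seeded with the carried
# state. Scalar per-step features are assumed for brevity.
def _demo_prev_state_shift():
  prev_state = jnp.asarray([0.])         # carried from the previous rollout
  behaviour = jnp.asarray([1., 2., 3.])  # unroll_len = 3
  return jnp.concatenate([prev_state, behaviour[:-1]], axis=0)  # [0., 1., 2.]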
class FeatureToNextState(modular.Component):
"""Copies a feature from the prev_state."""
def __init__(self,
input_name: types.StreamType,
output_name: types.StreamType,
overlap_len: int,
stream_shape: Iterable[int],
stream_dtype: jnp.dtype = jnp.float32,
name: Optional[str] = None):
"""Initializes FeatureToNextState module.
Args:
input_name: The name of the input to use, of shape `stream_shape` and
dtype `stream_dtype`.
output_name: The name to give to the output, of shape `stream_shape` and
dtype `stream_dtype`.
overlap_len: The number of timesteps overlapping between two trajectories.
During training, the timestep passed to the next rollout is the one
immediately before the next rollout, which is not the last one if
overlap_len is not 0. Note that during inference, overlap_len must be 0
since the rollout length is 1.
stream_shape: The shape of the input and output.
stream_dtype: The dtype of the input and output.
name: The name of this component.
"""
super().__init__(name=name)
self._input_name = input_name
self._output_name = output_name
self._stream_spec = specs.Array(stream_shape, stream_dtype)
self._overlap_len = overlap_len
if overlap_len < 0:
raise ValueError(f"overlap_len must be non-negative, not {overlap_len}.")
@property
def input_spec(self) -> types.SpecDict:
"""Generates the specifications for the input."""
return types.SpecDict({self._input_name: self._stream_spec})
@property
def prev_state_spec(self) -> types.SpecDict:
"""Generates the specifications for the previous state."""
return types.SpecDict()
@property
def output_spec(self) -> types.SpecDict:
"""Generates the specifications for the output."""
return types.SpecDict()
@property
def next_state_spec(self) -> types.SpecDict:
"""Generates the specifications for the next state."""
return types.SpecDict({self._output_name: self._stream_spec})
def _unroll(self,
inputs: types.StreamDict,
prev_state: types.StreamDict) -> modular.UnrollOutputType:
x = inputs[self._input_name]
unroll_len = x.shape[0]
if self._overlap_len >= unroll_len:
raise ValueError(f"overlap_len ({self._overlap_len}) is larger than the "
f"unroll length ({x.shape[0]}).")
effective_sequence_length = unroll_len - self._overlap_len - 1
next_state = types.StreamDict({
self._output_name: x[effective_sequence_length]})
return types.StreamDict(), next_state, {}
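# Hedged sketch (illustration only): with overlapping trajectories, the state
# carried to the next rollout is the step immediately before that rollout
# starts, i.e. index unroll_len - overlap_len - 1 of the current one.
def _demo_next_state_index():
  x = jnp.arange(5)  # unroll_len = 5
  overlap_len = 2
  return x[5 - overlap_len - 1]  # -> 2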
# --- End of file: alphastar/architectures/components/common.py (repo: alphastar-main) ---
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Visual-based modules, acting on 2d feature maps."""
from typing import Callable, Optional, Sequence, Tuple, Union
from alphastar import types
from alphastar.architectures import modular
from alphastar.architectures.components import util
from alphastar.architectures.components.static_data import camera_masks
from alphastar.commons import sample
import chex
from dm_env import specs
import dm_pix as pix
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
class FeatureEncoder(modular.BatchedComponent):
"""Encodes 2d feature maps applying a function and a convolution."""
def __init__(self,
input_name: types.StreamType,
output_name: types.StreamType,
input_spatial_size: Union[int, Tuple[int, int]],
input_feature_size: Optional[int],
downscale_factor: int,
output_features_size: int,
kernel_size: int,
fun: Callable[[chex.Array], chex.Array] = lambda x: x,
input_dtype: jnp.dtype = jnp.uint8,
name: Optional[str] = None):
"""Initializes FeatureEncoder module.
Args:
input_name: The name of the input to use, of shape
[input_spatial_size[0], input_spatial_size[1]] and dtype `input_dtype`.
output_name: The name to give to the output, of shape
[input_spatial_size[0] / downscale_factor,
input_spatial_size[1] / downscale_factor,
output_features_size] and dtype float32.
input_spatial_size: The spatial size of the input to encode.
If the input is square, a single int can be used, otherwise
a pair is required.
input_feature_size: The number of feature planes of the input, or None
if the input does not have a feature dimension.
downscale_factor: The downscale factor to apply to the input.
output_features_size: The number of feature planes of the output.
kernel_size: The size of the convolution kernel to use. Note that with
downsampling, a `kernel_size` that is not a multiple of the `downscale_factor`
will result in a checkerboard pattern, so it is not recommended.
fun: An optional function to apply to the input before applying the
convolution.
input_dtype: The type of the input.
name: The name of this component.
"""
super().__init__(name=name)
self._input_name = input_name
self._output_name = output_name
if isinstance(input_spatial_size, int):
self._input_spatial_size = (input_spatial_size, input_spatial_size)
else:
self._input_spatial_size = tuple(input_spatial_size)
for i in range(2):
if self._input_spatial_size[i] % downscale_factor:
raise ValueError(f'input_spatial_size[{i}] must be a multiple of '
f'downscale_factor ({downscale_factor}) but is '
f'({self._input_spatial_size[i]}).')
self._input_feature_size = input_feature_size
self._downscale_factor = downscale_factor
self._output_features_size = output_features_size
self._kernel_size = kernel_size
self._fun = fun
self._input_dtype = input_dtype
@property
def input_spec(self) -> types.SpecDict:
if self._input_feature_size is None:
input_size = self._input_spatial_size
else:
input_size = self._input_spatial_size + (self._input_feature_size,)
return types.SpecDict({
self._input_name: specs.Array(input_size, self._input_dtype)})
@property
def output_spec(self) -> types.SpecDict:
output_size = tuple(size // self._downscale_factor
for size in self._input_spatial_size)
return types.SpecDict({
self._output_name: specs.Array(
output_size + (self._output_features_size,), jnp.float32)
})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
x = inputs[self._input_name]
x = util.astype(x, jnp.float32)
x = self._fun(x)
if self._input_feature_size is None:
x = x[..., jnp.newaxis]
x = hk.Conv2D(output_channels=self._output_features_size,
kernel_shape=self._kernel_size,
stride=self._downscale_factor)(x)
outputs = types.StreamDict({self._output_name: x})
return outputs, {}
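# Hedged sketch (illustration only): the essence of FeatureEncoder is a cast
# to float32, an optional elementwise function, and a strided convolution.
# Shapes are hypothetical (8x8 uint8 map, downscale 2, 4 output planes).
def _demo_feature_encoder():
  def net(x):
    x = x.astype(jnp.float32)[..., jnp.newaxis]  # add a feature axis
    return hk.Conv2D(output_channels=4, kernel_shape=2, stride=2)(x)
  init, apply = hk.transform(net)
  x = jnp.zeros((8, 8), jnp.uint8)
  params = init(jax.random.PRNGKey(0), x)
  return apply(params, None, x).shape  # -> (4, 4, 4)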
class Embedding(modular.BatchedComponent):
"""Encodes a visual (2d) int32 input, embedding each pixel independently."""
def __init__(self,
input_name: types.StreamType,
output_name: types.StreamType,
input_spatial_size: int,
downscale_factor: int,
num_classes: int,
output_features_size: int,
kernel_size: int,
name: Optional[str] = None):
"""Initializes Embedding module.
Args:
input_name: The name of the input to use, of shape
[input_spatial_size, input_spatial_size] and dtype uint8.
output_name: The name to give to the output, of shape
[input_spatial_size / downscale_factor,
input_spatial_size / downscale_factor,
output_features_size] and dtype float32.
input_spatial_size: The spatial size of the input to encode.
downscale_factor: The downscale factor to apply to the input.
num_classes: The number of values the input can take, i.e. max(input)+1.
For safety, the input is clipped to stay within [0, num_classes-1], but
it probably should never be larger than num_classes-1.
output_features_size: The number of feature planes of the output.
kernel_size: The size of the convolution kernel to use. Note that with
downsampling, a `kernel_size` that is not a multiple of the `downscale_factor`
will result in a checkerboard pattern, so it is not recommended.
name: The name of this component.
"""
super().__init__(name=name)
self._input_name = input_name
self._output_name = output_name
self._input_spatial_size = input_spatial_size
if input_spatial_size % downscale_factor:
raise ValueError(f'input_spatial_size ({input_spatial_size}) must be a '
f'multiple of downscale_factor ({downscale_factor}).')
self._downscale_factor = downscale_factor
self._num_classes = num_classes
self._output_features_size = output_features_size
self._kernel_size = kernel_size
@property
def input_spec(self) -> types.SpecDict:
return types.SpecDict({
self._input_name: specs.Array(
(self._input_spatial_size, self._input_spatial_size), jnp.uint8)})
@property
def output_spec(self) -> types.SpecDict:
output_size = self._input_spatial_size // self._downscale_factor
return types.SpecDict({
self._output_name: specs.Array(
(output_size, output_size, self._output_features_size), jnp.float32)
})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
x = inputs[self._input_name]
x = jnp.minimum(x, self._num_classes - 1)
x = jax.nn.one_hot(x, self._num_classes, dtype=jnp.float32)
x = hk.Conv2D(output_channels=self._output_features_size,
kernel_shape=self._kernel_size,
stride=self._downscale_factor)(x)
outputs = types.StreamDict({self._output_name: x})
return outputs, {}
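# Hedged sketch (illustration only): each pixel is clipped, one-hot encoded,
# then mixed spatially by a strided convolution. Sizes are hypothetical.
def _demo_pixel_embedding():
  num_classes = 5
  def net(x):
    x = jnp.minimum(x, num_classes - 1)
    x = jax.nn.one_hot(x, num_classes, dtype=jnp.float32)  # (H, W, classes)
    return hk.Conv2D(output_channels=3, kernel_shape=2, stride=2)(x)
  init, apply = hk.transform(net)
  x = jnp.zeros((4, 4), jnp.uint8)
  params = init(jax.random.PRNGKey(0), x)
  return apply(params, None, x).shape  # -> (2, 2, 3)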
class CameraEncoder(modular.BatchedComponent):
"""Encodes the camera visual feature."""
def __init__(self,
output_name: types.StreamType,
input_spatial_size: int,
downscale_factor: int,
output_features_size: int,
name: Optional[str] = None):
"""Initializes CameraEncoder module..
Args:
output_name: The name to give to the output, of shape
[input_spatial_size / downscale_factor,
input_spatial_size / downscale_factor,
output_features_size] and dtype float32.
input_spatial_size: The spatial size of the input to encode.
downscale_factor: The downscale factor to apply to the input.
output_features_size: The number of feature planes of the output.
name: The name of this component.
"""
super().__init__(name=name)
self._output_name = output_name
self._input_spatial_size = input_spatial_size
if input_spatial_size % downscale_factor:
raise ValueError(f'input_spatial_size ({input_spatial_size}) must be a '
f'multiple of downscale_factor ({downscale_factor}).')
self._downscale_factor = downscale_factor
self._output_features_size = output_features_size
@property
def input_spec(self) -> types.SpecDict:
return types.SpecDict({
('observation', 'camera'): specs.Array(
(self._input_spatial_size, self._input_spatial_size), jnp.int32)})
@property
def output_spec(self) -> types.SpecDict:
output_size = self._input_spatial_size // self._downscale_factor
return types.SpecDict({
self._output_name: specs.Array(
(output_size, output_size, self._output_features_size), jnp.float32)
})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
x = inputs['observation', 'camera']
ds = self._downscale_factor
x = hk.AvgPool((ds, ds), (ds, ds), 'SAME')(x)[..., jnp.newaxis]
x = hk.Linear(self._output_features_size)(x)
outputs = types.StreamDict({self._output_name: x})
return outputs, {}
class Downscale(modular.BatchedComponent):
"""Downscale the visual stream.."""
def __init__(self,
input_name: types.StreamType,
output_name: types.StreamType,
input_spatial_size: int,
input_features_size: int,
output_features_size: int,
downscale_factor: int,
kernel_size: int,
use_layer_norm: bool = True,
name: Optional[str] = None):
"""Initializes Downscale module..
Args:
input_name: The name of the input to use, of shape
[input_spatial_size, input_spatial_size, input_features_size] and dtype
float32.
output_name: The name to give to the output, of shape
[input_spatial_size / downscale_factor,
input_spatial_size / downscale_factor,
output_features_size] and dtype float32.
input_spatial_size: The spatial size of the input.
input_features_size: The number of feature planes of the input.
output_features_size: The number of feature planes of the output.
downscale_factor: The downscale factor to apply to the input.
kernel_size: The size of the convolution kernel to use. Note that with
downsampling, a `kernel_size` that is not a multiple of the `downscale_factor`
will result in a checkerboard pattern, so it is not recommended.
use_layer_norm: Whether to use layer normalization.
name: The name of this component.
"""
super().__init__(name=name)
self._input_name = input_name
self._output_name = output_name
if input_spatial_size % downscale_factor:
raise ValueError(f'input_spatial_size ({input_spatial_size}) must be a '
f'multiple of downscale_factor ({downscale_factor}).')
self._input_spatial_size = input_spatial_size
self._input_features_size = input_features_size
self._output_features_size = output_features_size
self._downscale_factor = downscale_factor
self._kernel_size = kernel_size
self._use_layer_norm = use_layer_norm
@property
def input_spec(self) -> types.SpecDict:
return types.SpecDict({
self._input_name: specs.Array((self._input_spatial_size,
self._input_spatial_size,
self._input_features_size),
jnp.float32)})
@property
def output_spec(self) -> types.SpecDict:
output_size = self._input_spatial_size // self._downscale_factor
return types.SpecDict({
self._output_name: specs.Array(
(output_size, output_size, self._output_features_size), jnp.float32)
})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
x = inputs[self._input_name]
if self._use_layer_norm:
x = util.visual_layer_norm(x)
x = jax.nn.relu(x)
x = hk.Conv2D(
output_channels=self._output_features_size,
kernel_shape=self._kernel_size,
stride=self._downscale_factor)(x)
outputs = types.StreamDict({self._output_name: x})
return outputs, {}
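# Hedged sketch (illustration only): a Downscale step is layer norm, ReLU,
# then a strided conv. hk.LayerNorm over the feature axis stands in for
# util.visual_layer_norm (an assumption about its behaviour). Picking a
# kernel_shape that is a multiple of the stride avoids checkerboard patterns.
def _demo_downscale_step():
  def net(x):
    x = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(x)
    x = jax.nn.relu(x)
    return hk.Conv2D(output_channels=5, kernel_shape=2, stride=2)(x)
  init, apply = hk.transform(net)
  x = jnp.zeros((4, 4, 3), jnp.float32)
  params = init(jax.random.PRNGKey(0), x)
  return apply(params, None, x).shape  # -> (2, 2, 5)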
class Upscale(modular.BatchedComponent):
"""Upscale the visual stream."""
def __init__(self,
input_name: types.StreamType,
output_name: types.StreamType,
input_spatial_size: int,
input_features_size: int,
output_features_size: int,
upscale_factor: int,
kernel_size: int,
use_layer_norm: bool = True,
name: Optional[str] = None):
"""Initializes Upscale module..
Args:
input_name: The name of the input to use, of shape
[input_spatial_size, input_spatial_size, input_features_size] and dtype
float32.
output_name: The name to give to the output, of shape
[input_spatial_size * upscale_factor,
input_spatial_size * upscale_factor,
output_features_size] and dtype float32.
input_spatial_size: The spatial size of the input.
input_features_size: The number of feature planes of the input.
output_features_size: The number of feature planes of the output.
upscale_factor: The upscale factor to apply to the input.
kernel_size: The size of the convolution kernel to use. Note that with
upsampling, a `kernel_size` that is not a multiple of the `upscale_factor`
will result in a checkerboard pattern, so it is not recommended.
use_layer_norm: Whether to use layer normalization.
name: The name of this component.
"""
super().__init__(name=name)
self._input_name = input_name
self._output_name = output_name
self._input_spatial_size = input_spatial_size
self._input_features_size = input_features_size
self._output_features_size = output_features_size
self._upscale_factor = upscale_factor
self._kernel_size = kernel_size
self._use_layer_norm = use_layer_norm
@property
def input_spec(self) -> types.SpecDict:
return types.SpecDict({
self._input_name: specs.Array((self._input_spatial_size,
self._input_spatial_size,
self._input_features_size),
jnp.float32)})
@property
def output_spec(self) -> types.SpecDict:
output_size = self._input_spatial_size * self._upscale_factor
return types.SpecDict({
self._output_name: specs.Array(
(output_size, output_size, self._output_features_size), jnp.float32)
})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
x = inputs[self._input_name]
if self._use_layer_norm:
x = util.visual_layer_norm(x)
x = jax.nn.relu(x)
x = hk.Conv2DTranspose(
output_channels=self._output_features_size,
kernel_shape=self._kernel_size,
stride=self._upscale_factor)(x)
outputs = types.StreamDict({self._output_name: x})
return outputs, {}
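# Hedged sketch (illustration only): upscaling uses a transposed convolution,
# so the spatial size is multiplied by the stride. Sizes are hypothetical.
def _demo_upscale_step():
  def net(x):
    x = jax.nn.relu(x)
    return hk.Conv2DTranspose(output_channels=2, kernel_shape=2, stride=2)(x)
  init, apply = hk.transform(net)
  x = jnp.zeros((4, 4, 3), jnp.float32)
  params = init(jax.random.PRNGKey(0), x)
  return apply(params, None, x).shape  # -> (8, 8, 2)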
class Resnet(modular.BatchedComponent):
"""Resnet processing of the visual stream."""
def __init__(self,
input_name: types.StreamType,
output_name: types.StreamType,
input_spatial_size: int,
input_features_size: int,
num_resblocks: int,
kernel_size: int = 3,
use_layer_norm: bool = True,
num_hidden_feature_planes: Optional[int] = None,
name: Optional[str] = None):
"""Initializes Resnet module..
Args:
input_name: The name of the input to use, of shape
[input_spatial_size, input_spatial_size, input_features_size] and dtype
float32.
output_name: The name to give to the output, of shape
[input_spatial_size, input_spatial_size, input_features_size] and dtype
float32.
input_spatial_size: The spatial size of the input.
input_features_size: The number of feature planes of the input.
num_resblocks: The number of residual blocks.
kernel_size: The size of the convolution kernel to use.
use_layer_norm: Whether to use layer normalization.
num_hidden_feature_planes: Optional number of feature planes in the
hidden layers of the residual blocks. If None, the number of feature
planes of the input is used.
name: The name of this component.
"""
super().__init__(name)
self._input_name = input_name
self._output_name = output_name
self._input_spec = specs.Array(
(input_spatial_size, input_spatial_size, input_features_size),
jnp.float32)
self._num_resblocks = num_resblocks
self._kernel_size = kernel_size
self._use_layer_norm = use_layer_norm
self._num_hidden_feature_planes = num_hidden_feature_planes
@property
def input_spec(self) -> types.SpecDict:
return types.SpecDict({self._input_name: self._input_spec})
@property
def output_spec(self) -> types.SpecDict:
return types.SpecDict({self._output_name: self._input_spec})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
x = inputs[self._input_name]
for _ in range(self._num_resblocks):
x = util.VisualResblock(
kernel_size=self._kernel_size,
hidden_size=self._num_hidden_feature_planes,
use_layer_norm=self._use_layer_norm)(x)
outputs = types.StreamDict({self._output_name: x})
return outputs, {}
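# Hedged sketch (illustration only): util.VisualResblock is not reproduced
# here; a generic shape-preserving residual block conveys the idea under
# that assumption.
def _demo_residual_block():
  def net(x):
    h = jax.nn.relu(x)
    h = hk.Conv2D(output_channels=x.shape[-1], kernel_shape=3)(h)
    return x + h  # the skip connection preserves spatial and feature shape
  init, apply = hk.transform(net)
  x = jnp.zeros((4, 4, 3), jnp.float32)
  params = init(jax.random.PRNGKey(0), x)
  return apply(params, None, x).shape  # -> (4, 4, 3)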
class ToVector(modular.BatchedComponent):
"""Strided convolutions (downscales) followed by a linear layer."""
def __init__(self,
input_name: types.StreamType,
output_name: types.StreamType,
input_spatial_size: Union[int, Tuple[int, int]],
input_features_size: int,
vector_stream_size: int,
hidden_feature_sizes: Sequence[int],
downscale_factor: int = 2,
kernel_size: int = 4,
use_layer_norm: bool = True,
name: Optional[str] = None):
"""Initializes ToVector module..
Args:
input_name: The name of the input to use, of shape
[input_spatial_size[0], input_spatial_size[1], input_features_size]
and dtype float32.
output_name: The name to give to the output, of shape [vector_stream_size]
and dtype float32.
input_spatial_size: The spatial size of the input. If the input is square,
a single int can be used, otherwise a pair is required.
input_features_size: The number of feature planes of the input.
vector_stream_size: The size of the output (1d vector representation).
hidden_feature_sizes: The list of number of feature planes in the
convolutional hidden layers, before reshaping into a single vector.
Each convolution is strided, decreasing the spatial resolution.
downscale_factor: The downscale factor of each strided convolution.
kernel_size: The size of the convolution kernel to use. Note that with
downsampling, a `kernel_size` that is not a multiple of the `downscale_factor`
will result in a checkerboard pattern, so it is not recommended.
use_layer_norm: Whether to use layer normalization.
name: The name of this component.
"""
super().__init__(name=name)
self._input_name = input_name
self._output_name = output_name
if isinstance(input_spatial_size, int):
self._input_spatial_size = (input_spatial_size, input_spatial_size)
else:
self._input_spatial_size = tuple(input_spatial_size)
total_downscale_factor = downscale_factor ** len(hidden_feature_sizes)
for i in range(2):
if self._input_spatial_size[i] % total_downscale_factor:
raise ValueError(
f'input_spatial_size[{i}] ({self._input_spatial_size[i]}) must be a '
f'multiple of downscale_factor ({downscale_factor}) to the power '
f'of len(hidden_feature_sizes) ({len(hidden_feature_sizes)}).')
self._input_features_size = input_features_size
self._vector_stream_size = vector_stream_size
self._hidden_feature_sizes = hidden_feature_sizes
self._use_layer_norm = use_layer_norm
self._downscale_factor = downscale_factor
self._kernel_size = kernel_size
@property
def input_spec(self) -> types.SpecDict:
return types.SpecDict({
self._input_name: specs.Array((self._input_spatial_size[0],
self._input_spatial_size[1],
self._input_features_size),
jnp.float32)})
@property
def output_spec(self) -> types.SpecDict:
return types.SpecDict({
self._output_name: specs.Array((self._vector_stream_size,), jnp.float32)
})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
x = inputs[self._input_name]
for num_hidden_features in self._hidden_feature_sizes:
if self._use_layer_norm:
x = util.visual_layer_norm(x)
x = jax.nn.relu(x)
x = hk.Conv2D(
output_channels=num_hidden_features,
kernel_shape=self._kernel_size,
stride=self._downscale_factor)(x)
x = jnp.reshape(x, [-1])
if self._use_layer_norm:
x = util.vector_layer_norm(x)
x = jax.nn.relu(x)
x = hk.Linear(output_size=self._vector_stream_size)(x)
outputs = types.StreamDict({self._output_name: x})
return outputs, {}
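# Hedged sketch (illustration only): ToVector is a stack of strided convs,
# a flatten, and a final linear projection. Sizes are hypothetical.
def _demo_to_vector():
  def net(x):
    for num_hidden in (2, 3):  # hypothetical hidden_feature_sizes
      x = jax.nn.relu(x)
      x = hk.Conv2D(output_channels=num_hidden, kernel_shape=4, stride=2)(x)
    x = jnp.reshape(x, [-1])            # (2, 2, 3) -> (12,)
    return hk.Linear(output_size=7)(x)  # vector_stream_size = 7
  init, apply = hk.transform(net)
  x = jnp.zeros((8, 8, 3), jnp.float32)
  params = init(jax.random.PRNGKey(0), x)
  return apply(params, None, x).shape  # -> (7,)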
class Logits(modular.BatchedComponent):
"""Visual logits: generate a 2d headmap."""
def __init__(self,
input_name: types.StreamType,
input_spatial_size: int,
input_features_size: int,
logits_output_name: types.StreamType,
mask_output_name: types.StreamType,
upscale_factor: int,
kernel_size: int,
use_layer_norm: bool = True,
use_depth_to_space: bool = False,
name: Optional[str] = None):
"""Initializes Logits module..
Args:
input_name: The name of the input to use, of shape
[input_spatial_size, input_spatial_size, input_features_size] and dtype
float32.
input_spatial_size: The spatial size of the input.
input_features_size: The number of feature planes of the input.
logits_output_name: The name to give to the output for the logits,
of shape [(input_spatial_size*upscale_factor) ** 2] and dtype float32.
mask_output_name: The name to give to the output for the mask,
of shape [(input_spatial_size*upscale_factor) ** 2] and dtype bool.
upscale_factor: The upscale factor to apply to the input.
kernel_size: The size of the convolution kernel to use. Note that with
upsampling, a `kernel_size` that is not a multiple of the `upscale_factor`
will result in a checkerboard pattern, so it is not recommended.
If `use_depth_to_space` is set to True, then the kernel size is actually
required to be a multiple of upscale_factor.
use_layer_norm: Whether to use layer normalization.
use_depth_to_space: If False, strided convolutions are used. If True, a
regular convolution (stride=1) is used, and the output is reshaped to
produce an upsampled result. The operations are equivalent, but result
in reshuffled weight vectors, so the trained models are not
interchangeable. Using this option gives a performance boost on some
hardware (e.g. GPUs).
name: The name of this component.
"""
super().__init__(name=name)
self._input_name = input_name
self._input_spatial_size = input_spatial_size
self._input_features_size = input_features_size
self._logits_output_name = logits_output_name
self._mask_output_name = mask_output_name
if upscale_factor < 1:
raise ValueError(
f'upscale_factor must be > 0, but was set to {upscale_factor}.')
self._upscale_factor = upscale_factor
self._kernel_size = kernel_size
if use_depth_to_space and (kernel_size % upscale_factor):
raise ValueError(
f'kernel_size ({kernel_size}) must be a multiple of upscale_factor'
f' ({upscale_factor}) when use_depth_to_space is set to True.')
self._use_layer_norm = use_layer_norm
self._use_depth_to_space = use_depth_to_space
@property
def input_spec(self) -> types.SpecDict:
camera_size = self._input_spatial_size * self._upscale_factor
return types.SpecDict({
self._input_name: specs.Array((self._input_spatial_size,
self._input_spatial_size,
self._input_features_size),
jnp.float32),
('observation', 'camera'): specs.Array(
(camera_size, camera_size), jnp.int32),
('action', 'function'): specs.Array((), jnp.int32)})
@property
def output_spec(self) -> types.SpecDict:
output_size = self._input_spatial_size * self._upscale_factor
return types.SpecDict({
self._logits_output_name: specs.Array(
(output_size * output_size,), jnp.float32),
self._mask_output_name: specs.Array(
(output_size * output_size,), jnp.bool_)})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
# mask: with some function arguments, the agent can only click on the
# screen (i.e. in the camera view).
camera = inputs['observation', 'camera']
function_arg = inputs['action', 'function']
all_camera_only_functions = camera_masks.get_on_camera_only_functions_pt()
all_not_camera_only_functions = jnp.asarray(
np.logical_not(all_camera_only_functions), dtype=jnp.bool_)
is_not_camera_only = all_not_camera_only_functions[function_arg]
camera_mask = jnp.logical_or(is_not_camera_only[jnp.newaxis], camera)
camera_mask = jnp.reshape(camera_mask, [-1])
x = inputs[self._input_name]
if self._use_layer_norm:
x = util.visual_layer_norm(x)
x = jax.nn.relu(x)
if self._upscale_factor > 1:
if self._use_depth_to_space:
output_channels = self._upscale_factor * self._upscale_factor
kernel_shape = self._kernel_size // self._upscale_factor
x = hk.Conv2D(
output_channels=output_channels, kernel_shape=kernel_shape)(x)
x = pix.depth_to_space(x, self._upscale_factor)[:, :, 0]
else:
x = hk.Conv2DTranspose( # upscale
output_channels=1,
kernel_shape=self._kernel_size,
stride=self._upscale_factor)(x)[:, :, 0]
else:
x = hk.Conv2D(
output_channels=1,
kernel_shape=self._kernel_size)(x)[:, :, 0]
logits = jnp.reshape(x, [-1])
logits = sample.mask_logits(logits, camera_mask)
outputs = types.StreamDict({self._logits_output_name: logits,
self._mask_output_name: camera_mask})
return outputs, {}
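# Hedged sketch (illustration only): with use_depth_to_space, a stride-1 conv
# that emits upscale_factor**2 channels followed by pix.depth_to_space
# replaces the strided transposed convolution. Sizes are hypothetical.
def _demo_depth_to_space_upscale():
  upscale = 2
  def net(x):
    x = hk.Conv2D(output_channels=upscale * upscale, kernel_shape=1)(x)
    return pix.depth_to_space(x, upscale)[:, :, 0]
  init, apply = hk.transform(net)
  x = jnp.zeros((4, 4, 3), jnp.float32)
  params = init(jax.random.PRNGKey(0), x)
  return apply(params, None, x).shape  # -> (8, 8)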
# --- End of file: alphastar/architectures/components/visual.py (repo: alphastar-main) ---
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for visual."""
from typing import Callable, Optional, Sequence, Tuple, Union
from absl.testing import absltest
from absl.testing import parameterized
from alphastar.architectures import util
from alphastar.architectures.components import test_utils
from alphastar.architectures.components import visual
import chex
import jax.numpy as jnp
class VisualTest(test_utils.ComponentTest):
"""Basic tests for the visual components."""
@parameterized.product(
is_training=[True, False],
input_spatial_size=[1, 3, [4, 2]],
input_feature_size=[None, 3],
downscale_factor=[1, 2, 3],
output_features_size=[1, 5],
kernel_size=[1, 2, 3],
fun=[jnp.sqrt, jnp.log1p],
input_dtype=[jnp.uint8, jnp.float32])
def test_FeatureEncoder(self,
is_training: bool,
input_spatial_size: Union[int, Tuple[int, int]],
input_feature_size: Optional[int],
downscale_factor: int,
output_features_size: int,
kernel_size: int,
fun: Callable[[chex.Array], chex.Array],
input_dtype: jnp.dtype):
kwargs = dict(
input_name='input_stream',
output_name='output_stream',
input_spatial_size=input_spatial_size,
input_feature_size=input_feature_size,
downscale_factor=downscale_factor,
output_features_size=output_features_size,
kernel_size=kernel_size,
fun=fun,
input_dtype=input_dtype)
if isinstance(input_spatial_size, int):
input_spatial_size = (input_spatial_size, input_spatial_size)
if ((input_spatial_size[0] % downscale_factor != 0) or
(input_spatial_size[1] % downscale_factor != 0)):
with self.assertRaises(ValueError):
_ = visual.FeatureEncoder(**kwargs)
else:
component = visual.FeatureEncoder(**kwargs)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.product(
is_training=[True, False],
input_spatial_size=[1, 3],
downscale_factor=[1, 2, 3],
num_classes=[1, 4],
output_features_size=[1, 5],
kernel_size=[1, 2, 3])
def test_Embedding(self,
is_training: bool,
input_spatial_size: int,
downscale_factor: int,
num_classes: int,
output_features_size: int,
kernel_size: int):
kwargs = dict(
input_name='input_stream',
output_name='output_stream',
input_spatial_size=input_spatial_size,
downscale_factor=downscale_factor,
num_classes=num_classes,
output_features_size=output_features_size,
kernel_size=kernel_size)
if input_spatial_size % downscale_factor != 0:
with self.assertRaises(ValueError):
_ = visual.Embedding(**kwargs)
else:
component = visual.Embedding(**kwargs)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.product(
is_training=[True, False],
input_spatial_size=[1, 3],
downscale_factor=[1, 2, 3],
output_features_size=[1, 5])
def test_CameraEncoder(self,
is_training: bool,
input_spatial_size: int,
downscale_factor: int,
output_features_size: int):
kwargs = dict(
output_name='output_stream',
input_spatial_size=input_spatial_size,
downscale_factor=downscale_factor,
output_features_size=output_features_size)
if input_spatial_size % downscale_factor != 0:
with self.assertRaises(ValueError):
_ = visual.CameraEncoder(**kwargs)
else:
component = visual.CameraEncoder(**kwargs)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.product(
is_training=[True, False],
input_spatial_size=[1, 4],
input_features_size=[1, 3],
output_features_size=[1, 5],
downscale_factor=[1, 2, 3],
kernel_size=[1, 2, 3],
use_layer_norm=[True, False])
def test_Downscale(self,
is_training: bool,
input_spatial_size: int,
input_features_size: int,
output_features_size: int,
downscale_factor: int,
kernel_size: int,
use_layer_norm: bool):
kwargs = dict(input_name='input_stream',
output_name='output_stream',
input_spatial_size=input_spatial_size,
input_features_size=input_features_size,
output_features_size=output_features_size,
downscale_factor=downscale_factor,
kernel_size=kernel_size,
use_layer_norm=use_layer_norm)
if input_spatial_size % downscale_factor != 0:
with self.assertRaises(ValueError):
_ = visual.Downscale(**kwargs)
else:
component = visual.Downscale(**kwargs)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.product(
is_training=[True, False],
input_spatial_size=[1, 4],
input_features_size=[1, 3],
output_features_size=[1, 5],
downscale_factor=[1, 2, 3],
kernel_size=[1, 2, 3],
use_layer_norm=[True, False])
def test_Upscale(self,
is_training: bool,
input_spatial_size: int,
input_features_size: int,
output_features_size: int,
downscale_factor: int,
kernel_size: int,
use_layer_norm: bool):
component = visual.Upscale(input_name='input_stream',
output_name='output_stream',
input_spatial_size=input_spatial_size,
input_features_size=input_features_size,
output_features_size=output_features_size,
upscale_factor=downscale_factor,
kernel_size=kernel_size,
use_layer_norm=use_layer_norm)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.product(
is_training=[True, False],
input_spatial_size=[1, 4],
input_features_size=[1, 3],
num_resblocks=[0, 2],
kernel_size=[1, 2, 3],
use_layer_norm=[True, False],
num_hidden_feature_planes=[None, 3])
def test_Resnet(self,
is_training: bool,
input_spatial_size: int,
input_features_size: int,
num_resblocks: int,
kernel_size: int,
use_layer_norm: bool,
num_hidden_feature_planes: Optional[int] = None):
component = visual.Resnet(
input_name='input_stream',
output_name='output_stream',
input_spatial_size=input_spatial_size,
input_features_size=input_features_size,
num_resblocks=num_resblocks,
kernel_size=kernel_size,
use_layer_norm=use_layer_norm,
num_hidden_feature_planes=num_hidden_feature_planes)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.product(
is_training=[True, False],
input_spatial_size=[1, 4, [3, 4]],
input_features_size=[1, 3],
output_size=[3],
hidden_feature_sizes=[[], [2, 3]],
downscale_factor=[1, 2, 3],
kernel_size=[1, 2, 3],
use_layer_norm=[True, False])
def test_ToVector(self,
is_training: bool,
input_spatial_size: int,
input_features_size: int,
output_size: int,
hidden_feature_sizes: Sequence[int],
downscale_factor: int,
kernel_size: int,
use_layer_norm: bool):
kwargs = dict(input_name='input_stream',
output_name='output_stream',
input_spatial_size=input_spatial_size,
input_features_size=input_features_size,
vector_stream_size=output_size,
hidden_feature_sizes=hidden_feature_sizes,
downscale_factor=downscale_factor,
kernel_size=kernel_size,
use_layer_norm=use_layer_norm)
if isinstance(input_spatial_size, int):
input_spatial_size = (input_spatial_size, input_spatial_size)
total_downscale_factor = downscale_factor ** len(hidden_feature_sizes)
if ((input_spatial_size[0] % total_downscale_factor != 0) or
(input_spatial_size[1] % total_downscale_factor != 0)):
with self.assertRaises(ValueError):
_ = visual.ToVector(**kwargs)
else:
component = visual.ToVector(**kwargs)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.product(
is_training=[True, False],
input_spatial_size=[1, 4],
input_features_size=[1, 3],
upscale_factor=[1, 2, 3],
kernel_size=[1, 2, 3],
use_layer_norm=[True, False],
use_depth_to_space=[True, False])
def test_Logits(self,
is_training: bool,
input_spatial_size: int,
input_features_size: int,
upscale_factor: int,
kernel_size: int,
use_layer_norm: bool,
use_depth_to_space: bool):
kwargs = dict(input_name='input_stream',
input_spatial_size=input_spatial_size,
input_features_size=input_features_size,
logits_output_name=('logits', util.Argument.WORLD),
mask_output_name=('masks', util.Argument.WORLD),
upscale_factor=upscale_factor,
kernel_size=kernel_size,
use_layer_norm=use_layer_norm,
use_depth_to_space=use_depth_to_space)
if use_depth_to_space and kernel_size % upscale_factor != 0:
with self.assertRaises(ValueError):
_ = visual.Logits(**kwargs)
else:
component = visual.Logits(**kwargs)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
if __name__ == '__main__':
absltest.main()
# --- End of file: alphastar/architectures/components/visual_test.py (repo: alphastar-main) ---
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for units."""
from typing import Optional, Sequence
from absl.testing import absltest
from absl.testing import parameterized
from alphastar import types
from alphastar.architectures import modular
from alphastar.architectures.components import common
from alphastar.architectures.components import merge
from alphastar.architectures.components import test_utils
from alphastar.architectures.components import units
from alphastar.architectures.components import util
from alphastar.architectures.components.static_data import unit_encoder_data
from alphastar.commons import sample
from dm_env import specs
import jax.numpy as jnp
import numpy as np
_UNIT_TAGS = 'unit_tags'
class UnitsTest(test_utils.ComponentTest):
"""Basic tests for the units components."""
def test_encode_one_hot(self):
raw_unit = np.zeros(47, np.int32)
raw_unit[1] = 1 # alliance
self.assertEqual(units._encode_one_hot(jnp.asarray(raw_unit), 1), (1, 5))
def test_encode_capped_one_hot(self):
raw_unit = np.zeros(47, np.int32)
raw_unit[1] = 1 # alliance
self.assertEqual(
units._encode_capped_one_hot(jnp.asarray(raw_unit), 1), (1, 5))
raw_unit[1] = 10_000 # alliance
self.assertEqual(
units._encode_capped_one_hot(jnp.asarray(raw_unit), 1), (4, 5))
def test_encode_sqrt_one_hot(self):
raw_unit = np.zeros(47, np.int32)
raw_unit[2] = 142 # health
raw_unit[3] = 0 # shield
self.assertEqual(
units._encode_sqrt_one_hot(jnp.asarray(raw_unit), 2), (11, 39))
self.assertEqual(
units._encode_sqrt_one_hot(jnp.asarray(raw_unit), 3), (0, 32))
raw_unit[2] = 10_000 # health
self.assertEqual(
units._encode_sqrt_one_hot(jnp.asarray(raw_unit), 2), (38, 39))
def test_encode_divided_one_hot(self):
raw_unit = np.zeros(47, np.int32)
raw_unit[2] = 142 # health
self.assertEqual(
units._encode_divided_one_hot(jnp.asarray(raw_unit), 2, 10), (14, 151))
self.assertEqual(
units._encode_divided_one_hot(jnp.asarray(raw_unit), 2, 9), (15, 167))
self.assertEqual(
units._encode_divided_one_hot(jnp.asarray(raw_unit), 2, 2), (71, 751))
raw_unit[2] = 10_000 # health
self.assertEqual(
units._encode_divided_one_hot(jnp.asarray(raw_unit), 2, 10), (150, 151))
def test_encode_mined_resource_one_hot(self):
raw_unit = np.zeros(47, np.int32)
raw_unit[0] = 149 # unit_type (VespeneGeyser)
raw_unit[21] = 1042 # vespene_contents
self.assertEqual(
units._encode_mined_resource_one_hot(jnp.asarray(raw_unit), 21, 256),
(34, 51))
raw_unit[0] = 18 # unit_type (Barracks)
self.assertEqual(
units._encode_mined_resource_one_hot(jnp.asarray(raw_unit), 21, 256),
(0, 51))
raw_unit[0] = 149 # unit_type (VespeneGeyser)
raw_unit[21] = 10_000 # vespene_contents
self.assertEqual(
units._encode_mined_resource_one_hot(jnp.asarray(raw_unit), 21, 256),
(0, 51))
def test_encode_addon_lookup(self):
addon_lookup = unit_encoder_data.get_addon_lookup(256)
self.assertEqual(
units._encode_lookup(jnp.asarray(34), addon_lookup), (1, 7))
self.assertEqual(
units._encode_lookup(jnp.asarray(0), addon_lookup), (0, 7))
self.assertEqual(
units._encode_lookup(jnp.asarray(10_000), addon_lookup), (0, 7))
def test_encode_order_id_lookup(self):
action_spec = types.SpecDict({
'function': specs.BoundedArray((), jnp.int32, 0, 555)})
function_list = util.get_function_list(action_spec)
function_names = [f.name for f in function_list]
order_id_lookup = unit_encoder_data.get_order_id_lookup(function_names)
id_1 = [x.id for x in function_list if x.name == 'Patrol_Patrol_unit'][0]
id_2 = [x.id for x in function_list if x.name == 'Patrol_unit'][0]
id_3 = [x.id for x in function_list if x.name == 'Load_unit'][0]
lookup_1, num1 = units._encode_lookup(jnp.asarray(id_1), order_id_lookup)
lookup_2, _ = units._encode_lookup(jnp.asarray(id_2), order_id_lookup)
lookup_3, _ = units._encode_lookup(jnp.asarray(id_3), order_id_lookup)
self.assertEqual(lookup_1, lookup_2)
self.assertEqual(num1, max(order_id_lookup) + 1)
self.assertNotEqual(lookup_1, lookup_3)
id_4 = [x.id for x in function_list if x.name == 'no_op'][0]
id_5 = [x.id for x in function_list
if x.name == 'Research_ZergMeleeWeaponsLevel2_quick'][0]
build_queue_order_id_lookup = (
unit_encoder_data.get_build_queue_order_id_lookup(function_names))
lookup_1b, num_1b = units._encode_lookup(
jnp.asarray(id_1), build_queue_order_id_lookup)
lookup_3b, _ = units._encode_lookup(
jnp.asarray(id_3), build_queue_order_id_lookup)
lookup_4b, _ = units._encode_lookup(
jnp.asarray(id_4), build_queue_order_id_lookup)
lookup_5b, _ = units._encode_lookup(
jnp.asarray(id_5), build_queue_order_id_lookup)
self.assertEqual(lookup_1b, lookup_3b)
self.assertEqual(num_1b, max(build_queue_order_id_lookup) + 1)
self.assertEqual(lookup_1b, lookup_4b)
self.assertNotEqual(lookup_4b, lookup_5b)
def test_features_embedding(self):
raw_unit = np.zeros(47, np.int32)
raw_unit[6] = 42 # build_progress
raw_unit[7] = 6 # health_ratio
raw_unit[8] = 255 # shield_ratio
raw_unit[9] = 0 # energy_ratio
raw_unit[13] = 3 # y (not used)
raw_unit[36] = 4 # order_progress_1
raw_unit[37] = 78 # order_progress_2
raw_unit[38] = 4 # order_id_2 (not used)
rescales = {
6: 1. / 100,
7: 1. / 255,
8: 1. / 255,
9: 1. / 255,
36: 1. / 100,
37: 1. / 100}
embeddings = units._features_embedding(jnp.asarray(raw_unit), rescales)
np.testing.assert_allclose(embeddings, jnp.asarray([
0.42, 6./255, 1., 0., 0.04, 0.78]))
def test_binary_scale_embedding(self):
np.testing.assert_array_equal(
units._binary_scale_embedding(jnp.asarray(42), 256),
jnp.asarray([0, 1, 0, 1, 0, 1, 0, 0], jnp.float32))
np.testing.assert_array_equal(
units._binary_scale_embedding(jnp.asarray(119), 128),
jnp.asarray([1, 1, 1, 0, 1, 1, 1], jnp.float32))
def test_remap_and_one_hot_embedding(self):
action_spec = types.SpecDict({
'function': specs.BoundedArray((), jnp.int32, 0, 555)})
function_list = util.get_function_list(action_spec)
function_names = [f.name for f in function_list]
id_1 = [x.id for x in function_list if x.name == 'Halt_Building_quick'][0]
id_2 = [x.id for x in function_list if x.name == 'Halt_quick'][0]
order_id_lookup = unit_encoder_data.get_order_id_lookup(function_names)
one_hot_1 = units._remap_and_one_hot_embedding(
jnp.asarray(id_1), order_id_lookup)
one_hot_2 = units._remap_and_one_hot_embedding(
jnp.asarray(id_2), order_id_lookup)
np.testing.assert_array_equal(one_hot_1, one_hot_2)
@parameterized.product(
is_training=[True, False],
output_size=[1, 4])
def test_UnitsEncoder(self,
is_training: bool,
output_size: int):
input_spec, action_spec = test_utils.get_test_specs(is_training)
component = units.UnitsEncoder(
max_num_observed_units=input_spec['observation', 'raw_units'].shape[0],
output_name='output_stream',
num_raw_unit_features=input_spec['observation', 'raw_units'].shape[1],
units_stream_size=output_size,
action_spec=action_spec,
num_unit_types=256,
num_buff_types=5)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.product(
is_training=[True, False],
units_stream_size=[3],
transformer_num_layers=[0, 2],
transformer_num_heads=[1, 2],
transformer_key_size=[4],
transformer_value_size=[5],
resblocks_num_before=[0, 1],
resblocks_num_after=[0, 1],
resblocks_hidden_size=[None, 3],
use_layer_norm=[True, False])
def test_Transformer(self,
is_training: bool,
units_stream_size: int,
transformer_num_layers: int,
transformer_num_heads: int,
transformer_key_size: int,
transformer_value_size: int,
resblocks_num_before: int,
resblocks_num_after: int,
resblocks_hidden_size: Optional[int],
use_layer_norm: bool):
input_spec, _ = test_utils.get_test_specs(is_training)
component = units.Transformer(
max_num_observed_units=input_spec['observation', 'raw_units'].shape[0],
units_stream_size=units_stream_size,
transformer_num_layers=transformer_num_layers,
transformer_num_heads=transformer_num_heads,
transformer_key_size=transformer_key_size,
transformer_value_size=transformer_value_size,
resblocks_num_before=resblocks_num_before,
resblocks_num_after=resblocks_num_after,
resblocks_hidden_size=resblocks_hidden_size,
use_layer_norm=use_layer_norm,
input_name='input_stream',
output_name='output_stream')
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.product(
is_training=[True, False],
input_size=[1, 4],
layer_sizes=[[1], [3, 2]],
use_layer_norm=[True, False])
def test_MLP(self,
is_training: bool,
input_size: int,
layer_sizes: Sequence[int],
use_layer_norm: bool):
input_spec, _ = test_utils.get_test_specs(is_training)
component = units.MLP(
max_num_observed_units=input_spec['observation', 'raw_units'].shape[0],
units_stream_size=input_size,
layer_sizes=layer_sizes,
use_layer_norm=use_layer_norm,
input_name='input_stream',
output_name='output_stream')
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.product(
is_training=[True, False],
units_stream_size=[1, 3],
units_hidden_sizes=[[], [1], [2, 4]],
vector_stream_size=[1, 4],
use_layer_norm=[True, False])
def test_ToVector(self,
is_training: bool,
units_stream_size: int,
units_hidden_sizes: Sequence[int],
vector_stream_size: int,
use_layer_norm: bool = True):
input_spec, _ = test_utils.get_test_specs(is_training)
component = units.ToVector(
input_name='input_stream',
output_name='output_stream',
max_num_observed_units=input_spec['observation', 'raw_units'].shape[0],
units_stream_size=units_stream_size,
units_hidden_sizes=units_hidden_sizes,
vector_stream_size=vector_stream_size,
use_layer_norm=use_layer_norm)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.product(
is_training=[True, False],
units_stream_size=[2],
units_world_dim=[1, 4],
units_hidden_sizes=[[], [1], [2, 4]],
output_spatial_size=[1, 4],
output_features_size=[1, 3],
kernel_size=[1, 2, 3],
use_layer_norm=[True, False])
def test_ToVisualScatter(self,
is_training: bool,
units_stream_size: int,
units_world_dim: int,
units_hidden_sizes: Sequence[int],
output_spatial_size: int,
output_features_size: int,
kernel_size: int,
use_layer_norm: bool):
input_spec, _ = test_utils.get_test_specs(is_training)
kwargs = dict(
input_name='input_stream',
output_name='output_stream',
max_num_observed_units=input_spec['observation', 'raw_units'].shape[0],
num_raw_unit_features=input_spec['observation', 'raw_units'].shape[1],
units_stream_size=units_stream_size,
units_world_dim=units_world_dim,
units_hidden_sizes=units_hidden_sizes,
output_spatial_size=output_spatial_size,
output_features_size=output_features_size,
kernel_size=kernel_size,
use_layer_norm=use_layer_norm)
if units_world_dim % output_spatial_size != 0:
with self.assertRaises(ValueError):
units.ToVisualScatter(**kwargs)
else:
component = units.ToVisualScatter(**kwargs)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.product(
is_training=[True, False],
query_input_size=[4],
keys_input_size=[3],
unit_tags_masking=list(units.UnitTagsMasking),
num_layers_query=[0, 1],
num_layers_keys=[0, 1],
key_size=[3, 4, 5],
use_layer_norm=[True, False])
def test_PointerLogits(self,
is_training: bool,
unit_tags_masking: units.UnitTagsMasking,
query_input_size: int,
keys_input_size: int,
num_layers_query: int,
num_layers_keys: int,
key_size: int,
use_layer_norm: bool):
input_spec, _ = test_utils.get_test_specs(is_training)
kwargs = dict(
max_num_observed_units=input_spec['observation', 'raw_units'].shape[0],
num_raw_unit_features=input_spec['observation', 'raw_units'].shape[1],
logits_output_name=('logits', 'target_unit_tag'),
mask_output_name=('masks', 'target_unit_tag'),
query_input_size=query_input_size,
keys_input_size=keys_input_size,
unit_tags_masking=unit_tags_masking,
query_input_name='vector_stream',
keys_input_name='units_stream',
num_layers_query=num_layers_query,
num_layers_keys=num_layers_keys,
key_size=key_size,
use_layer_norm=use_layer_norm)
expect_error = False
if num_layers_query == 0 and key_size != query_input_size:
expect_error = True
if num_layers_keys == 0 and key_size != keys_input_size:
expect_error = True
if expect_error:
with self.assertRaises(ValueError):
units.PointerLogits(**kwargs)
else:
component = units.PointerLogits(**kwargs)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.product(
is_training=[True, False],
vector_input_size=[1, 3])
def test_FinalizeUnitTagsLogits(self,
is_training: bool,
vector_input_size: int):
input_spec, _ = test_utils.get_test_specs(is_training)
component = units.FinalizeUnitTagsLogits(
input_logits_name='pre_logits_stream',
input_mask_name='pre_mask_stream',
output_logits_name='logits_stream',
output_mask_name='mask_stream',
vector_input_name='vector_input',
max_num_observed_units=input_spec['observation', 'raw_units'].shape[0],
vector_input_size=vector_input_size)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.parameters(True, False)
def test_UnitTagsHead(self, is_training: bool):
input_spec, action_spec = test_utils.get_test_specs(is_training)
inner_component = modular.SequentialComponent()
num_units = input_spec['observation', 'raw_units'].shape[0]
inner_component.append(units.PointerLogits(
max_num_observed_units=num_units,
num_raw_unit_features=input_spec['observation', 'raw_units'].shape[1],
logits_output_name=('logits', _UNIT_TAGS),
mask_output_name=('masks', _UNIT_TAGS),
query_input_size=num_units,
keys_input_size=6,
unit_tags_masking=units.UnitTagsMasking.SELECTABLE,
query_input_name='query_stream',
keys_input_name='keys_stream',
num_layers_query=1,
num_layers_keys=0,
key_size=6,
use_layer_norm=True))
inner_component.append(merge.SumMerge(
input_names=[('logits', _UNIT_TAGS),
'per_step_input_stream'],
output_name='query_stream',
stream_shape=(num_units,)))
inner_component.append(common.Sample(
argument_name=_UNIT_TAGS,
num_logits=input_spec['observation', 'raw_units'].shape[0],
sample_fn=sample.sample))
component = units.UnitTagsHead(
inner_component=inner_component,
constant_inputs=['keys_stream', ('observation', 'raw_units')],
carries=['query_stream'],
per_step_inputs=['per_step_input_stream'],
per_step_outputs=[('masks', _UNIT_TAGS),
('action', _UNIT_TAGS)],
max_num_selected_units=action_spec['unit_tags'].shape[0],
max_num_observed_units=num_units,
action_output=('action', _UNIT_TAGS))
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
if __name__ == '__main__':
absltest.main()
| alphastar-main | alphastar/architectures/components/units_test.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for vector."""
from typing import Callable, Optional, Sequence, Tuple
from absl.testing import absltest
from absl.testing import parameterized
from alphastar import types
from alphastar.architectures.components import test_utils
from alphastar.architectures.components import vector
import chex
import jax.numpy as jnp
class VectorTest(test_utils.ComponentTest):
"""Basic tests for the vector components."""
@parameterized.product(
is_training=[True, False],
num_features=[1, 3],
output_size=[1, 4],
fun=[jnp.sqrt, jnp.log1p])
def test_VectorEncoder(self,
is_training: bool,
num_features: int,
output_size: int,
fun: Callable[[chex.Array], chex.Array]):
component = vector.VectorEncoder(input_name='input_stream',
output_name='output_stream',
num_features=num_features,
output_size=output_size,
fun=fun)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.product(
is_training=[True, False],
num_classes=[1, 3],
output_size=[1, 4],
mask_name=[None, 'mask_stream'],
fun=[None, lambda x: x * 0.1])
def test_Embedding(self,
is_training: bool,
num_classes: int,
output_size: int,
                     mask_name: Optional[types.StreamType] = None,
                     fun: Optional[Callable[[chex.Array], chex.Array]] = None):
component = vector.Embedding(input_name='input_stream',
output_name='output_stream',
num_classes=num_classes,
output_size=output_size,
mask_name=mask_name,
fun=fun)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.product(
is_training=[True, False],
input_size=[1, 4],
num_classes=[1, 3])
def test_FixedLengthToMask(self,
is_training: bool,
input_size: int,
num_classes: int):
component = vector.FixedLengthToMask(input_name='input_stream',
output_name='output_stream',
input_size=input_size,
num_classes=num_classes)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.product(
is_training=[True, False],
input_size=[1, 4],
output_size=[1, 3],
mask_name=[None, 'mask_stream'])
def test_BinaryVectorEmbedding(self,
is_training: bool,
input_size: int,
output_size: int,
mask_name: Optional[types.StreamType] = None):
component = vector.BinaryVectorEmbedding(input_name='input_stream',
output_name='output_stream',
input_size=input_size,
output_size=output_size,
mask_name=mask_name)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.product(
is_training=[True, False],
encoding_size=[2, 4],
output_size=[1, 3])
def test_ClockFeatureEncoder(self,
is_training: bool,
encoding_size: int,
output_size: int):
component = vector.ClockFeatureEncoder(input_name='input_stream',
output_name='output_stream',
encoding_size=encoding_size,
output_size=output_size)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.product(
is_training=[True, False],
input_size=[1, 4],
num_resblocks=[0, 1, 2],
use_layer_norm=[True, False])
def test_Resnet(self,
is_training: bool,
input_size: int,
num_resblocks: int,
use_layer_norm: bool):
component = vector.Resnet(input_name='input_stream',
output_name='output_stream',
input_size=input_size,
num_resblocks=num_resblocks,
use_layer_norm=use_layer_norm)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.product(
is_training=[True, False],
input_size=[1, 4],
output_features_size=[1, 3],
features_size_and_upscale=[[3, [3], 1], [8, [5, 3], 2], [3, [3], 3]],
use_layer_norm=[True, False],
kernel_size=[1, 2, 3])
def test_ToVisual(self,
is_training: bool,
input_size: int,
output_features_size: int,
features_size_and_upscale: Tuple[int, Sequence[int], int],
use_layer_norm: bool,
kernel_size: int):
output_spatial_size, hidden_feature_sizes, upscale_factor = (
features_size_and_upscale)
component = vector.ToVisual(input_name='input_stream',
output_name='output_stream',
input_size=input_size,
output_spatial_size=output_spatial_size,
output_features_size=output_features_size,
hidden_feature_sizes=hidden_feature_sizes,
upscale_factor=upscale_factor,
use_layer_norm=use_layer_norm,
kernel_size=kernel_size)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.product(
is_training=[True, False],
num_logits=[1, 4],
input_size=[1, 3],
num_linear_layers=[1, 2],
use_layer_norm=[True, False])
def test_Logits(self,
is_training: bool,
num_logits: int,
input_size: int,
                  num_linear_layers: int,
                  use_layer_norm: bool):
component = vector.Logits(num_logits=num_logits,
input_size=input_size,
logits_output_name=('logits', 'function'),
mask_output_name=('masks', 'function'),
input_name='input_stream',
num_linear_layers=num_linear_layers,
use_layer_norm=use_layer_norm)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
if __name__ == '__main__':
absltest.main()
| alphastar-main | alphastar/architectures/components/vector_test.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""List of function arguments which are forbidden outside the camera view."""
import numpy as np
from pysc2.lib import actions as sc2_actions
def get_on_camera_only_functions_unit() -> np.ndarray:
"""Get numpy arrays for forbidden function aruments (for target_unit_tag).
A function is allowed if this function can target a unit outside the camera
view.
Returns:
A numpy array or size [num_functions] with allowed actions.
"""
  # The comments give the proportion of each action taken inside the camera
  # view. A ratio smaller than 1 means that it is possible to take the
  # action outside the camera view, but a ratio very close to 1 can
  # probably be attributed to extremely rare cases of lag.
forbidden_strings_unit = [
"Effect_Heal_unit", # 1.0
"Load_unit", # 1.0
"UnloadAllAt_unit", # 0.999983450056
"Effect_Repair_unit", # 0.999948423984
"Effect_ParasiticBomb_unit", # 0.999918923301
"Effect_NeuralParasite_unit", # 0.999915218313
"Effect_InterferenceMatrix_unit", # 0.999906153954
"Effect_Transfusion_unit", # 0.999903032766
"Effect_KD8Charge_unit", # 0.999860876478
"Effect_GravitonBeam_unit", # 0.999811122204
"Effect_Feedback_unit", # 0.999803439803
"Effect_AntiArmorMissile_unit", # 0.999800478851
"Rally_Workers_unit", # 0.99979222557
"Harvest_Gather_unit", # 0.999783643444
"Effect_GhostSnipe_unit", # 0.999712542302
"Effect_CalldownMULE_unit", # 0.999684635984
"Effect_YamatoGun_unit", # 0.999666009288
"Effect_Abduct_unit", # 0.99961916632
"Effect_Restore_unit", # 0.999610894942
"Effect_LockOn_unit", # 0.999435436919
"Effect_CausticSpray_unit", # 0.999386466788
"Build_Assimilator_unit", # 0.999380867711
"Effect_ChronoBoostEnergyCost_unit", # 0.999369350264
"Effect_SupplyDrop_unit", # 0.999364486992
"Attack_unit", # 0.999226272993
"Build_Extractor_unit", # 0.999015262677
]
num_functions = len(sc2_actions.RAW_FUNCTIONS)
func_forbidden_unit = np.zeros((num_functions,), dtype=np.bool_)
for func in sc2_actions.RAW_FUNCTIONS:
if func.name in forbidden_strings_unit:
func_forbidden_unit[func.id] = True
return func_forbidden_unit
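# A minimal usage sketch (illustrative only, not part of the library API):
#
#   forbidden_unit = get_on_camera_only_functions_unit()
#   for func in sc2_actions.RAW_FUNCTIONS:
#     if func.name == "Attack_unit":
#       assert forbidden_unit[func.id]  # "Attack_unit" is listed above.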
def get_on_camera_only_functions_pt() -> np.ndarray:
"""Get numpy arrays for forbidden function aruments (for world targets).
A function is allowed if this function can target a point outside the camera
view.
Returns:
A numpy array or size [num_functions] with allowed actions.
"""
# See get_on_camera_only_functions_unit for an explanation about this:
forbidden_strings_pt = [
"Build_SpawningPool_pt", # 0.999961021547
"Build_RoboticsFacility_pt", # 0.999951444054
"Build_DarkShrine_pt", # 0.999946532642
"Build_ShieldBattery_pt", # 0.999942637826
"Build_CyberneticsCore_pt", # 0.999935187586
"Build_FleetBeacon_pt", # 0.999916742986
"Build_Forge_pt", # 0.999885483468
"Build_Bunker_pt", # 0.999880587034
"Build_TwilightCouncil_pt", # 0.999878848251
"TrainWarp_Sentry_pt", # 0.999874913631
"Build_RoboticsBay_pt", # 0.999865885824
"Build_EvolutionChamber_pt", # 0.999857662698
"Build_Gateway_pt", # 0.99983839885
"Build_RoachWarren_pt", # 0.999834649776
"Build_LurkerDen_pt", # 0.999834011121
"Build_Reactor_pt", # 0.999822511059
"Build_PhotonCannon_pt", # 0.999820207885
"Build_TemplarArchive_pt", # 0.999805560957
"Build_Factory_pt", # 0.999803283379
"Build_UltraliskCavern_pt", # 0.999794175157
"Build_Stargate_pt", # 0.999792180443
"Effect_KD8Charge_pt", # 0.999764604339
"Build_BanelingNest_pt", # 0.999760468917
"Effect_ForceField_pt", # 0.999744805733
"Effect_BlindingCloud_pt", # 0.999743754004
"Build_Barracks_pt", # 0.999720537569
"Build_GhostAcademy_pt", # 0.99971667375
"Build_InfestationPit_pt", # 0.999707345625
"Build_Starport_pt", # 0.999704161829
"TrainWarp_Adept_pt", # 0.999697424477
"Build_SpineCrawler_pt", # 0.999697112121
"Build_NydusNetwork_pt", # 0.999696251747
"TrainWarp_HighTemplar_pt", # 0.999682031856
"TrainWarp_DarkTemplar_pt", # 0.999670937893
"Build_HydraliskDen_pt", # 0.999667068958
"Effect_PsiStorm_pt", # 0.999665857415
"Build_Nexus_pt", # 0.999633286184
"Build_Hatchery_pt", # 0.999602838197
"Build_TechLab_pt", # 0.999594232302
"Build_EngineeringBay_pt", # 0.999573728563
"Morph_Root_pt", # 0.999563520376
"Build_NydusWorm_pt", # 0.99955992372
"Build_Armory_pt", # 0.99951750906
"Build_SporeCrawler_pt", # 0.999503242441
"Effect_EMP_pt", # 0.999490282118
"Build_Spire_pt", # 0.999481813652
"Effect_FungalGrowth_pt", # 0.999471675961
"Build_SupplyDepot_pt", # 0.999392261968
"Effect_CorrosiveBile_pt", # 0.999334492724
"Build_FusionCore_pt", # 0.999280989359
"TrainWarp_Zealot_pt", # 0.999219225426
"TrainWarp_Stalker_pt", # 0.999179110584
"Build_Pylon_pt", # 0.999056181889
"Effect_TimeWarp_pt", # 0.999025341131
"Build_CommandCenter_pt", # 0.998844091799
"Build_MissileTurret_pt", # 0.998724833923
"Land_pt", # 0.998663265556
"Effect_InfestedTerrans_pt", # 0.998277927523
"Build_SensorTower_pt", # 0.998016675332
"Build_Refinery_pt", # 0.997900839664
"Build_StasisTrap_pt", # 0.997851289226
"Effect_OracleRevelation_pt", # 0.997267759563
"Effect_AutoTurret_pt", # 0.997062686567
"Effect_PurificationNova_pt", # 0.995978149949
]
num_functions = len(sc2_actions.RAW_FUNCTIONS)
func_forbidden_pt = np.zeros((num_functions,), dtype=np.bool_)
for func in sc2_actions.RAW_FUNCTIONS:
if func.name in forbidden_strings_pt:
      func_forbidden_pt[func.id] = True
return func_forbidden_pt
| alphastar-main | alphastar/architectures/components/static_data/camera_masks.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| alphastar-main | alphastar/architectures/components/static_data/__init__.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lookups for the drastic torso preprocessors."""
import enum
import itertools
from typing import List, Mapping, Optional
import chex
import numpy as np
from pysc2.env.converter.cc.game_data.python import uint8_lookup
from pysc2.lib import actions as sc2_actions
from pysc2.lib import units as sc2_units
from pysc2.lib.features import FeatureUnit
from s2clientprotocol import raw_pb2 as sc_raw
INITIAL_MINERALS_CONTENTS = {
sc2_units.Neutral.RichMineralField: 1500,
sc2_units.Neutral.RichMineralField750: 750,
sc2_units.Neutral.MineralField: 1800,
sc2_units.Neutral.MineralField750: 750,
sc2_units.Neutral.LabMineralField: 1800,
sc2_units.Neutral.LabMineralField750: 750,
sc2_units.Neutral.PurifierRichMineralField: 1500,
sc2_units.Neutral.PurifierRichMineralField750: 750,
sc2_units.Neutral.BattleStationMineralField: 1500,
sc2_units.Neutral.BattleStationMineralField750: 750,
sc2_units.Neutral.PurifierMineralField: 1500,
sc2_units.Neutral.PurifierMineralField750: 750,
}
INITIAL_VESPENE_CONTENTS = {
sc2_units.Neutral.VespeneGeyser: 2250,
sc2_units.Neutral.RichVespeneGeyser: 2500,
sc2_units.Neutral.PurifierVespeneGeyser: 2250,
sc2_units.Neutral.ShakurasVespeneGeyser: 2250,
}
INITIAL_RESOURCE_CONTENTS = dict(itertools.chain(
INITIAL_MINERALS_CONTENTS.items(), INITIAL_VESPENE_CONTENTS.items()))
MAX_VALUES = {
FeatureUnit.alliance: max(sc_raw.Alliance.values()),
FeatureUnit.health: 1500,
FeatureUnit.shield: 1000,
FeatureUnit.energy: 200,
FeatureUnit.cargo_space_taken: 8,
FeatureUnit.display_type: max(sc_raw.DisplayType.values()),
FeatureUnit.cloak: max(sc_raw.CloakState.values()),
FeatureUnit.is_powered: 1,
FeatureUnit.mineral_contents: max(INITIAL_MINERALS_CONTENTS.values()),
FeatureUnit.vespene_contents: max(INITIAL_VESPENE_CONTENTS.values()),
FeatureUnit.cargo_space_max: 8,
FeatureUnit.assigned_harvesters: 24,
FeatureUnit.ideal_harvesters: 16,
FeatureUnit.weapon_cooldown: 32,
FeatureUnit.order_length: 8,
FeatureUnit.hallucination: 1,
FeatureUnit.active: 1,
FeatureUnit.is_on_screen: 1,
FeatureUnit.is_blip: 1,
FeatureUnit.order_progress_0: 100,
FeatureUnit.order_progress_1: 100,
FeatureUnit.is_in_cargo: 1,
FeatureUnit.buff_duration_remain: 250,
FeatureUnit.attack_upgrade_level: 4,
FeatureUnit.armor_upgrade_level: 4,
FeatureUnit.shield_upgrade_level: 4,
# Previous arguments:
-2: 1,
-1: 1,
}
# This table maps redundant and unused order ids to their remapped versions.
# Everything not in this map is left unremapped, so this is future-proof.
REDUNDANT_GENERIC_ORDER_ID = {
"Attack_Attack_pt":
"Attack_pt",
"Attack_AttackBuilding_pt":
"Attack_pt",
"Attack_Attack_unit":
"Attack_unit",
"Attack_AttackBuilding_unit":
"Attack_unit",
"Attack_Battlecruiser_pt":
"Attack_pt",
"Attack_Battlecruiser_unit":
"Attack_unit",
"Attack_Redirect_pt":
"Attack_pt",
"Attack_Redirect_unit":
"Attack_unit",
"Behavior_BuildingAttackOff_quick":
"no_op",
"Behavior_BuildingAttackOn_quick":
"no_op",
"Behavior_CloakOff_Banshee_quick":
"Behavior_CloakOff_quick",
"Behavior_CloakOff_Ghost_quick":
"Behavior_CloakOff_quick",
"Behavior_CloakOn_Banshee_quick":
"Behavior_CloakOn_quick",
"Behavior_CloakOn_Ghost_quick":
"Behavior_CloakOn_quick",
"Behavior_HoldFireOff_Ghost_quick":
"Behavior_HoldFireOff_quick",
"Behavior_HoldFireOff_Lurker_quick":
"Behavior_HoldFireOff_quick",
"Behavior_HoldFireOn_Ghost_quick":
"Behavior_HoldFireOn_quick",
"Behavior_HoldFireOn_Lurker_quick":
"Behavior_HoldFireOn_quick",
"Build_CreepTumor_Queen_pt":
"Build_CreepTumor_pt",
"Build_CreepTumor_Tumor_pt":
"Build_CreepTumor_pt",
"Build_Reactor_Barracks_pt":
"Build_Reactor_pt",
"Build_Reactor_Barracks_quick":
"Build_Reactor_quick",
"Build_Reactor_Factory_pt":
"Build_Reactor_pt",
"Build_Reactor_Factory_quick":
"Build_Reactor_quick",
"Build_Reactor_Starport_pt":
"Build_Reactor_pt",
"Build_Reactor_Starport_quick":
"Build_Reactor_quick",
"Build_TechLab_Barracks_pt":
"Build_TechLab_pt",
"Build_TechLab_Barracks_quick":
"Build_TechLab_quick",
"Build_TechLab_Factory_pt":
"Build_TechLab_pt",
"Build_TechLab_Factory_quick":
"Build_TechLab_quick",
"Build_TechLab_Starport_pt":
"Build_TechLab_pt",
"Build_TechLab_Starport_quick":
"Build_TechLab_quick",
"BurrowDown_Baneling_quick":
"BurrowDown_quick",
"BurrowDown_Drone_quick":
"BurrowDown_quick",
"BurrowDown_Hydralisk_quick":
"BurrowDown_quick",
"BurrowDown_Infestor_quick":
"BurrowDown_quick",
"BurrowDown_InfestorTerran_quick":
"BurrowDown_quick",
"BurrowDown_Lurker_quick":
"BurrowDown_quick",
"BurrowDown_Queen_quick":
"BurrowDown_quick",
"BurrowDown_Ravager_quick":
"BurrowDown_quick",
"BurrowDown_Roach_quick":
"BurrowDown_quick",
"BurrowDown_SwarmHost_quick":
"BurrowDown_quick",
"BurrowDown_Ultralisk_quick":
"BurrowDown_quick",
"BurrowDown_WidowMine_quick":
"BurrowDown_quick",
"BurrowDown_Zergling_quick":
"BurrowDown_quick",
"BurrowUp_Baneling_autocast":
"BurrowUp_autocast",
"BurrowUp_Baneling_quick":
"BurrowUp_quick",
"BurrowUp_Drone_quick":
"BurrowUp_quick",
"BurrowUp_Hydralisk_autocast":
"BurrowUp_autocast",
"BurrowUp_Hydralisk_quick":
"BurrowUp_quick",
"BurrowUp_Infestor_quick":
"BurrowUp_quick",
"BurrowUp_InfestorTerran_autocast":
"BurrowUp_autocast",
"BurrowUp_InfestorTerran_quick":
"BurrowUp_quick",
"BurrowUp_Lurker_quick":
"BurrowUp_quick",
"BurrowUp_Queen_autocast":
"BurrowUp_autocast",
"BurrowUp_Queen_quick":
"BurrowUp_quick",
"BurrowUp_Ravager_autocast":
"BurrowUp_autocast",
"BurrowUp_Ravager_quick":
"BurrowUp_quick",
"BurrowUp_Roach_autocast":
"BurrowUp_autocast",
"BurrowUp_Roach_quick":
"BurrowUp_quick",
"BurrowUp_SwarmHost_quick":
"BurrowUp_quick",
"BurrowUp_Ultralisk_autocast":
"BurrowUp_autocast",
"BurrowUp_Ultralisk_quick":
"BurrowUp_quick",
"BurrowUp_WidowMine_quick":
"BurrowUp_quick",
"BurrowUp_Zergling_autocast":
"BurrowUp_autocast",
"BurrowUp_Zergling_quick":
"BurrowUp_quick",
"Cancel_AdeptPhaseShift_quick":
"Cancel_quick",
"Cancel_AdeptShadePhaseShift_quick":
"Cancel_quick",
"Cancel_BarracksADDON_quick":
"Cancel_quick",
"Cancel_BuildInProgress_quick":
"Cancel_quick",
"Cancel_CreepTumor_quick":
"Cancel_quick",
"Cancel_FactoryADDON_quick":
"Cancel_quick",
"Cancel_GravitonBeam_quick":
"Cancel_quick",
"Cancel_HangarQueue5_quick":
"Cancel_quick",
"Cancel_Last_quick":
"Cancel_quick",
"Cancel_LockOn_quick":
"Cancel_quick",
"Cancel_MorphBroodlord_quick":
"Cancel_quick",
"Cancel_MorphGreaterSpire_quick":
"Cancel_quick",
"Cancel_MorphHive_quick":
"Cancel_quick",
"Cancel_MorphLair_quick":
"Cancel_quick",
"Cancel_MorphLurkerDen_quick":
"Cancel_quick",
"Cancel_MorphLurker_quick":
"Cancel_quick",
"Cancel_MorphMothership_quick":
"Cancel_quick",
"Cancel_MorphOrbital_quick":
"Cancel_quick",
"Cancel_MorphOverlordTransport_quick":
"Cancel_quick",
"Cancel_MorphOverseer_quick":
"Cancel_quick",
"Cancel_MorphPlanetaryFortress_quick":
"Cancel_quick",
"Cancel_MorphRavager_quick":
"Cancel_quick",
"Cancel_MorphThorExplosiveMode_quick":
"Cancel_quick",
"Cancel_NeuralParasite_quick":
"Cancel_quick",
"Cancel_Nuke_quick":
"Cancel_quick",
"Cancel_Queue1_quick":
"Cancel_quick",
"Cancel_Queue5_quick":
"Cancel_quick",
"Cancel_QueueADDON_quick":
"Cancel_quick",
"Cancel_QueueCancelToSelection_quick":
"Cancel_quick",
"Cancel_QueuePassiveCancelToSelection_quick":
"Cancel_quick",
"Cancel_QueuePassive_quick":
"Cancel_quick",
"Cancel_SpineCrawlerRoot_quick":
"Cancel_quick",
"Cancel_SporeCrawlerRoot_quick":
"Cancel_quick",
"Cancel_StarportADDON_quick":
"Cancel_quick",
"Cancel_StasisTrap_quick":
"Cancel_quick",
"Cancel_VoidRayPrismaticAlignment_quick":
"Cancel_quick",
"Effect_Blink_Stalker_pt":
"Effect_Blink_pt",
"Effect_ChronoBoost_unit":
"Effect_ChronoBoostEnergyCost_unit",
"Effect_MassRecall_Mothership_pt":
"Effect_MassRecall_pt",
"Effect_MassRecall_Nexus_pt":
"Effect_MassRecall_pt",
"Effect_MassRecall_StrategicRecall_pt":
"Effect_MassRecall_pt",
"Effect_Repair_Mule_autocast":
"Effect_Repair_autocast",
"Effect_Repair_Mule_unit":
"Effect_Repair_unit",
"Effect_Repair_RepairDrone_autocast":
"Effect_Repair_autocast",
"Effect_Repair_RepairDrone_unit":
"Effect_Repair_unit",
"Effect_Repair_SCV_autocast":
"Effect_Repair_autocast",
"Effect_Repair_SCV_unit":
"Effect_Repair_unit",
"Effect_ShadowStride_pt":
"Effect_Blink_pt",
"Effect_Spray_pt":
"no_op",
"Effect_Spray_Protoss_pt":
"no_op",
"Effect_Spray_Terran_pt":
"no_op",
"Effect_Spray_Zerg_pt":
"no_op",
"Effect_Stim_Marauder_quick":
"Effect_Stim_quick",
"Effect_Stim_Marauder_Redirect_quick":
"Effect_Stim_quick",
"Effect_Stim_Marine_quick":
"Effect_Stim_quick",
"Effect_Stim_Marine_Redirect_quick":
"Effect_Stim_quick",
"Effect_WidowMineAttack_pt":
"Attack_pt",
"Effect_WidowMineAttack_unit":
"Attack_unit",
"Halt_Building_quick":
"Halt_quick",
"Halt_TerranBuild_quick":
"Halt_quick",
"Harvest_Gather_Drone_pt":
"Harvest_Gather_unit",
"Harvest_Gather_Mule_pt":
"Harvest_Gather_unit",
"Harvest_Gather_Probe_pt":
"Harvest_Gather_unit",
"Harvest_Gather_SCV_pt":
"Harvest_Gather_unit",
"Harvest_Gather_Drone_unit":
"Harvest_Gather_unit",
"Harvest_Gather_Mule_unit":
"Harvest_Gather_unit",
"Harvest_Gather_Probe_unit":
"Harvest_Gather_unit",
"Harvest_Gather_SCV_unit":
"Harvest_Gather_unit",
"Harvest_Return_Drone_quick":
"Harvest_Return_quick",
"Harvest_Return_Mule_quick":
"Harvest_Return_quick",
"Harvest_Return_Probe_quick":
"Harvest_Return_quick",
"Harvest_Return_SCV_quick":
"Harvest_Return_quick",
"HoldPosition_Battlecruiser_quick":
"HoldPosition_quick",
"HoldPosition_Hold_quick":
"HoldPosition_quick",
"Land_Barracks_pt":
"Land_pt",
"Land_CommandCenter_pt":
"Land_pt",
"Land_Factory_pt":
"Land_pt",
"Land_OrbitalCommand_pt":
"Land_pt",
"Land_Starport_pt":
"Land_pt",
"Lift_Barracks_quick":
"Lift_quick",
"Lift_CommandCenter_quick":
"Lift_quick",
"Lift_Factory_quick":
"Lift_quick",
"Lift_OrbitalCommand_quick":
"Lift_quick",
"Lift_Starport_quick":
"Lift_quick",
"LoadAll_CommandCenter_quick":
"LoadAll_quick",
"Load_Bunker_unit":
"Load_unit",
"Load_Medivac_unit":
"Load_unit",
"Load_NydusNetwork_unit":
"Load_unit",
"Load_NydusWorm_unit":
"Load_unit",
"Load_Overlord_unit":
"Load_unit",
"Load_WarpPrism_unit":
"Load_unit",
"Morph_LurkerDen_quick":
"Build_LurkerDen_pt",
"Morph_Mothership_quick":
"no_op",
"Morph_SpineCrawlerRoot_pt":
"Morph_Root_pt",
"Morph_SpineCrawlerUproot_quick":
"Morph_Uproot_quick",
"Morph_SporeCrawlerRoot_pt":
"Morph_Root_pt",
"Morph_SporeCrawlerUproot_quick":
"Morph_Uproot_quick",
"Move_Battlecruiser_pt":
"Move_pt",
"Move_Battlecruiser_unit":
"Move_unit",
"Move_Move_pt":
"Move_pt",
"Move_Move_unit":
"Move_unit",
"Patrol_Battlecruiser_pt":
"Patrol_pt",
"Patrol_Battlecruiser_unit":
"Patrol_unit",
"Patrol_Patrol_pt":
"Patrol_pt",
"Patrol_Patrol_unit":
"Patrol_unit",
"Rally_CommandCenter_pt":
"Rally_Building_pt",
"Rally_CommandCenter_unit":
"Rally_Building_unit",
"Rally_Hatchery_Units_pt":
"Rally_Building_pt",
"Rally_Hatchery_Units_unit":
"Rally_Building_unit",
"Rally_Hatchery_Workers_pt":
"Rally_Building_pt",
"Rally_Hatchery_Workers_unit":
"Rally_Building_unit",
"Rally_Morphing_Unit_pt":
"Rally_Building_pt",
"Rally_Morphing_Unit_unit":
"Rally_Building_unit",
"Rally_Nexus_pt":
"Rally_Building_pt",
"Rally_Nexus_unit":
"Rally_Building_unit",
"Rally_Units_pt":
"Rally_Building_pt",
"Rally_Units_unit":
"Rally_Building_unit",
"Rally_Workers_pt":
"Rally_Building_pt",
"Rally_Workers_unit":
"Rally_Building_unit",
"Research_NeosteelFrame_quick":
"Research_TerranStructureArmorUpgrade_quick",
"Research_ProtossAirArmorLevel1_quick":
"Research_ProtossAirArmor_quick",
"Research_ProtossAirArmorLevel2_quick":
"Research_ProtossAirArmor_quick",
"Research_ProtossAirArmorLevel3_quick":
"Research_ProtossAirArmor_quick",
"Research_ProtossAirWeaponsLevel1_quick":
"Research_ProtossAirWeapons_quick",
"Research_ProtossAirWeaponsLevel2_quick":
"Research_ProtossAirWeapons_quick",
"Research_ProtossAirWeaponsLevel3_quick":
"Research_ProtossAirWeapons_quick",
"Research_ProtossGroundArmorLevel1_quick":
"Research_ProtossGroundArmor_quick",
"Research_ProtossGroundArmorLevel2_quick":
"Research_ProtossGroundArmor_quick",
"Research_ProtossGroundArmorLevel3_quick":
"Research_ProtossGroundArmor_quick",
"Research_ProtossGroundWeaponsLevel1_quick":
"Research_ProtossGroundWeapons_quick",
"Research_ProtossGroundWeaponsLevel2_quick":
"Research_ProtossGroundWeapons_quick",
"Research_ProtossGroundWeaponsLevel3_quick":
"Research_ProtossGroundWeapons_quick",
"Research_ProtossShieldsLevel1_quick":
"Research_ProtossShields_quick",
"Research_ProtossShieldsLevel2_quick":
"Research_ProtossShields_quick",
"Research_ProtossShieldsLevel3_quick":
"Research_ProtossShields_quick",
"Research_TerranInfantryArmorLevel1_quick":
"Research_TerranInfantryArmor_quick",
"Research_TerranInfantryArmorLevel2_quick":
"Research_TerranInfantryArmor_quick",
"Research_TerranInfantryArmorLevel3_quick":
"Research_TerranInfantryArmor_quick",
"Research_TerranInfantryWeaponsLevel1_quick":
"Research_TerranInfantryWeapons_quick",
"Research_TerranInfantryWeaponsLevel2_quick":
"Research_TerranInfantryWeapons_quick",
"Research_TerranInfantryWeaponsLevel3_quick":
"Research_TerranInfantryWeapons_quick",
"Research_TerranShipWeaponsLevel1_quick":
"Research_TerranShipWeapons_quick",
"Research_TerranShipWeaponsLevel2_quick":
"Research_TerranShipWeapons_quick",
"Research_TerranShipWeaponsLevel3_quick":
"Research_TerranShipWeapons_quick",
"Research_TerranVehicleAndShipPlatingLevel1_quick":
"Research_TerranVehicleAndShipPlating_quick",
"Research_TerranVehicleAndShipPlatingLevel2_quick":
"Research_TerranVehicleAndShipPlating_quick",
"Research_TerranVehicleAndShipPlatingLevel3_quick":
"Research_TerranVehicleAndShipPlating_quick",
"Research_TerranVehicleWeaponsLevel1_quick":
"Research_TerranVehicleWeapons_quick",
"Research_TerranVehicleWeaponsLevel2_quick":
"Research_TerranVehicleWeapons_quick",
"Research_TerranVehicleWeaponsLevel3_quick":
"Research_TerranVehicleWeapons_quick",
"Research_ZergFlyerArmorLevel1_quick":
"Research_ZergFlyerArmor_quick",
"Research_ZergFlyerArmorLevel2_quick":
"Research_ZergFlyerArmor_quick",
"Research_ZergFlyerArmorLevel3_quick":
"Research_ZergFlyerArmor_quick",
"Research_ZergFlyerAttackLevel1_quick":
"Research_ZergFlyerAttack_quick",
"Research_ZergFlyerAttackLevel2_quick":
"Research_ZergFlyerAttack_quick",
"Research_ZergFlyerAttackLevel3_quick":
"Research_ZergFlyerAttack_quick",
"Research_ZergGroundArmorLevel1_quick":
"Research_ZergGroundArmor_quick",
"Research_ZergGroundArmorLevel2_quick":
"Research_ZergGroundArmor_quick",
"Research_ZergGroundArmorLevel3_quick":
"Research_ZergGroundArmor_quick",
"Research_ZergMeleeWeaponsLevel1_quick":
"Research_ZergMeleeWeapons_quick",
"Research_ZergMeleeWeaponsLevel2_quick":
"Research_ZergMeleeWeapons_quick",
"Research_ZergMeleeWeaponsLevel3_quick":
"Research_ZergMeleeWeapons_quick",
"Research_ZergMissileWeaponsLevel1_quick":
"Research_ZergMissileWeapons_quick",
"Research_ZergMissileWeaponsLevel2_quick":
"Research_ZergMissileWeapons_quick",
"Research_ZergMissileWeaponsLevel3_quick":
"Research_ZergMissileWeapons_quick",
"Stop_Battlecruiser_quick":
"Stop_quick",
"Stop_Building_quick":
"Stop_quick",
"Stop_Redirect_quick":
"Stop_quick",
"Stop_Stop_quick":
"Stop_quick",
"Train_MothershipCore_quick":
"no_op",
"UnloadAllAt_Medivac_pt":
"UnloadAllAt_pt",
"UnloadAllAt_Medivac_unit":
"UnloadAllAt_unit",
"UnloadAllAt_Overlord_pt":
"UnloadAllAt_pt",
"UnloadAllAt_Overlord_unit":
"UnloadAllAt_unit",
"UnloadAllAt_WarpPrism_pt":
"UnloadAllAt_pt",
"UnloadAllAt_WarpPrism_unit":
"UnloadAllAt_unit",
"UnloadAll_Bunker_quick":
"UnloadAll_quick",
"UnloadAll_CommandCenter_quick":
"UnloadAll_quick",
"UnloadAll_NydusNetwork_quick":
"UnloadAll_quick",
"UnloadAll_NydusWorm_quick":
"UnloadAll_quick",
}
ADDON_UNIT_TYPES = [
sc2_units.Terran.BarracksTechLab,
sc2_units.Terran.BarracksReactor,
sc2_units.Terran.FactoryTechLab,
sc2_units.Terran.FactoryReactor,
sc2_units.Terran.StarportTechLab,
sc2_units.Terran.StarportReactor,
]
class UA(enum.IntEnum):
"""Unit attributes."""
LIGHT = 0
ARMORED = 1
BIOLOGICAL = 2
MECHANICAL = 3
PSIONIC = 4
MASSIVE = 5
STRUCTURE = 6
DETECTOR = 7
SUMMONED = 8
FLYING = 9
ADDON = 10
BURROWED = 11
UNITS_ATTRIBUTES = {
# Protoss
sc2_units.Protoss.Adept: [UA.LIGHT, UA.BIOLOGICAL],
sc2_units.Protoss.AdeptPhaseShift: [UA.LIGHT, UA.BIOLOGICAL, UA.SUMMONED],
sc2_units.Protoss.Archon: [UA.PSIONIC, UA.MASSIVE],
sc2_units.Protoss.Assimilator: [UA.ARMORED, UA.STRUCTURE],
sc2_units.Protoss.Carrier: [
UA.ARMORED, UA.MASSIVE, UA.MECHANICAL, UA.FLYING
],
sc2_units.Protoss.Colossus: [UA.ARMORED, UA.MASSIVE, UA.MECHANICAL],
sc2_units.Protoss.CyberneticsCore: [UA.ARMORED, UA.STRUCTURE],
sc2_units.Protoss.DarkShrine: [UA.ARMORED, UA.STRUCTURE],
sc2_units.Protoss.DarkTemplar: [UA.BIOLOGICAL, UA.LIGHT, UA.PSIONIC],
sc2_units.Protoss.Disruptor: [UA.ARMORED, UA.MECHANICAL],
sc2_units.Protoss.DisruptorPhased: [UA.SUMMONED],
sc2_units.Protoss.FleetBeacon: [UA.ARMORED, UA.STRUCTURE],
sc2_units.Protoss.ForceField: [UA.SUMMONED],
sc2_units.Protoss.Forge: [UA.ARMORED, UA.STRUCTURE],
sc2_units.Protoss.Gateway: [UA.ARMORED, UA.STRUCTURE],
sc2_units.Protoss.HighTemplar: [UA.BIOLOGICAL, UA.LIGHT, UA.PSIONIC],
sc2_units.Protoss.Immortal: [UA.ARMORED, UA.MECHANICAL],
sc2_units.Protoss.Interceptor: [
UA.LIGHT, UA.MECHANICAL, UA.SUMMONED, UA.FLYING
],
sc2_units.Protoss.Mothership: [
UA.ARMORED, UA.MASSIVE, UA.PSIONIC, UA.MECHANICAL, UA.FLYING
],
# sc2_units.Protoss.MothershipCore: [UA.MECHANICAL, UA.ARMORED, UA.PSIONIC],
sc2_units.Protoss.Nexus: [UA.ARMORED, UA.STRUCTURE],
sc2_units.Protoss.Observer: [
UA.LIGHT, UA.MECHANICAL, UA.DETECTOR, UA.FLYING
],
sc2_units.Protoss.ObserverSurveillanceMode: [
UA.LIGHT, UA.MECHANICAL, UA.DETECTOR, UA.FLYING
],
sc2_units.Protoss.Oracle: [
UA.MECHANICAL, UA.ARMORED, UA.PSIONIC, UA.FLYING
],
sc2_units.Protoss.Phoenix: [UA.LIGHT, UA.MECHANICAL, UA.FLYING],
sc2_units.Protoss.PhotonCannon: [UA.ARMORED, UA.STRUCTURE, UA.DETECTOR],
sc2_units.Protoss.Probe: [UA.LIGHT, UA.MECHANICAL],
sc2_units.Protoss.Pylon: [UA.ARMORED, UA.STRUCTURE],
sc2_units.Protoss.PylonOvercharged: [UA.ARMORED, UA.STRUCTURE],
sc2_units.Protoss.RoboticsBay: [UA.ARMORED, UA.STRUCTURE],
sc2_units.Protoss.RoboticsFacility: [UA.ARMORED, UA.STRUCTURE],
sc2_units.Protoss.Sentry: [UA.LIGHT, UA.MECHANICAL, UA.PSIONIC],
sc2_units.Protoss.ShieldBattery: [UA.ARMORED, UA.STRUCTURE],
sc2_units.Protoss.Stalker: [UA.ARMORED, UA.MECHANICAL],
sc2_units.Protoss.Stargate: [UA.ARMORED, UA.STRUCTURE],
sc2_units.Protoss.StasisTrap: [UA.LIGHT, UA.STRUCTURE, UA.SUMMONED],
sc2_units.Protoss.Tempest: [
UA.ARMORED, UA.MECHANICAL, UA.MASSIVE, UA.FLYING
],
sc2_units.Protoss.TemplarArchive: [UA.ARMORED, UA.STRUCTURE],
sc2_units.Protoss.TwilightCouncil: [UA.ARMORED, UA.STRUCTURE],
sc2_units.Protoss.VoidRay: [UA.ARMORED, UA.MECHANICAL, UA.FLYING],
sc2_units.Protoss.WarpGate: [UA.ARMORED, UA.STRUCTURE],
sc2_units.Protoss.WarpPrism: [
UA.ARMORED, UA.MECHANICAL, UA.PSIONIC, UA.FLYING
],
sc2_units.Protoss.WarpPrismPhasing: [
UA.ARMORED, UA.MECHANICAL, UA.PSIONIC, UA.FLYING
],
sc2_units.Protoss.Zealot: [UA.LIGHT, UA.BIOLOGICAL],
# Terran
sc2_units.Terran.Armory: [UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL],
sc2_units.Terran.AutoTurret: [
UA.MECHANICAL, UA.STRUCTURE, UA.ARMORED, UA.SUMMONED
],
sc2_units.Terran.Banshee: [UA.LIGHT, UA.MECHANICAL, UA.FLYING],
sc2_units.Terran.Barracks: [UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL],
sc2_units.Terran.BarracksFlying: [
UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL, UA.FLYING
],
sc2_units.Terran.BarracksReactor: [
UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL, UA.ADDON
],
sc2_units.Terran.BarracksTechLab: [
UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL, UA.ADDON
],
sc2_units.Terran.Battlecruiser: [
UA.ARMORED, UA.MECHANICAL, UA.MASSIVE, UA.FLYING
],
sc2_units.Terran.Bunker: [UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL],
sc2_units.Terran.CommandCenter: [UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL],
sc2_units.Terran.CommandCenterFlying: [
UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL, UA.FLYING
],
sc2_units.Terran.Cyclone: [UA.ARMORED, UA.MECHANICAL],
sc2_units.Terran.EngineeringBay: [UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL],
sc2_units.Terran.Factory: [UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL],
sc2_units.Terran.FactoryFlying: [
UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL, UA.FLYING
],
sc2_units.Terran.FactoryReactor: [
UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL, UA.ADDON
],
sc2_units.Terran.FactoryTechLab: [
UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL, UA.ADDON
],
sc2_units.Terran.FusionCore: [UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL],
sc2_units.Terran.Ghost: [UA.BIOLOGICAL, UA.PSIONIC],
sc2_units.Terran.GhostAcademy: [UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL],
sc2_units.Terran.GhostAlternate: [UA.BIOLOGICAL, UA.PSIONIC],
sc2_units.Terran.GhostNova: [UA.BIOLOGICAL, UA.PSIONIC],
sc2_units.Terran.Hellion: [UA.LIGHT, UA.MECHANICAL],
sc2_units.Terran.Hellbat: [UA.BIOLOGICAL, UA.LIGHT, UA.MECHANICAL],
sc2_units.Terran.KD8Charge: [UA.SUMMONED],
sc2_units.Terran.Liberator: [UA.ARMORED, UA.MECHANICAL, UA.FLYING],
sc2_units.Terran.LiberatorAG: [UA.ARMORED, UA.MECHANICAL, UA.FLYING],
sc2_units.Terran.MULE: [UA.LIGHT, UA.MECHANICAL, UA.SUMMONED],
sc2_units.Terran.Marauder: [UA.ARMORED, UA.BIOLOGICAL],
sc2_units.Terran.Marine: [UA.BIOLOGICAL, UA.LIGHT],
sc2_units.Terran.Medivac: [UA.ARMORED, UA.MECHANICAL, UA.FLYING],
sc2_units.Terran.MissileTurret: [
UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL, UA.DETECTOR
],
sc2_units.Terran.Nuke: [UA.SUMMONED],
sc2_units.Terran.OrbitalCommand: [UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL],
sc2_units.Terran.OrbitalCommandFlying: [
UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL, UA.FLYING
],
sc2_units.Terran.PlanetaryFortress: [
UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL
],
sc2_units.Terran.PointDefenseDrone: [
UA.LIGHT, UA.MECHANICAL, UA.STRUCTURE, UA.SUMMONED
],
sc2_units.Terran.Raven: [UA.LIGHT, UA.MECHANICAL, UA.DETECTOR, UA.FLYING],
sc2_units.Terran.Reactor: [
UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL, UA.ADDON
],
sc2_units.Terran.Reaper: [UA.BIOLOGICAL, UA.LIGHT],
sc2_units.Terran.Refinery: [UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL],
sc2_units.Terran.RepairDrone: [UA.SUMMONED],
sc2_units.Terran.SCV: [UA.BIOLOGICAL, UA.LIGHT, UA.MECHANICAL],
sc2_units.Terran.SensorTower: [UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL],
sc2_units.Terran.SiegeTank: [UA.ARMORED, UA.MECHANICAL],
sc2_units.Terran.SiegeTankSieged: [UA.ARMORED, UA.MECHANICAL],
sc2_units.Terran.Starport: [UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL],
sc2_units.Terran.StarportFlying: [
UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL, UA.FLYING
],
sc2_units.Terran.StarportReactor: [
UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL, UA.ADDON
],
sc2_units.Terran.StarportTechLab: [
UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL, UA.ADDON
],
sc2_units.Terran.SupplyDepot: [UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL],
sc2_units.Terran.SupplyDepotLowered: [
UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL
],
sc2_units.Terran.TechLab: [
UA.ARMORED, UA.STRUCTURE, UA.MECHANICAL, UA.ADDON
],
sc2_units.Terran.Thor: [UA.ARMORED, UA.MECHANICAL, UA.MASSIVE],
sc2_units.Terran.ThorHighImpactMode: [
UA.ARMORED, UA.MECHANICAL, UA.MASSIVE
],
sc2_units.Terran.VikingAssault: [UA.ARMORED, UA.MECHANICAL],
sc2_units.Terran.VikingFighter: [UA.ARMORED, UA.MECHANICAL, UA.FLYING],
sc2_units.Terran.WidowMine: [UA.MECHANICAL, UA.LIGHT],
sc2_units.Terran.WidowMineBurrowed: [UA.MECHANICAL, UA.LIGHT, UA.BURROWED],
# Zerg
sc2_units.Zerg.Baneling: [UA.BIOLOGICAL],
sc2_units.Zerg.BanelingBurrowed: [UA.BIOLOGICAL, UA.BURROWED],
sc2_units.Zerg.BanelingCocoon: [UA.BIOLOGICAL],
sc2_units.Zerg.BanelingNest: [UA.ARMORED, UA.STRUCTURE, UA.BIOLOGICAL],
sc2_units.Zerg.BroodLord: [
UA.ARMORED, UA.BIOLOGICAL, UA.MASSIVE, UA.FLYING
],
sc2_units.Zerg.BroodLordCocoon: [UA.BIOLOGICAL, UA.FLYING],
sc2_units.Zerg.Broodling: [UA.BIOLOGICAL, UA.LIGHT, UA.SUMMONED],
sc2_units.Zerg.BroodlingEscort: [
UA.BIOLOGICAL, UA.LIGHT, UA.FLYING, UA.SUMMONED
],
sc2_units.Zerg.Changeling: [UA.BIOLOGICAL, UA.LIGHT],
sc2_units.Zerg.ChangelingMarine: [UA.BIOLOGICAL, UA.LIGHT],
sc2_units.Zerg.ChangelingMarineShield: [UA.BIOLOGICAL, UA.LIGHT],
sc2_units.Zerg.ChangelingZealot: [UA.BIOLOGICAL, UA.LIGHT],
sc2_units.Zerg.ChangelingZergling: [UA.BIOLOGICAL, UA.LIGHT],
sc2_units.Zerg.ChangelingZerglingWings: [UA.BIOLOGICAL, UA.LIGHT],
sc2_units.Zerg.Cocoon: [UA.BIOLOGICAL],
sc2_units.Zerg.Corruptor: [UA.ARMORED, UA.BIOLOGICAL, UA.FLYING],
sc2_units.Zerg.CreepTumor: [UA.ARMORED, UA.STRUCTURE, UA.BIOLOGICAL],
sc2_units.Zerg.CreepTumorBurrowed: [
UA.ARMORED, UA.STRUCTURE, UA.BIOLOGICAL, UA.BURROWED
],
sc2_units.Zerg.CreepTumorQueen: [UA.ARMORED, UA.STRUCTURE, UA.BIOLOGICAL],
sc2_units.Zerg.Drone: [UA.BIOLOGICAL, UA.LIGHT],
sc2_units.Zerg.DroneBurrowed: [UA.BIOLOGICAL, UA.LIGHT, UA.BURROWED],
sc2_units.Zerg.EvolutionChamber: [UA.ARMORED, UA.STRUCTURE, UA.BIOLOGICAL],
sc2_units.Zerg.Extractor: [UA.ARMORED, UA.STRUCTURE, UA.BIOLOGICAL],
sc2_units.Zerg.GreaterSpire: [UA.ARMORED, UA.STRUCTURE, UA.BIOLOGICAL],
sc2_units.Zerg.Hatchery: [UA.ARMORED, UA.STRUCTURE, UA.BIOLOGICAL],
sc2_units.Zerg.Hive: [UA.ARMORED, UA.STRUCTURE, UA.BIOLOGICAL],
sc2_units.Zerg.Hydralisk: [UA.BIOLOGICAL, UA.LIGHT],
sc2_units.Zerg.HydraliskBurrowed: [UA.BIOLOGICAL, UA.LIGHT, UA.BURROWED],
sc2_units.Zerg.HydraliskDen: [UA.ARMORED, UA.STRUCTURE, UA.BIOLOGICAL],
sc2_units.Zerg.InfestationPit: [UA.ARMORED, UA.STRUCTURE, UA.BIOLOGICAL],
sc2_units.Zerg.InfestedTerran: [UA.BIOLOGICAL, UA.LIGHT, UA.SUMMONED],
sc2_units.Zerg.InfestedTerranBurrowed: [
UA.BIOLOGICAL, UA.LIGHT, UA.SUMMONED, UA.BURROWED
],
sc2_units.Zerg.InfestedTerranCocoon: [UA.BIOLOGICAL, UA.LIGHT, UA.SUMMONED],
sc2_units.Zerg.Infestor: [UA.ARMORED, UA.BIOLOGICAL, UA.PSIONIC],
sc2_units.Zerg.InfestorBurrowed: [
UA.ARMORED, UA.BIOLOGICAL, UA.PSIONIC, UA.BURROWED
],
sc2_units.Zerg.Lair: [UA.ARMORED, UA.STRUCTURE, UA.BIOLOGICAL],
sc2_units.Zerg.Larva: [UA.BIOLOGICAL, UA.LIGHT],
sc2_units.Zerg.Locust: [UA.LIGHT, UA.BIOLOGICAL],
sc2_units.Zerg.LocustFlying: [UA.LIGHT, UA.BIOLOGICAL, UA.FLYING],
sc2_units.Zerg.Lurker: [UA.BIOLOGICAL, UA.ARMORED],
sc2_units.Zerg.LurkerBurrowed: [UA.BIOLOGICAL, UA.ARMORED, UA.BURROWED],
sc2_units.Zerg.LurkerDen: [UA.ARMORED, UA.STRUCTURE, UA.BIOLOGICAL],
sc2_units.Zerg.LurkerCocoon: [UA.BIOLOGICAL],
sc2_units.Zerg.Mutalisk: [UA.BIOLOGICAL, UA.LIGHT, UA.FLYING],
sc2_units.Zerg.NydusCanal: [UA.ARMORED, UA.STRUCTURE, UA.BIOLOGICAL],
sc2_units.Zerg.NydusNetwork: [UA.ARMORED, UA.STRUCTURE, UA.BIOLOGICAL],
sc2_units.Zerg.Overlord: [UA.ARMORED, UA.BIOLOGICAL, UA.FLYING],
sc2_units.Zerg.OverlordTransport: [UA.ARMORED, UA.BIOLOGICAL, UA.FLYING],
sc2_units.Zerg.OverlordTransportCocoon: [UA.BIOLOGICAL, UA.FLYING],
sc2_units.Zerg.Overseer: [
UA.ARMORED, UA.BIOLOGICAL, UA.FLYING, UA.DETECTOR
],
sc2_units.Zerg.OverseerCocoon: [UA.BIOLOGICAL, UA.FLYING],
sc2_units.Zerg.OverseerOversightMode: [
UA.ARMORED, UA.BIOLOGICAL, UA.FLYING, UA.DETECTOR
],
sc2_units.Zerg.ParasiticBombDummy: [UA.SUMMONED],
sc2_units.Zerg.Queen: [UA.BIOLOGICAL, UA.PSIONIC],
sc2_units.Zerg.QueenBurrowed: [UA.BIOLOGICAL, UA.PSIONIC, UA.BURROWED],
sc2_units.Zerg.Ravager: [UA.BIOLOGICAL],
sc2_units.Zerg.RavagerBurrowed: [UA.BIOLOGICAL, UA.BURROWED],
sc2_units.Zerg.RavagerCocoon: [UA.BIOLOGICAL],
sc2_units.Zerg.Roach: [UA.ARMORED, UA.BIOLOGICAL],
sc2_units.Zerg.RoachBurrowed: [UA.ARMORED, UA.BIOLOGICAL, UA.BURROWED],
sc2_units.Zerg.RoachWarren: [UA.ARMORED, UA.STRUCTURE, UA.BIOLOGICAL],
sc2_units.Zerg.SpawningPool: [UA.ARMORED, UA.STRUCTURE, UA.BIOLOGICAL],
sc2_units.Zerg.SpineCrawler: [UA.ARMORED, UA.STRUCTURE, UA.BIOLOGICAL],
sc2_units.Zerg.SpineCrawlerUprooted: [
UA.ARMORED, UA.STRUCTURE, UA.BIOLOGICAL
],
sc2_units.Zerg.Spire: [UA.ARMORED, UA.STRUCTURE, UA.BIOLOGICAL],
sc2_units.Zerg.SporeCrawler: [
UA.ARMORED, UA.STRUCTURE, UA.BIOLOGICAL, UA.DETECTOR
],
sc2_units.Zerg.SporeCrawlerUprooted: [
UA.ARMORED, UA.STRUCTURE, UA.BIOLOGICAL
],
sc2_units.Zerg.SwarmHost: [UA.ARMORED, UA.BIOLOGICAL],
sc2_units.Zerg.SwarmHostBurrowed: [UA.ARMORED, UA.BIOLOGICAL, UA.BURROWED],
sc2_units.Zerg.Ultralisk: [UA.ARMORED, UA.BIOLOGICAL, UA.MASSIVE],
sc2_units.Zerg.UltraliskBurrowed: [
UA.ARMORED, UA.BIOLOGICAL, UA.MASSIVE, UA.BURROWED
],
sc2_units.Zerg.UltraliskCavern: [UA.ARMORED, UA.STRUCTURE, UA.BIOLOGICAL],
sc2_units.Zerg.Viper: [UA.ARMORED, UA.BIOLOGICAL, UA.PSIONIC, UA.FLYING],
sc2_units.Zerg.Zergling: [UA.BIOLOGICAL, UA.LIGHT],
sc2_units.Zerg.ZerglingBurrowed: [UA.BIOLOGICAL, UA.LIGHT, UA.BURROWED],
}
def get_attribute_lookup(num_unit_types: int) -> chex.Array:
"""Returns an boolean array specifying the attributes of each unit."""
attribute_lookup = np.zeros((num_unit_types, max(UA).value + 1),
dtype=np.float32)
for unit, attributes in UNITS_ATTRIBUTES.items():
unit_id = uint8_lookup.PySc2ToUint8(unit)
for attribute in attributes:
attribute_lookup[unit_id, attribute.value] = 1.
return attribute_lookup
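# Illustrative sketch (num_unit_types=256 is an assumption here, chosen to
# cover the uint8 id range):
#
#   lookup = get_attribute_lookup(num_unit_types=256)
#   marine_id = uint8_lookup.PySc2ToUint8(sc2_units.Terran.Marine)
#   assert lookup[marine_id, UA.BIOLOGICAL] == 1.
#   assert lookup[marine_id, UA.FLYING] == 0.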
def get_order_id_lookup(function_names: List[str],
redundant_list: Optional[Mapping[str, str]] = None
) -> np.ndarray:
"""Remaps function arguments to remove redundent ones."""
if redundant_list is None:
redundant_list = dict(REDUNDANT_GENERIC_ORDER_ID)
current_max = 0
lookup_map = {}
for i, function in enumerate(function_names):
if function not in redundant_list:
lookup_map[i] = current_max
current_max += 1
lookup = np.zeros((len(function_names),), dtype=np.int32)
for i, function in enumerate(function_names):
if function in redundant_list:
remapped_name = redundant_list[function]
lookup[i] = lookup_map[function_names.index(remapped_name)]
else:
lookup[i] = lookup_map[i]
return lookup
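# Worked example with hypothetical function names (illustration only): "b"
# is declared redundant with "a", so both collapse to the same compact id,
# and "c" shifts down to fill the gap.
#
#   lookup = get_order_id_lookup(["a", "b", "c"], redundant_list={"b": "a"})
#   # lookup == np.array([0, 0, 1])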
def get_build_queue_order_id_lookup(function_names: List[str]) -> np.ndarray:
"""Remaps function arguments to remove irrelevant ones to build queue."""
remap = dict(REDUNDANT_GENERIC_ORDER_ID) # copy
for fun in sc2_actions.RAW_FUNCTIONS:
name = fun.name
if (name == "no_op") or ("Train_" in name) or ("Research_" in name):
continue
else:
remap[name] = "no_op"
return get_order_id_lookup(function_names, remap)
def get_addon_lookup(num_unit_types: int) -> np.ndarray:
"""Remaps units to keep only the add-on types."""
lookup = np.zeros((num_unit_types,), dtype=np.int32)
for i, unit in enumerate(ADDON_UNIT_TYPES):
lookup[uint8_lookup.PySc2ToUint8(unit)] = i + 1
return lookup
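# For example, under this lookup every non-add-on unit type maps to 0, while
# sc2_units.Terran.BarracksTechLab (the first ADDON_UNIT_TYPES entry) maps
# to 1.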
| alphastar-main | alphastar/architectures/components/static_data/unit_encoder_data.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| alphastar-main | alphastar/unplugged/__init__.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for the losses."""
from typing import Dict
from alphastar import types
from alphastar.commons import log_utils
import chex
import dm_env
import jax
import jax.numpy as jnp
import numpy as np
def get_global_loss_masks(step_type: chex.Array,
argument_masks: types.StreamDict,
num_first_steps_to_ignore: int,
num_last_steps_to_ignore: int,
) -> types.StreamDict:
"""Gets the global loss masks (for each argument).
The mask is 0 if at least one of these conditions is true:
* The argument is masked
OR
  * The step is an episode terminal state (since the action associated with
    this observation is a dummy, and the next state will always be an initial
    state).
OR
* The step is one of the first `num_first_steps_to_ignore` steps of the
rollout (this can be used for burn-in).
OR
  * The step is one of the last `num_last_steps_to_ignore` steps of the
    rollout (this can be used for bootstrapping).
Args:
step_type: Step type.
    argument_masks: A StreamDict containing the argument masks; unit_tags can
      be a single scalar mask (instead of one per unit_tags argument).
num_first_steps_to_ignore: The number of steps to mask at the beginning of
every trajectory.
num_last_steps_to_ignore: The number of steps to mask at the end of
every trajectory.
Returns:
A StreamDict containing the loss masks for each of the arguments.
"""
if (num_first_steps_to_ignore + num_last_steps_to_ignore >=
step_type.shape[0]):
raise ValueError(
"Total number of steps to ignore ("
f"{num_first_steps_to_ignore, num_last_steps_to_ignore}) "
f"should be less than the total timesteps {step_type.shape[0]}")
chex.assert_rank(step_type, 1)
# Step type and argument masks don't have identical tree structure but are
# expected to have the same shape at their leaves.
chex.assert_equal_shape(jax.tree_leaves([step_type, argument_masks]))
terminal_state_mask = jnp.not_equal(step_type, int(dm_env.StepType.LAST))
trajectory_mask = np.zeros(shape=(step_type.shape[0],), dtype=np.bool_)
trajectory_mask[num_first_steps_to_ignore:step_type.shape[0] -
num_last_steps_to_ignore] = True
trajectory_mask = jnp.asarray(trajectory_mask)
global_mask = jnp.logical_and(terminal_state_mask, trajectory_mask)
return jax.tree_map(lambda x: jnp.logical_and(x, global_mask), argument_masks)
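# Minimal behaviour sketch (values illustrative):
#
#   step_type = jnp.array([0, 1, 1, 2])  # FIRST, MID, MID, LAST
#   arg_masks = types.StreamDict({'function': jnp.ones((4,), jnp.bool_)})
#   masks = get_global_loss_masks(step_type, arg_masks,
#                                 num_first_steps_to_ignore=1,
#                                 num_last_steps_to_ignore=0)
#   # masks['function'] == [False, True, True, False]: the first step is
#   # burn-in and the final step is a terminal state.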
def get_masked_log(data: chex.Array,
mask: chex.Array) -> Dict[log_utils.ReduceType, chex.Array]:
"""Gets the masked value and count of data to log."""
masked_data = jnp.where(mask, data, 0)
return {
log_utils.ReduceType.MEAN: masked_data,
log_utils.ReduceType.SUM: masked_data,
log_utils.ReduceType.NUM: mask.astype(jnp.int32)}
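# For example (values illustrative): data=[1., 2.] with mask=[True, False]
# yields MEAN=[1., 0.], SUM=[1., 0.] and NUM=[1, 0].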
| alphastar-main | alphastar/unplugged/losses/util.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Alphastar Losses."""
from alphastar.unplugged.losses.loss_base import Loss
from alphastar.unplugged.losses.loss_base import LossBuilder
from alphastar.unplugged.losses.loss_base import LossOutputType
from alphastar.unplugged.losses.supervised import Supervised
| alphastar-main | alphastar/unplugged/losses/__init__.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for supervised."""
from absl.testing import absltest
from alphastar import types
from alphastar.commons import log_utils
from alphastar.unplugged.losses import supervised
import dm_env
from dm_env import specs
import jax
import jax.numpy as jnp
import numpy as np
class SupervisedTest(absltest.TestCase):
def test_supervised_loss(self):
unroll_len = 50
max_num_selected_units = 10
burnin_len = 2
overlap_len = 3
action_spec = {
'function': specs.BoundedArray((), np.int32, minimum=0, maximum=10),
'delay': specs.BoundedArray((), np.int32, minimum=0, maximum=6),
'queued': specs.BoundedArray((), np.int32, minimum=0, maximum=2),
'repeat': specs.BoundedArray((), np.int32, minimum=0, maximum=4),
'unit_tags': specs.BoundedArray(
(max_num_selected_units,), np.int32, minimum=0, maximum=10),
'target_unit_tag': specs.BoundedArray(
(), jnp.int32, minimum=0, maximum=10),
'world': specs.BoundedArray((), np.int32, minimum=0, maximum=25)}
action = {k: np.random.randint(v.maximum + 1, size=(unroll_len,) + v.shape)
for k, v in action_spec.items()}
logits = {k: np.random.rand(unroll_len, *v.shape, v.maximum + 1,)
for k, v in action_spec.items()}
masks = {k: np.random.randint(2, size=v.shape).astype(jnp.bool_)
for k, v in logits.items()}
argument_masks = {
k: np.random.randint(2, size=(unroll_len,)).astype(jnp.bool_)
for k in action_spec}
step_type = np.random.randint(3, size=(unroll_len,))
inputs = {
'action': action,
'logits': logits,
'masks': masks,
'argument_masks': argument_masks,
'step_type': step_type
}
weights = dict(
function=40.,
delay=9.,
queued=1.,
repeat=0.1,
target_unit_tag=30.,
unit_tags=320.,
world=11.)
supervised_loss = supervised.Supervised(
action_spec=action_spec,
weights=weights,
overlap_len=overlap_len,
burnin_len=burnin_len,
name='supervised')
jnp_input = jax.tree_map(jnp.asarray, inputs)
loss, logs = supervised_loss.loss(types.StreamDict(jnp_input))
self.assertEqual(
jnp.sum(loss),
jnp.sum(logs['[supervised] loss'][log_utils.ReduceType.MEAN]))
for i in range(unroll_len):
total_xentropy = np.zeros((), np.float32)
for arg in action_spec:
log_prob = np.array(jax.nn.log_softmax(logits[arg][i]))
xentropy = np.take_along_axis(
-log_prob, action[arg][i][..., np.newaxis], axis=-1)
xentropy = np.squeeze(xentropy, axis=-1)
if arg == 'unit_tags':
for j in range(xentropy.shape[0]):
xentropy[j] *= masks[arg][i, j][action[arg][i, j]]
xentropy = np.mean(xentropy)
else:
xentropy *= masks[arg][i][action[arg][i]]
weight = weights[arg]
if (argument_masks[arg][i]
and step_type[i] != int(dm_env.StepType.LAST)
and (burnin_len <= i < unroll_len - overlap_len)):
total_xentropy += weight * xentropy
np.testing.assert_allclose(loss[i], total_xentropy, rtol=1E-5)
if __name__ == '__main__':
absltest.main()
| alphastar-main | alphastar/unplugged/losses/supervised_test.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Supervised (Behaviour Cloning) losses."""
import functools
from typing import Dict, Mapping, Tuple
from alphastar import types
from alphastar.architectures import util as arch_util
from alphastar.commons import log_utils
from alphastar.unplugged.losses import loss_base
from alphastar.unplugged.losses import util as loss_util
import chex
from dm_env import specs
import jax
import jax.numpy as jnp
class Supervised(loss_base.Loss):
"""Supervised loss.
  Logits are assumed to be already masked, i.e. if a mask is 0, we assume that
  the corresponding logit is -infinity.
"""
def __init__(self,
action_spec: types.ActionSpec,
weights: Mapping[types.ArgumentName, float],
burnin_len: int = 0,
overlap_len: int = 0,
name: str = 'Supervised'):
"""Initializes the module.
Args:
action_spec: The action specification.
weights: The weight of the cross-entropy loss for each part of the loss.
Its structure must match the action spec.
burnin_len: The number of steps used for burn-in. The loss is not applied
to these steps.
      overlap_len: The number of steps at the end of the trajectory where the
        loss should not be applied.
name: Name of the module.
"""
super().__init__(name=name)
self._burnin_len = burnin_len
self._overlap_len = overlap_len
self._action_spec = action_spec
self._weights = weights
if set(weights.keys()) != set(action_spec.keys()):
raise ValueError('Weights keys do not match action spec keys. Got '
f'{set(weights.keys())} and {set(action_spec.keys())}.')
@property
def input_spec(self) -> types.SpecDict:
"""Gets the specs for inputs to supervised loss."""
spec = types.SpecDict()
spec['step_type'] = specs.Array((), jnp.int32)
for arg, arg_spec in self._action_spec.items():
spec['argument_masks', arg] = specs.Array((), jnp.bool_)
num_logits = arg_spec.maximum + 1
if arg == arch_util.Argument.UNIT_TAGS:
num_unit_tags = arg_spec.shape[0]
spec['logits', arg] = specs.Array(
(num_unit_tags, num_logits), jnp.float32)
spec['masks', arg] = specs.Array((num_unit_tags, num_logits), jnp.bool_)
spec['action', arg] = specs.Array((num_unit_tags,), jnp.int32)
else:
spec['logits', arg] = specs.Array((num_logits,), jnp.float32)
spec['masks', arg] = specs.Array((num_logits,), jnp.bool_)
spec['action', arg] = specs.Array((), jnp.int32)
return spec
def _single_arg_loss(self,
logits: chex.Array,
action: chex.Array,
mask: chex.Array,
global_mask: chex.Array,
weight: float,
) -> Tuple[chex.Array, Dict[str, chex.Array]]:
"""Returns the loss for a single argument of the action."""
chex.assert_rank([action, global_mask, logits, mask], [0, 0, 1, 1])
chex.assert_equal_shape([logits, mask])
xentropy = -jax.nn.log_softmax(logits)[action]
# correct_argmax is the MAP of the logits, used for metrics (logging) only
correct_argmax = jnp.equal(jnp.argmax(logits), action).astype(jnp.float32)
# Masking
# If the target action is masked, this means the data is not consistent.
# We set the loss to zero in this case to avoid divergence.
target_mask = mask[action]
xentropy_mask = jnp.logical_and(target_mask, global_mask)
xentropy = jnp.where(xentropy_mask, xentropy, 0)
loss = weight * xentropy
log = {}
log['xentropy'] = loss_util.get_masked_log(xentropy, global_mask)
log['loss'] = loss_util.get_masked_log(loss, global_mask)
log['argmax_accuracy'] = loss_util.get_masked_log(correct_argmax,
global_mask)
log['unmasked_ratio'] = loss_util.get_masked_log(global_mask,
jnp.ones_like(global_mask))
log['wrong_mask_ratio'] = loss_util.get_masked_log(1. - target_mask,
global_mask)
return loss, log
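  # Illustrative shapes for _single_arg_loss (single timestep, 3-way arg):
  # logits=[l0, l1, l2], action=1, mask=[T, T, F], global_mask=T gives
  # loss = weight * -log_softmax(logits)[1]; if the target itself is masked
  # (inconsistent data), the loss is zeroed instead.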
def _loss(self, inputs: types.StreamDict) -> loss_base.LossOutputType:
"""The loss function."""
global_masks = loss_util.get_global_loss_masks(
step_type=inputs['step_type'],
argument_masks=inputs.get('argument_masks'),
num_first_steps_to_ignore=self._burnin_len,
num_last_steps_to_ignore=self._overlap_len)
# Compute all the losses
losses, logs = [], {}
# Reduce functions used for unit_tags, reducing over axis 1 (see below):
unit_tags_reduce_fns = {k: functools.partial(fn, axis=1)
for k, fn in log_utils.REDUCE_FUNCTIONS.items()}
unit_tags_reduce_fns[log_utils.ReduceType.NON_REDUCED] = lambda x: x[:, 0]
for arg in self._action_spec:
single_arg_loss = jax.vmap(self._single_arg_loss, [0, 0, 0, 0, None])
if arg == arch_util.Argument.UNIT_TAGS:
# Unlike all other arguments, of size [unroll_len, ...], unit_tags have
# size [unroll_len, max_num_selected_units, ...] representing
# max_num_selected_units independent actions. We use a vmap to compute
# the loss independently over this axis:
single_arg_loss = jax.vmap(
single_arg_loss, [1, 1, 1, None, None], (1, 1))
arg_loss, arg_log = single_arg_loss(
inputs['logits', arg], inputs['action', arg],
inputs['masks', arg], global_masks[arg], self._weights[arg])
if arg == arch_util.Argument.UNIT_TAGS:
# We now reduce the unit_tags loss and logs over axis 1, so that they
# have the same shape as other arguments:
arg_loss = jnp.mean(arg_loss, axis=1)
arg_log = log_utils.reduce_logs(arg_log, unit_tags_reduce_fns)
losses.append(arg_loss)
for log_name, log in arg_log.items():
logs[f'[{self.name}] {arg}_{log_name}'] = log
total_loss = sum(losses)
logs[f'[{self.name}] loss'] = {log_utils.ReduceType.MEAN: total_loss}
return total_loss, logs
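# Minimal usage sketch (shapes illustrative; supervised_test.py has a
# complete, runnable example):
#
#   loss_fn = Supervised(action_spec=action_spec, weights=weights,
#                        burnin_len=0, overlap_len=0)
#   per_step_loss, logs = loss_fn.loss(inputs)  # inputs: [unroll_len, ...]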
| alphastar-main | alphastar/unplugged/losses/supervised.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for Alphastar losses."""
import abc
from typing import Callable
from typing import Mapping
from typing import Optional
from typing import Tuple
from alphastar import types
from alphastar.commons import log_utils
import chex
import jax
from jax import numpy as jnp
LossOutputType = Tuple[chex.Array, log_utils.Log]
class Loss(abc.ABC):
"""Basic AlphaStar loss function."""
def __init__(self, name: Optional[str] = "Loss"):
self._name = name or "Loss"
@property
def name(self) -> str:
return self._name
@property
@abc.abstractmethod
def input_spec(self) -> types.SpecDict:
"""Returns the spec of the input of this loss."""
@abc.abstractmethod
def _loss(self, inputs: types.StreamDict) -> LossOutputType:
"""Computes the output of the loss over unroll_len timesteps.
Args:
inputs: A StreamDict containing [unroll_len, ...] tensors.
Returns:
loss: A float tensor of shape [unroll_len] containing the loss per
timestep.
logs: Per-timestep logs (shape [unroll_len]). This is a dict containing,
for each entry, a dict with reduce functions as keys.
"""
def loss(self, inputs: types.StreamDict) -> LossOutputType:
"""Computes loss on unbatched unrolled inputs. See _loss too.
Inputs to this function are expected to be of the shape unroll_len and
are not expected to be batched further. For batched version of the loss,
check batched_loss.
Args:
inputs: Inputs used for loss computation, a StreamDict with elements of
shape [unroll_len, ...].
Returns:
loss: A float tensor of shape [unroll_len] containing the loss per
timestep.
logs: Per-timestep logs (shape [unroll_len]). This is a dict containing,
for each entry, a dict with reduce functions as keys.
Raises:
ValueError: when not all tree leaves are of the shape of unroll length.
"""
if not inputs:
raise ValueError("Losses must have at least one input.")
unroll_len = jax.tree_leaves(inputs)[0].shape[0]
try:
chex.assert_tree_shape_prefix(inputs, (unroll_len,))
except AssertionError as e:
raise ValueError(
f"All inputs should have same size as unroll length ({unroll_len}) "
f"-- {self.name}: {e}") from e
self.input_spec.validate(inputs,
num_leading_dims_to_ignore=1,
error_prefix=f"{self.name} inputs")
# We hide inputs not specified in input_spec to prevent accidental use.
inputs = inputs.filter(self.input_spec)
outputs, logs = self._loss(inputs)
chex.assert_shape(outputs, [unroll_len])
chex.assert_type(outputs, jnp.float32)
for k, v in logs.items():
if not isinstance(v, Mapping):
raise ValueError("Logs must be depth 2 nested dicts, "
f"but log {k} has type {type(v)}.")
for inner_k, inner_v in v.items():
if not isinstance(inner_v, chex.Array):
raise ValueError("Logs must be depth 2 nested dicts, "
f"but log {k}/{inner_k} has type {type(inner_v)}.")
if inner_v.shape != (unroll_len,):
raise ValueError(
f"Logs must have shape [unroll_len] ({unroll_len},), "
f" but {k}/{inner_k} has shape {inner_v.shape}.")
return outputs, logs
def batched_loss(self, inputs: types.StreamDict) -> LossOutputType:
"""Computes loss on inputs with a batch dimension.
    Unless the loss explicitly needs access to the batch dimension, override
    _loss instead; this method applies a vmap over loss to hide the batch
    dimension.
Args:
inputs: Inputs used for loss computation, a StreamDict with elements of
shape [batch_size, unroll_len, ...].
Returns:
loss: A float tensor of shape [batch_size, unroll_len] containing the loss
per timestep.
logs: Per-timestep logs (shape [batch_size, unroll_len]). This is a dict
containing, for each entry, a dict with reduce functions as keys.
Raises:
ValueError: when not all tree leaves are of the shape of
(batch size, unroll length).
"""
batch_size, unroll_len = jax.tree_leaves(inputs)[0].shape[:2]
try:
chex.assert_tree_shape_prefix(inputs, (batch_size, unroll_len))
except AssertionError as e:
raise ValueError(
"All inputs should have same size as (batch_size, unroll length) "
f"({batch_size}, {unroll_len}) "
f"-- {self.name}: {e}") from e
outputs, logs = jax.vmap(self.loss)(inputs)
chex.assert_shape(outputs, [batch_size, unroll_len])
return outputs, logs
LossBuilder = Callable[[types.ActionSpec], Loss]
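# Hedged sketch (illustrative only, not part of the library): a minimal Loss
# subclass showing the contract enforced by Loss.loss above -- an input_spec,
# and a _loss returning a [unroll_len] float32 vector plus depth-2 logs. The
# 'value' stream and the squared penalty are invented for this example.
class _ExampleSquaredLoss(Loss):
  """Penalises the squared magnitude of a scalar 'value' stream."""

  @property
  def input_spec(self) -> types.SpecDict:
    from dm_env import specs  # Local import; loss_base itself does not use it.
    return types.SpecDict({'value': specs.Array((), jnp.float32)})

  def _loss(self, inputs: types.StreamDict) -> LossOutputType:
    loss = jnp.square(inputs['value'])  # Shape [unroll_len].
    logs = {f'[{self.name}] loss': {log_utils.ReduceType.MEAN: loss}}
    return loss, logs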
| alphastar-main | alphastar/unplugged/losses/loss_base.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| alphastar-main | alphastar/unplugged/configs/__init__.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for the supervised StarCraft II JaxCraft pipeline."""
import datetime
from typing import Optional
from alphastar.architectures import architectures as arch_util
from alphastar.modules import optimizers
import ml_collections
from pysc2.env.converter.proto import converter_pb2
from s2clientprotocol import common_pb2
_EVAL_MAP_NAMES = ("KairosJunction", "KingsCove", "CyberForest",
"NewRepugnancy")
_EVAL_COMPETITORS = (
# Built-in AI
"very_easy",
"very_hard",
)
_DEFAULT_REPLAY_VERSIONS = ("4.8.2", "4.8.3", "4.8.4", "4.8.6", "4.9.0",
"4.9.1", "4.9.2",)
# These parameters are only used internally in this config:
_NUM_UPGRADES = 90 # v3
_NUM_BUFFS = 46 # v2
_USE_CAMERA = True
_NUM_UNIT_FEATURES = 46 # v7
_USE_PLACEHOLDER = True
_SHOW_BURROWED_SHADOWS = True
_MAX_NUM_SELECTED_UNITS = 64
_SCREEN_DIM = 1
_MINIMAP_DIM = 128
_WORLD_DIM = 256
# These parameters are used outside:
NUM_UNIT_TYPES = 243 # v4
NUM_RAW_FUNCTIONS = 556 # v4
def get_converter_settings(
use_supervised: bool,) -> converter_pb2.ConverterSettings:
return converter_pb2.ConverterSettings(
raw_settings=converter_pb2.ConverterSettings.RawSettings(
resolution=common_pb2.Size2DI(x=_WORLD_DIM, y=_WORLD_DIM),
max_unit_count=512,
num_unit_features=_NUM_UNIT_FEATURES,
max_unit_selection_size=_MAX_NUM_SELECTED_UNITS,
shuffle_unit_tags=True,
enable_action_repeat=True,
use_camera_position=_USE_CAMERA,
camera=_USE_CAMERA,
use_virtual_camera=_USE_CAMERA,
virtual_camera_dimensions=converter_pb2.ConverterSettings.RawSettings
.CameraDimensions(left=16, right=16, top=13, bottom=7),
add_effects_to_units=True,
add_cargo_to_units=True,
mask_offscreen_enemies=_USE_CAMERA),
minimap=common_pb2.Size2DI(x=_MINIMAP_DIM, y=_MINIMAP_DIM),
minimap_features=[
"height_map", "visibility_map", "creep", "player_relative", "alerts",
"pathable", "buildable"
],
num_action_types=NUM_RAW_FUNCTIONS,
num_unit_types=NUM_UNIT_TYPES,
num_upgrade_types=_NUM_UPGRADES,
max_num_upgrades=40,
camera_width_world_units=24,
mmr=6000,
supervised=use_supervised,
crop_to_playable_area=False)
SUPERVISED_CONVERTER_SETTINGS = get_converter_settings(use_supervised=True)
EVAL_CONVERTER_SETTINGS = get_converter_settings(use_supervised=False)
def get_config(arch_str: str) -> ml_collections.ConfigDict:
"""Sets up base config which can be overridden."""
config = ml_collections.ConfigDict()
# EVALUATION SETTINGS
config.eval = ml_collections.ConfigDict()
# We use common_pb2 instead of sc2_env to avoid depending on pygame.
config.eval.home_races = [
common_pb2.Protoss, common_pb2.Terran, common_pb2.Zerg
]
config.eval.away_races = [
common_pb2.Protoss, common_pb2.Terran, common_pb2.Zerg
]
config.eval.genrl_env_prob = 0.5
config.eval.map_names = _EVAL_MAP_NAMES
config.eval.competitor_names = _EVAL_COMPETITORS
  # Specify the directory; the latest checkpoint found in it will be evaluated.
config.eval.eval_checkpoint_dir = ""
config.eval.eval_checkpoint_path = ""
  # Number of steps to run the eval module for each episode. Used for debugging.
config.eval.max_num_steps: Optional[int] = 1_000_000
config.eval.log_to_csv: bool = True
config.eval.evaluator_name: str = "EvalActor"
config.eval.evaluator_type: str = "checkpoint"
config.eval.num_threads_per_inference_device: int = 1
  # Use this to set the number of learner frames per learner step. Only used
  # for logging purposes.
config.eval.default_learner_frames_per_step: int = 1
config.eval.rng_seed: int = 42
# TRAINING SETTINGS
config.train = ml_collections.ConfigDict()
config.train.learner_kwargs = ml_collections.ConfigDict(
dict(
unroll_len=1,
overlap_len=0,
batch_size=1024,
log_every_n_seconds=60,
reduce_metrics_all_devices=True,
log_to_csv=True))
config.train.max_number_of_frames: float = 1e10
config.train.init_checkpoint_path: Optional[str] = None
# All valid Kwargs for Checkpointer() in acme/tf/savers.py
config.train.checkpoint_kwargs = ml_collections.ConfigDict(
dict(
subdirectory="learner",
checkpoint_ttl_seconds=int(
datetime.timedelta(days=90).total_seconds()),
time_delta_minutes=5,
add_uid=True,
max_to_keep=5))
config.train.datasource = ml_collections.ConfigDict(
dict(
name="OfflineTFRecordDataSource",
kwargs=dict(
replay_versions=_DEFAULT_REPLAY_VERSIONS,
player_min_mmr=3500,
# This file is dynamically imported during training using
# importlib.
dataset_paths_fname="",
home_race=None,
away_race=None,
use_prev_features=True,
shuffle_buffer_size=1024,
extra_replay_filters=dict())),
convert_dict=True)
config.train.optimizer_kwargs = ml_collections.ConfigDict(
dict(
extra_weight_decay_mask_fn=None,
learning_rate=5e-4,
learning_rate_schedule_type=optimizers.LearningRateScheduleType
.COSINE,
lr_frames_before_decay=0,
lr_num_warmup_frames=0,
adam_b1=0.9,
adam_b2=0.98,
adam_eps=1e-8,
weight_decay=1e-5,
use_adamw=False,
before_adam_gradient_clipping_norm=10.0,
after_adam_gradient_clipping_norm=None,
weight_decay_filter_out=[],
staircase_lr_drop_factor=0.2))
config.train.loss = ml_collections.ConfigDict(
dict(
name="Supervised",
kwargs=dict(
weights=dict(
function=40.,
delay=9.,
queued=1.,
repeat=0.1,
target_unit_tag=30.,
unit_tags=320.,
world=11.),
burnin_len=0,
overlap_len=0,
name="supervised_loss")))
# ARCHITECTURE SETTINGS
config.architecture = ml_collections.ConfigDict(
dict(
name=arch_str,
kwargs=ml_collections.ConfigDict(dict(overlap_len=0, burnin_len=0))))
# TODO(b/207760816) : Make arch config shorter to call for ease of use.
config.architecture.kwargs.config = arch_util.get_default_config(arch_str)
config.converter_settings = ml_collections.ConfigDict(
dict(train=SUPERVISED_CONVERTER_SETTINGS, eval=EVAL_CONVERTER_SETTINGS))
return config
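# Hedged usage sketch (not part of the config): building and overriding the
# config in Python rather than via --config command-line flags. The
# 'alphastar.dummy' architecture name and the override values mirror the
# debugging example in scripts/train.py.
def _example_override_config() -> ml_collections.ConfigDict:
  config = get_config('alphastar.dummy')
  config.train.learner_kwargs.batch_size = 4
  config.train.learner_kwargs.unroll_len = 3
  config.train.datasource.kwargs.shuffle_buffer_size = 16
  config.train.datasource.name = 'DummyDataSource'
  return config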
| alphastar-main | alphastar/unplugged/configs/alphastar_supervised.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""An example script that trains an Alphastar agent and saves checkpoints.
The default arguments in the config will run a full-fledged training of an
AlphaStar agent -- it does training, stores checkpoints and logs training
details such as different losses, gradients etc. to CSV by default. This script
does not do any evaluation during training. Please run `scripts/evaluate.py` in
parallel to evaluate from the stored checkpoints. More instructions on how to
run evaluation can be found in the docstring of `scripts/evaluate.py`.
To run training with small batch size over 16 frames (real data consists of
billions of frames) for debugging purposes, run:
```shell
python alphastar/scripts/train.py \
--config=${PWD}/configs/alphastar_supervised.py:alphastar.dummy \
--config.train.max_number_of_frames=16 \
--config.train.learner_kwargs.batch_size=4 \
--config.train.datasource.kwargs.shuffle_buffer_size=16 \
--config.train.optimizer_kwargs.lr_frames_before_decay=4 \
--config.train.learner_kwargs.unroll_len=3 \
--config.train.datasource.name=DummyDataSource
```
Information about different architecture names can be found in
`architectures/README.md`.
* For full-fledged training, adjust the config kwargs accordingly or use the
  defaults provided in the config.
* To warmstart from a checkpoint, set config.train.init_checkpoint_path
* Set config.train.checkpoint_kwargs.add_uid = True to write new checkpoints
  to a unique directory: a UID is appended to the directory path on each new
  instantiation. Set this to False if you want to write to a directory without
  the UID (note that this could overwrite an existing checkpoint if the
  checkpoint directory already exists).
"""
import functools
from absl import app
from absl import flags
from absl import logging
from acme.jax import utils
from alphastar.architectures import architectures
from alphastar.modules import common as acme_common
from alphastar.modules import optimizers
from alphastar.unplugged import losses
from alphastar.unplugged.data import data_source
from alphastar.unplugged.data import data_source_base
from alphastar.unplugged.modules import learner
import jax
from ml_collections import config_flags
FLAGS = flags.FLAGS
_CONFIG = config_flags.DEFINE_config_file(
'config', help_string='Configuration file')
def main(_):
config = _CONFIG.value
num_devices = 1 # Number of devices over which training is run.
frames_per_step = int(config.train.learner_kwargs.batch_size *
config.train.learner_kwargs.unroll_len) * num_devices
architecture = architectures.get_architecture(config.architecture.name)
architecture = functools.partial(architecture,
**config.architecture.kwargs)
loss = functools.partial(
getattr(losses,
config.train.loss.name), **config.train.loss.kwargs)
optimizer, optimizer_logs_fn = optimizers.get_optimizer(
num_frames_per_learner_update=frames_per_step,
total_num_training_frames=config.train.max_number_of_frames,
**config.train.optimizer_kwargs)
data_source_kwargs = dict(
data_split=data_source_base.DataSplit.DEBUG,
converter_settings=config.converter_settings.train,
batch_size=config.train.learner_kwargs.batch_size,
unroll_len=config.train.learner_kwargs.unroll_len,
overlap_len=config.train.learner_kwargs.overlap_len,
**config.train.datasource.kwargs)
train_data_source = getattr(data_source, config.train.datasource.name)(
**data_source_kwargs)
logger = acme_common.make_default_logger(
'learner',
log_to_csv=config.train.learner_kwargs.log_to_csv,
time_delta=config.train.learner_kwargs.log_every_n_seconds,
asynchronous=True,
serialize_fn=utils.fetch_devicearray)
learner_node = learner.SupervisedLearner(
data_source=train_data_source,
architecture_builder=architecture,
loss_builder=loss,
optimizer=optimizer,
optimizer_logs_fn=optimizer_logs_fn,
logger=logger,
rng_key=jax.random.PRNGKey(22),
frames_per_step=frames_per_step,
increment_counts=False,
**config.train.learner_kwargs)
checkpointed_learner_node = acme_common.FramesLimitedCheckpointingRunner(
max_number_of_frames=config.train.max_number_of_frames,
num_frames_per_step=frames_per_step,
wrapped=learner_node,
**config.train.checkpoint_kwargs
)
checkpointed_learner_node.run()
logging.info('Finished training. GG!')
if __name__ == '__main__':
app.run(main)
| alphastar-main | alphastar/unplugged/scripts/train.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Script to run an evaluator for alphastar.
Running this script sets up an architecture, loads any associated checkpoints
if specified, and spins up a game against a bot (with a randomly chosen map and
home/opponent races) using the PySC2 environment wrappers. The script runs one
full episode for the chosen setting. At the end of the episode, the game
outcome and useful stats are logged to a CSV file by ACME loggers (in ~/acme).
To get low-noise results that can be reported for comparison against SOTA
benchmarks, results need to be averaged across these maps and races; to do
that, we recommend running the script across several CPU workers (~50 per
setting) with different random seeds (`config.eval.rng_seed`).
We list a few ways in which you could use this script. Information about
different architecture names can be found in architectures/README.md.
To use this script just to check that an evaluator is working correctly,
run it with random parameters for a few steps (this plays only a hundred steps,
not a full episode):
```shell
python alphastar/scripts/evaluate.py \
--config=${PWD}/configs/alphastar_supervised.py:alphastar.dummy \
--config.eval.log_to_csv=False \
--config.eval.max_num_steps=100 \
--config.eval.evaluator_type='random_params'
```
To run evaluation on an existing checkpoint for one full episode, run:
```shell
python alphastar/scripts/evaluate.py \
--config=${PWD}/configs/alphastar_supervised.py:<ARCHITECTURE_NAME> \
--config.eval.eval_checkpoint_path=<EVAL_CHECKPOINT_PATH>
```
where `<ARCHITECTURE_NAME>` is the architecture that the checkpoint was trained
with and `<EVAL_CHECKPOINT_PATH>` is the path to the checkpoint to evaluate.
To run evaluation on the most recent checkpoint in a directory (which is
usually done when evaluation runs in parallel with training), run:
```shell
python alphastar/scripts/evaluate.py \
--config=${PWD}/configs/alphastar_supervised.py:<ARCHITECTURE_NAME> \
--config.eval.eval_checkpoint_dir=<EVAL_CHECKPOINT_DIR>
```
"""
import functools
from absl import app
from alphastar.architectures import architectures
from alphastar.loggers import eval_episode_logger
from alphastar.modules import evaluator
from ml_collections import config_flags
_CONFIG = config_flags.DEFINE_config_file(
'config', help_string='Configuration file')
def main(_):
config = _CONFIG.value
architecture = architectures.get_architecture(config.architecture.name)
architecture = functools.partial(architecture,
**config.architecture.kwargs)
eval_actor = evaluator.EvalActor(
architecture=architecture,
learner_frames_per_step=config.eval.default_learner_frames_per_step,
episode_logger=eval_episode_logger.EvalEpisodeLogger(
log_name='eval',
log_to_csv=config.eval.log_to_csv),
learner_node=None,
converter_settings=config.converter_settings.eval,
competitor_name='very_easy',
**config.eval)
eval_actor.run_episode(eval_actor.setup_agent())
if __name__ == '__main__':
app.run(main)
| alphastar-main | alphastar/unplugged/scripts/evaluate.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| alphastar-main | alphastar/unplugged/modules/__init__.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for learner."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
from alphastar.architectures import architectures
from alphastar.commons import jax_utils
from alphastar.modules import optimizers
from alphastar.unplugged import losses
from alphastar.unplugged.configs import alphastar_supervised as expt_config_module
from alphastar.unplugged.data import data_source
from alphastar.unplugged.data import data_source_base
from alphastar.unplugged.modules import learner
import jax
def setUpModule():
# Disable JAX optimizations in order to speed up compilation.
jax_utils.disable_jax_optimizations()
def tearDownModule():
jax_utils.restore_jax_config()
class DistributedAgentLearnerTest(parameterized.TestCase):
"""Simple integration/smoke test for the distributed agent."""
# Evaluator needs to be tested separately as guitar test.
@parameterized.parameters('alphastar.dummy', 'alphastar.lite')
def test_agent_learner(self, architecture):
expt_config = expt_config_module.get_config(architecture)
expt_config.train.learner_kwargs.batch_size = 4
expt_config.train.learner_kwargs.unroll_len = 3
expt_config.train.learner_kwargs.log_to_csv = False
expt_config.train.datasource.kwargs.shuffle_buffer_size = 16
expt_config.train.max_number_of_frames = 96
expt_config.architecture.name = architecture
expt_config.train.optimizer_kwargs.lr_frames_before_decay = 12
expt_config.train.datasource.name = 'DummyDataSource'
frames_per_step = int(expt_config.train.learner_kwargs.batch_size *
expt_config.train.learner_kwargs.unroll_len)
architecture = architectures.get_architecture(expt_config.architecture.name)
architecture = functools.partial(architecture,
**expt_config.architecture.kwargs)
loss = functools.partial(
getattr(losses,
expt_config.train.loss.name), **expt_config.train.loss.kwargs)
optimizer, optimizer_logs_fn = optimizers.get_optimizer(
num_frames_per_learner_update=frames_per_step,
total_num_training_frames=expt_config.train.max_number_of_frames,
**expt_config.train.optimizer_kwargs)
train_data_source = getattr(data_source, expt_config.train.datasource.name)(
data_split=data_source_base.DataSplit.DEBUG,
converter_settings=expt_config.converter_settings.train,
batch_size=expt_config.train.learner_kwargs.batch_size,
unroll_len=expt_config.train.learner_kwargs.unroll_len,
overlap_len=expt_config.train.learner_kwargs.overlap_len,
**expt_config.train.datasource.kwargs)
learner_node = learner.SupervisedLearner(
data_source=train_data_source,
architecture_builder=architecture,
loss_builder=loss,
optimizer=optimizer,
optimizer_logs_fn=optimizer_logs_fn,
counter=None,
logger=None,
rng_key=jax.random.PRNGKey(22),
frames_per_step=frames_per_step,
increment_counts=False,
**expt_config.train.learner_kwargs)
for _ in range(2):
learner_node.step()
if __name__ == '__main__':
absltest.main()
| alphastar-main | alphastar/unplugged/modules/learner_test.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ACME based learner for Alphastar.
"""
# pylint: disable=logging-fstring-interpolation
import time
from typing import Any, Mapping, Optional, Sequence, Tuple
from absl import logging
import acme
from acme.jax import utils
from acme.utils import counting
from acme.utils import loggers
from alphastar.architectures import modular
from alphastar.commons import log_utils
from alphastar.commons import metrics
from alphastar.modules import agent as agent_lib
from alphastar.modules import common
from alphastar.unplugged import losses
from alphastar.unplugged.data import data_source_base
from alphastar.unplugged.data import util as data_util
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
import reverb
import tree
_PMAP_AXIS_NAME = 'data'
@chex.dataclass
class TrainingState:
"""Training state consists of network parameters and optimiser state."""
params: hk.Params
opt_state: optax.OptState
net_state: Any
step: int
rng: chex.PRNGKey
class SupervisedLearner(acme.Learner):
"""Supervised Learner module for newer style architectures (eg. v3)."""
def __init__(self,
architecture_builder: modular.ArchitectureBuilder,
loss_builder: losses.LossBuilder,
batch_size: int,
unroll_len: int,
overlap_len: int,
frames_per_step: int,
data_source: data_source_base.DataSource,
optimizer: optax.GradientTransformation,
optimizer_logs_fn,
rng_key: chex.PRNGKey,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
devices: Optional[Sequence[jax.xla.Device]] = None,
increment_counts: bool = True,
reduce_metrics_all_devices: bool = False,
log_every_n_seconds: int = 60,
log_to_csv: bool = True):
"""Initializes a supervised learner.
Args:
architecture_builder : A builder that constructs the agent architecture.
loss_builder : A builder function that constructs the training loss.
batch_size : Training batch size used per host.
unroll_len : Unroll length (sequence length) for the inputs.
      overlap_len : Overlap length between successive sequences.
frames_per_step : Number of frames used per training step.
data_source : Data source that is used as data iterator for training.
optimizer: An optax optimizer module.
optimizer_logs_fn : Helper function that logs optimizer statistics.
rng_key: A JAX random number generator.
      counter: An ACME counter object that keeps counts of different training
        statistics.
logger: An ACME logger object that logs training metrics.
devices: XLA devices for the learner model.
increment_counts: Boolean to decide if the learner needs to increment
counts. This can be True for a primary learner and False for other
learners in multi-host training.
reduce_metrics_all_devices: Boolean to decide if metrics needed to be
reduced across all hosts in multi-host training.
log_every_n_seconds: Interval between logs in seconds.
log_to_csv: Boolean to decide if logging to CSV is necessary. By default,
training stats are logged to the terminal.
"""
local_devices = jax.local_devices()
devices = devices or jax.devices()
local_devices = [d for d in devices if d in local_devices]
    logging.info(f'In total, there are {len(devices)} devices and '
                 f'{len(local_devices)} local devices.\nDevices are: '
                 f'{devices} and local devices are: {local_devices}')
# Error checks.
if batch_size % len(local_devices) != 0:
raise ValueError(
          f'Batch size ({batch_size}) must always be a multiple of the number '
          f'of training devices on each host ({len(local_devices)}).')
# Setup and initialization.
action_spec = data_source.action_spec
input_spec = data_source.input_spec
agent = agent_lib.AlphaStarAgent(
architecture_builder(input_spec, action_spec, True))
training_loss = loss_builder(action_spec)
def _loss(params, state, key, data):
if 'prev_features' in data:
logging.log_first_n(logging.INFO, 'Using prev_features', n=1)
state.update(jax.tree_map(lambda x: x[:, 0], data.get('prev_features')))
del data['prev_features']
outputs, next_state, _ = agent.apply(params, key, data, state)
loss_inputs = data.copy()
loss_inputs.update(outputs)
loss, logs = training_loss.batched_loss(loss_inputs)
mean_loss = jnp.mean(loss, axis=[0, 1])
reduced_logs = log_utils.reduce_logs(logs)
return mean_loss, (reduced_logs, next_state)
def update_step(
state: TrainingState, sample: reverb.ReplaySample
) -> Tuple[TrainingState, Mapping[str, jnp.ndarray]]:
"""Computes an SGD step, returning new state and metrics for logging."""
# Compute gradients.
grad_fn = jax.value_and_grad(_loss, has_aux=True)
loss_key, new_key = jax.random.split(state.rng)
(loss_value,
(logs, net_state)), gradients = grad_fn(state.params, state.net_state,
loss_key, sample)
# Average gradients over pmap replicas before optimizer update.
gradients = jax.lax.pmean(gradients, _PMAP_AXIS_NAME)
# Apply updates.
updates, new_opt_state = optimizer.update(gradients, state.opt_state,
state.params)
new_params = optax.apply_updates(state.params, updates)
if reduce_metrics_all_devices:
loss_value = jax.lax.pmean(loss_value, _PMAP_AXIS_NAME)
logs = metrics.reduce_metrics(logs, axis_name=_PMAP_AXIS_NAME)
training_metrics = {
'loss': loss_value,
'gradient_norm': optax.global_norm(gradients),
'param_norm': optax.global_norm(new_params),
'param_updates_norm': optax.global_norm(updates),
}
training_metrics.update(common.flatten_metrics(logs))
training_metrics.update(
common.flatten_metrics(optimizer_logs_fn(new_opt_state)))
new_steps = state.step + 1
new_state = TrainingState(
params=new_params,
opt_state=new_opt_state,
net_state=net_state,
step=new_steps,
rng=new_key,
)
return new_state, training_metrics
def make_initial_state(key: jnp.ndarray) -> TrainingState:
"""Initialises the training state (parameters and optimiser state)."""
key, new_key = jax.random.split(key)
per_local_device_batch_size = int(batch_size / len(local_devices))
dummy_obs = data_util.get_dummy_observation(
input_spec=data_source.input_spec,
batch_size=per_local_device_batch_size,
unroll_len=unroll_len)
initial_state_key, params_key = jax.random.split(key)
initial_state = agent.initial_state(
initial_state_key, per_local_device_batch_size)
params = agent.init(params_key, dummy_obs, initial_state)
params_log_lines = ['All params:']
tree.map_structure_with_path(
lambda path, v: params_log_lines.append(f'{path} {v.shape}'), params)
logging.info('\n'.join(params_log_lines))
initial_opt_state = optimizer.init(params)
return TrainingState(
params=params,
net_state=initial_state,
opt_state=initial_opt_state,
step=0,
rng=new_key)
# Initialize state.
rng_key, init_rng = jax.random.split(rng_key)
state = make_initial_state(init_rng)
self._local_devices = local_devices
self._frames_per_step = frames_per_step
self._state = utils.replicate_in_all_devices(state, local_devices)
self._prefetched_data_iterator = data_source.get_generator()
self._update_step = jax.pmap(
update_step, axis_name=_PMAP_AXIS_NAME, devices=devices)
# Set up logging/counting.
self._counter = counter
self._logger = logger or common.make_default_logger(
'learner', time_delta=log_every_n_seconds, log_to_csv=log_to_csv)
self._increment_counts = increment_counts
def _preprocess(self, data):
"""Reshapes input so that it can be distributed across multiple cores."""
def add_core_dimension(x):
num_devices = len(self._local_devices)
if x.shape[0] % num_devices != 0:
raise ValueError(f'The batch size must be a multiple of the number of'
f' devices. Got batch size = {x.shape[0]} and number'
f' of devices = {num_devices}.')
prefix = (num_devices, x.shape[0] // num_devices)
return np.reshape(x, prefix + x.shape[1:])
multi_inputs = jax.tree_map(add_core_dimension, data)
return multi_inputs
def step(self):
"""Does a step of SGD and logs the results."""
# Do a batch of SGD.
start = time.time()
samples = self._preprocess(next(self._prefetched_data_iterator))
self._state, results = self._update_step(self._state, samples)
# Take results from first replica.
results = utils.get_from_first_device(results, as_numpy=False)
if self._counter:
if self._increment_counts:
counts = self._counter.increment(
steps=1,
num_frames=self._frames_per_step,
time_elapsed=time.time() - start)
else:
counts = self._counter.get_counts()
if 'learner_steps' in counts:
results['steps_per_second'] = counts['learner_steps'] / counts[
'learner_time_elapsed']
results['frames_per_second'] = counts['learner_num_frames'] / counts[
'learner_time_elapsed']
else:
counts = {}
# Snapshot and attempt to write logs. Logger already throttles the logging.
self._logger.write({**results, **counts})
def get_variables(self, names: Sequence[str]) -> Sequence[hk.Params]:
# Return first replica of parameters.
return [utils.get_from_first_device(self._state.params, as_numpy=False)]
def save(self) -> TrainingState:
# Serialize only the first replica of parameters and optimizer state.
return jax.tree_map(utils.get_from_first_device, self._state)
def restore(self, state: TrainingState):
self._state = utils.replicate_in_all_devices(state, self._local_devices)
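# Hedged illustration (not part of the learner): what _preprocess does to each
# array -- splitting the leading batch axis across local devices so that
# jax.pmap sees [num_devices, per_device_batch, ...]. Shapes are invented.
def _example_add_core_dimension():
  batch = np.zeros((8, 3, 5))  # [batch_size=8, unroll_len=3, feature_dim=5]
  num_devices = 2
  per_device = batch.shape[0] // num_devices
  sharded = np.reshape(batch, (num_devices, per_device) + batch.shape[1:])
  assert sharded.shape == (2, 4, 3, 5)
  return sharded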
| alphastar-main | alphastar/unplugged/modules/learner.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data source for Riegeli-based datasets used in Alphastar offline training."""
import functools
from typing import Any, Generator, Mapping, Optional, Sequence, Tuple
from alphastar import types
from alphastar.unplugged.data import data_source_base
from alphastar.unplugged.data import util as data_utils
from alphastar.unplugged.data import path_utils
from pysc2.env import converted_env
from pysc2.env import enums as sc2_enums
from pysc2.env.converter.proto import converter_pb2
import tensorflow as tf
def make_episodes_dataset(
dataset_pattern,
features,
training=True,
):
"""Makes episodes dataset from files.
Args:
dataset_pattern: File path pattern for the dataset.
features: TF Example features in the dataset.
    training: Whether this is training mode; files are shuffled iff True.
Returns:
TF Dataset for the episodes data.
"""
files_ds = tf.data.Dataset.list_files(dataset_pattern, shuffle=training)
ds = files_ds.interleave(
tf.data.TFRecordDataset, num_parallel_calls=tf.data.AUTOTUNE)
serializer = data_utils.TFExampleCoder(features=features, compress=True)
ds = ds.map(serializer.decode, num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.repeat()
return ds
def make_unrolls_dataset(episode_ds: tf.data.Dataset, batch_size: int,
unroll_len: int, shuffle_buffer_size: int):
"""Makes unrolls dataset from episodes.
Args:
episode_ds: TF Dataset of episodes.
batch_size: Batch size used in the dataset.
unroll_len: Length of unroll for each sequence in the batch.
shuffle_buffer_size : Size of the shuffle buffer used for the unrolls.
Returns:
TF Dataset object with unrolled batched sequences.
"""
ds = episode_ds.flat_map(tf.data.Dataset.from_tensor_slices)
ds = ds.batch(unroll_len)
ds = ds.shuffle(
buffer_size=shuffle_buffer_size, reshuffle_each_iteration=True)
ds = ds.batch(batch_size)
ds = ds.prefetch(buffer_size=tf.data.AUTOTUNE)
return ds
class OfflineTFRecordDataSource(data_source_base.DataSource):
"""A data source that runs the environment to read replays."""
def __init__(self,
unroll_len: int,
overlap_len: int,
batch_size: int,
data_split: data_source_base.DataSplit,
converter_settings: converter_pb2.ConverterSettings,
replay_versions: Sequence[str],
player_min_mmr: int,
dataset_paths_fname: str,
home_race: Optional[sc2_enums.Race],
away_race: Optional[sc2_enums.Race],
use_prev_features: bool,
max_times_sampled: int = 1,
shuffle_buffer_size: int = 1024,
extra_replay_filters: Optional[Mapping[str, Any]] = None):
"""Initializes the offline TF Record data source.
Args:
unroll_len: Unroll length (sequence length) for the inputs.
      overlap_len: Overlap length between successive sequences.
batch_size: Batch size for the data.
data_split: Data split (train or test data)
converter_settings: Settings used for the converter that transforms the
observations and actions.
replay_versions: Sequence of replay files that are used as part of the
dataset.
player_min_mmr: Minimum MMR for the games to be a part of the dataset.
dataset_paths_fname: Filename of the python file that contains the path
information for different replay versions and MMR combinations.
home_race: Race of the player.
away_race: Race of the opponent.
use_prev_features: Whether to add the prev_feature field to the agent
inputs. They contain the actions taken at the step immediately before
the first time step of the rollout.
max_times_sampled: Maximum number of times to sample.
shuffle_buffer_size: Size of the shuffle buffer for the dataset.
extra_replay_filters: Map of further filters to be applied on the replay
data.
"""
super().__init__(unroll_len=unroll_len,
overlap_len=overlap_len,
batch_size=batch_size,
home_race=home_race,
away_race=away_race)
self._data_split = data_split
self._converter_settings = converter_settings
self._player_min_mmr = player_min_mmr
self._shuffle_buffer_size = shuffle_buffer_size
self._use_prev_features = use_prev_features
self._obs_spec, self._action_spec = converted_env.get_environment_spec(
self._converter_settings)
if extra_replay_filters:
# TODO(b/208419046): Add filtering support.
raise ValueError('Filtering is not supported yet.')
self._replay_versions = tuple(replay_versions)
dataset_pattern = path_utils.get_dataset_pattern(
self._replay_versions,
self._data_split,
self._player_min_mmr,
dataset_paths_fname)
if dataset_pattern is None:
raise ValueError(
f'Dataset not found matching '
f'replay_versions: {self._replay_versions}, '
f'data_split: {self._data_split}, '
f'player_min_mmr: {self._player_min_mmr}'
)
self._dataset_pattern = dataset_pattern
def get_generator(self) -> Generator[types.StreamDict, None, None]:
"""Get generator which yields training batches for the learner."""
features = data_utils.get_dataset_specs(self._obs_spec)
episodes_ds = make_episodes_dataset(self._dataset_pattern, features)
unrolls_ds = make_unrolls_dataset(
episodes_ds,
self._batch_size,
self._unroll_len + (1 if self._use_prev_features else 0),
shuffle_buffer_size=self._shuffle_buffer_size)
ds = unrolls_ds.prefetch(1)
yield from map(functools.partial(data_utils.as_learner_input,
use_prev_features=self._use_prev_features),
data_utils.iterate(ds))
@property
def env_spec(self) -> Tuple[types.ObsSpec, types.ActionSpec]:
_, obs_spec = data_utils.split_behaviour_actions(self._obs_spec)
obs_spec = types.SpecDict(obs_spec)
action_spec = types.SpecDict(self._action_spec)
return obs_spec, action_spec
@property
def input_spec(self) -> types.SpecDict:
behaviour_action_spec, obs_spec = data_utils.split_behaviour_actions(
self._obs_spec)
behaviour_features_spec = types.SpecDict(dict(action=behaviour_action_spec))
if self._use_prev_features:
prev_features_spec = behaviour_features_spec
else:
prev_features_spec = None
return data_utils.get_input_spec(
obs_spec,
behaviour_features_spec=behaviour_features_spec,
prev_features_spec=prev_features_spec)
class DummyDataSource(data_source_base.DataSource):
"""Data source that outputs dummy observations all the time (for testing)."""
def __init__(
self,
unroll_len: int,
batch_size: int,
converter_settings: converter_pb2.ConverterSettings,
**unused_kwargs):
"""Initializes the dummy data source.
Args:
unroll_len: Unroll length (sequence length) for the inputs.
batch_size: Batch size for the data.
converter_settings: Settings used for the converter that transforms the
observations and actions.
"""
self._unroll_len = unroll_len
self._batch_size = batch_size
self._obs_spec, self._action_spec = converted_env.get_environment_spec(
converter_settings)
def get_generator(self) -> Generator[types.StreamDict, None, None]:
dummy_obs = data_utils.get_dummy_observation(
self.input_spec,
batch_size=self._batch_size,
unroll_len=self._unroll_len)
while True:
yield dummy_obs
@property
def env_spec(self) -> Tuple[types.ObsSpec, types.ActionSpec]:
_, obs_spec = data_utils.split_behaviour_actions(self._obs_spec)
obs_spec = types.SpecDict(obs_spec)
action_spec = types.SpecDict(self._action_spec)
return obs_spec, action_spec
@property
def input_spec(self) -> types.SpecDict:
behaviour_action_spec, obs_spec = data_utils.split_behaviour_actions(
self._obs_spec)
behaviour_features_spec = types.SpecDict(dict(action=behaviour_action_spec))
return data_utils.get_input_spec(obs_spec, behaviour_features_spec)
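# Hedged usage sketch (not part of the library): pulling one all-zeros batch
# from the dummy source. The converter settings import path is assumed from
# the supervised config module in this repository.
def _example_dummy_batch():
  from alphastar.unplugged.configs import alphastar_supervised as config_module
  source = DummyDataSource(
      unroll_len=3,
      batch_size=4,
      converter_settings=config_module.SUPERVISED_CONVERTER_SETTINGS)
  # Each leaf has shape [batch_size=4, unroll_len=3, ...].
  return next(source.get_generator())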
| alphastar-main | alphastar/unplugged/data/data_source.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates tfrecords containing converted replay data.
Takes as input a partition file, as generated by generate_partitions.py,
plus a ConverterSettings proto file (in text format), e.g.
alphastar_supervised_converter_settings.pbtxt, converting all replays listed in
the partition from .SC2Replay to .tfrecord.
Note that replay playback requires the matching version of the StarCraft II
binary. It is recommended that replays are stored in version-specific
directories and
processed a version at a time. The `SC2PATH` environment variable can be used
to control the active SC2 version for each run.
"""
import os
from typing import Mapping, Optional
from absl import app
from absl import flags
from absl import logging
from alphastar.unplugged.data import util
import chex
import dm_env
import jax
import numpy as np
from pysc2.env import converted_env
from pysc2.env.converter.proto import converter_pb2
from pysc2.lib import protocol as sc2_protocol
from pysc2.lib import remote_controller
from pysc2.lib import sc_process
from pysc2.lib.replay import replay_converter
import tensorflow as tf
import websocket
from google.protobuf import text_format
FLAGS = flags.FLAGS
flags.DEFINE_string(
'sc2_replay_path', None, 'Path to .SC2Replay files.', required=True)
flags.DEFINE_string(
'converted_path', None, 'Path to .tfrecord files.', required=True)
flags.DEFINE_string(
'partition_file',
None,
'Path to partition file to be processed.',
required=True)
flags.DEFINE_string(
'converter_settings',
None,
'Converter settings to apply (pbtxt, using text_format).',
required=True)
flags.DEFINE_integer(
'max_retries', 3,
'Maximum number of times to retry parsing of a replay (which may be foiled '
'by websocket errors or similar) before moving on.')
def _generate_episode(
    replay_file_path: str, player_id: int, replay_data,
    converter_settings) -> Optional[Mapping[str, chex.Array]]:
"""Returns an episode as a tree of stacked converted observations."""
for attempt in range(FLAGS.max_retries + 1):
try:
      logging.info('Converting %s, player %d.%s', replay_file_path, player_id,
                   f' Attempt {attempt}.' if attempt else '')
observations = replay_converter.converted_observation_stream(
replay_data, player_id, converter_settings)
episode = jax.tree_map(lambda *xs: np.stack(xs), *observations)
except (
sc_process.SC2LaunchError,
remote_controller.ConnectError,
sc2_protocol.ConnectionError,
websocket.WebSocketTimeoutException,
) as e:
logging.warning('Ephemeral(?) exception while converting replay %s: %s.',
replay_file_path, e)
except AttributeError as e:
logging.warning(
'Giving up on converting replay %s for player %d due to '
'exception: %s.', replay_file_path, player_id, e)
return None
else:
return episode
  logging.warning(
      'Giving up on converting replay %s for player %d, max retries (%d) '
      'reached.', replay_file_path, player_id, FLAGS.max_retries)
  return None
def _step_type_array(length: int):
step_type = np.full((length,), fill_value=dm_env.StepType.MID, dtype=np.int32)
step_type[0] = dm_env.StepType.FIRST
step_type[-1] = dm_env.StepType.LAST
return step_type
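# Hedged illustration: for a 5-frame episode, _step_type_array marks the
# dm_env episode boundaries [FIRST, MID, MID, MID, LAST], i.e. [0, 1, 1, 1, 2].
def _example_step_types():
  step_types = _step_type_array(5)
  assert list(step_types) == [0, 1, 1, 1, 2]
  return step_types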
def _process(replay_file_path: str, converter_settings, serializer):
"""Yields ({replay_hash}_{player_id}, serialized episode).
0, 1 or 2 elements may be yielded, depending on whether the replay is
successfully parsed and on the number of players present in the replay.
Args:
replay_file_path: Path to an .SC2Replay file.
converter_settings: ConverterSettings proto.
serializer: For encoding episodes in serialized form.
"""
with tf.io.gfile.GFile(replay_file_path, 'rb') as f:
replay_data = f.read()
replay_hash = os.path.basename(replay_file_path).replace('.SC2Replay', '')
for player_id in [1, 2]: # Player IDs in a 2-player SC2 game.
episode = _generate_episode(replay_file_path, player_id, replay_data,
converter_settings)
if not episode:
# Skip failed replays.
continue
length = episode['player'].shape[0]
if length == 0:
# Skip empty replays.
continue
episode = dict(observation=episode, step_type=_step_type_array(length))
serialized_episode = serializer.encode(episode)
if serialized_episode:
episode_hash = f'{replay_hash}_{player_id}'
yield episode_hash, serialized_episode
def _write(episode_hash: str, serialized_episode):
"""Writes a serialized episode to file using TFRecordWriter."""
filename = episode_hash + '.tfrecord'
tmp_name = os.path.join(FLAGS.converted_path, '~' + filename)
final_name = os.path.join(FLAGS.converted_path, filename)
tf.io.gfile.makedirs(os.path.dirname(tmp_name))
tf.io.gfile.makedirs(os.path.dirname(final_name))
# Write to temp file first, and move only after writing is finished,
# to minimize the chances of partially-written files.
with tf.io.TFRecordWriter(tmp_name, options=None) as writer:
writer.write(serialized_episode)
tf.io.gfile.rename(tmp_name, final_name, overwrite=True)
def main(argv):
del argv
with tf.io.gfile.GFile(FLAGS.converter_settings, 'r') as f:
converter_settings = text_format.Parse(f.read(),
converter_pb2.ConverterSettings())
obs_spec, _ = converted_env.get_environment_spec(converter_settings)
features = util.get_dataset_specs(obs_spec)
serializer = util.TFExampleCoder(features=features, compress=True)
with tf.io.gfile.GFile(FLAGS.partition_file, 'r') as f:
replay_hashes = [l.strip() for l in f.readlines()]
for h in replay_hashes:
logging.info('Processing replay with hash %s.', h)
for episode_hash, serialized_episode in _process(
replay_file_path=os.path.join(FLAGS.sc2_replay_path, f'{h}.SC2Replay'),
converter_settings=converter_settings,
serializer=serializer):
logging.info('Writing episode hash %s.', episode_hash)
_write(episode_hash, serialized_episode)
if __name__ == '__main__':
app.run(main)
| alphastar-main | alphastar/unplugged/data/generate_dataset.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for data."""
import queue
import threading
from typing import Any, Callable, Generator, Iterable, Mapping, List, Optional, TypeVar, Union
import zlib
from absl import logging
from alphastar import types
import apache_beam as beam
from apache_beam import coders
import chex
import dm_env
from dm_env import specs
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import tree
def get_input_spec(
obs_spec: types.ObsSpec,
behaviour_features_spec: Optional[types.SpecDict] = None,
prev_features_spec: Optional[types.SpecDict] = None
) -> types.SpecDict:
"""Get full input spec given obs_spec and optional behaviour_action_spec."""
spec = types.SpecDict()
spec['step_type'] = specs.BoundedArray(
(), jnp.int32, minimum=0, maximum=int(max(dm_env.StepType)))
spec['observation'] = obs_spec
if behaviour_features_spec:
spec['behaviour_features'] = behaviour_features_spec
if prev_features_spec:
spec['prev_features'] = prev_features_spec
return spec
def get_dummy_observation(input_spec: types.SpecDict,
batch_size: int,
unroll_len: Optional[int]) -> types.StreamDict:
"""Return a dummy observation matching the spec."""
if unroll_len is None:
def zeros_like_spec(spec):
return jnp.zeros((batch_size,) + spec.shape, spec.dtype)
else:
def zeros_like_spec(spec):
return jnp.zeros((batch_size, unroll_len) + spec.shape, spec.dtype)
return jax.tree_map(zeros_like_spec, input_spec)
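# Hedged illustration: get_dummy_observation maps each leaf spec to zeros with
# [batch, unroll] leading axes. A plain dict of dm_env specs stands in for a
# full SpecDict here, purely for brevity.
def _example_dummy_obs():
  spec = {'health': specs.Array((2,), jnp.float32)}
  dummy = get_dummy_observation(spec, batch_size=3, unroll_len=4)
  assert dummy['health'].shape == (3, 4, 2)
  return dummy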
class FeatureSpec:
"""A description of the features used in the dataset.
Feature values are numpy arrays which are serialized as bytes.
"""
def __init__(self, dtype, shape):
"""Initializes a FeatureSpec.
Args:
      dtype: Dtype convertible with tf.as_dtype.
      shape: Shape convertible with tf.TensorShape. Make sure at most one
        dimension is None, as features can be reshaped along at most one
        unknown dimension.
"""
self._dtype = tf.as_dtype(dtype)
self._numpy_scalar_type = self.dtype.as_numpy_dtype
self._numpy_dtype = np.dtype(self._numpy_scalar_type).newbyteorder('<')
self._shape = tf.TensorShape(shape)
self._reshape_arg = [
d.value if d.value is not None else -1 for d in self.shape.dims
]
@property
def dtype(self):
"""Tf datatype for this feature."""
return self._dtype
@property
def shape(self):
"""Tensorshape for this feature."""
return self._shape
def _check_shape(self, array):
if not self.shape.is_compatible_with(array.shape):
      raise ValueError(f'Incompatible shape: expected {self.shape}, '
                       f'got {array.shape}.')
def value_to_string(self, value) -> str:
"""Serializes value according to the spec for this feature.
Args:
value: The value to serialize.
Returns:
A string/bytes.
Raises:
      ValueError: If the value's shape or dtype does not match this spec.
"""
array = np.asarray(value)
self._check_shape(array)
if array.dtype != self._numpy_dtype:
      raise ValueError('Dtype {} does not match {}'.format(
array.dtype, self._numpy_dtype))
return array.tobytes()
def string_to_value(self, byte_string):
"""Deserializes a byte string according to the spec of the feature.
Args:
      byte_string: The byte string to deserialize.
    Returns:
      A tensor reshaped according to this feature's spec.
"""
raw_value = tf.io.decode_raw(byte_string, self.dtype)
return tf.reshape(raw_value, self._reshape_arg)
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
if not isinstance(value, List):
value = [value]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
class TFExampleCoder(coders.Coder):
"""Helps encodes/decode rows of the dataset to/from numpy arrays/scalars."""
def __init__(self, features: Mapping[str, Any], compress: bool):
self._features = features
self._compress = compress
self._flat_features_with_paths = tree.flatten_with_path(features)
self._flat_features = [feat for _, feat in self._flat_features_with_paths]
flat_feature_paths = [path for path, _ in self._flat_features_with_paths]
self._string_flat_features = {
'/'.join(path): tf.io.FixedLenFeature((), tf.string)
for path in flat_feature_paths
}
self._success_counter = beam.metrics.Metrics.counter(
'SerializeFn', 'Success')
self._failure_counter = beam.metrics.Metrics.counter(
'SerializeFn', 'Failure')
def convert_to_proto(self, row: Mapping[str, Any]) -> tf.train.Example:
"""Encodes a nest of numpy scalars/arrays to a byte string."""
tree.assert_same_structure(row, self._features)
flat_row = tree.flatten(row)
features = {}
for (path, feature), value in zip(self._flat_features_with_paths, flat_row):
try:
value_str = feature.value_to_string(value)
if self._compress:
value_str = zlib.compress(value_str)
value_feature = _bytes_feature(value_str)
features['/'.join(path)] = value_feature
except (TypeError, ValueError) as e:
        e.args = (f'Incompatible data when encoding feature at path {path}. '
                  f'Value: {value}',)
raise
example_proto = tf.train.Example(
features=tf.train.Features(feature=features))
return example_proto
def serialize(self, proto: tf.train.Example) -> Optional[str]:
"""Serializes a given example to a proto string."""
try:
serialized_proto = proto.SerializeToString()
self._success_counter.inc()
except ValueError as e:
self._failure_counter.inc()
logging.info('2 GB Protobuf limit hit : %s', e)
serialized_proto = None
return serialized_proto
def encode(self, row: Mapping[str, Any]) -> Optional[str]:
"""Serializes a row of data into a proto."""
serialized_proto = self.serialize(self.convert_to_proto(row))
    if serialized_proto is not None and len(serialized_proto) > 1:
# Until b/203641663 is fixed, we skip episodes which fail to reconstruct.
# TODO(b/208420811): Skip if any array is >2gb instead.
try:
recons = self.decode(serialized_proto)
chex.assert_trees_all_close(row, recons)
except tf.errors.InvalidArgumentError as e:
logging.info('Failed to compress episode correctly. %s', e.message)
serialized_proto = None
return serialized_proto
def decode(self, example_string: str) -> Mapping[str, Any]:
parsed_example = tf.io.parse_single_example(example_string,
self._string_flat_features)
values = []
for (byte_string, feature) in zip(parsed_example.values(),
self._flat_features):
if self._compress:
byte_string = tf.io.decode_compressed(
byte_string, compression_type='ZLIB')
value_str = feature.string_to_value(byte_string)
values.append(value_str)
return tree.unflatten_as(self._features, values)
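# Hedged usage sketch (not part of the library): a FeatureSpec/TFExampleCoder
# round trip on a toy row. The leading dimension is declared None so episodes
# of any length fit; the 'reward' feature name is invented.
def _example_coder_roundtrip():
  features = {'reward': FeatureSpec(dtype=np.float32, shape=(None,))}
  coder = TFExampleCoder(features=features, compress=True)
  row = {'reward': np.asarray([0., 1., 0.5], np.float32)}
  decoded = coder.decode(coder.encode(row))
  chex.assert_trees_all_close(row, decoded)
  return decoded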
class Log(beam.DoFn):
"""Identity with logging. Useful for debugging."""
def __init__(self, prefix):
self._prefix = prefix
def process(self, item):
logging.info('%s: %s', self._prefix, repr(item))
yield item
def spec_to_feature(x: Union[np.ndarray, chex.Array]) -> FeatureSpec:
# Ensure scalars are stored as arrays of shape (1,) and not as scalars of
# shape (), so that they can be batched into proper arrays later.
if len(x.shape) == 1 and x.shape[0] == 1:
shape = (None,)
else:
shape = (None,) + x.shape
return FeatureSpec(dtype=x.dtype, shape=shape)
def get_dataset_specs(
obs_spec,
make_spec: Callable[..., FeatureSpec] = FeatureSpec
) -> Mapping[str, FeatureSpec]:
"""Generates dataset feature specs from SC2 observation specs."""
obs_features = jax.tree_map(spec_to_feature, obs_spec)
return dict(
step_type=make_spec(dtype=np.int32, shape=(None,)),
observation=obs_features)
T = TypeVar('T')
def prefetch(iterable: Iterable[T],
buffer_size: int) -> Generator[T, None, None]:
"""Performs prefetching of elements from an iterable in a separate thread.
Args:
iterable: An iterable to prefetch.
buffer_size: Maximum number of items to prefetch.
Yields:
Prefetched elements from the original iterable.
Raises:
Any error thrown by the iterable. Note this is not raised inside
the producer, but after it finishes executing.
"""
  if buffer_size < 1:
raise ValueError('buffer_size should be at least 1.')
buffer = queue.Queue(maxsize=buffer_size)
producer_error = []
end = object()
def producer():
"""Enques items from iterable on a given thread."""
try:
# Build a new iterable for each thread. This is crucial if working with
# tensorflow datasets because tf.graph objects are thread local.
for item in iterable:
buffer.put(item)
except Exception as e: # pylint: disable=broad-except
logging.exception('Error in producer thread and will be raised in '
'the main thread.')
producer_error.append(e)
finally:
buffer.put(end)
threading.Thread(target=producer, daemon=True).start()
# Consumer.
while True:
value = buffer.get()
if value is end:
break
yield value
if producer_error:
raise producer_error[0]
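# Usage sketch (`make_batches` is a hypothetical iterable factory):
#
#   for batch in prefetch(make_batches(), buffer_size=4):
#     train_step(batch)
#
# The producer thread keeps at most `buffer_size` items ahead of the consumer;
# any exception raised by the iterable is re-raised here after it finishes.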
def iterate(ds: tf.data.Dataset):
yield from prefetch(iter(tfds.as_numpy(ds)), 1)
def split_behaviour_actions(observation: Mapping[str, Any]):
"""Extracts out behaviour actions from the observation."""
behaviour_action_keys = [
key for key in observation.keys() if key.startswith('action/')
]
behaviour_actions = {
key.replace('action/', ''): observation[key]
for key in behaviour_action_keys
}
other_keys = set(observation.keys()) - set(behaviour_action_keys)
observation = {key: observation[key] for key in other_keys}
return behaviour_actions, observation
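# Illustrative example (keys and values are hypothetical):
#
#   obs = {'action/function': f, 'action/delay': d, 'player': p}
#   behaviour_actions, observation = split_behaviour_actions(obs)
#   # behaviour_actions == {'function': f, 'delay': d}
#   # observation == {'player': p}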
def as_learner_input(raw_input: Mapping[str, Any],
use_prev_features: bool = False) -> types.StreamDict:
"""Transform the raw_input into a StreamDict type usable by the learner.
If use_prev_features is set, the first timestep is used to generate the
previous features and is discarded from the main data. This means that
the output will have one less timestep than the input.
Args:
raw_input: The raw input to process.
use_prev_features: Whether to use the first timestep to generate the
prev_features field.
Returns:
A StreamDict containing data usable by the learner.
"""
behaviour_actions, observation = split_behaviour_actions(
raw_input['observation'])
output = types.StreamDict()
output['step_type'] = raw_input['step_type']
output['observation'] = types.StreamDict(observation)
output['behaviour_features', 'action'] = types.StreamDict(behaviour_actions)
if use_prev_features:
prev_output = jax.tree_map(lambda x: x[:, :-1], output)
output = jax.tree_map(lambda x: x[:, 1:], output)
output['prev_features', 'action'] = prev_output.get(
('behaviour_features', 'action'))
return output
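# Sketch of the prev_features offset (shapes [batch, time, ...] assumed): with
# use_prev_features=True and T input timesteps, the output has T - 1 timesteps;
# output timestep t pairs the data from input timestep t + 1 with the behaviour
# action from input timestep t, exposed as ('prev_features', 'action').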
| alphastar-main | alphastar/unplugged/data/util.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| alphastar-main | alphastar/unplugged/data/__init__.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for data/util.py."""
from absl.testing import parameterized
from alphastar.unplugged.data import util
import chex
import numpy as np
import tensorflow as tf
import tree
class TFCoderTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters([
[False,],
[True,],
])
def test_tf_coder(self, compress):
foo = np.asarray([[1., 2., 3.], [4., 5., 6.]], dtype=np.float32)
bar = np.asarray([1, 2, 3], dtype=np.int32)
baz = np.asarray(5, dtype=np.int32)
qux = np.asarray([50., 44.], dtype=np.float32)
bla = np.random.randint(-100, 100, size=[256, 53, 1])
blu = np.random.randn(64, 128, 128)
data = dict(
foo=foo,
bar=bar,
foo_bar=dict(baz=baz, qux=qux),
bla_blu=dict(bla=bla, blu=blu))
features = tree.map_structure(
lambda x: util.FeatureSpec(dtype=x.dtype, shape=x.shape), data)
coder = util.TFExampleCoder(features=features, compress=compress)
encoded_feature_str = coder.encode(data)
decoded_features = coder.decode(encoded_feature_str)
decoded_features_np = tree.map_structure(lambda x: x.numpy(),
decoded_features)
chex.assert_trees_all_close(decoded_features_np, data)
if __name__ == '__main__':
tf.test.main()
| alphastar-main | alphastar/unplugged/data/util_test.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates replay dataset partition files.
Given a directory containing .SC2Replay files and a directory which contains (or
will contain) converted .tfrecord files, determines which replays have yet to be
converted and partitions them, with each partition containing roughly the same
total replay file bytes.
Partitions are represented as text files containing newline-separated replay
hashes. The intention is that generate_dataset.py is instantiated multiple
times, with each instance being passed a separate partition to process.
"""
import collections
import os
from typing import Iterable, List, Sequence
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
FLAGS = flags.FLAGS
flags.DEFINE_string(
'sc2_replay_path', None, 'Path to .SC2Replay files.', required=True)
flags.DEFINE_string(
'converted_path', None, 'Path to .tfrecord files.', required=True)
flags.DEFINE_integer(
'num_partitions', None, 'Number of partitions to create', required=True)
flags.DEFINE_string(
'partition_path', None, 'Path to write partition files to.', required=True)
def _enumerate_replays(sc2_replay_path: str,
converted_path: str) -> Iterable[str]:
"""Determines which replays remain to be converted.
Args:
sc2_replay_path: Source directory containing .SC2Replay files to convert.
converted_path: Destination directory, possibly containing converted
versions of replays that have already been processed.
Returns:
An iterable of replay hashes that still require conversion.
"""
src = set(
f.replace('.SC2Replay', '') for f in tf.io.gfile.listdir(sc2_replay_path))
if tf.io.gfile.isdir(converted_path):
counts = collections.Counter(
f.replace('.tfrecord', '')[:-2] # -2 strips off player id.
for f in tf.io.gfile.listdir(converted_path))
# We expect all replays to be 2 player. Any that don't have a .tfrecord
# file for each player are marked for conversion.
dst = set(k for k, v in counts.items() if v == 2)
else:
logging.warning('converted_path %s does not exist.', converted_path)
dst = set()
return sorted(src - dst)
def _partition_replays_by_size(
sc2_replay_path: str,
replay_hashes: List[str],
num_partitions: int) -> Sequence[List[str]]:
"""Partitions a set of replays, taking into account file sizes.
Args:
sc2_replay_path: Source directory containing .SC2Replay files.
replay_hashes: List of hashes of replays to partition.
num_partitions: How many partitions to create.
Yields:
A sequence of lists of hashes, the original list partitioned such that each
is of roughly equal total file size.
"""
sizes = []
total_size = 0
for h in replay_hashes:
name = os.path.join(sc2_replay_path, f'{h}.SC2Replay')
size = tf.io.gfile.stat(name).length
sizes.append(size)
total_size = sum(sizes)
target_size = total_size // num_partitions
cumulative_size = 0
partition = 1
hashes = []
for h, size in zip(replay_hashes, sizes):
hashes.append(h)
cumulative_size += size
if cumulative_size // partition >= target_size:
yield hashes
hashes.clear()
partition += 1
if hashes:
yield hashes
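# Illustrative example (hashes and sizes are hypothetical): with replay sizes
# [10, 30, 20, 40] MB for hashes ['a', 'b', 'c', 'd'] and num_partitions=2,
# the target partition size is 50 MB and the generator yields
# ['a', 'b', 'c'] (60 MB, first to reach the target) and then ['d'] (40 MB).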
def _write_partition_files(partitions: Sequence[List[str]],
partition_path: str) -> Sequence[str]:
"""Writes a text file for each partition, of newline-separated strings.
Args:
partitions: A sequence of string lists.
partition_path: Directory to write partition files to.
Yields:
Sequence of partition filenames written.
"""
if not tf.io.gfile.isdir(partition_path):
tf.io.gfile.makedirs(partition_path)
for i, partition in enumerate(partitions):
name = os.path.join(partition_path, f'partition_{i}')
with tf.io.gfile.GFile(name, 'w') as f:
f.write('\n'.join(map(str, partition)))
yield name
def main(argv: Sequence[str]) -> None:
del argv
for p in _write_partition_files(
partitions=_partition_replays_by_size(
sc2_replay_path=FLAGS.sc2_replay_path,
replay_hashes=_enumerate_replays(
sc2_replay_path=FLAGS.sc2_replay_path,
converted_path=FLAGS.converted_path),
num_partitions=FLAGS.num_partitions),
partition_path=FLAGS.partition_path):
logging.info('Created %s.', p)
if __name__ == '__main__':
app.run(main)
| alphastar-main | alphastar/unplugged/data/generate_partitions.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base data source class for AlphaStar data."""
import abc
import enum
from typing import Generator, Optional, Tuple
from alphastar import collections
from alphastar import types
from pysc2.env import enums as sc2_enums
class DataSplit(str, enum.Enum):
TRAIN = "train"
TEST = "test"
DEBUG = "debug"
class DataSource(abc.ABC):
"""Abstract class for data sources."""
def __init__(self,
unroll_len: int,
overlap_len: int,
batch_size: int,
home_race: Optional[sc2_enums.Race],
away_race: Optional[sc2_enums.Race]):
self._unroll_len = unroll_len
self._overlap_len = overlap_len
if self._overlap_len >= self._unroll_len:
raise ValueError("Rollout length must be larger than overlap.")
self._batch_size = batch_size
if home_race == sc2_enums.Race.random or away_race == sc2_enums.Race.random:
# We raise an error here as using random can either mean only the random
# race, or any race, depending on the parts of the code.
raise ValueError("Filtering random race is not supported. "
"Use None to disable filtering.")
self._home_race = home_race
self._away_race = away_race
@property
@abc.abstractmethod
def env_spec(self) -> Tuple[types.ObsSpec, types.ActionSpec]:
"""The environment spec."""
@property
@abc.abstractmethod
def input_spec(self) -> types.SpecDict:
"""The full spec of the input to the agent."""
@property
def obs_spec(self) -> types.ObsSpec:
return self.env_spec[0]
@property
def action_spec(self) -> types.ActionSpec:
return self.env_spec[1]
@abc.abstractmethod
def get_generator(self) -> Generator[collections.Struct, None, None]:
"""Returns a data generator."""
| alphastar-main | alphastar/unplugged/data/data_source_base.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for AlphaStar dataset paths."""
import importlib.util
import os
from typing import Mapping, Optional, Tuple
_RelativePaths = Mapping[Tuple[Tuple[str, ...], str, int], str]
def _read_config(config_path: str) -> Tuple[str, _RelativePaths]:
"""Dynamically imports dataset paths config file and extracts key info."""
spec = importlib.util.spec_from_file_location('_paths', config_path)
if spec is None:
raise ValueError(
f'No module loader found for {config_path!r}. '
'This probably means that the file has an invalid extension. '
'The configuration file is expected to be a Python module.')
paths_module = importlib.util.module_from_spec(spec)
try:
spec.loader.exec_module(paths_module)
except FileNotFoundError as e:
raise ValueError(f'File {config_path} not found.') from e
return paths_module.BASE_PATH, paths_module.RELATIVE_PATHS
def get_dataset_pattern(
replay_versions: Tuple[str, ...],
data_split: str,
player_min_mmr: int,
dataset_paths_fname: str,
) -> Optional[str]:
"""Gets the dataset file pattern from replay versions."""
if dataset_paths_fname is None:
raise ValueError(f'Dataset paths file must not be None, got: {dataset_paths_fname}.')
base_path, relative_paths = _read_config(dataset_paths_fname)
if not base_path:
raise ValueError(f'Base path ({base_path}) for data cannot be None.')
pattern_key = (replay_versions, data_split, player_min_mmr)
pattern = relative_paths.get(pattern_key, None)
if not pattern:
return None
return os.path.join(base_path, pattern)
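# The config file is a Python module defining BASE_PATH and RELATIVE_PATHS,
# e.g. (all paths below are hypothetical):
#
#   BASE_PATH = '/data/alphastar'
#   RELATIVE_PATHS = {
#       (('4.9.2',), 'train', 3500): 'replays/4.9.2/train_3500/*.tfrecord',
#   }
#
# get_dataset_pattern(('4.9.2',), 'train', 3500, <config path>) would then
# return '/data/alphastar/replays/4.9.2/train_3500/*.tfrecord'.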
| alphastar-main | alphastar/unplugged/data/path_utils.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for match_generator.py."""
from pysc2.env import sc2_env
from alphastar.modules import match_generator
from absl.testing import absltest
class MatchGeneratorTest(absltest.TestCase):
def test_error_cases(self):
with self.assertRaisesRegex(ValueError, 'must be non-empty'):
match_generator.MatchGenerator([], [sc2_env.Race.terran], ['Acropolis'])
with self.assertRaisesRegex(ValueError, 'must be non-empty'):
match_generator.MatchGenerator([sc2_env.Race.terran], None, ['Acropolis'])
with self.assertRaisesRegex(ValueError, 'must be non-empty'):
match_generator.MatchGenerator([sc2_env.Race.zerg], [sc2_env.Race.terran],
[])
def test_build_in_bot(self):
gen = match_generator.MatchGenerator([sc2_env.Race.zerg],
[sc2_env.Race.zerg], ['Acropolis'])
match = gen.generate('very_easy')
self.assertEqual(
match,
match_generator.Match(sc2_env.Race.zerg, sc2_env.Race.zerg,
'Acropolis'))
if __name__ == '__main__':
absltest.main()
| alphastar-main | alphastar/modules/match_generator_test.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launches and tests the evaluator for a few steps.
The test spins up the evaluator with a v3 architecture and runs just 100
steps of one episode (config.eval.max_num_steps = 100) to make sure that the
evaluation pipeline is mistake free.
"""
import functools
from absl.testing import absltest
from absl.testing import parameterized
from alphastar.architectures import architectures
from alphastar.loggers import eval_episode_logger
from alphastar.modules import evaluator
from alphastar.unplugged.configs import alphastar_supervised as expt_config_module
import jax
class EvaluatorTest(parameterized.TestCase):
"""Simple integration/smoke test for the ACME Evaluator."""
# Evaluator needs to be tested separately as guitar test.
# TODO(b/206426779) : Add a variant with a checkpoint trained in TP with v3.
@parameterized.parameters(
['alphastar.lite', 'EvalActor'],
['alphastar.dummy', 'EvalActor'],
['alphastar.dummy', 'ThreadedUnbatchedEvalActor'])
def test_evaluator_with_random_params(self, architecture, evaluator_type):
expt_config = expt_config_module.get_config(architecture)
expt_config.eval.max_num_steps = 100
architecture = architectures.get_architecture(expt_config.architecture.name)
architecture = functools.partial(architecture,
**expt_config.architecture.kwargs)
expt_config.eval.eval_checkpoint_dir = None
expt_config.eval.log_to_csv = False
expt_config.eval.evaluator_type = 'random_params'
expt_config.eval.evaluator_name = evaluator_type
expt_config.eval.num_threads_per_inference_device = 3
eval_actor = getattr(evaluator, expt_config.eval.evaluator_name)(
rng=jax.random.PRNGKey(42),
learner_frames_per_step=expt_config.eval
.default_learner_frames_per_step,
architecture=architecture,
episode_logger=eval_episode_logger.EvalEpisodeLogger(
log_name='eval',
log_to_csv=expt_config.eval.log_to_csv),
learner_node=None,
converter_settings=expt_config.converter_settings.eval,
competitor_name='very_easy',
total_num_episodes_per_thread=1,
**expt_config.eval)
if evaluator_type == 'ThreadedUnbatchedEvalActor':
eval_actor.run()
else:
eval_actor.run_episode(eval_actor.setup_agent())
if __name__ == '__main__':
absltest.main()
| alphastar-main | alphastar/modules/evaluator_test.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for common."""
import functools
import os
from absl.testing import absltest
from acme.tf import savers as tf_savers
from alphastar.architectures import architectures
from alphastar.commons import jax_utils
from alphastar.modules import common
from alphastar.modules import optimizers
from alphastar.unplugged import losses
from alphastar.unplugged.configs import alphastar_supervised as expt_config_module
from alphastar.unplugged.data import data_source
from alphastar.unplugged.modules import learner as supervised_learner
import chex
import jax
def setUpModule():
# Disable JAX optimizations in order to speed up compilation.
jax_utils.disable_jax_optimizations()
def tearDownModule():
jax_utils.restore_jax_config()
class CommonTest(absltest.TestCase):
"""Simple tests for common.py."""
def test_checkpoint(self):
expt_config = expt_config_module.get_config('alphastar.dummy')
expt_config.train.learner_kwargs.batch_size = 4
expt_config.train.learner_kwargs.unroll_len = 3
expt_config.train.learner_kwargs.log_to_csv = False
expt_config.train.datasource.kwargs.shuffle_buffer_size = 16
expt_config.train.max_number_of_frames = 96
expt_config.architecture.name = 'alphastar.dummy'
expt_config.train.optimizer_kwargs.lr_frames_before_decay = 12
expt_config.train.datasource.name = 'DummyDataSource'
frames_per_step = int(expt_config.train.learner_kwargs.batch_size *
expt_config.train.learner_kwargs.unroll_len)
architecture = architectures.get_architecture(expt_config.architecture.name)
architecture = functools.partial(architecture,
**expt_config.architecture.kwargs)
loss = functools.partial(
getattr(losses,
expt_config.train.loss.name), **expt_config.train.loss.kwargs)
optimizer, optimizer_logs_fn = optimizers.get_optimizer(
num_frames_per_learner_update=frames_per_step,
total_num_training_frames=expt_config.train.max_number_of_frames,
**expt_config.train.optimizer_kwargs)
train_data_source = data_source.DummyDataSource(
batch_size=expt_config.train.learner_kwargs.batch_size,
unroll_len=expt_config.train.learner_kwargs.unroll_len,
converter_settings=expt_config.converter_settings.train)
learner = supervised_learner.SupervisedLearner(
data_source=train_data_source,
architecture_builder=architecture,
loss_builder=loss,
optimizer=optimizer,
optimizer_logs_fn=optimizer_logs_fn,
counter=None,
logger=None,
rng_key=jax.random.PRNGKey(42),
frames_per_step=frames_per_step,
increment_counts=False,
**expt_config.train.learner_kwargs)
checkpointer = tf_savers.Checkpointer(
{'wrapped': learner},
directory=f'{self.create_tempdir().full_path}/alphastar')
checkpointer.save(force=True)
restored_learner = supervised_learner.SupervisedLearner(
data_source=train_data_source,
architecture_builder=architecture,
loss_builder=loss,
optimizer=optimizer,
optimizer_logs_fn=optimizer_logs_fn,
counter=None,
logger=None,
rng_key=jax.random.PRNGKey(21),
frames_per_step=frames_per_step,
increment_counts=False,
**expt_config.train.learner_kwargs)
checkpoint_path = os.path.join(checkpointer.directory, 'ckpt-1')
common.restore_from_checkpoint(
restored_learner, checkpoint_path)
chex.assert_trees_all_close(restored_learner.save(), learner.save())
ckpt_gen = common.get_checkpoint_generator(checkpointer.directory)
state, _ = next(ckpt_gen)
chex.assert_trees_all_close(state, learner.save())
ckpt_gen_path = common.get_checkpoint_generator_for_path(checkpoint_path)
state, _ = next(ckpt_gen_path)
chex.assert_trees_all_close(state, learner.save())
if __name__ == '__main__':
absltest.main()
| alphastar-main | alphastar/modules/common_test.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for optimizers."""
from absl.testing import absltest
from absl.testing import parameterized
from alphastar.modules import optimizers
import chex
import haiku as hk
import jax.numpy as jnp
class OptimizersTest(parameterized.TestCase):
"""Optimizer Tests."""
def get_params(self):
return hk.data_structures.to_haiku_dict(
dict(
module_1=dict(
layer_norm=jnp.array([1., 2.]), weight=jnp.array([3., 4.]))))
def get_gradients(self):
return hk.data_structures.to_haiku_dict(
dict(
module_1=dict(
layer_norm=jnp.array([10., 20.]), weight=jnp.array([30., 40.
]))))
@parameterized.parameters(
(optimizers.LearningRateScheduleType.STAIRCASE, True, 0.4, 1.0, 1.0,
[[-0.0642, -0.1176], [-0.1657, -0.2111]]),
(optimizers.LearningRateScheduleType.COSINE, True, 0.4, 1.0, 1.0,
[[-0.1480, -0.2710], [-0.3820, -0.4864]]),
(optimizers.LearningRateScheduleType.COSINE, False, 0.4, 1.0, 1.0,
[[-0.1849, -0.2917], [-0.3614, -0.4103]]),
(optimizers.LearningRateScheduleType.COSINE, False, 0.4, 1.0, 0.03,
[[-0.1849, -0.2917], [-0.3614, -0.4103]]),
)
def test_get_optimizer(self, lr_schedule, use_adamw, weight_decay,
before_norm, after_norm, results):
optimizer, _ = optimizers.get_optimizer(
num_frames_per_learner_update=8,
total_num_training_frames=100,
extra_weight_decay_mask_fn=None,
weight_decay_filter_out=['layer_norm'],
learning_rate=1.0,
learning_rate_schedule_type=lr_schedule,
lr_frames_before_decay=32,
lr_num_warmup_frames=16,
adam_b1=0.99,
adam_b2=0.99,
adam_eps=0.5,
use_adamw=use_adamw,
weight_decay=weight_decay,
staircase_lr_drop_factor=0.3,
before_adam_gradient_clipping_norm=before_norm,
after_adam_gradient_clipping_norm=after_norm)
state = optimizer.init(self.get_params())
for _ in range(8):
updates, state = optimizer.update(self.get_gradients(), state,
self.get_params())
chex.assert_trees_all_equal_structs(
hk.data_structures.to_haiku_dict({
'module_1': {
'layer_norm': jnp.array(results[0]),
'weight': jnp.array(results[1])
}
}), updates)
if __name__ == '__main__':
absltest.main()
| alphastar-main | alphastar/modules/optimizers_test.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Initializes modules used in AlphaStar."""
| alphastar-main | alphastar/modules/__init__.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom Types used in ACME Jaxcraft."""
from typing import Any, Callable
import acme
from acme import core
AgentNetwork = Any
LearnerFactory = Callable[..., acme.Learner]
EvaluatorFactory = Callable[..., core.Worker]
| alphastar-main | alphastar/modules/types.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Episode run loop utilities."""
from typing import Any, Dict, Optional
from alphastar import types
from alphastar.loggers import episode_logger as episode_logger_lib
from alphastar.modules import evaluator_base
import dm_env
import jax
import numpy as np
def play_episode(
env: dm_env.Environment,
agent: evaluator_base.Evaluator,
player: int,
episode_logger: episode_logger_lib.EpisodeLogger,
static_log: Dict[str, Any],
max_num_steps: Optional[int] = None):
"""Plays out an episode against an environment using the specified evaluator.
Args:
env: dm_env that will accept PySC2 actions and return PySC2 observations.
agent: An agent evaluator.
player: Index of the player in the game (0 or 1).
episode_logger: Logger for step output.
static_log: Static log output for the episode logger.
max_num_steps: Maximum number of steps before termination.
Raises:
RuntimeError: if the agent output has a wrong format.
"""
timestep = env.step({})
step_counter = 0
while timestep.step_type != dm_env.StepType.LAST:
first_step = timestep.step_type == dm_env.StepType.FIRST
agent_output, logs = agent.step(timestep)
episode_logger.register_step(
player=player,
step_type=timestep.step_type,
prev_reward=timestep.reward if not first_step else 0.0,
observation=timestep.observation,
agent_output=agent_output,
log=logs,
static_log=static_log)
# Can this be done by the evaluator(s)?
if 'action' not in agent_output:
raise RuntimeError(
'The agent must provide an "action" key in its output.')
try:
timestep = env.step(jax.tree_map(np.squeeze, agent_output.get('action')))
except TypeError as e:
raise RuntimeError(
f'Action issue in stepping : {agent_output["action"]}') from e
step_counter += 1
if max_num_steps and step_counter >= max_num_steps:
break
# Register the final step.
episode_logger.register_step(
player=player,
step_type=timestep.step_type,
prev_reward=timestep.reward,
observation=types.StreamDict(timestep.observation),
agent_output=None,
log=None,
static_log=static_log)
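# Usage sketch (environment and evaluator construction are elided; the
# arguments shown are hypothetical):
#
#   play_episode(env=sc2_environment, agent=my_evaluator, player=0,
#                episode_logger=logger, static_log={'map_name': 'Acropolis'},
#                max_num_steps=1000)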
| alphastar-main | alphastar/modules/run_loop.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optimizers for the learner."""
import enum
import functools
from typing import Any, Callable, List, Optional, Tuple
from absl import logging
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
class LearningRateScheduleType(str, enum.Enum):
STAIRCASE = 'staircase'
COSINE = 'cosine'
class WeightDecayFilterType(str, enum.Enum):
LAYER_NORM = 'layer_norm'
BIAS = 'bias'
WeightDecayFilter = Callable[[optax.Params], Any]
# Schedules:
def get_staircase_schedule(
num_updates_before_decay: int,
drop_factor: float,
num_decays_cutoff: int = 9) -> optax.Schedule:
"""Gets a staircase style schedule for learning rate."""
if num_updates_before_decay * num_decays_cutoff > np.iinfo(np.int32).max:
raise ValueError('num_updates_before_decay is too large to fit into int32. '
'Decrease it or increase batch_size and/or unroll_len.')
return optax.piecewise_constant_schedule(
init_value=1.0,
boundaries_and_scales={
k * num_updates_before_decay: drop_factor
for k in range(1, num_decays_cutoff+1)})
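# Illustrative values: with num_updates_before_decay=100 and drop_factor=0.5,
# the schedule returns a scale of 1.0 for updates in [0, 100), 0.5 in
# [100, 200), 0.25 in [200, 300), and so on, up to num_decays_cutoff drops.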
def get_cosine_schedule(
num_updates_before_decay: int,
total_num_training_updates: int) -> optax.Schedule:
"""Gets a cosine decay schedule for learning rate."""
num_decayed_updates = total_num_training_updates - num_updates_before_decay
def schedule(count):
offset_count = jnp.maximum(count - num_updates_before_decay, 0)
return optax.cosine_decay_schedule(
init_value=1, decay_steps=num_decayed_updates)(offset_count)
return schedule
def add_warmup_to_schedule(
num_warmup_updates: int,
wrapped_schedule: optax.Schedule) -> optax.Schedule:
"""Wrapper module to add warmup to any schedule."""
def schedule(count):
factor = jnp.minimum(count / num_warmup_updates, 1.)
return wrapped_schedule(count) * factor
return schedule
# Weight decay:
def layer_norm_weight_filter(params):
def f(module_name, name, value):
del name, value
return 'layer_norm' not in module_name
return hk.data_structures.map(f, params)
def bias_weight_filter(params):
def f(module_name, name, value):
del module_name, value
return name != 'b'
return hk.data_structures.map(f, params)
weight_decay_filters = {
WeightDecayFilterType.LAYER_NORM: layer_norm_weight_filter,
WeightDecayFilterType.BIAS: bias_weight_filter,
}
def _logging_fn(opt_state, lr_schedule, scale_index: int, learning_rate: float):
"""A logging function that extracts logs fom optimizer state.
Args:
opt_state: Optimizer state.
lr_schedule: Learning rate schedule function.
scale_index: Index of learning rate scale in optimizer state.
learning_rate: Learning Rate fed into optimizer.
Returns:
Logs from the optimizer state.
"""
log = {}
learning_rate_scale = jnp.squeeze(lr_schedule(opt_state[scale_index].count))
log['learning_rate_scale'] = learning_rate_scale
log['learning_rate'] = learning_rate * learning_rate_scale
return log
def get_optimizer(
num_frames_per_learner_update: int,
extra_weight_decay_mask_fn: Optional[WeightDecayFilter],
total_num_training_frames: int,
weight_decay_filter_out: List[str],
learning_rate: float,
learning_rate_schedule_type: LearningRateScheduleType,
lr_frames_before_decay: float,
lr_num_warmup_frames: float,
adam_b1: float,
adam_b2: float,
adam_eps: float,
use_adamw: bool,
weight_decay: float,
staircase_lr_drop_factor: float,
before_adam_gradient_clipping_norm: Optional[float] = None,
after_adam_gradient_clipping_norm: Optional[float] = None,
) -> Tuple[optax.GradientTransformation, Any]:
"""Build the optimizer from the flags.
Args:
num_frames_per_learner_update: The number of frames processed per learner
update.
extra_weight_decay_mask_fn: A function which takes params and returns a tree
with boolean leaves stating whether we should apply weight decay to this
weight vector.
total_num_training_frames: Total number of training frames.
weight_decay_filter_out: Parameters to exclude from weight decay. Each entry
must be one of the WeightDecayFilterType values ('layer_norm' or 'bias').
learning_rate: Initial learning rate.
learning_rate_schedule_type: Type of learning rate schedule.
lr_frames_before_decay: Number of training frames before the learning rate
starts being reduced.
lr_num_warmup_frames: Number of training frames for learning rate warmup.
adam_b1: Adam b1 parameter.
adam_b2: Adam b2 parameter.
adam_eps: Adam epsilon parameter.
use_adamw: Whether to use AdamW. If not, weight decay is applied before
Adam.
weight_decay: Coefficient of weight decay.
staircase_lr_drop_factor: Multiply the learning rate by this when decaying.
before_adam_gradient_clipping_norm: Global gradient norm for clipping
before Adam.
after_adam_gradient_clipping_norm: Global gradient norm for clipping after
Adam.
Returns:
A tuple containing the optax optimizer and a logging function
which take the optimizer state as an input and return dict to log.
"""
optimizers = []
if weight_decay:
mask_fns = [weight_decay_filters[x] for x in weight_decay_filter_out]
if extra_weight_decay_mask_fn:
mask_fns.append(extra_weight_decay_mask_fn)
def mask_fn(params):
all_masks = [f(params) for f in mask_fns]
if all_masks:
output = jax.tree_map(lambda *masks: all(masks), *all_masks)
else:
output = jax.tree_map(lambda _: True, params)
logging.info('Using weight decay filter:\n%s', output)
return output
weight_decay = optax.masked(
inner=optax.additive_weight_decay(weight_decay),
mask=mask_fn)
else:
weight_decay = None
if weight_decay and not use_adamw:
optimizers.append(weight_decay)
if before_adam_gradient_clipping_norm:
optimizers.append(optax.clip_by_global_norm(
before_adam_gradient_clipping_norm))
optimizers.append(optax.scale_by_adam(
b1=adam_b1, b2=adam_b2, eps=adam_eps))
if weight_decay and use_adamw:
optimizers.append(weight_decay)
if after_adam_gradient_clipping_norm:
optimizers.append(optax.clip_by_global_norm(
after_adam_gradient_clipping_norm))
num_updates_before_decay = int(
lr_frames_before_decay / num_frames_per_learner_update)
total_num_training_updates = int(
total_num_training_frames / num_frames_per_learner_update)
if learning_rate_schedule_type == LearningRateScheduleType.STAIRCASE:
lr_schedule = get_staircase_schedule(
num_updates_before_decay=num_updates_before_decay,
drop_factor=staircase_lr_drop_factor)
elif learning_rate_schedule_type == LearningRateScheduleType.COSINE:
lr_schedule = get_cosine_schedule(
num_updates_before_decay=num_updates_before_decay,
total_num_training_updates=total_num_training_updates)
else:
raise ValueError(f'Unknown schedule {learning_rate_schedule_type}.')
if lr_num_warmup_frames:
num_warmup_updates = int(
lr_num_warmup_frames / num_frames_per_learner_update)
lr_schedule = add_warmup_to_schedule(num_warmup_updates, lr_schedule)
logging_fn = functools.partial(
_logging_fn, lr_schedule=lr_schedule, scale_index=len(optimizers),
learning_rate=learning_rate)
optimizers.append(optax.scale_by_schedule(lr_schedule))
optimizers.append(optax.scale(-learning_rate))
return optax.chain(*optimizers), logging_fn
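# Minimal usage sketch (all hyperparameter values below are illustrative):
#
#   optimizer, logs_fn = get_optimizer(
#       num_frames_per_learner_update=1024,
#       extra_weight_decay_mask_fn=None,
#       total_num_training_frames=10_000_000,
#       weight_decay_filter_out=[WeightDecayFilterType.LAYER_NORM],
#       learning_rate=5e-4,
#       learning_rate_schedule_type=LearningRateScheduleType.COSINE,
#       lr_frames_before_decay=1_000_000,
#       lr_num_warmup_frames=100_000,
#       adam_b1=0.9, adam_b2=0.98, adam_eps=1e-8,
#       use_adamw=True,
#       weight_decay=1e-4,
#       staircase_lr_drop_factor=0.3)
#   opt_state = optimizer.init(params)
#   updates, opt_state = optimizer.update(grads, opt_state, params)
#   logs = logs_fn(opt_state)  # {'learning_rate_scale': ..., 'learning_rate': ...}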
| alphastar-main | alphastar/modules/optimizers.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
"""Tests for agent."""
from absl.testing import absltest
from absl.testing import parameterized
from alphastar.architectures import architectures
from alphastar.commons import jax_utils
from alphastar.modules import agent
from alphastar.unplugged.data import util as data_util
from dm_env import specs
import jax.numpy as jnp
def setUpModule():
# Disable JAX optimizations in order to speed up compilation.
jax_utils.disable_jax_optimizations()
def tearDownModule():
jax_utils.restore_jax_config()
def get_test_specs():
max_num_selected_units = 4
obs_spec = {
'away_race_observed': specs.Array((), jnp.int32),
'away_race_requested': specs.Array((), jnp.int32),
'camera': specs.Array((256, 256), jnp.int32),
'camera_position': specs.Array((2,), jnp.int32),
'camera_size': specs.Array((2,), jnp.int32),
'game_loop': specs.Array((), jnp.int32),
'home_race_requested': specs.Array((), jnp.int32),
'minimap_alerts': specs.BoundedArray(
(128, 128), jnp.int32, minimum=0, maximum=5),
'minimap_buildable': specs.BoundedArray(
(128, 128), jnp.int32, minimum=0, maximum=1),
'minimap_creep': specs.BoundedArray(
(128, 128), jnp.int32, minimum=0, maximum=2),
'minimap_height_map': specs.BoundedArray(
(128, 128), jnp.int32, minimum=0, maximum=255),
'minimap_pathable': specs.BoundedArray(
(128, 128), jnp.int32, minimum=0, maximum=1),
'minimap_player_relative': specs.BoundedArray(
(128, 128), jnp.int32, minimum=0, maximum=3),
'minimap_visibility_map': specs.BoundedArray(
(128, 128), jnp.int32, minimum=0, maximum=2),
'mmr': specs.Array((), jnp.int32),
'player': specs.Array((7,), jnp.int32),
'raw_units': specs.Array((10, 47), jnp.int32),
'unit_counts_bow': specs.Array((5,), jnp.int32),
'upgrades_fixed_length': specs.BoundedArray(
(8,), jnp.int32, minimum=0, maximum=13)}
action_spec = {
'function': specs.BoundedArray((), jnp.int32, minimum=0, maximum=10),
'delay': specs.BoundedArray((), jnp.int32, minimum=0, maximum=6),
'queued': specs.BoundedArray((), jnp.int32, minimum=0, maximum=2),
'repeat': specs.BoundedArray((), jnp.int32, minimum=0, maximum=4),
'unit_tags': specs.BoundedArray(
(max_num_selected_units,), jnp.int32, minimum=0, maximum=10),
'target_unit_tag': specs.BoundedArray(
(), jnp.int32, minimum=0, maximum=9),
'world': specs.BoundedArray(
(), jnp.int32, minimum=0, maximum=256**2 - 1)}
return obs_spec, action_spec
class EvaluatorTest(parameterized.TestCase):
@parameterized.parameters(('alphastar.dummy', False),
('alphastar.dummy', True),
('alphastar.lite', False),
('alphastar.lite', True))
def test_no_recompilation(self, architecture: str, is_training: bool):
builder = architectures.get_architecture(architecture)
obs_spec, action_spec = get_test_specs()
behaviour_features_spec = {'action': action_spec} if is_training else None
input_spec = data_util.get_input_spec(
obs_spec=obs_spec, behaviour_features_spec=behaviour_features_spec)
component = builder(input_spec, action_spec, is_training, 0, 0)
alphastar_agent = agent.AlphaStarAgent(component)
alphastar_agent.warmup()
with jax_utils.no_jax_compilation_allowed():
alphastar_agent.warmup()
if __name__ == '__main__':
absltest.main()
| alphastar-main | alphastar/modules/agent_test.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common modules used in training and evaluation."""
# pylint: disable=logging-fstring-interpolation
import re
import threading
import time
from typing import Any, Callable, Iterator, List, Mapping, Optional, Sequence
from typing import Tuple
from absl import logging
from acme import core
from acme.tf import savers as tf_savers
from acme.utils import loggers as acme_loggers
from alphastar.collections import jax as jax_collections
import chex
import ml_collections
import pandas as pd
import tensorflow as tf
jax_collections.register_struct()
class FramesLimitedCheckpointingRunner(tf_savers.CheckpointingRunner):
"""Extension of CheckpointingRunner terminating on number of frames."""
def __init__(self, num_frames_per_step: int, max_number_of_frames: int,
**kwargs):
super().__init__(**kwargs)
self._max_number_of_frames = max_number_of_frames
self._num_frames_per_step = num_frames_per_step
def run(self):
state = self.save()
while state.step * self._num_frames_per_step < self._max_number_of_frames:
self.step()
state = self.save()
class LockedIterator:
"""Wrapper around an iterator to guarantee thread-safety."""
def __init__(self, iterator):
self._lock = threading.Lock()
self._iterator = iter(iterator)
def __iter__(self):
return self
def __next__(self):
with self._lock:
return next(self._iterator)
class MockSaveableLearner(core.Saveable):
def __init__(self, state: Optional[chex.Array] = None):
self._state = state
def save(self):
return self._state
def restore(self, state):
self._state = state
def get_standard_loggers(
label: str,
log_to_csv: bool = True,
print_fn: Optional[Callable[[str], None]] = None,
) -> List[acme_loggers.Logger]:
"""Makes default Acme logger.
This is a logger that will write to logs, the terminal, and to bigtable if
running a deepmind job under xmanager.
This implementation is similar to `acme.utils.loggers.make_default_logger()`
Args:
label: Name to give to the logger.
log_to_csv: Whether to log experiment data to CSV.
print_fn: How to print to terminal (defaults to built-in print).
Returns:
A list of standard acme logger objects.
"""
if not print_fn:
print_fn = print
terminal_logger = acme_loggers.TerminalLogger(label, print_fn=print_fn)
loggers = [terminal_logger]
if log_to_csv:
logging.info('logging CSV files under (%s)', label)
loggers.append(acme_loggers.CSVLogger(label=label))
return loggers
def aggregate_and_filter_logs(
loggers: List[acme_loggers.Logger],
asynchronous: bool,
time_delta: float,
serialize_fn: Optional[Callable[[Mapping[str, Any]],
str]] = acme_loggers.to_numpy
) -> acme_loggers.Logger:
"""Aggregates and filters logs.
Args:
loggers: A list of acme logger objects
asynchronous: Whether the write function should block or not.
time_delta: Time (in seconds) between logging events.
serialize_fn: An optional function to apply to the write inputs before
passing them to the various loggers.
Returns:
An ACME logger object.
"""
# Dispatch to all writers and filter Nones and by time.
logger = acme_loggers.aggregators.Dispatcher(loggers, serialize_fn)
logger = acme_loggers.NoneFilter(logger)
if asynchronous:
logger = acme_loggers.AsyncLogger(logger)
logger = acme_loggers.TimeFilter(logger, time_delta)
return logger
def make_default_logger(
label: str,
log_to_csv: bool = True,
time_delta: float = 1.0,
asynchronous: bool = False,
print_fn: Optional[Callable[[str], None]] = None,
serialize_fn: Optional[Callable[[Mapping[str, Any]],
str]] = acme_loggers.to_numpy,
) -> acme_loggers.Logger:
"""Make a default Acme logger.
This is a logger that will write to logs, the terminal, and to bigtable if
running a deepmind job under xmanager.
This implementation is similar to `acme.utils.loggers.make_default_logger()`
Args:
label: Name to give to the logger.
log_to_csv: Whether to log experiment data to CSV.
time_delta: Time (in seconds) between logging events.
asynchronous: Whether the write function should block or not.
print_fn: How to print to terminal (defaults to built-in print).
serialize_fn: An optional function to apply to the write inputs before
passing them to the various loggers.
Returns:
A logger object that responds to logger.write(some_dict).
"""
loggers = get_standard_loggers(
label=label, log_to_csv=log_to_csv, print_fn=print_fn)
logger = aggregate_and_filter_logs(
loggers=loggers, asynchronous=asynchronous, time_delta=time_delta,
serialize_fn=serialize_fn)
return logger
def restore_from_checkpoint(wrapped: core.Saveable,
checkpoint_to_restore: Optional[str] = None,
fields_to_restore: Optional[Sequence[str]] = None):
"""Restore specified fields for the state from a checkpoint."""
# This will output the learner's state.
if isinstance(fields_to_restore, Sequence) and not fields_to_restore:
return wrapped
if checkpoint_to_restore:
wrapped_object_state = wrapped.save()
checkpointable_wrapped = tf_savers.SaveableAdapter(wrapped)
objects_to_save = {'wrapped': checkpointable_wrapped}
ckpt = tf.train.Checkpoint(**objects_to_save)
logging.info('Restoring from checkpoint %s', checkpoint_to_restore)
ckpt.restore(checkpoint_to_restore)
if fields_to_restore is not None and wrapped_object_state is not None:
wrapped_object_state_from_ckpt = wrapped.save()
# Replace only those fields of the state from checkpoint which you need.
modified_fields = {
field: getattr(wrapped_object_state_from_ckpt, field)
for field in fields_to_restore
}
wrapped_object_state = wrapped_object_state._replace(**modified_fields)
wrapped.restore(wrapped_object_state)
return wrapped
def _get_ckpt_index(ckpt_path_str: str) -> int:
if ckpt_path_str is None:
return 0
else:
# checkpoints are of the form {dir}/ckpt-{index}
return int(ckpt_path_str.split('/')[-1].split('-')[-1])
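# e.g. _get_ckpt_index('/tmp/experiment/checkpoints/learner/ckpt-42') == 42
# (path is hypothetical); None maps to 0, meaning no checkpoint exists yet.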
def get_checkpoint_generator(
checkpoint_dir: str,) -> Iterator[Tuple[Any, int]]:
"""Generator that returns the latest checkpoint, blocks if there are none."""
# ACME uses TF Checkpoint Manager and hence we need to use the same here to
# obtain checkpoints and do the necessary indexing.
directory, subdirectory = re.split(r'/checkpoints/', checkpoint_dir)
logging.log_first_n(
logging.INFO, f'Searching for checkpoints in directory : {directory} '
f'and subdirectory: {subdirectory}', 1)
latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
last_index = _get_ckpt_index(latest_checkpoint)
while last_index == 0:
logging.info('Waiting for a checkpoint in %s', checkpoint_dir)
time.sleep(10)
latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
last_index = _get_ckpt_index(latest_checkpoint)
cached_index, cached_state = 0, None
num_ckpt_restore_attempts = 0
saveable_learner = MockSaveableLearner()
while True:
if num_ckpt_restore_attempts > 10:
raise RuntimeError('Tried restoring checkpoint 10 times. Failing.')
latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
last_index = _get_ckpt_index(latest_checkpoint)
if last_index > cached_index:
logging.info(f'Found new checkpoint {last_index} in '
f'{checkpoint_dir}, reloading')
try:
restore_from_checkpoint(saveable_learner, latest_checkpoint)
logging.info(f'Restored checkpoint {latest_checkpoint} successfully.')
cached_state = saveable_learner._state # pylint: disable=protected-access
cached_index = last_index
num_ckpt_restore_attempts = 0
except Exception as e: # pylint: disable=broad-except
num_ckpt_restore_attempts += 1
logging.warning(f'Caught exception while loading checkpoint : {e}. '
'Sleeping for 10 seconds and retrying.')
time.sleep(10)
else:
time.sleep(10)
if cached_state is None:
raise RuntimeError(f'State from {latest_checkpoint} cannot be None.')
yield cached_state, cached_index
def get_checkpoint_generator_for_path(
checkpoint_path: str,) -> Iterator[Tuple[Any, int]]:
"""Given a full checkpoint path, generates cached state and index."""
saveable_learner = MockSaveableLearner()
cached_state = None
# ACME Checkpoint is of the form dir/subdir/ckpt-<index>
_, cached_index = re.split(r'/ckpt-', checkpoint_path)
while True:
if cached_state is None:
restore_from_checkpoint(saveable_learner, checkpoint_path)
logging.info(f'Restored checkpoint {checkpoint_path} successfully.')
cached_state = saveable_learner._state # pylint: disable=protected-access
if cached_state is None:
raise RuntimeError(f'State from {checkpoint_path} cannot be None.')
yield cached_state, int(cached_index)
def flatten_metrics(metrics_nest):
return pd.json_normalize(metrics_nest, sep='_').to_dict(orient='records')[0]
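# e.g. flatten_metrics({'loss': {'total': 1.5, 'kl': 0.2}})
#      == {'loss_total': 1.5, 'loss_kl': 0.2}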
def validate_config(config: ml_collections.ConfigDict,
launch_args: Sequence[str]):
"""Validates a config."""
args_as_dict = dict(
[arg.split('=', maxsplit=1) for arg in launch_args if '=' in arg])
arch_str = args_as_dict['--config'].split(':')[-1]
if arch_str != config.architecture.name:
raise ValueError(f'Architecture string in config tag [{arch_str}] and '
f'config.architecture.name [{config.architecture.name}] '
'need to be consistent.')
| alphastar-main | alphastar/modules/common.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluator abstraction, for performing inference."""
import abc
import enum
from typing import Mapping, Tuple
from alphastar.commons import log_utils
import chex
import dm_env
class EvaluatorType(enum.IntEnum):
"""Defines different types of evaluation."""
CHECKPOINT = 0
RANDOM_PARAMS = 1
class Evaluator(abc.ABC):
"""Evaluator abstraction."""
@abc.abstractmethod
def reset(self) -> Mapping[str, chex.Array]:
"""Resets the evaluator in preparation for a new episode.
Returns:
Dict of data reflecting the current state of the evaluator.
"""
@abc.abstractmethod
def step(
self, timestep: dm_env.TimeStep
) -> Tuple[chex.ArrayTree, log_utils.Log]:
"""Steps the evaluator.
Args:
timestep: The latest environment step.
Returns:
(agent output (must have an `action` attribute), output_logs).
"""
| alphastar-main | alphastar/modules/evaluator_base.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The AlphaStar agent interface."""
import abc
import functools
from typing import Any, Optional
from absl import logging
from alphastar import types
from alphastar.architectures import modular
from alphastar.unplugged.data import util as data_util
import haiku as hk
import jax
class Agent(abc.ABC):
"""An agent interface used for AlphaStar."""
@abc.abstractmethod
def initial_state(self, rng_key, batch_size: int):
"""Initial internal state of the agent."""
@abc.abstractmethod
def init(self, rng_key, inputs, prev_state):
"""Get the initial parameters."""
@abc.abstractmethod
def apply(self, params, rng_key, inputs, prev_state):
"""Forward pass."""
@property
@abc.abstractmethod
def input_spec(self):
"""The spec of the input of the agent."""
@property
@abc.abstractmethod
def state_spec(self):
"""The spec of the prev_state and next_state of the agent."""
@property
@abc.abstractmethod
def output_spec(self):
"""The spec of the output of the agent."""
@abc.abstractmethod
def warmup(self, obs: types.StreamDict) -> hk.Params:
"""Warmup the agent haiku modules."""
class AlphaStarAgent(Agent):
"""AlphaStar agent."""
def __init__(
self,
component: modular.Component,
jit_agent_functions: bool = True,
jit_device: Optional[Any] = None,
jit_backend: Optional[str] = None):
"""Initializes an AlphaStar agent.
Args:
component : Stateful architecture of the agent as a component.
jit_agent_functions : Whether to JIT compile agent reset and step
functions. Usually, do not set it to False as this may trigger
recompilation of some modules such as hk.scan. If you want un-jitted
agent functions, set it to False and turn off the recompilation check
simultaneously.
jit_device: Device to use for JIT compilation.
jit_backend: Backend used for JIT (cpu, tpu, etc.).
"""
self._component = component
self._component.prev_state_spec.validate(
self._component.next_state_spec,
error_prefix='Agent next_state must contain prev state')
if jit_agent_functions:
self._agent_function_wrapper = functools.partial(
jax.jit, device=jit_device, backend=jit_backend)
else:
self._agent_function_wrapper = lambda fun: fun
@hk.transform
def initial_state_fun(batch_size: int) -> types.StreamDict:
return jax.vmap(component.initial_state, axis_size=batch_size)()
self._initial_state_fun = initial_state_fun
@hk.transform
def unroll_fun(inputs: types.StreamDict,
prev_state: types.StreamDict) -> modular.UnrollOutputType:
return jax.vmap(component.unroll)(inputs, prev_state)
self._unroll_fun = unroll_fun
def initial_state(self,
rng_key: jax.random.KeyArray,
batch_size: int) -> types.StreamDict:
"""Sets initial state for the agent."""
init_params = self._initial_state_fun.init(rng_key, batch_size)
return self._initial_state_fun.apply(init_params, rng_key, batch_size)
def init(self,
rng_key: jax.random.KeyArray,
inputs: types.StreamDict,
prev_state: types.StreamDict) -> hk.Params:
"""Returns the initial parameters for the agent."""
inputs = inputs.filter(self.input_spec)
unroll_init = self._agent_function_wrapper(self._unroll_fun.init)
return unroll_init(rng_key, inputs, prev_state)
def apply(self,
params: hk.Params,
rng_key: jax.random.KeyArray,
inputs: types.StreamDict,
prev_state: types.StreamDict) -> modular.UnrollOutputType:
"""Performs forward step of an agent."""
inputs = inputs.filter(self.input_spec)
unroll_apply = self._agent_function_wrapper(self._unroll_fun.apply)
return unroll_apply(params, rng_key, inputs, prev_state)
@property
def input_spec(self) -> types.SpecDict:
return self._component.input_spec
@property
def state_spec(self) -> types.SpecDict:
return self._component.prev_state_spec
@property
def output_spec(self) -> types.SpecDict:
return self._component.output_spec
def warmup(self,
batch_size: int = 1,
unroll_len: int = 1) -> hk.Params:
"""Warms up an agent by doing a forward pass."""
obs = data_util.get_dummy_observation(
self.input_spec, batch_size=batch_size, unroll_len=unroll_len)
rng = jax.random.PRNGKey(0)
initial_state_key, params_key, key = jax.random.split(rng, 3)
state = self.initial_state(initial_state_key, batch_size=batch_size)
logging.info('Warming up the agent.')
logging.info('Inputs: %s', jax.tree_map(lambda x: (x.shape, x.dtype), obs))
logging.info('State: %s', jax.tree_map(lambda x: (x.shape, x.dtype), state))
params = self.init(params_key, obs, state)
self.apply(params, key, obs, state)
return params
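# Minimal usage sketch (the component construction is elided and the names
# below are hypothetical):
#
#   agent = AlphaStarAgent(my_component)
#   params = agent.warmup(batch_size=1, unroll_len=1)
#   state = agent.initial_state(jax.random.PRNGKey(0), batch_size=1)
#   unroll_out = agent.apply(params, jax.random.PRNGKey(1), inputs, state)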
| alphastar-main | alphastar/modules/agent.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates match setups, given race and map constraints and an opponent."""
import collections
import random
from typing import Sequence
from absl import logging
from alphastar.commons import competitors
from pysc2.env import sc2_env
Match = collections.namedtuple('Match', ['home_race', 'away_race', 'map_name'])
class MatchGenerator:
"""Generates match setups, given race and map constraints and an opponent."""
def __init__(self, home_races: Sequence[sc2_env.Race],
away_races: Sequence[sc2_env.Race], map_names: Sequence[str]):
"""Initializer.
Args:
home_races: Races which the home (first) player may select.
away_races: Races which the away (second) player may select.
map_names: Maps which may be selected from.
"""
if not home_races:
raise ValueError('home_races must be non-empty')
if not away_races:
raise ValueError('away_races must be non-empty')
if not map_names:
raise ValueError('map_names must be non-empty')
self._home_races = home_races
self._away_races = away_races
self._map_names = map_names
def generate(self, opponent_name: str) -> Match:
"""Returns a random Match, taking into account opponent constraints.
Args:
opponent_name: Built-in bot difficulty name, else a competitor name.
"""
if competitors.is_built_in_bot(opponent_name):
home_races_available = self._home_races
away_races_available = self._away_races
map_names_available = self._map_names
else:
raise ValueError('Only games against built-in bots are supported at '
'the moment.')
home_race = sc2_env.Race(random.choice(home_races_available))
away_race = sc2_env.Race(random.choice(away_races_available))
map_name = random.choice(map_names_available)
logging.info('Match setup: Agent as %s, %s as %s - on %s.', home_race.name,
opponent_name, away_race.name, map_name)
return Match(home_race, away_race, map_name)
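# Usage sketch (hypothetical values): races and map are drawn uniformly at
# random from the allowed sets when playing a built-in bot.
#
#   generator = MatchGenerator(
#       home_races=[sc2_env.Race.protoss],
#       away_races=[sc2_env.Race.terran, sc2_env.Race.zerg],
#       map_names=['Automaton_v2', 'CyberForest_v2'])
#   match = generator.generate('very_easy')
#   print(match.home_race, match.away_race, match.map_name)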
| alphastar-main | alphastar/modules/match_generator.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Actors used for evaluation."""
# pylint: disable=logging-fstring-interpolation
import concurrent
import contextlib
import sys
import traceback
from typing import Any, Iterator, Mapping, Optional, Sequence, Tuple, Union
from absl import logging
import acme
from acme.jax import savers
from alphastar import types
from alphastar.architectures import modular
from alphastar.commons import competitors
from alphastar.commons import jax_utils
from alphastar.commons import log_utils
from alphastar.loggers import episode_logger as episode_logger_lib
from alphastar.modules import agent as agent_lib
from alphastar.modules import common as acme_common
from alphastar.modules import evaluator_base
from alphastar.modules import match_generator
from alphastar.modules import run_loop
from alphastar.unplugged.data import util as data_utils
from alphastar.unplugged.modules import learner # pylint: disable=unused-import
import chex
import dm_env
import haiku as hk
import jax
import numpy as np
from pysc2.env import converted_env
from pysc2.env import sc2_env
from pysc2.env.converter import derive_interface_options
from pysc2.env.converter.proto import converter_pb2
from pysc2.maps import ladder as sc2_ladder
from pysc2.maps import lib as sc2_map_lib
# Needed to load learner checkpoints:
EvaluatorType = evaluator_base.EvaluatorType
# TODO(b/208422091): Cleanup.
if 'Automaton_v2' not in sc2_map_lib.get_maps():
# Inject custom battle net maps into pysc2.
custom_ladder_maps = [
('Automaton_v2', 'Ladder2019Season1May', 'AutomatonLE', 2),
('CyberForest_v2', 'Ladder2019Season1May', 'CyberForestLE', 2),
('PortAleksander_v2', 'Ladder2019Season1May', 'PortAleksanderLE', 2),
('KairosJunction_v2', 'Ladder2019Season1May', 'KairosJunctionLE', 2),
('KingsCove_v2', 'Ladder2019Season1May', 'KingsCoveLE', 2),
('NewRepugnancy_v2', 'Ladder2019Season1May', 'NewRepugnancyLE', 2),
('YearZero_v2', 'Ladder2019Season1May', 'YearZeroLE', 2),
]
# PySC2 finds maps by looking at subclasses of its map class.
# We inject the new maps into the global namespace of this file so that a
# reference is kept to them.
for name_, directory_, map_file_, players_ in custom_ladder_maps:
globals()[name_] = type(
name_, (sc2_ladder.Ladder,),
dict(filename=map_file_, directory=directory_, players=players_))
def _tree_has_nan(x):
return any(jax.tree_leaves(jax.tree_map(lambda y: np.isnan(np.sum(y)), x)))
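# For example, _tree_has_nan({'a': np.array([1.0, np.nan])}) returns True,
# while _tree_has_nan({'a': np.array([1.0, 2.0])}) returns False.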
class StepEvaluationMixin(object):
"""A mixin that steps an agent with a model and a params getter."""
def __init__(
self,
agent: agent_lib.AlphaStarAgent,
rng: chex.PRNGKey,
output_features: Sequence[str] = ('action',),
warmup_agent: bool = True,
# Try not to set this to False as compilation is costly.
prohibit_recompilation: bool = True
):
"""Initializes a one-step evaluation mixin.
Args:
agent : An Alphastar agent object.
rng : A jax random number generator.
output_features : Sequence of output features for the agent.
warmup_agent: Boolean to decide if an agent needs a warmup step.
prohibit_recompilation : Boolean to decide whether to prohibit any
recompilation of the model.
"""
self._agent = agent
self._rng = rng
self._expand_fn = lambda x: np.expand_dims(x, axis=(0, 1))
self._output_features = output_features
self._state = None
self._warmup_params = None
self._prohibit_recompilation = prohibit_recompilation
if warmup_agent:
self._warmup_params = agent.warmup()
def set_warmup_params(self, params):
self._warmup_params = params
def _step(
self, timestep: dm_env.TimeStep,
agent_params: hk.Params,
) -> Tuple[chex.ArrayTree, log_utils.Log]:
"""Step through the environment for one step."""
agent_obs = types.StreamDict()
agent_obs['step_type'] = np.array([[timestep.step_type]], dtype=np.int32)
agent_obs['observation'] = types.StreamDict(
jax.tree_map(self._expand_fn, timestep.observation))
if timestep.step_type == dm_env.StepType.FIRST:
self._rng, rng = jax.random.split(self._rng)
self._state = self._agent.initial_state(rng, batch_size=1)
self._rng, rng = jax.random.split(self._rng)
# Get only online params for inference.
online_params = hk.data_structures.filter(
lambda module, name, value: 'target_network' not in module,
agent_params)
chex.assert_trees_all_equal_shapes(online_params, self._warmup_params)
with jax_utils.no_jax_compilation_allowed(
) if self._prohibit_recompilation else contextlib.suppress():
output, self._state, logs = self._agent.apply(online_params, rng,
agent_obs, self._state)
for k, v in output.items():
if _tree_has_nan(v):
logging.info('Output[%s]: %s', k, v)
raise ValueError(f'Architecture output {k} has NaNs.')
filtered_output = output.filter(self._output_features)
squeeze_batch_and_time = lambda x: np.squeeze(x, axis=(0, 1))
filtered_output = jax.tree_map(squeeze_batch_and_time, filtered_output)
return filtered_output, logs
class CheckpointEvaluator(evaluator_base.Evaluator, StepEvaluationMixin):
"""Performs inference step based on a model checkpoint."""
def __init__(
self,
agent: agent_lib.AlphaStarAgent,
checkpoint_generator: Iterator[Tuple[Any, int]],
learner_frames_per_step: int,
rng: Optional[chex.PRNGKey] = None,
warmup_agent: bool = True,
prohibit_recompilation: bool = True,
output_features: Sequence[str] = ('action',),
):
"""Initializes an evaluator that evaluates a checkpoint.
Args:
agent : An Alphastar agent object.
checkpoint_generator : An iterator for checkpoint state and checkpoint
index.
learner_frames_per_step : Number of frames per step of training used in
the learner.
rng : A jax random number generator.
warmup_agent: Boolean to decide if an agent needs a warmup step.
prohibit_recompilation : Boolean to decide whether to prohibit any
recompilation of the model.
output_features : Sequence of output features for the agent.
"""
super().__init__(
agent=agent,
rng=rng,
warmup_agent=warmup_agent,
prohibit_recompilation=prohibit_recompilation,
output_features=output_features)
self._checkpoint_generator = checkpoint_generator
self._checkpoint_state = None
self._checkpoint_index = None
self._learner_frames_per_step = learner_frames_per_step
def reset(self) -> Mapping[str, chex.Array]:
self._checkpoint_state, checkpoint_index = next(self._checkpoint_generator)
if _tree_has_nan(self._checkpoint_state.params):
raise ValueError(
f'NaN found in checkpoint parameters (index={checkpoint_index}).')
return dict(
checkpoint_index=checkpoint_index,
learner_step=int(self._checkpoint_state.step),
home_steps=int(self._checkpoint_state.step) *
self._learner_frames_per_step)
def _get_params(self):
if self._checkpoint_state is None:
raise RuntimeError('Params retrieval called before checkpoint is ready.')
return self._checkpoint_state.params
def step(
self, timestep: dm_env.TimeStep
) -> Tuple[chex.ArrayTree, log_utils.Log]:
return self._step(timestep, self._get_params())
class RandomParamsEvaluator(evaluator_base.Evaluator, StepEvaluationMixin):
"""Performs inference step based on randomly initialized model params."""
def __init__(
self,
agent: agent_lib.AlphaStarAgent,
learner_frames_per_step: int,
rng: Optional[chex.PRNGKey] = None,
warmup_agent: bool = True,
prohibit_recompilation: bool = True,
output_features: Sequence[str] = ('action',),
):
"""Initializes an evaluator that uses random params.
Args:
agent : An Alphastar agent object.
learner_frames_per_step : Number of frames per step of training used in
the learner.
rng : A jax random number generator.
warmup_agent: Boolean to decide if an agent needs a warmup step.
prohibit_recompilation : Boolean to decide whether to prohibit any
recompilation of the model.
output_features : Sequence of output features for the agent.
"""
super().__init__(
agent=agent,
rng=rng,
warmup_agent=warmup_agent,
prohibit_recompilation=prohibit_recompilation,
output_features=output_features)
self._learner_frames_per_step = learner_frames_per_step
def reset(self) -> Mapping[str, chex.Array]:
return dict(checkpoint_index=-1, learner_step=-1, home_steps=-1)
def _get_params(self):
if self._warmup_params is None:
raise RuntimeError('Params retrieval called before warmup')
return self._warmup_params
def step(
self, timestep: dm_env.TimeStep
) -> Tuple[chex.ArrayTree, log_utils.Log]:
return self._step(timestep, self._get_params())
def _make_environment_factory(game_steps_per_episode: int):
"""Returns a factory for agent vs built-in bot environments."""
def _environment_factory(
my_race: sc2_env.Race,
my_converter_settings: converter_pb2.ConverterSettings, opponent: str,
opponent_race: sc2_env.Race, map_name: str):
return converted_env.make_streams(
converted_env.ConvertedEnvironment(
env=sc2_env.SC2Env(
map_name=map_name,
players=[
sc2_env.Agent(my_race),
sc2_env.Bot(opponent_race,
competitors.difficulty_string_to_enum(opponent))
],
game_steps_per_episode=game_steps_per_episode,
agent_interface_format=derive_interface_options.from_settings(
my_converter_settings)),
converter_factories=converted_env.make_converter_factories(
[my_converter_settings])))[0]
return _environment_factory
class EvalActor(acme.Worker):
"""Evaluation actor used for evaluating a given agent."""
def __init__(
self,
home_races: Sequence[sc2_env.Race],
away_races: Sequence[sc2_env.Race],
map_names: Sequence[str],
competitor_name: str,
converter_settings: converter_pb2.ConverterSettings,
architecture: modular.ArchitectureBuilder,
episode_logger: episode_logger_lib.EpisodeLogger,
learner_frames_per_step: int,
learner_node: Optional[savers.CheckpointingRunner] = None,
eval_checkpoint_dir: Optional[str] = None,
eval_checkpoint_path: Optional[str] = None,
agent_output_features: Sequence[str] = ('action',),
game_steps_per_episode: Optional[int] = None,
rng: Optional[chex.PRNGKey] = None,
rng_seed: Optional[int] = None,
max_num_steps: Optional[int] = None,
evaluator_type: Union[str, EvaluatorType] = EvaluatorType.CHECKPOINT,
warmup_agent: bool = True,
prohibit_recompilation: bool = True,
**unused_kwargs):
"""Initializes the evaluation actor.
Args:
home_races : Sequence of SC2 races that can be used for our agent to be
selected randomly.
away_races : Sequence of SC2 races that can be used for the opponent to
be selected randomly.
map_names : Sequence of SC2 maps that are used randomly for the game.
competitor_name : Name of the competitor -- for example, very_easy
converter_settings : Settings used for the converter that transforms the
observations and actions.
architecture : Architecture used in the agent.
episode_logger : Logger used to log episode stats and other evaluation
metrics.
learner_frames_per_step : Number of learner frames that are used per step
of training in the learner.
learner_node : Learner object(node) that can be used to query the
checkpoint directory to evaluate from.
eval_checkpoint_dir : Directory to evaluate checkpoints from. The most
recent checkpoint is evaluated if this field is set and it supersedes
the checkpoint directory obtained from the learner node.
eval_checkpoint_path : Checkpoint path to evaluate. This supersedes all
other checkpoint related path information.
agent_output_features : Sequence of features that the agent needs to
output during evaluation.
      game_steps_per_episode : Number of game steps per episode set as part
        of initializing the environment.
rng: A jax Random number generator key.
      rng_seed : Seed used to create the random number generator. This is used
        in case an rng is not passed directly.
max_num_steps: Number of steps per episode to run the evaluation for.
evaluator_type: Type of the evaluator used. Currently supports checkpoint
evaluator and random params evaluator.
warmup_agent : Boolean to decide if we need to warm-up the agent.
prohibit_recompilation : Boolean to decide if we need to prohibit any
recompilation of the model graph.
"""
if isinstance(evaluator_type, str):
evaluator_type = getattr(EvaluatorType, evaluator_type.upper())
self._opponent_name = competitor_name
self._match_generator = match_generator.MatchGenerator(
home_races=home_races, away_races=away_races, map_names=map_names)
self._episode_logger = episode_logger
self._converter_settings = converter_settings
self._mmr = self._converter_settings.mmr
self._learner_frames_per_step = learner_frames_per_step
self._eval_checkpoint_dir = eval_checkpoint_dir
self._eval_checkpoint_path = eval_checkpoint_path
self._checkpointed_learner = learner_node
self._agent_output_features = agent_output_features
self._game_steps_per_episode = game_steps_per_episode
self._architecture = architecture
if rng is None:
rng = jax.random.PRNGKey(rng_seed)
self._rng = rng
self._environment_factory = _make_environment_factory(
game_steps_per_episode=game_steps_per_episode)
self._max_num_steps = max_num_steps
self._evaluator_type = evaluator_type
self._warmup_agent = warmup_agent
self._prohibit_recompilation = prohibit_recompilation
self._checkpoint_generator = None
@property
def evaluator_type(self):
return self._evaluator_type
def get_agent_architecture(self):
obs_spec, action_spec = converted_env.get_environment_spec(
self._converter_settings)
input_spec = data_utils.get_input_spec(obs_spec) # pytype: disable=wrong-arg-types # strict_namedtuple_checks
return self._architecture(input_spec, action_spec, False)
def build_evaluator(
self, agent, checkpoint_generator: Optional[Iterator[Tuple[Any, int]]]
) -> evaluator_base.Evaluator:
if self._evaluator_type == EvaluatorType.CHECKPOINT:
evaluator = CheckpointEvaluator(
agent=agent,
rng=self._rng,
learner_frames_per_step=self._learner_frames_per_step,
checkpoint_generator=checkpoint_generator,
warmup_agent=self._warmup_agent,
prohibit_recompilation=self._prohibit_recompilation,
output_features=self._agent_output_features)
elif self._evaluator_type == EvaluatorType.RANDOM_PARAMS:
evaluator = RandomParamsEvaluator(
agent=agent,
rng=self._rng,
warmup_agent=self._warmup_agent,
prohibit_recompilation=self._prohibit_recompilation,
learner_frames_per_step=self._learner_frames_per_step,
output_features=self._agent_output_features)
return evaluator
def build_checkpoint_generator(self):
if self.evaluator_type == EvaluatorType.RANDOM_PARAMS:
return None
elif self._eval_checkpoint_path:
return acme_common.get_checkpoint_generator_for_path(
self._eval_checkpoint_path)
else:
eval_checkpoint_dir = self._eval_checkpoint_dir
if not eval_checkpoint_dir and self._checkpointed_learner:
eval_checkpoint_dir = self._checkpointed_learner.get_directory()
if eval_checkpoint_dir is None:
        raise ValueError('Checkpoint directory cannot be None.')
logging.info(f'Eval Checkpoint directory is set as {eval_checkpoint_dir}')
return acme_common.get_checkpoint_generator(eval_checkpoint_dir)
def setup_agent(self):
agent_architecture = self.get_agent_architecture()
agent = agent_lib.AlphaStarAgent(agent_architecture)
checkpoint_generator = self.build_checkpoint_generator()
return self.build_evaluator(agent, checkpoint_generator)
def run(self) -> None:
eval_agent = self.setup_agent()
while True:
self.run_episode(eval_agent)
def run_episode(self, agent: evaluator_base.Evaluator):
home_race, away_race, map_name = self._match_generator.generate(
self._opponent_name)
agent_info = agent.reset()
home_static_logs = dict(
agent_info,
competitor_name=self._opponent_name,
mmr=self._mmr,
map_name=map_name,
home_race=home_race.name,
away_race=away_race.name,
competitor_type='bot',
actor_type=(f'{self._opponent_name}:{self._mmr}:{map_name}:'
f'{home_race.name}_v_{away_race.name}'))
with self._environment_factory(
my_race=home_race,
my_converter_settings=self._converter_settings,
opponent=self._opponent_name,
opponent_race=away_race,
map_name=map_name) as env:
run_loop.play_episode(
env=env,
agent=agent,
player=0,
episode_logger=self._episode_logger,
static_log=home_static_logs,
max_num_steps=self._max_num_steps)
def _traceback_exception():
  # format_exception returns the traceback as a string so it can be embedded
  # in log messages (print_exception writes to stderr and returns None).
  return ''.join(traceback.format_exception(*sys.exc_info()))
class ThreadedUnbatchedEvalActor(acme.Worker):
"""Multiple actor threads on a CPU with unbatched evaluation."""
def __init__(self,
num_threads_per_inference_device: int,
competitor_name: str,
competitor_names: Sequence[str],
use_warmup: bool = False,
total_num_episodes_per_thread: Optional[int] = None,
**eval_actor_kwargs):
"""Initializes a threaded unbatched evaluation actor.
Args:
num_threads_per_inference_device : Number of actor threads per inference
device.
competitor_name: Name of competitor (unused and kept only for
interface compatibility between different evaluator actors.)
competitor_names : Sequence of competitor names to choose from for each
actor thread.
use_warmup : Boolean to decide if actor thread needs to be warmed up.
total_num_episodes_per_thread : Number of episodes to be run on
each actor thread.
**eval_actor_kwargs : Keyword args passed on to each `EvalActor` thread.
"""
del competitor_name
self._num_agents = jax.device_count()
self._eval_actors = []
self._agents = []
self._total_num_episodes_per_thread = total_num_episodes_per_thread
self._num_evaluation_threads = int(
num_threads_per_inference_device * self._num_agents)
num_competitors = len(competitor_names)
for evaluator_id in range(self._num_evaluation_threads):
self._eval_actors.append(
EvalActor(
**eval_actor_kwargs,
competitor_name=competitor_names[evaluator_id % num_competitors],
warmup_agent=False,
prohibit_recompilation=False))
architecture = self._eval_actors[0].get_agent_architecture()
# JIT a model on each device core.
for device in jax.devices():
self._agents.append(
agent_lib.AlphaStarAgent(
architecture, jit_device=device))
logging.info(f'Jitted {self._num_agents} agents -- one per core.')
self._warmup_params = None
self._warmup_all_agents()
logging.info('All cores warmed up. Agents ready for inference.')
self._checkpoint_generator = None
def _warmup_all_agents(self):
warmup_fn = lambda agent: agent.warmup()
future_map = {}
with concurrent.futures.ThreadPoolExecutor(self._num_agents) as executor:
for agent_num, agent in enumerate(self._agents):
future_map[agent_num] = executor.submit(warmup_fn, agent)
results = {}
for agent_id, future in future_map.items():
try:
results[agent_id] = future.result()
# pylint: disable=broad-except
except Exception as e:
logging.info(f'Error occurred in warmup for agent id {agent_id}: {e}')
# pylint: enable=broad-except
self._warmup_params = results[0]
def _play_agents(self, actor_id):
"""Plays agents on an episode loop until termination."""
# Assignment is done such that task 0 goes to core 0, task 1 to core 1
# and so on. Tasks are executed in the order which they are submitted.
eval_actor = self._eval_actors[actor_id]
device_id = actor_id % self._num_agents
agent = self._agents[device_id]
logging.info(f'Setting up agent of actor {actor_id} on device {device_id}')
if eval_actor.evaluator_type == EvaluatorType.CHECKPOINT:
if self._checkpoint_generator is None:
raise ValueError('Checkpoint generator cannot be None.')
# Use same checkpoint generator across different threads. Guarantee thread
# safety with locked iterators.
eval_agent = eval_actor.build_evaluator(agent, self._checkpoint_generator)
# Since we disabled warmup when the evaluator was built,
# we are allowing for warmup params to be set.
eval_agent.set_warmup_params(self._warmup_params)
episode_count = 0
while True:
logging.info(f'Running new episode on actor {actor_id}')
try:
eval_actor.run_episode(eval_agent)
episode_count += 1
        # Stop once the per-thread episode budget (if any) is exhausted;
        # comparing against None would raise a TypeError.
        if (self._total_num_episodes_per_thread is not None and
            episode_count >= self._total_num_episodes_per_thread):
          break
# pylint: disable=broad-except
except Exception:
logging.error(
'Error occurred while running the episode on actor '
f'{actor_id}:: {_traceback_exception()}')
def _set_checkpoint_generator(self):
generator = self._eval_actors[0].build_checkpoint_generator()
# Use checkpoint generator as a locked iterator to make sure it is
# thread-safe when multiple actor threads query it.
if generator:
self._checkpoint_generator = acme_common.LockedIterator(generator)
def run(self):
future_map = {}
self._set_checkpoint_generator()
with concurrent.futures.ThreadPoolExecutor(
self._num_evaluation_threads) as executor:
for thread_id in range(self._num_evaluation_threads):
future = executor.submit(self._play_agents, thread_id)
future_map[thread_id] = future
for thread_id, future in future_map.items():
try:
_ = future.result()
# pylint: disable=broad-except
except Exception:
logging.error(
f'Error in thread {thread_id} :: '
f' {_traceback_exception()}')
| alphastar-main | alphastar/modules/evaluator.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Initialize modules under the alphastar.collections library.
We expose Struct, an immutable dict with sorted keys, used to hold nested
observations, actions and state. We also expose utilities to register this
structure as a jax pytree node.
"""
from alphastar.collections.structure import Struct
| alphastar-main | alphastar/collections/__init__.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for alphastar.collections.structure.py."""
import copy
import pickle
from absl.testing import absltest
from alphastar.collections import structure
import tree
class TestStruct(absltest.TestCase):
def testPrettyPrinting(self):
result = str(structure.Struct(a=1, b=structure.Struct(c=2, d=[1, 2, 3])))
self.assertEqual(
result,
"Struct(\n a=1,\n b=Struct(\n c=2,\n d=[1, 2, 3],\n ),\n)")
def testAttributeAccess(self):
data = structure.Struct(x=1, y=2)
self.assertEqual(data.x, 1)
self.assertEqual(data.y, 2)
self.assertEqual(data["x"], 1)
self.assertEqual(data["y"], 2)
with self.assertRaisesRegex(AttributeError, "z"):
print(data.z)
with self.assertRaisesRegex(KeyError, "'z'"):
print(data["z"])
def testHasAttr(self):
data = structure.Struct(x=1, y=2)
self.assertTrue(hasattr(data, "x"))
self.assertFalse(hasattr(data, "z"))
def testGetAttr(self):
data = structure.Struct(x=1, y=2)
self.assertEqual(getattr(data, "x", None), 1)
self.assertIsNone(getattr(data, "z", None))
def testIn(self):
data = structure.Struct(x=1, y=2)
self.assertIn("x", data)
self.assertIn("y", data)
self.assertNotIn("z", data)
def testConstructionFromDict(self):
data_dict = dict(x=1, y=2)
data = structure.Struct(data_dict)
data_dict["x"] = 3
del data_dict["y"]
self.assertEqual(data["x"], 1)
self.assertEqual(data["y"], 2)
def testExtension(self):
data1 = structure.Struct(x=1, y=2)
data2 = structure.Struct(data1, z=3)
self.assertEqual(data2.x, 1)
self.assertEqual(data2.y, 2)
self.assertEqual(data2.z, 3)
def testReduction(self):
data1 = structure.Struct(x=1, y=2, z=3)
data2 = structure.Struct({k: v for k, v in data1.items() if k != "z"})
self.assertEqual(data2.x, 1)
self.assertEqual(data2.y, 2)
with self.assertRaisesRegex(AttributeError, "z"):
print(data2.z)
def testExtensionWithOverrideAllowed(self):
data1 = structure.Struct(x=1, y=2)
data2 = structure.Struct(data1, x=3, z=4)
self.assertEqual(data2.x, 3)
self.assertEqual(data2.y, 2)
self.assertEqual(data2.z, 4)
def testExtensionWithOverrideDisallowed(self):
data = structure.Struct(x=1, y=2)
with self.assertRaisesRegex(TypeError, ".*multiple values.*"):
structure.Struct(x=3, z=4, **data)
def testImmutability(self):
data = structure.Struct(x=1, y=2)
with self.assertRaisesRegex(RuntimeError, "Modifications.*not permitted"):
data["x"] = 5
with self.assertRaisesRegex(RuntimeError, "Modifications.*not permitted"):
data.x = 5
with self.assertRaisesRegex(RuntimeError, "Modifications.*not permitted"):
data.pop()
with self.assertRaisesRegex(RuntimeError, "Modifications.*not permitted"):
data.popitem()
with self.assertRaisesRegex(RuntimeError, "Modifications.*not permitted"):
data.clear()
with self.assertRaisesRegex(RuntimeError, "Modifications.*not permitted"):
data.update(dict(z=3))
with self.assertRaisesRegex(RuntimeError, "Modifications.*not permitted"):
data.setdefault("z", 3)
with self.assertRaisesRegex(RuntimeError, "Modifications.*not permitted"):
del data["x"]
def testMutability(self):
data = structure.Struct()
data["x"] = 5
with self.assertRaisesRegex(RuntimeError, "Modifications.*not permitted"):
data.x = 5
with self.assertRaisesRegex(RuntimeError, "Modifications.*not permitted"):
data.pop()
with self.assertRaisesRegex(RuntimeError, "Modifications.*not permitted"):
data.popitem()
with self.assertRaisesRegex(RuntimeError, "Modifications.*not permitted"):
data.clear()
with self.assertRaisesRegex(RuntimeError, "Modifications.*not permitted"):
data.update(dict(z=3))
with self.assertRaisesRegex(RuntimeError, "Modifications.*not permitted"):
data.setdefault("z", 3)
with self.assertRaisesRegex(RuntimeError, "Modifications.*not permitted"):
del data["x"]
def testIncrementalBuildingSortsCorrectly(self):
data = structure.Struct()
data["c"] = 1
self.assertEqual(list(data.values()), [1])
data["b"] = 7
self.assertEqual(list(data.values()), [7, 1])
data["d"] = 4
self.assertEqual(list(data.values()), [7, 1, 4])
data["a"] = 3
self.assertEqual(list(data.values()), [3, 7, 1, 4])
def testIteratesInSortedKeyOrder(self):
data = structure.Struct(e=3, d=4, f=5, a=1, b=2, c=17)
self.assertEqual(list(data.keys()), ["a", "b", "c", "d", "e", "f"])
self.assertEqual(list(data.values()), [1, 2, 17, 4, 3, 5])
def testFlatten(self):
data = structure.Struct(e=3, d=4, f=5, a=1, b=2, c=17)
flattened = tree.flatten(data)
self.assertEqual(flattened, [1, 2, 17, 4, 3, 5])
def testReduced(self):
data = structure.Struct(e=3, d=4)
reduced = data.__reduce__()
self.assertEqual(reduced, (structure.Struct, (None, [("d", 4), ("e", 3)])))
def testShallowCopy(self):
struct = structure.Struct(a=1, b=dict(c=2))
copied = copy.copy(struct)
self.assertEqual(struct, copied)
struct.b["c"] = 3
self.assertEqual(struct, copied)
def testDeepCopy(self):
struct = structure.Struct(a=1, b=dict(c=2))
copied = copy.deepcopy(struct)
self.assertEqual(struct, copied)
struct.b["c"] = 3
self.assertNotEqual(struct, copied)
def testViewGetitem(self):
data = structure.Struct(e=3, d=4, f=5, a=1, b=2, c=17)
self.assertEqual(data.values()[2], 17)
self.assertEqual(data.keys()[2], "c")
self.assertEqual(data.items()[2], ("c", 17))
def testPickle(self):
original = structure.Struct(a=1, b=2.0, c=structure.Struct(d="hi", e=5))
data = pickle.dumps(original)
reconstructed = pickle.loads(data)
self.assertEqual(original, reconstructed)
if __name__ == "__main__":
absltest.main()
| alphastar-main | alphastar/collections/structure_test.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for using collections in JAX."""
from absl import logging
from alphastar import collections
import jax
def register_struct():
"""Register `structure.Struct` so pytree/tracer knows how to handle it."""
try:
jax.tree_util.register_pytree_node(
collections.Struct,
flatten_func=lambda s: (tuple(s.values()), tuple(s.keys())),
unflatten_func=lambda k, xs: collections.Struct(zip(k, xs)))
except ValueError:
logging.info('Struct is already registered as JAX PyTree Node.')
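# Usage sketch: once registered, Struct instances traverse like any pytree.
#
#   import jax.numpy as jnp
#   register_struct()
#   s = collections.Struct(a=jnp.zeros(3), b=jnp.ones(2))
#   doubled = jax.tree_util.tree_map(lambda x: 2 * x, s)  # still a Struct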
| alphastar-main | alphastar/collections/jax.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Immutable dict with sorted keys, access-as-attribute & pretty printing."""
import bisect
from collections import abc
from typing import Mapping, TypeVar
Value = TypeVar("Value")
class Struct(dict, Mapping[str, Value]):
"""Immutable dict with sorted keys, access-as-attribute & pretty printing.
Inherits from dict explicitly for tensor flow encoding (which uses isinstance
directly...) and py tree flattening (which is in C++ and ignores method
overrides).
Note that the immutability of the dict is valid only when the dict is
non-empty.
Note that to override existing fields one can call:
Struct(existing_struct, existing_1=new_value, existing_2=new_value_2).
To create new fields, checking that they don't duplicate existing fields:
Struct(new_1=new_value, new_2=new_value_2, **existing_struct).
"""
def __init__(self, *args, **kwargs):
super().__init__(*(_ignore_none(args) if args else []), **kwargs)
self._sorted_keys = sorted(super(Struct, self).keys())
# Note that if our dict is empty we set immutable to false. This is
# a workaround for the fact that tensorflow/python/client/session.py's
# _DictFetchMapper constructs an empty dict and assigns rather than
# constructing from a list of key-values. In that case users of this
# class lose protection against mutation via __setitem__.
self._immutable = bool(self._sorted_keys)
def __getattr__(self, name):
"""Permits item-as-attribute lookup."""
try:
return self[name]
except Exception:
raise AttributeError(name)
def __iter__(self):
"""Iterates over the sorted keys list."""
return self._sorted_keys.__iter__()
def __repr__(self):
"""Pretty-prints the Struct's contents."""
ret = ["Struct("]
for k in self:
v_repr = "\n ".join(repr(self[k]).split("\n"))
ret.append(" {}={},".format(k, v_repr))
ret.append(")")
return "\n".join(ret)
def __setattr__(self, k, v):
"""Allows only the setting of our private attributes."""
if k in ["_sorted_keys", "_immutable"]:
super(Struct, self).__setattr__(k, v)
else:
self._defined_as_read_only()
def __setitem__(self, k, v):
"""Allows setting items only if the Struct is mutable (see init)."""
if not self._immutable:
super(Struct, self).__setitem__(k, v)
bisect.insort(self._sorted_keys, k)
else:
self._defined_as_read_only()
def __hash__(self):
"""Protects against mutability, *but only in this object*."""
if not self._immutable:
raise TypeError("Can only hash immutable Structs")
return hash(tuple(self.items()))
def __reduce__(self):
"""Explicit pickling support is required due to the class' evolution.
Note that code exists which expects the 'None' here (which is superfluous
now). For the time being, meet those expectations.
Returns:
Reduced representation of this Struct instance.
"""
return self.__class__, (None, list(self.items()),)
def keys(self):
"""Uses the sorted key list."""
return self._sorted_keys
def items(self):
"""Returns a view onto items, sorted by key."""
return SortedItemsView(self)
def values(self):
"""Returns a view onto values, sorted by key."""
return SortedValuesView(self)
def _defined_as_read_only(self, *args, **kwargs):
raise RuntimeError("Modifications to Struct instances are not permitted")
__delitem__ = _defined_as_read_only
pop = _defined_as_read_only
popitem = _defined_as_read_only
clear = _defined_as_read_only
update = _defined_as_read_only
setdefault = _defined_as_read_only
def _not_supported(self, *args, **kwargs):
raise RuntimeError("Method unsupported by Struct")
__reversed__ = _not_supported
class SortedItemsView(abc.ItemsView, abc.Sequence):
def __init__(self, struct: Struct):
self._struct = struct
super().__init__(struct)
def __getitem__(self, i: int):
key = self._struct._sorted_keys[i]
return key, self._struct[key]
class SortedValuesView(abc.ValuesView, abc.Sequence):
def __init__(self, struct: Struct):
self._struct = struct
super().__init__(struct)
def __getitem__(self, i: int):
key = self._struct._sorted_keys[i]
return self._struct[key]
def _ignore_none(args):
"""Ignore superfluous None args when reconstructing from copies."""
if args and args[0] is None:
args = args[1:]
return args
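# Behavioural sketch: keys iterate in sorted order, and instances constructed
# with contents are read-only.
#
#   s = Struct(b=2, a=1)
#   list(s.keys())   # ['a', 'b']
#   s.a              # 1
#   s['c'] = 3       # raises RuntimeError (non-empty Structs are immutable)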
| alphastar-main | alphastar/collections/structure.py |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Minimal data reader for GQN TFRecord datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import tensorflow as tf
nest = tf.contrib.framework.nest
DatasetInfo = collections.namedtuple(
'DatasetInfo',
['basepath', 'train_size', 'test_size', 'frame_size', 'sequence_size']
)
Context = collections.namedtuple('Context', ['frames', 'cameras'])
Query = collections.namedtuple('Query', ['context', 'query_camera'])
TaskData = collections.namedtuple('TaskData', ['query', 'target'])
_DATASETS = dict(
jaco=DatasetInfo(
basepath='jaco',
train_size=3600,
test_size=400,
frame_size=64,
sequence_size=11),
mazes=DatasetInfo(
basepath='mazes',
train_size=1080,
test_size=120,
frame_size=84,
sequence_size=300),
rooms_free_camera_with_object_rotations=DatasetInfo(
basepath='rooms_free_camera_with_object_rotations',
train_size=2034,
test_size=226,
frame_size=128,
sequence_size=10),
rooms_ring_camera=DatasetInfo(
basepath='rooms_ring_camera',
train_size=2160,
test_size=240,
frame_size=64,
sequence_size=10),
rooms_free_camera_no_object_rotations=DatasetInfo(
basepath='rooms_free_camera_no_object_rotations',
train_size=2160,
test_size=240,
frame_size=64,
sequence_size=10),
shepard_metzler_5_parts=DatasetInfo(
basepath='shepard_metzler_5_parts',
train_size=900,
test_size=100,
frame_size=64,
sequence_size=15),
shepard_metzler_7_parts=DatasetInfo(
basepath='shepard_metzler_7_parts',
train_size=900,
test_size=100,
frame_size=64,
sequence_size=15)
)
_NUM_CHANNELS = 3
_NUM_RAW_CAMERA_PARAMS = 5
_MODES = ('train', 'test')
def _get_dataset_files(dataset_info, mode, root):
  """Generates lists of files for a given dataset version."""
  basepath = dataset_info.basepath
  base = os.path.join(root, basepath, mode)
  if mode == 'train':
    num_files = dataset_info.train_size
  else:
    num_files = dataset_info.test_size
length = len(str(num_files))
template = '{:0%d}-of-{:0%d}.tfrecord' % (length, length)
return [os.path.join(base, template.format(i + 1, num_files))
for i in range(num_files)]
def _convert_frame_data(jpeg_data):
decoded_frames = tf.image.decode_jpeg(jpeg_data)
return tf.image.convert_image_dtype(decoded_frames, dtype=tf.float32)
class DataReader(object):
"""Minimal queue based TFRecord reader.
You can use this reader to load the datasets used to train Generative Query
Networks (GQNs) in the 'Neural Scene Representation and Rendering' paper.
See README.md for a description of the datasets and an example of how to use
the reader.
"""
def __init__(self,
dataset,
context_size,
root,
mode='train',
# Optionally reshape frames
custom_frame_size=None,
# Queue params
num_threads=4,
capacity=256,
min_after_dequeue=128,
seed=None):
"""Instantiates a DataReader object and sets up queues for data reading.
Args:
dataset: string, one of ['jaco', 'mazes', 'rooms_ring_camera',
'rooms_free_camera_no_object_rotations',
'rooms_free_camera_with_object_rotations', 'shepard_metzler_5_parts',
'shepard_metzler_7_parts'].
context_size: integer, number of views to be used to assemble the context.
root: string, path to the root folder of the data.
mode: (optional) string, one of ['train', 'test'].
custom_frame_size: (optional) integer, required size of the returned
frames, defaults to None.
num_threads: (optional) integer, number of threads used to feed the reader
queues, defaults to 4.
      capacity: (optional) integer, capacity of the underlying
        RandomShuffleQueue, defaults to 256.
      min_after_dequeue: (optional) integer, min_after_dequeue of the underlying
        RandomShuffleQueue, defaults to 128.
seed: (optional) integer, seed for the random number generators used in
the reader.
Raises:
ValueError: if the required version does not exist; if the required mode
is not supported; if the requested context_size is bigger than the
maximum supported for the given dataset version.
"""
if dataset not in _DATASETS:
raise ValueError('Unrecognized dataset {} requested. Available datasets '
'are {}'.format(dataset, _DATASETS.keys()))
if mode not in _MODES:
raise ValueError('Unsupported mode {} requested. Supported modes '
'are {}'.format(mode, _MODES))
self._dataset_info = _DATASETS[dataset]
if context_size >= self._dataset_info.sequence_size:
raise ValueError(
          'Maximum supported context size for dataset {} is {}, but '
'was {}.'.format(
dataset, self._dataset_info.sequence_size-1, context_size))
    self._context_size = context_size
    # Keep the seed so read ops can draw reproducible frame indices.
    self._seed = seed
# Number of views in the context + target view
self._example_size = context_size + 1
self._custom_frame_size = custom_frame_size
with tf.device('/cpu'):
file_names = _get_dataset_files(self._dataset_info, mode, root)
filename_queue = tf.train.string_input_producer(file_names, seed=seed)
reader = tf.TFRecordReader()
read_ops = [self._make_read_op(reader, filename_queue)
for _ in range(num_threads)]
dtypes = nest.map_structure(lambda x: x.dtype, read_ops[0])
shapes = nest.map_structure(lambda x: x.shape[1:], read_ops[0])
self._queue = tf.RandomShuffleQueue(
capacity=capacity,
min_after_dequeue=min_after_dequeue,
dtypes=dtypes,
shapes=shapes,
seed=seed)
enqueue_ops = [self._queue.enqueue_many(op) for op in read_ops]
tf.train.add_queue_runner(tf.train.QueueRunner(self._queue, enqueue_ops))
def read(self, batch_size):
"""Reads batch_size (query, target) pairs."""
frames, cameras = self._queue.dequeue_many(batch_size)
context_frames = frames[:, :-1]
context_cameras = cameras[:, :-1]
target = frames[:, -1]
query_camera = cameras[:, -1]
context = Context(cameras=context_cameras, frames=context_frames)
query = Query(context=context, query_camera=query_camera)
return TaskData(query=query, target=target)
def _make_read_op(self, reader, filename_queue):
"""Instantiates the ops used to read and parse the data into tensors."""
_, raw_data = reader.read_up_to(filename_queue, num_records=16)
feature_map = {
'frames': tf.FixedLenFeature(
shape=self._dataset_info.sequence_size, dtype=tf.string),
'cameras': tf.FixedLenFeature(
shape=[self._dataset_info.sequence_size * _NUM_RAW_CAMERA_PARAMS],
dtype=tf.float32)
}
example = tf.parse_example(raw_data, feature_map)
    indices = self._get_randomized_indices(self._seed)
frames = self._preprocess_frames(example, indices)
cameras = self._preprocess_cameras(example, indices)
return frames, cameras
def _get_randomized_indices(self, seed):
"""Generates randomized indices into a sequence of a specific length."""
indices = tf.range(0, self._dataset_info.sequence_size)
indices = tf.random_shuffle(indices, seed=seed)
indices = tf.slice(indices, begin=[0], size=[self._example_size])
return indices
def _preprocess_frames(self, example, indices):
"""Instantiates the ops used to preprocess the frames data."""
frames = tf.concat(example['frames'], axis=0)
frames = tf.gather(frames, indices, axis=1)
frames = tf.map_fn(
_convert_frame_data, tf.reshape(frames, [-1]),
dtype=tf.float32, back_prop=False)
dataset_image_dimensions = tuple(
[self._dataset_info.frame_size] * 2 + [_NUM_CHANNELS])
frames = tf.reshape(
frames, (-1, self._example_size) + dataset_image_dimensions)
if (self._custom_frame_size and
self._custom_frame_size != self._dataset_info.frame_size):
frames = tf.reshape(frames, (-1,) + dataset_image_dimensions)
new_frame_dimensions = (self._custom_frame_size,) * 2 + (_NUM_CHANNELS,)
frames = tf.image.resize_bilinear(
frames, new_frame_dimensions[:2], align_corners=True)
frames = tf.reshape(
frames, (-1, self._example_size) + new_frame_dimensions)
return frames
def _preprocess_cameras(self, example, indices):
"""Instantiates the ops used to preprocess the cameras data."""
raw_pose_params = example['cameras']
raw_pose_params = tf.reshape(
raw_pose_params,
[-1, self._dataset_info.sequence_size, _NUM_RAW_CAMERA_PARAMS])
raw_pose_params = tf.gather(raw_pose_params, indices, axis=1)
pos = raw_pose_params[:, :, 0:3]
yaw = raw_pose_params[:, :, 3:4]
pitch = raw_pose_params[:, :, 4:5]
cameras = tf.concat(
[pos, tf.sin(yaw), tf.cos(yaw), tf.sin(pitch), tf.cos(pitch)], axis=2)
return cameras
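# Usage sketch (TF1 queue runners; `root_path` is a placeholder for the
# directory holding the downloaded datasets):
#
#   data_reader = DataReader(dataset='jaco', context_size=5, root=root_path)
#   data = data_reader.read(batch_size=12)
#   with tf.train.SingularMonitoredSession() as sess:
#     task = sess.run(data)  # TaskData(query=..., target=...)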
| gqn-datasets-master | data_reader.py |
#!/usr/bin/env python3
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script for running step_benchmark_test for a variety of build settings."""
import argparse
import os
import re
import shutil
import subprocess
import sys
from typing import NamedTuple, Tuple
class _Setting(NamedTuple):
name: str
cmake_args: Tuple[str]
def run_benchmark_variants(output_directory: str, build_directory: str,
benchmark_repetitions: int, delete_build: bool):
"""Builds different variants of the benchmark test, and runs them."""
# For each of the settings, the first option is what we use as a baseline.
# We chose to pick the settings that were likely best (so this is an ablation
# study), so interactions between settings are tested.
settings_ranges = {
"avx": [
_Setting("on", ("-DMUJOCO_ENABLE_AVX=ON",)),
_Setting("off", ("-DMUJOCO_ENABLE_AVX=OFF",))
],
"avx_intrinsics": [
_Setting("on", ("-DMUJOCO_ENABLE_AVX_INTRINSICS=ON",)),
_Setting("off", ("-DMUJOCO_ENABLE_AVX_INTRINSICS=OFF",))
],
"lto": [
_Setting("on", ("-DCMAKE_INTERPROCEDURAL_OPTIMIZATION=ON",)),
_Setting("off", ("-DCMAKE_INTERPROCEDURAL_OPTIMIZATION=OFF",)),
],
"compiler": compiler_cmake_flags_variants(),
}
baseline_settings = {
key: value[0] for key, value in settings_ranges.items()
}
run_benchmark(
f"{sys.platform}_baseline",
baseline_settings,
output_directory=output_directory,
build_directory=f"{build_directory}_baseline",
benchmark_repetitions=benchmark_repetitions,
delete_build=delete_build,
)
for key in sorted(settings_ranges):
settings = baseline_settings.copy()
for setting in settings_ranges[key][1:]:
settings[key] = setting
run_benchmark(
f"{sys.platform}_{key}_{setting.name}",
settings,
output_directory=output_directory,
build_directory=f"{build_directory}_{key}_{setting.name}",
benchmark_repetitions=benchmark_repetitions,
delete_build=delete_build,
)
def run_benchmark(name, settings, output_directory: str, build_directory: str,
benchmark_repetitions: int, delete_build: bool):
"""Builds and runs a single benchmark."""
current_dir = os.getcwd()
try:
if delete_build:
shutil.rmtree(build_directory, ignore_errors=True)
os.makedirs(build_directory, exist_ok=True)
os.makedirs(output_directory, exist_ok=True)
os.chdir(build_directory)
cmake_args = [
"cmake", "..", "-DMUJOCO_BUILD_TESTS=ON", "-DCMAKE_BUILD_TYPE=Release"
]
cmake_args.extend(os_specific_cmake_flags())
for _, setting in sorted(settings.items()):
cmake_args.extend(setting.cmake_args)
print(f"Running ({name}):", " ".join(cmake_args))
subprocess.check_call(cmake_args)
subprocess.check_call([
"cmake", "--build", ".", "-j8", "--config=Release", "-t",
"step_benchmark_test"
])
output_path = os.path.join(output_directory, f"{name}.json")
os.chdir("../mujoco/test")
if sys.platform == "win32":
cygwin_run_benchmark(build_directory, output_path, benchmark_repetitions)
else:
binary_path = os.path.join(build_directory, "bin", "step_benchmark_test")
subprocess.check_call([
binary_path,
"--benchmark_filter=all",
"--benchmark_enable_random_interleaving=true",
"--benchmark_min_time=0.5",
f"--benchmark_repetitions={benchmark_repetitions}",
"--benchmark_format=json",
f"--benchmark_out={output_path}",
])
finally:
os.chdir(current_dir)
def os_specific_cmake_flags():
"""CMake args that should be passed to all benchmarks on current OS."""
if sys.platform == "win32":
return ("-A", "x64", "-Thost=x86",
"-DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded",
"-DCMAKE_SYSTEM_VERSION=10.0.19041.0")
elif sys.platform.startswith("linux"):
return ()
elif sys.platform == "darwin":
# macOS
return ()
else:
raise ValueError(f"Unknown OS: {sys.platform}")
def compiler_cmake_flags_variants():
"""Returns a list of benchmark settings for different compilers."""
if sys.platform == "win32":
return [
_Setting("VS2022", ("-G", "Visual Studio 17 2022")),
# MSVC 2017 doesn't seem to support C17 standard.
# _Setting("VS2017", ("-G", "Visual Studio 15 2017")),
]
elif sys.platform.startswith("linux"):
return [
_Setting("clang-11",
("-DCMAKE_C_COMPILER=clang-11",
"-DCMAKE_CXX_COMPILER=clang++-11", "-DCMAKE_LINKER=lld-11")),
_Setting("clang-8",
("-DCMAKE_C_COMPILER=clang-8",
"-DCMAKE_CXX_COMPILER=clang++-8", "-DCMAKE_LINKER=lld-8")),
]
elif sys.platform == "darwin":
# macOS
return [_Setting("clang", ())]
else:
raise ValueError(f"Unknown OS: {sys.platform}")
def cygwin_run_benchmark(build_directory, output_path, benchmark_repetitions):
"""Runs the benchmark command under the cygwin environment."""
# The subprocess module interacts badly with cygwin.
# Rather than trying to run the binary directly through Popen, use bash on
# cygwin.
lib_path = cygwin_path(os.path.join(build_directory, "lib", "Release"))
binary_path = cygwin_path(
os.path.join(build_directory, "bin", "Release",
"step_benchmark_test"))
cygwin = subprocess.Popen(["bash"], stdin=subprocess.PIPE)
command = (
f'PATH="$PATH:{lib_path}" {binary_path} --benchmark_filter=all '
"--benchmark_enable_random_interleaving=true ",
"--benchmark_min_time=0.5 "
f"--benchmark_repetitions={benchmark_repetitions} "
"--benchmark_format=json "
f"--benchmark_out='{output_path}'")
cygwin.communicate(input=bytes(command, "utf-8"))
if cygwin.returncode:
raise ValueError(f"Benchmark returned error code: {cygwin.returncode}")
def cygwin_path(windows_path):
path = windows_path.replace("\\", "/")
path = re.sub(r".*cygwin64", "", path)
return path
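# For example, cygwin_path(r'C:\cygwin64\home\user\build') yields
# '/home/user/build', the form that bash under cygwin expects.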
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output_directory", default="benchmark_results")
parser.add_argument(
"-b",
"--build_directory",
default="build",
help="Base path for build directories. Benchmark names will be appended."
)
parser.add_argument("-n", "--repetitions", type=int, default=30)
parser.add_argument(
"-d",
"--delete_build",
nargs="?",
type=bool,
const=True,
default=False,
help="Delete the build directory before building benchmark.")
args = parser.parse_args(argv[1:])
run_benchmark_variants(
output_directory=os.path.abspath(args.output_directory),
build_directory=os.path.abspath(args.build_directory),
delete_build=args.delete_build,
benchmark_repetitions=args.repetitions)
if __name__ == "__main__":
main(sys.argv)
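# Example invocation (from a checkout containing this script):
#
#   python run_ablation.py -o benchmark_results -n 30 -d
#
# This builds each variant in its own build directory, runs
# step_benchmark_test with 30 repetitions, and writes one JSON file per
# variant into benchmark_results/.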
| mujoco-main | test/benchmark/run_ablation.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides information about MuJoCo API functions.
DO NOT EDIT. THIS FILE IS AUTOMATICALLY GENERATED.
"""
from typing import Mapping
from .ast_nodes import ArrayType
from .ast_nodes import FunctionDecl
from .ast_nodes import FunctionParameterDecl
from .ast_nodes import PointerType
from .ast_nodes import ValueType
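# Usage sketch: entries map MuJoCo C function names to structured
# declarations, e.g.
#
#   decl = FUNCTIONS['mj_step']
#   decl.return_type                   # ValueType(name='void')
#   [p.name for p in decl.parameters]  # ['m', 'd']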
FUNCTIONS: Mapping[str, FunctionDecl] = dict([
('mj_defaultVFS',
FunctionDecl(
name='mj_defaultVFS',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='vfs',
type=PointerType(
inner_type=ValueType(name='mjVFS'),
),
),
),
doc='Initialize VFS to empty (no deallocation).',
)),
('mj_addFileVFS',
FunctionDecl(
name='mj_addFileVFS',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='vfs',
type=PointerType(
inner_type=ValueType(name='mjVFS'),
),
),
FunctionParameterDecl(
name='directory',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
FunctionParameterDecl(
name='filename',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
),
doc='Add file to VFS, return 0: success, 1: full, 2: repeated name, -1: failed to load.', # pylint: disable=line-too-long
)),
('mj_makeEmptyFileVFS',
FunctionDecl(
name='mj_makeEmptyFileVFS',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='vfs',
type=PointerType(
inner_type=ValueType(name='mjVFS'),
),
),
FunctionParameterDecl(
name='filename',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
FunctionParameterDecl(
name='filesize',
type=ValueType(name='int'),
),
),
doc='Make empty file in VFS, return 0: success, 1: full, 2: repeated name.', # pylint: disable=line-too-long
)),
('mj_findFileVFS',
FunctionDecl(
name='mj_findFileVFS',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='vfs',
type=PointerType(
inner_type=ValueType(name='mjVFS', is_const=True),
),
),
FunctionParameterDecl(
name='filename',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
),
doc='Return file index in VFS, or -1 if not found in VFS.',
)),
('mj_deleteFileVFS',
FunctionDecl(
name='mj_deleteFileVFS',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='vfs',
type=PointerType(
inner_type=ValueType(name='mjVFS'),
),
),
FunctionParameterDecl(
name='filename',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
),
doc='Delete file from VFS, return 0: success, -1: not found in VFS.',
)),
('mj_deleteVFS',
FunctionDecl(
name='mj_deleteVFS',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='vfs',
type=PointerType(
inner_type=ValueType(name='mjVFS'),
),
),
),
doc='Delete all files from VFS.',
)),
('mj_loadXML',
FunctionDecl(
name='mj_loadXML',
return_type=PointerType(
inner_type=ValueType(name='mjModel'),
),
parameters=(
FunctionParameterDecl(
name='filename',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
FunctionParameterDecl(
name='vfs',
type=PointerType(
inner_type=ValueType(name='mjVFS', is_const=True),
),
),
FunctionParameterDecl(
name='error',
type=PointerType(
inner_type=ValueType(name='char'),
),
),
FunctionParameterDecl(
name='error_sz',
type=ValueType(name='int'),
),
),
doc='Parse XML file in MJCF or URDF format, compile it, return low-level model. If vfs is not NULL, look up files in vfs before reading from disk. If error is not NULL, it must have size error_sz.', # pylint: disable=line-too-long
)),
('mj_saveLastXML',
FunctionDecl(
name='mj_saveLastXML',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='filename',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='error',
type=PointerType(
inner_type=ValueType(name='char'),
),
),
FunctionParameterDecl(
name='error_sz',
type=ValueType(name='int'),
),
),
doc='Update XML data structures with info from low-level model, save as MJCF. If error is not NULL, it must have size error_sz.', # pylint: disable=line-too-long
)),
('mj_freeLastXML',
FunctionDecl(
name='mj_freeLastXML',
return_type=ValueType(name='void'),
parameters=(),
doc='Free last XML model if loaded. Called internally at each load.',
)),
('mj_printSchema',
FunctionDecl(
name='mj_printSchema',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='filename',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
FunctionParameterDecl(
name='buffer',
type=PointerType(
inner_type=ValueType(name='char'),
),
),
FunctionParameterDecl(
name='buffer_sz',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='flg_html',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='flg_pad',
type=ValueType(name='int'),
),
),
         doc='Print internal XML schema as plain text or HTML, with style-padding or &nbsp;.',  # pylint: disable=line-too-long
)),
('mj_step',
FunctionDecl(
name='mj_step',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Advance simulation, use control callback to obtain external force and control.', # pylint: disable=line-too-long
)),
('mj_step1',
FunctionDecl(
name='mj_step1',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Advance simulation in two steps: before external force and control is set by user.', # pylint: disable=line-too-long
)),
('mj_step2',
FunctionDecl(
name='mj_step2',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Advance simulation in two steps: after external force and control is set by user.', # pylint: disable=line-too-long
)),
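    # Usage sketch (illustrative comment) for the mj_step1/mj_step2 split:
    # step1 runs all computations that do not depend on the control, so the
    # control can be set from freshly computed state before step2 completes
    # the step. `compute_ctrl` is a hypothetical user function.
    #
    #   mujoco.mj_step1(model, data)
    #   data.ctrl[:] = compute_ctrl(data)  # may read position/velocity results
    #   mujoco.mj_step2(model, data)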
('mj_forward',
FunctionDecl(
name='mj_forward',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Forward dynamics: same as mj_step but do not integrate in time.',
)),
('mj_inverse',
FunctionDecl(
name='mj_inverse',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Inverse dynamics: qacc must be set before calling.',
)),
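    # Usage sketch (illustrative comment) for inverse dynamics: data.qacc
    # must be populated first; the resulting generalized forces are written
    # to data.qfrc_inverse. `desired_qacc` is a hypothetical target.
    #
    #   data.qacc[:] = desired_qacc
    #   mujoco.mj_inverse(model, data)
    #   required_force = data.qfrc_inverse.copy()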
('mj_forwardSkip',
FunctionDecl(
name='mj_forwardSkip',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='skipstage',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='skipsensor',
type=ValueType(name='int'),
),
),
doc='Forward dynamics with skip; skipstage is mjtStage.',
)),
('mj_inverseSkip',
FunctionDecl(
name='mj_inverseSkip',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='skipstage',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='skipsensor',
type=ValueType(name='int'),
),
),
doc='Inverse dynamics with skip; skipstage is mjtStage.',
)),
('mj_defaultLROpt',
FunctionDecl(
name='mj_defaultLROpt',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='opt',
type=PointerType(
inner_type=ValueType(name='mjLROpt'),
),
),
),
doc='Set default options for length range computation.',
)),
('mj_defaultSolRefImp',
FunctionDecl(
name='mj_defaultSolRefImp',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='solref',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='solimp',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
),
doc='Set solver parameters to default values.',
)),
('mj_defaultOption',
FunctionDecl(
name='mj_defaultOption',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='opt',
type=PointerType(
inner_type=ValueType(name='mjOption'),
),
),
),
doc='Set physics options to default values.',
)),
('mj_defaultVisual',
FunctionDecl(
name='mj_defaultVisual',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='vis',
type=PointerType(
inner_type=ValueType(name='mjVisual'),
),
),
),
doc='Set visual options to default values.',
)),
('mj_copyModel',
FunctionDecl(
name='mj_copyModel',
return_type=PointerType(
inner_type=ValueType(name='mjModel'),
),
parameters=(
FunctionParameterDecl(
name='dest',
type=PointerType(
inner_type=ValueType(name='mjModel'),
),
),
FunctionParameterDecl(
name='src',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
),
doc='Copy mjModel, allocate new if dest is NULL.',
)),
('mj_saveModel',
FunctionDecl(
name='mj_saveModel',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='filename',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
FunctionParameterDecl(
name='buffer',
type=PointerType(
inner_type=ValueType(name='void'),
),
),
FunctionParameterDecl(
name='buffer_sz',
type=ValueType(name='int'),
),
),
doc='Save model to binary MJB file or memory buffer; buffer has precedence when given.', # pylint: disable=line-too-long
)),
('mj_loadModel',
FunctionDecl(
name='mj_loadModel',
return_type=PointerType(
inner_type=ValueType(name='mjModel'),
),
parameters=(
FunctionParameterDecl(
name='filename',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
FunctionParameterDecl(
name='vfs',
type=PointerType(
inner_type=ValueType(name='mjVFS', is_const=True),
),
),
),
doc='Load model from binary MJB file. If vfs is not NULL, look up file in vfs before reading from disk.', # pylint: disable=line-too-long
)),
('mj_deleteModel',
FunctionDecl(
name='mj_deleteModel',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel'),
),
),
),
doc='Free memory allocation in model.',
)),
('mj_sizeModel',
FunctionDecl(
name='mj_sizeModel',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
),
doc='Return size of buffer needed to hold model.',
)),
('mj_makeData',
FunctionDecl(
name='mj_makeData',
return_type=PointerType(
inner_type=ValueType(name='mjData'),
),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
),
doc='Allocate mjData corresponding to given model. If the model buffer is unallocated the initial configuration will not be set.', # pylint: disable=line-too-long
)),
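    # Usage sketch (illustrative comment): in the Python bindings, mjData
    # allocation is wrapped by the MjData class, so mj_makeData/mj_deleteData
    # are rarely called directly; deallocation is presumably tied to the
    # Python object's lifetime.
    #
    #   data = mujoco.MjData(model)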
('mj_copyData',
FunctionDecl(
name='mj_copyData',
return_type=PointerType(
inner_type=ValueType(name='mjData'),
),
parameters=(
FunctionParameterDecl(
name='dest',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='src',
type=PointerType(
inner_type=ValueType(name='mjData', is_const=True),
),
),
),
doc='Copy mjData. m is only required to contain the size fields from MJMODEL_INTS.', # pylint: disable=line-too-long
)),
('mj_resetData',
FunctionDecl(
name='mj_resetData',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Reset data to defaults.',
)),
('mj_resetDataDebug',
FunctionDecl(
name='mj_resetDataDebug',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='debug_value',
type=ValueType(name='unsigned char'),
),
),
doc='Reset data to defaults, fill everything else with debug_value.',
)),
('mj_resetDataKeyframe',
FunctionDecl(
name='mj_resetDataKeyframe',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='key',
type=ValueType(name='int'),
),
),
doc='Reset data, set fields from specified keyframe.',
)),
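    # Usage sketch (illustrative comment): reset the state to keyframe 0
    # (assumes the model defines at least one keyframe), then recompute
    # derived quantities without advancing time.
    #
    #   mujoco.mj_resetDataKeyframe(model, data, 0)
    #   mujoco.mj_forward(model, data)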
('mj_markStack',
FunctionDecl(
name='mj_markStack',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Mark a new frame on the mjData stack.',
)),
('mj_freeStack',
FunctionDecl(
name='mj_freeStack',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Free the current mjData stack frame. All pointers returned by mj_stackAlloc since the last call to mj_markStack must no longer be used afterwards.', # pylint: disable=line-too-long
)),
('mj_stackAlloc',
FunctionDecl(
name='mj_stackAlloc',
return_type=PointerType(
inner_type=ValueType(name='void'),
),
parameters=(
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='bytes',
type=ValueType(name='size_t'),
),
FunctionParameterDecl(
name='alignment',
type=ValueType(name='size_t'),
),
),
doc='Allocate a number of bytes on mjData stack at a specific alignment. Call mju_error on stack overflow.', # pylint: disable=line-too-long
)),
('mj_stackAllocNum',
FunctionDecl(
name='mj_stackAllocNum',
return_type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
parameters=(
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='size',
type=ValueType(name='int'),
),
),
doc='Allocate array of mjtNums on mjData stack. Call mju_error on stack overflow.', # pylint: disable=line-too-long
)),
('mj_stackAllocInt',
FunctionDecl(
name='mj_stackAllocInt',
return_type=PointerType(
inner_type=ValueType(name='int'),
),
parameters=(
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='size',
type=ValueType(name='int'),
),
),
doc='Allocate array of ints on mjData stack. Call mju_error on stack overflow.', # pylint: disable=line-too-long
)),
('mj_deleteData',
FunctionDecl(
name='mj_deleteData',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Free memory allocation in mjData.',
)),
('mj_resetCallbacks',
FunctionDecl(
name='mj_resetCallbacks',
return_type=ValueType(name='void'),
parameters=(),
doc='Reset all callbacks to NULL pointers (NULL is the default).',
)),
('mj_setConst',
FunctionDecl(
name='mj_setConst',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel'),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Set constant fields of mjModel, corresponding to qpos0 configuration.', # pylint: disable=line-too-long
)),
('mj_setLengthRange',
FunctionDecl(
name='mj_setLengthRange',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel'),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='index',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='opt',
type=PointerType(
inner_type=ValueType(name='mjLROpt', is_const=True),
),
),
FunctionParameterDecl(
name='error',
type=PointerType(
inner_type=ValueType(name='char'),
),
),
FunctionParameterDecl(
name='error_sz',
type=ValueType(name='int'),
),
),
doc='Set actuator_lengthrange for specified actuator; return 1 if ok, 0 if error.', # pylint: disable=line-too-long
)),
('mj_printFormattedModel',
FunctionDecl(
name='mj_printFormattedModel',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='filename',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
FunctionParameterDecl(
name='float_format',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
),
doc='Print mjModel to text file, specifying format. float_format must be a valid printf-style format string for a single float value.', # pylint: disable=line-too-long
)),
('mj_printModel',
FunctionDecl(
name='mj_printModel',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='filename',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
),
doc='Print model to text file.',
)),
('mj_printFormattedData',
FunctionDecl(
name='mj_printFormattedData',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='filename',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
FunctionParameterDecl(
name='float_format',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
),
         doc='Print mjData to text file, specifying format. float_format must be a valid printf-style format string for a single float value.', # pylint: disable=line-too-long
)),
('mj_printData',
FunctionDecl(
name='mj_printData',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='filename',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
),
doc='Print data to text file.',
)),
('mju_printMat',
FunctionDecl(
name='mju_printMat',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='mat',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='nr',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='nc',
type=ValueType(name='int'),
),
),
doc='Print matrix to screen.',
)),
('mju_printMatSparse',
FunctionDecl(
name='mju_printMatSparse',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='mat',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='nr',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='rownnz',
type=PointerType(
inner_type=ValueType(name='int', is_const=True),
),
),
FunctionParameterDecl(
name='rowadr',
type=PointerType(
inner_type=ValueType(name='int', is_const=True),
),
),
FunctionParameterDecl(
name='colind',
type=PointerType(
inner_type=ValueType(name='int', is_const=True),
),
),
),
doc='Print sparse matrix to screen.',
)),
('mj_fwdPosition',
FunctionDecl(
name='mj_fwdPosition',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Run position-dependent computations.',
)),
('mj_fwdVelocity',
FunctionDecl(
name='mj_fwdVelocity',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Run velocity-dependent computations.',
)),
('mj_fwdActuation',
FunctionDecl(
name='mj_fwdActuation',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Compute actuator force qfrc_actuator.',
)),
('mj_fwdAcceleration',
FunctionDecl(
name='mj_fwdAcceleration',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Add up all non-constraint forces, compute qacc_smooth.',
)),
('mj_fwdConstraint',
FunctionDecl(
name='mj_fwdConstraint',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Run selected constraint solver.',
)),
('mj_Euler',
FunctionDecl(
name='mj_Euler',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Euler integrator, semi-implicit in velocity.',
)),
('mj_RungeKutta',
FunctionDecl(
name='mj_RungeKutta',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='N',
type=ValueType(name='int'),
),
),
doc='Runge-Kutta explicit order-N integrator.',
)),
('mj_invPosition',
FunctionDecl(
name='mj_invPosition',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Run position-dependent computations in inverse dynamics.',
)),
('mj_invVelocity',
FunctionDecl(
name='mj_invVelocity',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Run velocity-dependent computations in inverse dynamics.',
)),
('mj_invConstraint',
FunctionDecl(
name='mj_invConstraint',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Apply the analytical formula for inverse constraint dynamics.',
)),
('mj_compareFwdInv',
FunctionDecl(
name='mj_compareFwdInv',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Compare forward and inverse dynamics, save results in fwdinv.',
)),
('mj_sensorPos',
FunctionDecl(
name='mj_sensorPos',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Evaluate position-dependent sensors.',
)),
('mj_sensorVel',
FunctionDecl(
name='mj_sensorVel',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Evaluate velocity-dependent sensors.',
)),
('mj_sensorAcc',
FunctionDecl(
name='mj_sensorAcc',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Evaluate acceleration and force-dependent sensors.',
)),
('mj_energyPos',
FunctionDecl(
name='mj_energyPos',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Evaluate position-dependent energy (potential).',
)),
('mj_energyVel',
FunctionDecl(
name='mj_energyVel',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Evaluate velocity-dependent energy (kinetic).',
)),
('mj_checkPos',
FunctionDecl(
name='mj_checkPos',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Check qpos, reset if any element is too big or nan.',
)),
('mj_checkVel',
FunctionDecl(
name='mj_checkVel',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Check qvel, reset if any element is too big or nan.',
)),
('mj_checkAcc',
FunctionDecl(
name='mj_checkAcc',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Check qacc, reset if any element is too big or nan.',
)),
('mj_kinematics',
FunctionDecl(
name='mj_kinematics',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Run forward kinematics.',
)),
('mj_comPos',
FunctionDecl(
name='mj_comPos',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Map inertias and motion dofs to global frame centered at CoM.',
)),
('mj_camlight',
FunctionDecl(
name='mj_camlight',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Compute camera and light positions and orientations.',
)),
('mj_tendon',
FunctionDecl(
name='mj_tendon',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Compute tendon lengths, velocities and moment arms.',
)),
('mj_transmission',
FunctionDecl(
name='mj_transmission',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Compute actuator transmission lengths and moments.',
)),
('mj_crb',
FunctionDecl(
name='mj_crb',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Run composite rigid body inertia algorithm (CRB).',
)),
('mj_factorM',
FunctionDecl(
name='mj_factorM',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc="Compute sparse L'*D*L factorizaton of inertia matrix.",
)),
('mj_solveM',
FunctionDecl(
name='mj_solveM',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='x',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='y',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc="Solve linear system M * x = y using factorization: x = inv(L'*D*L)*y", # pylint: disable=line-too-long
)),
('mj_solveM2',
FunctionDecl(
name='mj_solveM2',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='x',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='y',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc="Half of linear solve: x = sqrt(inv(D))*inv(L')*y",
)),
('mj_comVel',
FunctionDecl(
name='mj_comVel',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Compute cvel, cdof_dot.',
)),
('mj_passive',
FunctionDecl(
name='mj_passive',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Compute qfrc_passive from spring-dampers, viscosity and density.',
)),
('mj_subtreeVel',
FunctionDecl(
name='mj_subtreeVel',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
         doc='Compute subtree linear velocity and angular momentum.',
)),
('mj_rne',
FunctionDecl(
name='mj_rne',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='flg_acc',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='result',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
),
doc='RNE: compute M(qpos)*qacc + C(qpos,qvel); flg_acc=0 removes inertial term.', # pylint: disable=line-too-long
)),
('mj_rnePostConstraint',
FunctionDecl(
name='mj_rnePostConstraint',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='RNE with complete data: compute cacc, cfrc_ext, cfrc_int.',
)),
('mj_collision',
FunctionDecl(
name='mj_collision',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Run collision detection.',
)),
('mj_makeConstraint',
FunctionDecl(
name='mj_makeConstraint',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Construct constraints.',
)),
('mj_island',
FunctionDecl(
name='mj_island',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Find constraint islands.',
)),
('mj_projectConstraint',
FunctionDecl(
name='mj_projectConstraint',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Compute inverse constraint inertia efc_AR.',
)),
('mj_referenceConstraint',
FunctionDecl(
name='mj_referenceConstraint',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
),
doc='Compute efc_vel, efc_aref.',
)),
('mj_constraintUpdate',
FunctionDecl(
name='mj_constraintUpdate',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='jar',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='cost',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(1,),
),
),
FunctionParameterDecl(
name='flg_coneHessian',
type=ValueType(name='int'),
),
),
doc='Compute efc_state, efc_force, qfrc_constraint, and (optionally) cone Hessians. If cost is not NULL, set *cost = s(jar) where jar = Jac*qacc-aref.', # pylint: disable=line-too-long
)),
('mj_stateSize',
FunctionDecl(
name='mj_stateSize',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='spec',
type=ValueType(name='unsigned int'),
),
),
doc='Return size of state specification.',
)),
('mj_getState',
FunctionDecl(
name='mj_getState',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData', is_const=True),
),
),
FunctionParameterDecl(
name='state',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='spec',
type=ValueType(name='unsigned int'),
),
),
doc='Get state.',
)),
('mj_setState',
FunctionDecl(
name='mj_setState',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='state',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='spec',
type=ValueType(name='unsigned int'),
),
),
doc='Set state.',
)),
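    # Usage sketch (illustrative comment) for the state get/set round-trip
    # declared above, using the mjSTATE_INTEGRATION specification (qpos,
    # qvel, act, ...):
    #
    #   import numpy as np
    #   spec = mujoco.mjtState.mjSTATE_INTEGRATION
    #   state = np.empty(mujoco.mj_stateSize(model, spec), dtype=np.float64)
    #   mujoco.mj_getState(model, data, state, spec)   # snapshot
    #   ...                                            # mutate data
    #   mujoco.mj_setState(model, data, state, spec)   # restore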
('mj_addContact',
FunctionDecl(
name='mj_addContact',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjContact', is_const=True),
),
),
),
doc='Add contact to d->contact list; return 0 if success; 1 if buffer full.', # pylint: disable=line-too-long
)),
('mj_isPyramidal',
FunctionDecl(
name='mj_isPyramidal',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
),
doc='Determine type of friction cone.',
)),
('mj_isSparse',
FunctionDecl(
name='mj_isSparse',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
),
doc='Determine type of constraint Jacobian.',
)),
('mj_isDual',
FunctionDecl(
name='mj_isDual',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
),
doc='Determine type of solver (PGS is dual, CG and Newton are primal).', # pylint: disable=line-too-long
)),
('mj_mulJacVec',
FunctionDecl(
name='mj_mulJacVec',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData', is_const=True),
),
),
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='vec',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
),
doc='Multiply dense or sparse constraint Jacobian by vector.',
)),
('mj_mulJacTVec',
FunctionDecl(
name='mj_mulJacTVec',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData', is_const=True),
),
),
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='vec',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
),
doc='Multiply dense or sparse constraint Jacobian transpose by vector.', # pylint: disable=line-too-long
)),
('mj_jac',
FunctionDecl(
name='mj_jac',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData', is_const=True),
),
),
FunctionParameterDecl(
name='jacp',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='jacr',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='point',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='body',
type=ValueType(name='int'),
),
),
doc='Compute 3/6-by-nv end-effector Jacobian of global point attached to given body.', # pylint: disable=line-too-long
)),
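    # Usage sketch (illustrative comment): the Jacobians are written into
    # caller-allocated (3 x nv) arrays; either output may be None if not
    # needed. `body_id` is a hypothetical body index.
    #
    #   import numpy as np
    #   jacp = np.zeros((3, model.nv))   # translational Jacobian
    #   jacr = np.zeros((3, model.nv))   # rotational Jacobian
    #   point = data.xpos[body_id]       # a global point on the body
    #   mujoco.mj_jac(model, data, jacp, jacr, point, body_id)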
('mj_jacBody',
FunctionDecl(
name='mj_jacBody',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData', is_const=True),
),
),
FunctionParameterDecl(
name='jacp',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='jacr',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='body',
type=ValueType(name='int'),
),
),
doc='Compute body frame end-effector Jacobian.',
)),
('mj_jacBodyCom',
FunctionDecl(
name='mj_jacBodyCom',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData', is_const=True),
),
),
FunctionParameterDecl(
name='jacp',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='jacr',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='body',
type=ValueType(name='int'),
),
),
doc='Compute body center-of-mass end-effector Jacobian.',
)),
('mj_jacSubtreeCom',
FunctionDecl(
name='mj_jacSubtreeCom',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='jacp',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='body',
type=ValueType(name='int'),
),
),
doc='Compute subtree center-of-mass end-effector Jacobian.',
)),
('mj_jacGeom',
FunctionDecl(
name='mj_jacGeom',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData', is_const=True),
),
),
FunctionParameterDecl(
name='jacp',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='jacr',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='geom',
type=ValueType(name='int'),
),
),
doc='Compute geom end-effector Jacobian.',
)),
('mj_jacSite',
FunctionDecl(
name='mj_jacSite',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData', is_const=True),
),
),
FunctionParameterDecl(
name='jacp',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='jacr',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='site',
type=ValueType(name='int'),
),
),
doc='Compute site end-effector Jacobian.',
)),
('mj_jacPointAxis',
FunctionDecl(
name='mj_jacPointAxis',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='jacPoint',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='jacAxis',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='point',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='axis',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='body',
type=ValueType(name='int'),
),
),
doc='Compute translation end-effector Jacobian of point, and rotation Jacobian of axis.', # pylint: disable=line-too-long
)),
('mj_name2id',
FunctionDecl(
name='mj_name2id',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='type',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='name',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
),
doc='Get id of object with the specified mjtObj type and name, returns -1 if id not found.', # pylint: disable=line-too-long
)),
('mj_id2name',
FunctionDecl(
name='mj_id2name',
return_type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='type',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='id',
type=ValueType(name='int'),
),
),
doc='Get name of object with the specified mjtObj type and id, returns NULL if name not found.', # pylint: disable=line-too-long
)),
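    # Usage sketch (illustrative comment) for the name/id lookups declared
    # above ('torso' is a hypothetical body name):
    #
    #   body_id = mujoco.mj_name2id(model, mujoco.mjtObj.mjOBJ_BODY, 'torso')
    #   assert body_id != -1
    #   name = mujoco.mj_id2name(model, mujoco.mjtObj.mjOBJ_BODY, body_id)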
('mj_fullM',
FunctionDecl(
name='mj_fullM',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='dst',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='M',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
),
doc='Convert sparse inertia matrix M into full (i.e. dense) matrix.',
)),
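    # Usage sketch (illustrative comment): expand the sparse inertia matrix
    # data.qM into a dense (nv x nv) array.
    #
    #   import numpy as np
    #   M = np.zeros((model.nv, model.nv))
    #   mujoco.mj_fullM(model, M, data.qM)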
('mj_mulM',
FunctionDecl(
name='mj_mulM',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData', is_const=True),
),
),
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='vec',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
),
doc='Multiply vector by inertia matrix.',
)),
('mj_mulM2',
FunctionDecl(
name='mj_mulM2',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData', is_const=True),
),
),
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='vec',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
),
doc='Multiply vector by (inertia matrix)^(1/2).',
)),
('mj_addM',
FunctionDecl(
name='mj_addM',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='dst',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='rownnz',
type=PointerType(
inner_type=ValueType(name='int'),
),
),
FunctionParameterDecl(
name='rowadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
),
FunctionParameterDecl(
name='colind',
type=PointerType(
inner_type=ValueType(name='int'),
),
),
),
         doc='Add inertia matrix to destination matrix. Destination can be sparse uncompressed, or dense when all int* are NULL.', # pylint: disable=line-too-long
)),
('mj_applyFT',
FunctionDecl(
name='mj_applyFT',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='force',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='torque',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='point',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='body',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='qfrc_target',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
),
doc='Apply Cartesian force and torque (outside xfrc_applied mechanism).', # pylint: disable=line-too-long
)),
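    # Usage sketch (illustrative comment): accumulate a Cartesian
    # force/torque acting at a global point on a body directly into
    # data.qfrc_applied (the values and `body_id` are hypothetical).
    #
    #   import numpy as np
    #   force = np.array([0.0, 0.0, 9.81])
    #   torque = np.zeros(3)
    #   point = data.xipos[body_id]     # body CoM in global coordinates
    #   mujoco.mj_applyFT(model, data, force, torque, point, body_id,
    #                     data.qfrc_applied)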
('mj_objectVelocity',
FunctionDecl(
name='mj_objectVelocity',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData', is_const=True),
),
),
FunctionParameterDecl(
name='objtype',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='objid',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(6,),
),
),
FunctionParameterDecl(
name='flg_local',
type=ValueType(name='int'),
),
),
doc='Compute object 6D velocity (rot:lin) in object-centered frame, world/local orientation.', # pylint: disable=line-too-long
)),
('mj_objectAcceleration',
FunctionDecl(
name='mj_objectAcceleration',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData', is_const=True),
),
),
FunctionParameterDecl(
name='objtype',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='objid',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(6,),
),
),
FunctionParameterDecl(
name='flg_local',
type=ValueType(name='int'),
),
),
doc='Compute object 6D acceleration (rot:lin) in object-centered frame, world/local orientation.', # pylint: disable=line-too-long
)),
('mj_contactForce',
FunctionDecl(
name='mj_contactForce',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData', is_const=True),
),
),
FunctionParameterDecl(
name='id',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='result',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(6,),
),
),
),
doc='Extract 6D force:torque given contact id, in the contact frame.',
)),
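    # Usage sketch (illustrative comment): read the force:torque of contact
    # i, expressed in its contact frame (requires i < data.ncon).
    #
    #   import numpy as np
    #   result = np.zeros(6)
    #   mujoco.mj_contactForce(model, data, i, result)
    #   normal_force = result[0]   # first component is along the contact normal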
('mj_differentiatePos',
FunctionDecl(
name='mj_differentiatePos',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='qvel',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='dt',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='qpos1',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='qpos2',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
),
doc='Compute velocity by finite-differencing two positions.',
)),
('mj_integratePos',
FunctionDecl(
name='mj_integratePos',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='qpos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='qvel',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='dt',
type=ValueType(name='mjtNum'),
),
),
doc='Integrate position with given velocity.',
)),
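    # Usage sketch (illustrative comment) for the position-difference /
    # integration pair declared above; both handle quaternion joints, so
    # qpos arithmetic should go through them rather than plain
    # subtraction/addition. `qpos1`, `qpos2`, `dt` are hypothetical.
    #
    #   import numpy as np
    #   dq = np.zeros(model.nv)
    #   mujoco.mj_differentiatePos(model, dq, dt, qpos1, qpos2)  # dq ~ (q2-q1)/dt
    #   mujoco.mj_integratePos(model, qpos1, dq, dt)             # qpos1 -> ~qpos2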
('mj_normalizeQuat',
FunctionDecl(
name='mj_normalizeQuat',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='qpos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
),
doc='Normalize all quaternions in qpos-type vector.',
)),
('mj_local2Global',
FunctionDecl(
name='mj_local2Global',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='xpos',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='xmat',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(9,),
),
),
FunctionParameterDecl(
name='pos',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='quat',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(4,),
),
),
FunctionParameterDecl(
name='body',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='sameframe',
type=ValueType(name='mjtByte'),
),
),
doc='Map from body local to global Cartesian coordinates.',
)),
('mj_getTotalmass',
FunctionDecl(
name='mj_getTotalmass',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
),
doc='Sum all body masses.',
)),
('mj_setTotalmass',
FunctionDecl(
name='mj_setTotalmass',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel'),
),
),
FunctionParameterDecl(
name='newmass',
type=ValueType(name='mjtNum'),
),
),
doc='Scale body masses and inertias to achieve specified total mass.',
)),
('mj_getPluginConfig',
FunctionDecl(
name='mj_getPluginConfig',
return_type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='plugin_id',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='attrib',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
),
         doc='Return a config attribute value of a plugin instance; returns NULL if the plugin instance ID or attribute name is invalid.', # pylint: disable=line-too-long
)),
('mj_loadPluginLibrary',
FunctionDecl(
name='mj_loadPluginLibrary',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='path',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
),
doc='Load a dynamic library. The dynamic library is assumed to register one or more plugins.', # pylint: disable=line-too-long
)),
('mj_loadAllPluginLibraries',
FunctionDecl(
name='mj_loadAllPluginLibraries',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='directory',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
FunctionParameterDecl(
name='callback',
type=ValueType(name='mjfPluginLibraryLoadCallback'),
),
),
doc='Scan a directory and load all dynamic libraries. Dynamic libraries in the specified directory are assumed to register one or more plugins. Optionally, if a callback is specified, it is called for each dynamic library encountered that registers plugins.', # pylint: disable=line-too-long
)),
('mj_version',
FunctionDecl(
name='mj_version',
return_type=ValueType(name='int'),
parameters=(),
doc='Return version number: 1.0.2 is encoded as 102.',
)),
('mj_versionString',
FunctionDecl(
name='mj_versionString',
return_type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
parameters=(),
doc='Return the current version of MuJoCo as a null-terminated string.', # pylint: disable=line-too-long
)),
('mj_multiRay',
FunctionDecl(
name='mj_multiRay',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='pnt',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='vec',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='geomgroup',
type=PointerType(
inner_type=ValueType(name='mjtByte', is_const=True),
),
),
FunctionParameterDecl(
name='flg_static',
type=ValueType(name='mjtByte'),
),
FunctionParameterDecl(
name='bodyexclude',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='geomid',
type=PointerType(
inner_type=ValueType(name='int'),
),
),
FunctionParameterDecl(
name='dist',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='nray',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='cutoff',
type=ValueType(name='mjtNum'),
),
),
doc='Intersect multiple rays emanating from a single point. Similar semantics to mj_ray, but vec is an array of (nray x 3) directions.', # pylint: disable=line-too-long
)),
('mj_ray',
FunctionDecl(
name='mj_ray',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData', is_const=True),
),
),
FunctionParameterDecl(
name='pnt',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='vec',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='geomgroup',
type=PointerType(
inner_type=ValueType(name='mjtByte', is_const=True),
),
),
FunctionParameterDecl(
name='flg_static',
type=ValueType(name='mjtByte'),
),
FunctionParameterDecl(
name='bodyexclude',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='geomid',
type=ArrayType(
inner_type=ValueType(name='int'),
extents=(1,),
),
),
),
         doc='Intersect ray (pnt+x*vec, x>=0) with visible geoms, except geoms in bodyexclude. Return distance (x) to nearest surface, or -1 if no intersection, and output id of intersected geom in geomid. geomgroup, flg_static are as in mjvOption; geomgroup==NULL skips group exclusion.', # pylint: disable=line-too-long
)),
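    # Usage sketch (illustrative comment): cast a single ray straight down
    # from a point (the arrays hold hypothetical values; geomgroup=None
    # disables group filtering, bodyexclude=-1 excludes no body).
    #
    #   import numpy as np
    #   pnt = np.array([0.0, 0.0, 1.0])
    #   vec = np.array([0.0, 0.0, -1.0])
    #   geomid = np.zeros(1, dtype=np.int32)
    #   dist = mujoco.mj_ray(model, data, pnt, vec, None, 1, -1, geomid)
    #   hit = geomid[0] != -1 and dist >= 0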
('mj_rayHfield',
FunctionDecl(
name='mj_rayHfield',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData', is_const=True),
),
),
FunctionParameterDecl(
name='geomid',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='pnt',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='vec',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
),
doc='Intersect ray with hfield, return nearest distance or -1 if no intersection.', # pylint: disable=line-too-long
)),
('mj_rayMesh',
FunctionDecl(
name='mj_rayMesh',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData', is_const=True),
),
),
FunctionParameterDecl(
name='geomid',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='pnt',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='vec',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
),
doc='Intersect ray with mesh, return nearest distance or -1 if no intersection.', # pylint: disable=line-too-long
)),
('mju_rayGeom',
FunctionDecl(
name='mju_rayGeom',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='pos',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='mat',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(9,),
),
),
FunctionParameterDecl(
name='size',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='pnt',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='vec',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='geomtype',
type=ValueType(name='int'),
),
),
doc='Intersect ray with pure geom, return nearest distance or -1 if no intersection.', # pylint: disable=line-too-long
)),
('mju_raySkin',
FunctionDecl(
name='mju_raySkin',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='nface',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='nvert',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='face',
type=PointerType(
inner_type=ValueType(name='int', is_const=True),
),
),
FunctionParameterDecl(
name='vert',
type=PointerType(
inner_type=ValueType(name='float', is_const=True),
),
),
FunctionParameterDecl(
name='pnt',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='vec',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='vertid',
type=ArrayType(
inner_type=ValueType(name='int'),
extents=(1,),
),
),
),
doc='Intersect ray with skin, return nearest distance or -1 if no intersection, and also output nearest vertex id.', # pylint: disable=line-too-long
)),
('mjv_defaultCamera',
FunctionDecl(
name='mjv_defaultCamera',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='cam',
type=PointerType(
inner_type=ValueType(name='mjvCamera'),
),
),
),
doc='Set default camera.',
)),
('mjv_defaultFreeCamera',
FunctionDecl(
name='mjv_defaultFreeCamera',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='cam',
type=PointerType(
inner_type=ValueType(name='mjvCamera'),
),
),
),
doc='Set default free camera.',
)),
('mjv_defaultPerturb',
FunctionDecl(
name='mjv_defaultPerturb',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='pert',
type=PointerType(
inner_type=ValueType(name='mjvPerturb'),
),
),
),
doc='Set default perturbation.',
)),
('mjv_room2model',
FunctionDecl(
name='mjv_room2model',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='modelpos',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='modelquat',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(4,),
),
),
FunctionParameterDecl(
name='roompos',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='roomquat',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(4,),
),
),
FunctionParameterDecl(
name='scn',
type=PointerType(
inner_type=ValueType(name='mjvScene', is_const=True),
),
),
),
doc='Transform pose from room to model space.',
)),
('mjv_model2room',
FunctionDecl(
name='mjv_model2room',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='roompos',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='roomquat',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(4,),
),
),
FunctionParameterDecl(
name='modelpos',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='modelquat',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(4,),
),
),
FunctionParameterDecl(
name='scn',
type=PointerType(
inner_type=ValueType(name='mjvScene', is_const=True),
),
),
),
doc='Transform pose from model to room space.',
)),
('mjv_cameraInModel',
FunctionDecl(
name='mjv_cameraInModel',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='headpos',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='forward',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='up',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='scn',
type=PointerType(
inner_type=ValueType(name='mjvScene', is_const=True),
),
),
),
doc='Get camera info in model space; average left and right OpenGL cameras.', # pylint: disable=line-too-long
)),
('mjv_cameraInRoom',
FunctionDecl(
name='mjv_cameraInRoom',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='headpos',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='forward',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='up',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='scn',
type=PointerType(
inner_type=ValueType(name='mjvScene', is_const=True),
),
),
),
doc='Get camera info in room space; average left and right OpenGL cameras.', # pylint: disable=line-too-long
)),
('mjv_frustumHeight',
FunctionDecl(
name='mjv_frustumHeight',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='scn',
type=PointerType(
inner_type=ValueType(name='mjvScene', is_const=True),
),
),
),
doc='Get frustum height at unit distance from camera; average left and right OpenGL cameras.', # pylint: disable=line-too-long
)),
('mjv_alignToCamera',
FunctionDecl(
name='mjv_alignToCamera',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='vec',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='forward',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
),
doc='Rotate 3D vec in horizontal plane by angle between (0,1) and (forward_x,forward_y).', # pylint: disable=line-too-long
)),
('mjv_moveCamera',
FunctionDecl(
name='mjv_moveCamera',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='action',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='reldx',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='reldy',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='scn',
type=PointerType(
inner_type=ValueType(name='mjvScene', is_const=True),
),
),
FunctionParameterDecl(
name='cam',
type=PointerType(
inner_type=ValueType(name='mjvCamera'),
),
),
),
doc='Move camera with mouse; action is mjtMouse.',
)),
('mjv_moveCameraFromState',
FunctionDecl(
name='mjv_moveCameraFromState',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='scnstate',
type=PointerType(
inner_type=ValueType(name='mjvSceneState', is_const=True),
),
),
FunctionParameterDecl(
name='action',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='reldx',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='reldy',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='scn',
type=PointerType(
inner_type=ValueType(name='mjvScene', is_const=True),
),
),
FunctionParameterDecl(
name='cam',
type=PointerType(
inner_type=ValueType(name='mjvCamera'),
),
),
),
doc='Move camera with mouse given a scene state; action is mjtMouse.',
)),
('mjv_movePerturb',
FunctionDecl(
name='mjv_movePerturb',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData', is_const=True),
),
),
FunctionParameterDecl(
name='action',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='reldx',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='reldy',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='scn',
type=PointerType(
inner_type=ValueType(name='mjvScene', is_const=True),
),
),
FunctionParameterDecl(
name='pert',
type=PointerType(
inner_type=ValueType(name='mjvPerturb'),
),
),
),
doc='Move perturb object with mouse; action is mjtMouse.',
)),
('mjv_movePerturbFromState',
FunctionDecl(
name='mjv_movePerturbFromState',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='scnstate',
type=PointerType(
inner_type=ValueType(name='mjvSceneState', is_const=True),
),
),
FunctionParameterDecl(
name='action',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='reldx',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='reldy',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='scn',
type=PointerType(
inner_type=ValueType(name='mjvScene', is_const=True),
),
),
FunctionParameterDecl(
name='pert',
type=PointerType(
inner_type=ValueType(name='mjvPerturb'),
),
),
),
doc='Move perturb object with mouse given a scene state; action is mjtMouse.', # pylint: disable=line-too-long
)),
('mjv_moveModel',
FunctionDecl(
name='mjv_moveModel',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='action',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='reldx',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='reldy',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='roomup',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='scn',
type=PointerType(
inner_type=ValueType(name='mjvScene'),
),
),
),
doc='Move model with mouse; action is mjtMouse.',
)),
('mjv_initPerturb',
FunctionDecl(
name='mjv_initPerturb',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='scn',
type=PointerType(
inner_type=ValueType(name='mjvScene', is_const=True),
),
),
FunctionParameterDecl(
name='pert',
type=PointerType(
inner_type=ValueType(name='mjvPerturb'),
),
),
),
doc='Copy perturb pos,quat from selected body; set scale for perturbation.', # pylint: disable=line-too-long
)),
('mjv_applyPerturbPose',
FunctionDecl(
name='mjv_applyPerturbPose',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='pert',
type=PointerType(
inner_type=ValueType(name='mjvPerturb', is_const=True),
),
),
FunctionParameterDecl(
name='flg_paused',
type=ValueType(name='int'),
),
),
doc='Set perturb pos,quat in d->mocap when selected body is mocap, and in d->qpos otherwise. Write d->qpos only if flg_paused and subtree root for selected body has free joint.', # pylint: disable=line-too-long
)),
('mjv_applyPerturbForce',
FunctionDecl(
name='mjv_applyPerturbForce',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='pert',
type=PointerType(
inner_type=ValueType(name='mjvPerturb', is_const=True),
),
),
),
doc='Set perturb force,torque in d->xfrc_applied, if selected body is dynamic.', # pylint: disable=line-too-long
)),
('mjv_averageCamera',
FunctionDecl(
name='mjv_averageCamera',
return_type=ValueType(name='mjvGLCamera'),
parameters=(
FunctionParameterDecl(
name='cam1',
type=PointerType(
inner_type=ValueType(name='mjvGLCamera', is_const=True),
),
),
FunctionParameterDecl(
name='cam2',
type=PointerType(
inner_type=ValueType(name='mjvGLCamera', is_const=True),
),
),
),
doc='Return the average of two OpenGL cameras.',
)),
('mjv_select',
FunctionDecl(
name='mjv_select',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData', is_const=True),
),
),
FunctionParameterDecl(
name='vopt',
type=PointerType(
inner_type=ValueType(name='mjvOption', is_const=True),
),
),
FunctionParameterDecl(
name='aspectratio',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='relx',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='rely',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='scn',
type=PointerType(
inner_type=ValueType(name='mjvScene', is_const=True),
),
),
FunctionParameterDecl(
name='selpnt',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='geomid',
type=ArrayType(
inner_type=ValueType(name='int'),
extents=(1,),
),
),
FunctionParameterDecl(
name='skinid',
type=ArrayType(
inner_type=ValueType(name='int'),
extents=(1,),
),
),
),
doc='Select geom or skin with mouse, return bodyid; -1: none selected.', # pylint: disable=line-too-long
)),
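    # Clarifying note on mjv_select (hedged; based on the standard MuJoCo
    # documentation): `aspectratio` is viewport width divided by height, and
    # (relx, rely) are the click coordinates as fractions of the viewport.
    # On a hit, the 3D intersection point is written to `selpnt`, and
    # geomid/skinid receive the selected geom or skin index (-1 otherwise).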
('mjv_defaultOption',
FunctionDecl(
name='mjv_defaultOption',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='opt',
type=PointerType(
inner_type=ValueType(name='mjvOption'),
),
),
),
doc='Set default visualization options.',
)),
('mjv_defaultFigure',
FunctionDecl(
name='mjv_defaultFigure',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='fig',
type=PointerType(
inner_type=ValueType(name='mjvFigure'),
),
),
),
doc='Set default figure.',
)),
('mjv_initGeom',
FunctionDecl(
name='mjv_initGeom',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='geom',
type=PointerType(
inner_type=ValueType(name='mjvGeom'),
),
),
FunctionParameterDecl(
name='type',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='size',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='pos',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='mat',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(9,),
),
),
FunctionParameterDecl(
name='rgba',
type=ArrayType(
inner_type=ValueType(name='float', is_const=True),
extents=(4,),
),
),
),
doc='Initialize given geom fields when not NULL, set the rest to their default values.', # pylint: disable=line-too-long
)),
('mjv_makeConnector',
FunctionDecl(
name='mjv_makeConnector',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='geom',
type=PointerType(
inner_type=ValueType(name='mjvGeom'),
),
),
FunctionParameterDecl(
name='type',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='width',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='a0',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='a1',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='a2',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='b0',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='b1',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='b2',
type=ValueType(name='mjtNum'),
),
),
doc='Set (type, size, pos, mat) for connector-type geom between given points. Assume that mjv_initGeom was already called to set all other properties. Width of mjGEOM_LINE is denominated in pixels. Deprecated: use mjv_connector.', # pylint: disable=line-too-long
)),
('mjv_connector',
FunctionDecl(
name='mjv_connector',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='geom',
type=PointerType(
inner_type=ValueType(name='mjvGeom'),
),
),
FunctionParameterDecl(
name='type',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='width',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='from',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='to',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
),
doc='Set (type, size, pos, mat) for connector-type geom between given points. Assume that mjv_initGeom was already called to set all other properties. Width of mjGEOM_LINE is denominated in pixels.', # pylint: disable=line-too-long
)),
('mjv_defaultScene',
FunctionDecl(
name='mjv_defaultScene',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='scn',
type=PointerType(
inner_type=ValueType(name='mjvScene'),
),
),
),
doc='Set default abstract scene.',
)),
('mjv_makeScene',
FunctionDecl(
name='mjv_makeScene',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='scn',
type=PointerType(
inner_type=ValueType(name='mjvScene'),
),
),
FunctionParameterDecl(
name='maxgeom',
type=ValueType(name='int'),
),
),
doc='Allocate resources in abstract scene.',
)),
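    # Lifecycle note (a sketch): mjvScene follows the usual
    # default/make/free pattern in C. The Python wrapper class is assumed to
    # drive mjv_makeScene/mjv_freeScene automatically:
    #
    #   import mujoco
    #   model = mujoco.MjModel.from_xml_string('<mujoco/>')
    #   scn = mujoco.MjvScene(model, maxgeom=10000)  # wraps mjv_makeScene
    #   # resources are released when scn is garbage-collected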
('mjv_freeScene',
FunctionDecl(
name='mjv_freeScene',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='scn',
type=PointerType(
inner_type=ValueType(name='mjvScene'),
),
),
),
doc='Free abstract scene.',
)),
('mjv_updateScene',
FunctionDecl(
name='mjv_updateScene',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='opt',
type=PointerType(
inner_type=ValueType(name='mjvOption', is_const=True),
),
),
FunctionParameterDecl(
name='pert',
type=PointerType(
inner_type=ValueType(name='mjvPerturb', is_const=True),
),
),
FunctionParameterDecl(
name='cam',
type=PointerType(
inner_type=ValueType(name='mjvCamera'),
),
),
FunctionParameterDecl(
name='catmask',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='scn',
type=PointerType(
inner_type=ValueType(name='mjvScene'),
),
),
),
doc='Update entire scene given model state.',
)),
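    # Per-frame usage sketch: mjv_updateScene followed by mjr_render is the
    # canonical update/draw pair. Illustrated with the Python bindings,
    # which are assumed to mirror the C signatures:
    #
    #   import mujoco
    #   model = mujoco.MjModel.from_xml_string('<mujoco/>')
    #   data = mujoco.MjData(model)
    #   opt, pert = mujoco.MjvOption(), mujoco.MjvPerturb()
    #   cam = mujoco.MjvCamera()
    #   scn = mujoco.MjvScene(model, maxgeom=1000)
    #   mujoco.mj_forward(model, data)
    #   mujoco.mjv_updateScene(model, data, opt, pert, cam,
    #                          mujoco.mjtCatBit.mjCAT_ALL, scn)
    #   # scn now holds the abstract geoms; hand it to mjr_render to draw.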
('mjv_updateSceneFromState',
FunctionDecl(
name='mjv_updateSceneFromState',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='scnstate',
type=PointerType(
inner_type=ValueType(name='mjvSceneState', is_const=True),
),
),
FunctionParameterDecl(
name='opt',
type=PointerType(
inner_type=ValueType(name='mjvOption', is_const=True),
),
),
FunctionParameterDecl(
name='pert',
type=PointerType(
inner_type=ValueType(name='mjvPerturb', is_const=True),
),
),
FunctionParameterDecl(
name='cam',
type=PointerType(
inner_type=ValueType(name='mjvCamera'),
),
),
FunctionParameterDecl(
name='catmask',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='scn',
type=PointerType(
inner_type=ValueType(name='mjvScene'),
),
),
),
doc='Update entire scene from a scene state, return the number of new mjWARN_VGEOMFULL warnings.', # pylint: disable=line-too-long
)),
('mjv_defaultSceneState',
FunctionDecl(
name='mjv_defaultSceneState',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='scnstate',
type=PointerType(
inner_type=ValueType(name='mjvSceneState'),
),
),
),
doc='Set default scene state.',
)),
('mjv_makeSceneState',
FunctionDecl(
name='mjv_makeSceneState',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData', is_const=True),
),
),
FunctionParameterDecl(
name='scnstate',
type=PointerType(
inner_type=ValueType(name='mjvSceneState'),
),
),
FunctionParameterDecl(
name='maxgeom',
type=ValueType(name='int'),
),
),
doc='Allocate resources and initialize a scene state object.',
)),
('mjv_freeSceneState',
FunctionDecl(
name='mjv_freeSceneState',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='scnstate',
type=PointerType(
inner_type=ValueType(name='mjvSceneState'),
),
),
),
doc='Free scene state.',
)),
('mjv_updateSceneState',
FunctionDecl(
name='mjv_updateSceneState',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='opt',
type=PointerType(
inner_type=ValueType(name='mjvOption', is_const=True),
),
),
FunctionParameterDecl(
name='scnstate',
type=PointerType(
inner_type=ValueType(name='mjvSceneState'),
),
),
),
doc='Update a scene state from model and data.',
)),
('mjv_addGeoms',
FunctionDecl(
name='mjv_addGeoms',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='opt',
type=PointerType(
inner_type=ValueType(name='mjvOption', is_const=True),
),
),
FunctionParameterDecl(
name='pert',
type=PointerType(
inner_type=ValueType(name='mjvPerturb', is_const=True),
),
),
FunctionParameterDecl(
name='catmask',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='scn',
type=PointerType(
inner_type=ValueType(name='mjvScene'),
),
),
),
doc='Add geoms from selected categories.',
)),
('mjv_makeLights',
FunctionDecl(
name='mjv_makeLights',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='scn',
type=PointerType(
inner_type=ValueType(name='mjvScene'),
),
),
),
doc='Make list of lights.',
)),
('mjv_updateCamera',
FunctionDecl(
name='mjv_updateCamera',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='cam',
type=PointerType(
inner_type=ValueType(name='mjvCamera'),
),
),
FunctionParameterDecl(
name='scn',
type=PointerType(
inner_type=ValueType(name='mjvScene'),
),
),
),
doc='Update camera.',
)),
('mjv_updateSkin',
FunctionDecl(
name='mjv_updateSkin',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='scn',
type=PointerType(
inner_type=ValueType(name='mjvScene'),
),
),
),
doc='Update skins.',
)),
('mjr_defaultContext',
FunctionDecl(
name='mjr_defaultContext',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext'),
),
),
),
doc='Set default mjrContext.',
)),
('mjr_makeContext',
FunctionDecl(
name='mjr_makeContext',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext'),
),
),
FunctionParameterDecl(
name='fontscale',
type=ValueType(name='int'),
),
),
doc='Allocate resources in custom OpenGL context; fontscale is mjtFontScale.', # pylint: disable=line-too-long
)),
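    # Context lifecycle note (a sketch): an OpenGL context must be current
    # before mjr_makeContext runs. With the Python bindings (assumed to
    # mirror the C API):
    #
    #   import mujoco
    #   gl = mujoco.GLContext(640, 480)   # headless/offscreen GL context
    #   gl.make_current()
    #   model = mujoco.MjModel.from_xml_string('<mujoco/>')
    #   con = mujoco.MjrContext(model, mujoco.mjtFontScale.mjFONTSCALE_150)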
('mjr_changeFont',
FunctionDecl(
name='mjr_changeFont',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='fontscale',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext'),
),
),
),
doc='Change font of existing context.',
)),
('mjr_addAux',
FunctionDecl(
name='mjr_addAux',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='index',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='width',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='height',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='samples',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext'),
),
),
),
doc='Add Aux buffer with given index to context; free previous Aux buffer.', # pylint: disable=line-too-long
)),
('mjr_freeContext',
FunctionDecl(
name='mjr_freeContext',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext'),
),
),
),
doc='Free resources in custom OpenGL context, set to default.',
)),
('mjr_resizeOffscreen',
FunctionDecl(
name='mjr_resizeOffscreen',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='width',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='height',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext'),
),
),
),
doc='Resize offscreen buffers.',
)),
('mjr_uploadTexture',
FunctionDecl(
name='mjr_uploadTexture',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext', is_const=True),
),
),
FunctionParameterDecl(
name='texid',
type=ValueType(name='int'),
),
),
doc='Upload texture to GPU, overwriting previous upload if any.',
)),
('mjr_uploadMesh',
FunctionDecl(
name='mjr_uploadMesh',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext', is_const=True),
),
),
FunctionParameterDecl(
name='meshid',
type=ValueType(name='int'),
),
),
doc='Upload mesh to GPU, overwriting previous upload if any.',
)),
('mjr_uploadHField',
FunctionDecl(
name='mjr_uploadHField',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext', is_const=True),
),
),
FunctionParameterDecl(
name='hfieldid',
type=ValueType(name='int'),
),
),
doc='Upload height field to GPU, overwriting previous upload if any.',
)),
('mjr_restoreBuffer',
FunctionDecl(
name='mjr_restoreBuffer',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext', is_const=True),
),
),
),
doc='Make con->currentBuffer current again.',
)),
('mjr_setBuffer',
FunctionDecl(
name='mjr_setBuffer',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='framebuffer',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext'),
),
),
),
doc='Set OpenGL framebuffer for rendering: mjFB_WINDOW or mjFB_OFFSCREEN. If only one buffer is available, set that buffer and ignore framebuffer argument.', # pylint: disable=line-too-long
)),
('mjr_readPixels',
FunctionDecl(
name='mjr_readPixels',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='rgb',
type=PointerType(
inner_type=ValueType(name='unsigned char'),
),
),
FunctionParameterDecl(
name='depth',
type=PointerType(
inner_type=ValueType(name='float'),
),
),
FunctionParameterDecl(
name='viewport',
type=ValueType(name='mjrRect'),
),
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext', is_const=True),
),
),
),
doc='Read pixels from current OpenGL framebuffer to client buffer. Viewport is in OpenGL framebuffer; client buffer starts at (0,0).', # pylint: disable=line-too-long
)),
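    # Offscreen-capture sketch (assumes `con` and `scn` set up as in the
    # notes above, and that the bindings mirror the C API):
    #
    #   import numpy as np
    #   viewport = mujoco.MjrRect(0, 0, 640, 480)
    #   mujoco.mjr_setBuffer(mujoco.mjtFramebuffer.mjFB_OFFSCREEN, con)
    #   mujoco.mjr_render(viewport, scn, con)
    #   rgb = np.empty((480, 640, 3), dtype=np.uint8)
    #   mujoco.mjr_readPixels(rgb, None, viewport, con)
    #   rgb = np.flipud(rgb)  # OpenGL rows run bottom-to-top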
('mjr_drawPixels',
FunctionDecl(
name='mjr_drawPixels',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='rgb',
type=PointerType(
inner_type=ValueType(name='unsigned char', is_const=True),
),
),
FunctionParameterDecl(
name='depth',
type=PointerType(
inner_type=ValueType(name='float', is_const=True),
),
),
FunctionParameterDecl(
name='viewport',
type=ValueType(name='mjrRect'),
),
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext', is_const=True),
),
),
),
doc='Draw pixels from client buffer to current OpenGL framebuffer. Viewport is in OpenGL framebuffer; client buffer starts at (0,0).', # pylint: disable=line-too-long
)),
('mjr_blitBuffer',
FunctionDecl(
name='mjr_blitBuffer',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='src',
type=ValueType(name='mjrRect'),
),
FunctionParameterDecl(
name='dst',
type=ValueType(name='mjrRect'),
),
FunctionParameterDecl(
name='flg_color',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='flg_depth',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext', is_const=True),
),
),
),
         doc='Blit from src viewport in current framebuffer to dst viewport in other framebuffer. If src, dst have different size and flg_depth==0, color is interpolated with GL_LINEAR.', # pylint: disable=line-too-long
)),
('mjr_setAux',
FunctionDecl(
name='mjr_setAux',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='index',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext', is_const=True),
),
),
),
doc='Set Aux buffer for custom OpenGL rendering (call restoreBuffer when done).', # pylint: disable=line-too-long
)),
('mjr_blitAux',
FunctionDecl(
name='mjr_blitAux',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='index',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='src',
type=ValueType(name='mjrRect'),
),
FunctionParameterDecl(
name='left',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='bottom',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext', is_const=True),
),
),
),
doc='Blit from Aux buffer to con->currentBuffer.',
)),
('mjr_text',
FunctionDecl(
name='mjr_text',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='font',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='txt',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext', is_const=True),
),
),
FunctionParameterDecl(
name='x',
type=ValueType(name='float'),
),
FunctionParameterDecl(
name='y',
type=ValueType(name='float'),
),
FunctionParameterDecl(
name='r',
type=ValueType(name='float'),
),
FunctionParameterDecl(
name='g',
type=ValueType(name='float'),
),
FunctionParameterDecl(
name='b',
type=ValueType(name='float'),
),
),
doc='Draw text at (x,y) in relative coordinates; font is mjtFont.',
)),
('mjr_overlay',
FunctionDecl(
name='mjr_overlay',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='font',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='gridpos',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='viewport',
type=ValueType(name='mjrRect'),
),
FunctionParameterDecl(
name='overlay',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
FunctionParameterDecl(
name='overlay2',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext', is_const=True),
),
),
),
doc='Draw text overlay; font is mjtFont; gridpos is mjtGridPos.',
)),
('mjr_maxViewport',
FunctionDecl(
name='mjr_maxViewport',
return_type=ValueType(name='mjrRect'),
parameters=(
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext', is_const=True),
),
),
),
doc='Get maximum viewport for active buffer.',
)),
('mjr_rectangle',
FunctionDecl(
name='mjr_rectangle',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='viewport',
type=ValueType(name='mjrRect'),
),
FunctionParameterDecl(
name='r',
type=ValueType(name='float'),
),
FunctionParameterDecl(
name='g',
type=ValueType(name='float'),
),
FunctionParameterDecl(
name='b',
type=ValueType(name='float'),
),
FunctionParameterDecl(
name='a',
type=ValueType(name='float'),
),
),
doc='Draw rectangle.',
)),
('mjr_label',
FunctionDecl(
name='mjr_label',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='viewport',
type=ValueType(name='mjrRect'),
),
FunctionParameterDecl(
name='font',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='txt',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
FunctionParameterDecl(
name='r',
type=ValueType(name='float'),
),
FunctionParameterDecl(
name='g',
type=ValueType(name='float'),
),
FunctionParameterDecl(
name='b',
type=ValueType(name='float'),
),
FunctionParameterDecl(
name='a',
type=ValueType(name='float'),
),
FunctionParameterDecl(
name='rt',
type=ValueType(name='float'),
),
FunctionParameterDecl(
name='gt',
type=ValueType(name='float'),
),
FunctionParameterDecl(
name='bt',
type=ValueType(name='float'),
),
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext', is_const=True),
),
),
),
doc='Draw rectangle with centered text.',
)),
('mjr_figure',
FunctionDecl(
name='mjr_figure',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='viewport',
type=ValueType(name='mjrRect'),
),
FunctionParameterDecl(
name='fig',
type=PointerType(
inner_type=ValueType(name='mjvFigure'),
),
),
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext', is_const=True),
),
),
),
doc='Draw 2D figure.',
)),
('mjr_render',
FunctionDecl(
name='mjr_render',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='viewport',
type=ValueType(name='mjrRect'),
),
FunctionParameterDecl(
name='scn',
type=PointerType(
inner_type=ValueType(name='mjvScene'),
),
),
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext', is_const=True),
),
),
),
doc='Render 3D scene.',
)),
('mjr_finish',
FunctionDecl(
name='mjr_finish',
return_type=ValueType(name='void'),
parameters=(),
doc='Call glFinish.',
)),
('mjr_getError',
FunctionDecl(
name='mjr_getError',
return_type=ValueType(name='int'),
parameters=(),
doc='Call glGetError and return result.',
)),
('mjr_findRect',
FunctionDecl(
name='mjr_findRect',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='x',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='y',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='nrect',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='rect',
type=PointerType(
inner_type=ValueType(name='mjrRect', is_const=True),
),
),
),
doc='Find first rectangle containing mouse, -1: not found.',
)),
('mjui_themeSpacing',
FunctionDecl(
name='mjui_themeSpacing',
return_type=ValueType(name='mjuiThemeSpacing'),
parameters=(
FunctionParameterDecl(
name='ind',
type=ValueType(name='int'),
),
),
doc='Get builtin UI theme spacing (ind: 0-1).',
)),
('mjui_themeColor',
FunctionDecl(
name='mjui_themeColor',
return_type=ValueType(name='mjuiThemeColor'),
parameters=(
FunctionParameterDecl(
name='ind',
type=ValueType(name='int'),
),
),
doc='Get builtin UI theme color (ind: 0-3).',
)),
('mjui_add',
FunctionDecl(
name='mjui_add',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='ui',
type=PointerType(
inner_type=ValueType(name='mjUI'),
),
),
FunctionParameterDecl(
name='def',
type=PointerType(
inner_type=ValueType(name='mjuiDef', is_const=True),
),
),
),
doc='Add definitions to UI.',
)),
('mjui_addToSection',
FunctionDecl(
name='mjui_addToSection',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='ui',
type=PointerType(
inner_type=ValueType(name='mjUI'),
),
),
FunctionParameterDecl(
name='sect',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='def',
type=PointerType(
inner_type=ValueType(name='mjuiDef', is_const=True),
),
),
),
doc='Add definitions to UI section.',
)),
('mjui_resize',
FunctionDecl(
name='mjui_resize',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='ui',
type=PointerType(
inner_type=ValueType(name='mjUI'),
),
),
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext', is_const=True),
),
),
),
doc='Compute UI sizes.',
)),
('mjui_update',
FunctionDecl(
name='mjui_update',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='section',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='item',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='ui',
type=PointerType(
inner_type=ValueType(name='mjUI', is_const=True),
),
),
FunctionParameterDecl(
name='state',
type=PointerType(
inner_type=ValueType(name='mjuiState', is_const=True),
),
),
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext', is_const=True),
),
),
),
doc='Update specific section/item; -1: update all.',
)),
('mjui_event',
FunctionDecl(
name='mjui_event',
return_type=PointerType(
inner_type=ValueType(name='mjuiItem'),
),
parameters=(
FunctionParameterDecl(
name='ui',
type=PointerType(
inner_type=ValueType(name='mjUI'),
),
),
FunctionParameterDecl(
name='state',
type=PointerType(
inner_type=ValueType(name='mjuiState'),
),
),
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext', is_const=True),
),
),
),
doc='Handle UI event, return pointer to changed item, NULL if no change.', # pylint: disable=line-too-long
)),
('mjui_render',
FunctionDecl(
name='mjui_render',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='ui',
type=PointerType(
inner_type=ValueType(name='mjUI'),
),
),
FunctionParameterDecl(
name='state',
type=PointerType(
inner_type=ValueType(name='mjuiState', is_const=True),
),
),
FunctionParameterDecl(
name='con',
type=PointerType(
inner_type=ValueType(name='mjrContext', is_const=True),
),
),
),
doc='Copy UI image to current buffer.',
)),
('mju_error',
FunctionDecl(
name='mju_error',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='msg',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
),
doc='Main error function; does not return to caller.',
)),
('mju_error_i',
FunctionDecl(
name='mju_error_i',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='msg',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
FunctionParameterDecl(
name='i',
type=ValueType(name='int'),
),
),
doc='Deprecated: use mju_error.',
)),
('mju_error_s',
FunctionDecl(
name='mju_error_s',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='msg',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
FunctionParameterDecl(
name='text',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
),
doc='Deprecated: use mju_error.',
)),
('mju_warning',
FunctionDecl(
name='mju_warning',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='msg',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
),
doc='Main warning function; returns to caller.',
)),
('mju_warning_i',
FunctionDecl(
name='mju_warning_i',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='msg',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
FunctionParameterDecl(
name='i',
type=ValueType(name='int'),
),
),
doc='Deprecated: use mju_warning.',
)),
('mju_warning_s',
FunctionDecl(
name='mju_warning_s',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='msg',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
FunctionParameterDecl(
name='text',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
),
doc='Deprecated: use mju_warning.',
)),
('mju_clearHandlers',
FunctionDecl(
name='mju_clearHandlers',
return_type=ValueType(name='void'),
parameters=(),
doc='Clear user error and memory handlers.',
)),
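    # Note: the handlers cleared here are the global callback pointers in
    # the C API -- mju_user_error, mju_user_warning, mju_user_malloc and
    # mju_user_free -- which users may install to intercept errors, warnings
    # and memory allocation. mju_clearHandlers resets all four to the
    # built-in defaults.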
('mju_malloc',
FunctionDecl(
name='mju_malloc',
return_type=PointerType(
inner_type=ValueType(name='void'),
),
parameters=(
FunctionParameterDecl(
name='size',
type=ValueType(name='size_t'),
),
),
doc='Allocate memory; byte-align on 64; pad size to multiple of 64.',
)),
('mju_free',
FunctionDecl(
name='mju_free',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='ptr',
type=PointerType(
inner_type=ValueType(name='void'),
),
),
),
doc='Free memory, using free() by default.',
)),
('mj_warning',
FunctionDecl(
name='mj_warning',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='warning',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='info',
type=ValueType(name='int'),
),
),
doc='High-level warning function: count warnings in mjData, print only the first.', # pylint: disable=line-too-long
)),
('mju_writeLog',
FunctionDecl(
name='mju_writeLog',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='type',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
FunctionParameterDecl(
name='msg',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
),
doc='Write [datetime, type: message] to MUJOCO_LOG.TXT.',
)),
('mju_zero3',
FunctionDecl(
name='mju_zero3',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
),
doc='Set res = 0.',
)),
('mju_copy3',
FunctionDecl(
name='mju_copy3',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='data',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
),
         doc='Set res = data.',
)),
('mju_scl3',
FunctionDecl(
name='mju_scl3',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='vec',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='scl',
type=ValueType(name='mjtNum'),
),
),
doc='Set res = vec*scl.',
)),
('mju_add3',
FunctionDecl(
name='mju_add3',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='vec1',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='vec2',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
),
doc='Set res = vec1 + vec2.',
)),
('mju_sub3',
FunctionDecl(
name='mju_sub3',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='vec1',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='vec2',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
),
doc='Set res = vec1 - vec2.',
)),
('mju_addTo3',
FunctionDecl(
name='mju_addTo3',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='vec',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
),
doc='Set res = res + vec.',
)),
('mju_subFrom3',
FunctionDecl(
name='mju_subFrom3',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='vec',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
),
doc='Set res = res - vec.',
)),
('mju_addToScl3',
FunctionDecl(
name='mju_addToScl3',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='vec',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='scl',
type=ValueType(name='mjtNum'),
),
),
doc='Set res = res + vec*scl.',
)),
('mju_addScl3',
FunctionDecl(
name='mju_addScl3',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='vec1',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='vec2',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='scl',
type=ValueType(name='mjtNum'),
),
),
doc='Set res = vec1 + vec2*scl.',
)),
('mju_normalize3',
FunctionDecl(
name='mju_normalize3',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='vec',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
),
doc='Normalize vector, return length before normalization.',
)),
('mju_norm3',
FunctionDecl(
name='mju_norm3',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='vec',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
),
doc='Return vector length (without normalizing the vector).',
)),
('mju_dot3',
FunctionDecl(
name='mju_dot3',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='vec1',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='vec2',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
),
doc='Return dot-product of vec1 and vec2.',
)),
('mju_dist3',
FunctionDecl(
name='mju_dist3',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='pos1',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='pos2',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
),
doc='Return Cartesian distance between 3D vectors pos1 and pos2.',
)),
('mju_rotVecMat',
FunctionDecl(
name='mju_rotVecMat',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='vec',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='mat',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(9,),
),
),
),
doc='Multiply vector by 3D rotation matrix: res = mat * vec.',
)),
('mju_rotVecMatT',
FunctionDecl(
name='mju_rotVecMatT',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='vec',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='mat',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(9,),
),
),
),
doc="Multiply vector by transposed 3D rotation matrix: res = mat' * vec.", # pylint: disable=line-too-long
)),
('mju_cross',
FunctionDecl(
name='mju_cross',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='a',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='b',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
),
doc='Compute cross-product: res = cross(a, b).',
)),
('mju_zero4',
FunctionDecl(
name='mju_zero4',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(4,),
),
),
),
doc='Set res = 0.',
)),
('mju_unit4',
FunctionDecl(
name='mju_unit4',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(4,),
),
),
),
doc='Set res = (1,0,0,0).',
)),
('mju_copy4',
FunctionDecl(
name='mju_copy4',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(4,),
),
),
FunctionParameterDecl(
name='data',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(4,),
),
),
),
         doc='Set res = data.',
)),
('mju_normalize4',
FunctionDecl(
name='mju_normalize4',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='vec',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(4,),
),
),
),
doc='Normalize vector, return length before normalization.',
)),
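    # Note: MuJoCo quaternions are stored in (w, x, y, z) order, so
    # mju_unit4 writes the identity rotation and mju_normalize4 is the usual
    # way to keep an integrated quaternion on the unit sphere. A sketch,
    # assuming the bindings mirror the C API:
    #
    #   import numpy as np
    #   quat = np.array([1.0, 0.0, 0.1, 0.0])
    #   length = mujoco.mju_normalize4(quat)  # normalized in place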
('mju_zero',
FunctionDecl(
name='mju_zero',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc='Set res = 0.',
)),
('mju_fill',
FunctionDecl(
name='mju_fill',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='val',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc='Set res = val.',
)),
('mju_copy',
FunctionDecl(
name='mju_copy',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='vec',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc='Set res = vec.',
)),
('mju_sum',
FunctionDecl(
name='mju_sum',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='vec',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc='Return sum(vec).',
)),
('mju_L1',
FunctionDecl(
name='mju_L1',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='vec',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc='Return L1 norm: sum(abs(vec)).',
)),
('mju_scl',
FunctionDecl(
name='mju_scl',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='vec',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='scl',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc='Set res = vec*scl.',
)),
('mju_add',
FunctionDecl(
name='mju_add',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='vec1',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='vec2',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc='Set res = vec1 + vec2.',
)),
('mju_sub',
FunctionDecl(
name='mju_sub',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='vec1',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='vec2',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc='Set res = vec1 - vec2.',
)),
('mju_addTo',
FunctionDecl(
name='mju_addTo',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='vec',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc='Set res = res + vec.',
)),
('mju_subFrom',
FunctionDecl(
name='mju_subFrom',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='vec',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc='Set res = res - vec.',
)),
('mju_addToScl',
FunctionDecl(
name='mju_addToScl',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='vec',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='scl',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc='Set res = res + vec*scl.',
)),
('mju_addScl',
FunctionDecl(
name='mju_addScl',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='vec1',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='vec2',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='scl',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc='Set res = vec1 + vec2*scl.',
)),
('mju_normalize',
FunctionDecl(
name='mju_normalize',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc='Normalize vector, return length before normalization.',
)),
('mju_norm',
FunctionDecl(
name='mju_norm',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
         doc='Return vector length (without normalizing the vector).',
)),
('mju_dot',
FunctionDecl(
name='mju_dot',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='vec1',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='vec2',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc='Return dot-product of vec1 and vec2.',
)),
('mju_mulMatVec',
FunctionDecl(
name='mju_mulMatVec',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='mat',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='vec',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='nr',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='nc',
type=ValueType(name='int'),
),
),
doc='Multiply matrix and vector: res = mat * vec.',
)),
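    # Shape note: MuJoCo matrices are dense and row-major, so mju_mulMatVec
    # with an (nr x nc) `mat` reads `vec` of length nc and writes `res` of
    # length nr -- in numpy terms, res = mat.reshape(nr, nc) @ vec.
    # mju_mulMatTVec below swaps the roles: it reads length nr and writes
    # length nc.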
('mju_mulMatTVec',
FunctionDecl(
name='mju_mulMatTVec',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='mat',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='vec',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='nr',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='nc',
type=ValueType(name='int'),
),
),
doc="Multiply transposed matrix and vector: res = mat' * vec.",
)),
('mju_mulVecMatVec',
FunctionDecl(
name='mju_mulVecMatVec',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='vec1',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='mat',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='vec2',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc="Multiply square matrix with vectors on both sides: returns vec1' * mat * vec2.", # pylint: disable=line-too-long
)),
('mju_transpose',
FunctionDecl(
name='mju_transpose',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='mat',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='nr',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='nc',
type=ValueType(name='int'),
),
),
doc="Transpose matrix: res = mat'.",
)),
('mju_symmetrize',
FunctionDecl(
name='mju_symmetrize',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='mat',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc="Symmetrize square matrix res = (mat + mat')/2.",
)),
('mju_eye',
FunctionDecl(
name='mju_eye',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='mat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc='Set mat to the identity matrix.',
)),
('mju_mulMatMat',
FunctionDecl(
name='mju_mulMatMat',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='mat1',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='mat2',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='r1',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='c1',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='c2',
type=ValueType(name='int'),
),
),
doc='Multiply matrices: res = mat1 * mat2.',
)),
('mju_mulMatMatT',
FunctionDecl(
name='mju_mulMatMatT',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='mat1',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='mat2',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='r1',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='c1',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='r2',
type=ValueType(name='int'),
),
),
doc="Multiply matrices, second argument transposed: res = mat1 * mat2'.", # pylint: disable=line-too-long
)),
('mju_mulMatTMat',
FunctionDecl(
name='mju_mulMatTMat',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='mat1',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='mat2',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='r1',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='c1',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='c2',
type=ValueType(name='int'),
),
),
doc="Multiply matrices, first argument transposed: res = mat1' * mat2.", # pylint: disable=line-too-long
)),
('mju_sqrMatTD',
FunctionDecl(
name='mju_sqrMatTD',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='mat',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='diag',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='nr',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='nc',
type=ValueType(name='int'),
),
),
doc="Set res = mat' * diag * mat if diag is not NULL, and res = mat' * mat otherwise.", # pylint: disable=line-too-long
)),
('mju_transformSpatial',
FunctionDecl(
name='mju_transformSpatial',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(6,),
),
),
FunctionParameterDecl(
name='vec',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(6,),
),
),
FunctionParameterDecl(
name='flg_force',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='newpos',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='oldpos',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='rotnew2old',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(9,),
),
),
),
doc='Coordinate transform of 6D motion or force vector in rotation:translation format. rotnew2old is 3-by-3, NULL means no rotation; flg_force specifies force or motion type.', # pylint: disable=line-too-long
)),
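    # Editor's note (illustrative, not generated): MuJoCo 6D spatial vectors
    # are rotation-first, i.e. vec = (rot[3], trn[3]). A hedged sketch with
    # the Python bindings, assuming they mirror the C argument order:
    #   res = np.zeros(6)
    #   mujoco.mju_transformSpatial(res, vec, 1, newpos, oldpos, rotnew2old)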
('mju_rotVecQuat',
FunctionDecl(
name='mju_rotVecQuat',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='vec',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='quat',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(4,),
),
),
),
doc='Rotate vector by quaternion.',
)),
('mju_negQuat',
FunctionDecl(
name='mju_negQuat',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(4,),
),
),
FunctionParameterDecl(
name='quat',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(4,),
),
),
),
doc='Conjugate quaternion, corresponding to opposite rotation.',
)),
('mju_mulQuat',
FunctionDecl(
name='mju_mulQuat',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(4,),
),
),
FunctionParameterDecl(
name='quat1',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(4,),
),
),
FunctionParameterDecl(
name='quat2',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(4,),
),
),
),
doc='Multiply quaternions.',
)),
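    # Worked example (MuJoCo quaternions are (w, x, y, z)): composing two
    # 90-degree rotations about z yields a 180-degree rotation about z. With
    # c = s = sqrt(2)/2, the Hamilton product gives w = c*c - s*s = 0 and
    # z = 2*c*s = 1. Hedged bindings sketch:
    #   q90 = np.array([np.sqrt(2)/2, 0, 0, np.sqrt(2)/2])
    #   res = np.zeros(4)
    #   mujoco.mju_mulQuat(res, q90, q90)  # res == [0, 0, 0, 1]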
('mju_mulQuatAxis',
FunctionDecl(
name='mju_mulQuatAxis',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(4,),
),
),
FunctionParameterDecl(
name='quat',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(4,),
),
),
FunctionParameterDecl(
name='axis',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
),
doc='Multiply quaternion and axis.',
)),
('mju_axisAngle2Quat',
FunctionDecl(
name='mju_axisAngle2Quat',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(4,),
),
),
FunctionParameterDecl(
name='axis',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='angle',
type=ValueType(name='mjtNum'),
),
),
doc='Convert axisAngle to quaternion.',
)),
('mju_quat2Vel',
FunctionDecl(
name='mju_quat2Vel',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='quat',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(4,),
),
),
FunctionParameterDecl(
name='dt',
type=ValueType(name='mjtNum'),
),
),
doc='Convert quaternion (corresponding to orientation difference) to 3D velocity.', # pylint: disable=line-too-long
)),
('mju_subQuat',
FunctionDecl(
name='mju_subQuat',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='qa',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(4,),
),
),
FunctionParameterDecl(
name='qb',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(4,),
),
),
),
doc='Subtract quaternions, express as 3D velocity: qb*quat(res) = qa.',
)),
('mju_quat2Mat',
FunctionDecl(
name='mju_quat2Mat',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(9,),
),
),
FunctionParameterDecl(
name='quat',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(4,),
),
),
),
doc='Convert quaternion to 3D rotation matrix.',
)),
('mju_mat2Quat',
FunctionDecl(
name='mju_mat2Quat',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='quat',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(4,),
),
),
FunctionParameterDecl(
name='mat',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(9,),
),
),
),
doc='Convert 3D rotation matrix to quaternion.',
)),
('mju_derivQuat',
FunctionDecl(
name='mju_derivQuat',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(4,),
),
),
FunctionParameterDecl(
name='quat',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(4,),
),
),
FunctionParameterDecl(
name='vel',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
),
doc='Compute time-derivative of quaternion, given 3D rotational velocity.', # pylint: disable=line-too-long
)),
('mju_quatIntegrate',
FunctionDecl(
name='mju_quatIntegrate',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='quat',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(4,),
),
),
FunctionParameterDecl(
name='vel',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='scale',
type=ValueType(name='mjtNum'),
),
),
doc='Integrate quaternion given 3D angular velocity.',
)),
('mju_quatZ2Vec',
FunctionDecl(
name='mju_quatZ2Vec',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='quat',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(4,),
),
),
FunctionParameterDecl(
name='vec',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
),
doc='Construct quaternion performing rotation from z-axis to given vector.', # pylint: disable=line-too-long
)),
('mju_mulPose',
FunctionDecl(
name='mju_mulPose',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='posres',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='quatres',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(4,),
),
),
FunctionParameterDecl(
name='pos1',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='quat1',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(4,),
),
),
FunctionParameterDecl(
name='pos2',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='quat2',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(4,),
),
),
),
doc='Multiply two poses.',
)),
('mju_negPose',
FunctionDecl(
name='mju_negPose',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='posres',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='quatres',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(4,),
),
),
FunctionParameterDecl(
name='pos',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='quat',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(4,),
),
),
),
doc='Conjugate pose, corresponding to the opposite spatial transformation.', # pylint: disable=line-too-long
)),
('mju_trnVecPose',
FunctionDecl(
name='mju_trnVecPose',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='pos',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='quat',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(4,),
),
),
FunctionParameterDecl(
name='vec',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
),
doc='Transform vector by pose.',
)),
('mju_cholFactor',
FunctionDecl(
name='mju_cholFactor',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='mat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='mindiag',
type=ValueType(name='mjtNum'),
),
),
doc="Cholesky decomposition: mat = L*L'; return rank, decomposition performed in-place into mat.", # pylint: disable=line-too-long
)),
('mju_cholSolve',
FunctionDecl(
name='mju_cholSolve',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='mat',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='vec',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc="Solve (mat*mat') * res = vec, where mat is a Cholesky factor.",
)),
('mju_cholUpdate',
FunctionDecl(
name='mju_cholUpdate',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='mat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='x',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='flg_plus',
type=ValueType(name='int'),
),
),
doc="Cholesky rank-one update: L*L' +/- x*x'; return rank.",
)),
('mju_cholFactorBand',
FunctionDecl(
name='mju_cholFactorBand',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='mat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='ntotal',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='nband',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='ndense',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='diagadd',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='diagmul',
type=ValueType(name='mjtNum'),
),
),
            doc='Band-dense Cholesky decomposition. Returns minimum value in the factorized diagonal, or 0 if rank-deficient. mat has (ntotal-ndense) x nband + ndense x ntotal elements. The first (ntotal-ndense) x nband store the band part, left of diagonal, inclusive. The second ndense x ntotal store the dense part as entire rows. Add diagadd+diagmul*mat_ii to diagonal before factorization.',  # pylint: disable=line-too-long
)),
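    # Storage-size check for the band-dense layout documented above: with
    # ntotal=5, nband=2, ndense=1, mat holds (5-1)*2 + 1*5 = 13 mjtNum
    # elements: four banded rows of width 2, then one dense row of length 5.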
('mju_cholSolveBand',
FunctionDecl(
name='mju_cholSolveBand',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='mat',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='vec',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='ntotal',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='nband',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='ndense',
type=ValueType(name='int'),
),
),
doc="Solve (mat*mat')*res = vec where mat is a band-dense Cholesky factor.", # pylint: disable=line-too-long
)),
('mju_band2Dense',
FunctionDecl(
name='mju_band2Dense',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='mat',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='ntotal',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='nband',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='ndense',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='flg_sym',
type=ValueType(name='mjtByte'),
),
),
doc='Convert banded matrix to dense matrix, fill upper triangle if flg_sym>0.', # pylint: disable=line-too-long
)),
('mju_dense2Band',
FunctionDecl(
name='mju_dense2Band',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='mat',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='ntotal',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='nband',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='ndense',
type=ValueType(name='int'),
),
),
doc='Convert dense matrix to banded matrix.',
)),
('mju_bandMulMatVec',
FunctionDecl(
name='mju_bandMulMatVec',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='mat',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='vec',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='ntotal',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='nband',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='ndense',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='nvec',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='flg_sym',
type=ValueType(name='mjtByte'),
),
),
doc='Multiply band-diagonal matrix with nvec vectors, include upper triangle if flg_sym>0.', # pylint: disable=line-too-long
)),
('mju_bandDiag',
FunctionDecl(
name='mju_bandDiag',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='i',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='ntotal',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='nband',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='ndense',
type=ValueType(name='int'),
),
),
            doc='Return the address (index) of diagonal element i in band-dense matrix representation.',  # pylint: disable=line-too-long
)),
('mju_eig3',
FunctionDecl(
name='mju_eig3',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='eigval',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
FunctionParameterDecl(
name='eigvec',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(9,),
),
),
FunctionParameterDecl(
name='quat',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(4,),
),
),
FunctionParameterDecl(
name='mat',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(9,),
),
),
),
doc='Eigenvalue decomposition of symmetric 3x3 matrix.',
)),
('mju_boxQP',
FunctionDecl(
name='mju_boxQP',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='R',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='index',
type=PointerType(
inner_type=ValueType(name='int'),
),
),
FunctionParameterDecl(
name='H',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='g',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='lower',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='upper',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
),
doc="minimize 0.5*x'*H*x + x'*g s.t. lower <= x <= upper, return rank or -1 if failed inputs: n - problem dimension H - SPD matrix n*n g - bias vector n lower - lower bounds n upper - upper bounds n res - solution warmstart n return value: nfree <= n - rank of unconstrained subspace, -1 if failure outputs (required): res - solution n R - subspace Cholesky factor nfree*nfree allocated: n*(n+7) outputs (optional): index - set of free dimensions nfree allocated: n notes: the initial value of res is used to warmstart the solver R must have allocatd size n*(n+7), but only nfree*nfree values are used in output index (if given) must have allocated size n, but only nfree values are used in output only the lower triangles of H and R and are read from and written to, respectively the convenience function mju_boxQPmalloc allocates the required data structures", # pylint: disable=line-too-long
)),
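    # Allocation summary implied by the doc above, for problem dimension n:
    # res needs n elements (its initial value warmstarts the solver), R needs
    # n*(n+7) elements (only nfree*nfree used), index needs n ints (only
    # nfree used). E.g. n=3 requires R of size 3*(3+7) = 30.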
('mju_boxQPmalloc',
FunctionDecl(
name='mju_boxQPmalloc',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
),
FunctionParameterDecl(
name='R',
type=PointerType(
inner_type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
),
FunctionParameterDecl(
name='index',
type=PointerType(
inner_type=PointerType(
inner_type=ValueType(name='int'),
),
),
),
FunctionParameterDecl(
name='H',
type=PointerType(
inner_type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
),
FunctionParameterDecl(
name='g',
type=PointerType(
inner_type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='lower',
type=PointerType(
inner_type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
),
FunctionParameterDecl(
name='upper',
type=PointerType(
inner_type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
),
),
            doc='allocate heap memory for box-constrained Quadratic Program, as in mju_boxQP; index, lower, and upper are optional; free all pointers with mju_free().',  # pylint: disable=line-too-long
)),
('mju_muscleGain',
FunctionDecl(
name='mju_muscleGain',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='len',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='vel',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='lengthrange',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(2,),
),
),
FunctionParameterDecl(
name='acc0',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='prm',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(9,),
),
),
),
doc='Muscle active force, prm = (range[2], force, scale, lmin, lmax, vmax, fpmax, fvmax).', # pylint: disable=line-too-long
)),
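    # prm index map implied by the doc above (9 entries, matching the
    # extents=(9,) declaration): prm[0:2]=range, prm[2]=force, prm[3]=scale,
    # prm[4]=lmin, prm[5]=lmax, prm[6]=vmax, prm[7]=fpmax, prm[8]=fvmax.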
('mju_muscleBias',
FunctionDecl(
name='mju_muscleBias',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='len',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='lengthrange',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(2,),
),
),
FunctionParameterDecl(
name='acc0',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='prm',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(9,),
),
),
),
doc='Muscle passive force, prm = (range[2], force, scale, lmin, lmax, vmax, fpmax, fvmax).', # pylint: disable=line-too-long
)),
('mju_muscleDynamics',
FunctionDecl(
name='mju_muscleDynamics',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='ctrl',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='act',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='prm',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
),
doc='Muscle activation dynamics, prm = (tau_act, tau_deact, smoothing_width).', # pylint: disable=line-too-long
)),
('mju_encodePyramid',
FunctionDecl(
name='mju_encodePyramid',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='pyramid',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='force',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='mu',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='dim',
type=ValueType(name='int'),
),
),
doc='Convert contact force to pyramid representation.',
)),
('mju_decodePyramid',
FunctionDecl(
name='mju_decodePyramid',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='force',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='pyramid',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='mu',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='dim',
type=ValueType(name='int'),
),
),
doc='Convert pyramid representation to contact force.',
)),
('mju_springDamper',
FunctionDecl(
name='mju_springDamper',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='pos0',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='vel0',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='Kp',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='Kv',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='dt',
type=ValueType(name='mjtNum'),
),
),
doc='Integrate spring-damper analytically, return pos(dt).',
)),
('mju_min',
FunctionDecl(
name='mju_min',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='a',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='b',
type=ValueType(name='mjtNum'),
),
),
doc='Return min(a,b) with single evaluation of a and b.',
)),
('mju_max',
FunctionDecl(
name='mju_max',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='a',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='b',
type=ValueType(name='mjtNum'),
),
),
doc='Return max(a,b) with single evaluation of a and b.',
)),
('mju_clip',
FunctionDecl(
name='mju_clip',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='x',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='min',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='max',
type=ValueType(name='mjtNum'),
),
),
doc='Clip x to the range [min, max].',
)),
('mju_sign',
FunctionDecl(
name='mju_sign',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='x',
type=ValueType(name='mjtNum'),
),
),
doc='Return sign of x: +1, -1 or 0.',
)),
('mju_round',
FunctionDecl(
name='mju_round',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='x',
type=ValueType(name='mjtNum'),
),
),
doc='Round x to nearest integer.',
)),
('mju_type2Str',
FunctionDecl(
name='mju_type2Str',
return_type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
parameters=(
FunctionParameterDecl(
name='type',
type=ValueType(name='int'),
),
),
doc='Convert type id (mjtObj) to type name.',
)),
('mju_str2Type',
FunctionDecl(
name='mju_str2Type',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='str',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
),
doc='Convert type name to type id (mjtObj).',
)),
('mju_writeNumBytes',
FunctionDecl(
name='mju_writeNumBytes',
return_type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
parameters=(
FunctionParameterDecl(
name='nbytes',
type=ValueType(name='size_t'),
),
),
doc='Return human readable number of bytes using standard letter suffix.', # pylint: disable=line-too-long
)),
('mju_warningText',
FunctionDecl(
name='mju_warningText',
return_type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
parameters=(
FunctionParameterDecl(
name='warning',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='info',
type=ValueType(name='size_t'),
),
),
doc='Construct a warning message given the warning type and info.',
)),
('mju_isBad',
FunctionDecl(
name='mju_isBad',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='x',
type=ValueType(name='mjtNum'),
),
),
doc='Return 1 if nan or abs(x)>mjMAXVAL, 0 otherwise. Used by check functions.', # pylint: disable=line-too-long
)),
('mju_isZero',
FunctionDecl(
name='mju_isZero',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='vec',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc='Return 1 if all elements are 0.',
)),
('mju_standardNormal',
FunctionDecl(
name='mju_standardNormal',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='num2',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
),
doc='Standard normal random number generator (optional second number).', # pylint: disable=line-too-long
)),
('mju_f2n',
FunctionDecl(
name='mju_f2n',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='vec',
type=PointerType(
inner_type=ValueType(name='float', is_const=True),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc='Convert from float to mjtNum.',
)),
('mju_n2f',
FunctionDecl(
name='mju_n2f',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='float'),
),
),
FunctionParameterDecl(
name='vec',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc='Convert from mjtNum to float.',
)),
('mju_d2n',
FunctionDecl(
name='mju_d2n',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='vec',
type=PointerType(
inner_type=ValueType(name='double', is_const=True),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc='Convert from double to mjtNum.',
)),
('mju_n2d',
FunctionDecl(
name='mju_n2d',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='res',
type=PointerType(
inner_type=ValueType(name='double'),
),
),
FunctionParameterDecl(
name='vec',
type=PointerType(
inner_type=ValueType(name='mjtNum', is_const=True),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc='Convert from mjtNum to double.',
)),
('mju_insertionSort',
FunctionDecl(
name='mju_insertionSort',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='list',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
            doc='Insertion sort; resulting list is in increasing order.',
)),
('mju_insertionSortInt',
FunctionDecl(
name='mju_insertionSortInt',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='list',
type=PointerType(
inner_type=ValueType(name='int'),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
            doc='Integer insertion sort; resulting list is in increasing order.',
)),
('mju_Halton',
FunctionDecl(
name='mju_Halton',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='index',
type=ValueType(name='int'),
),
FunctionParameterDecl(
name='base',
type=ValueType(name='int'),
),
),
doc='Generate Halton sequence.',
)),
('mju_strncpy',
FunctionDecl(
name='mju_strncpy',
return_type=PointerType(
inner_type=ValueType(name='char'),
),
parameters=(
FunctionParameterDecl(
name='dst',
type=PointerType(
inner_type=ValueType(name='char'),
),
),
FunctionParameterDecl(
name='src',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
FunctionParameterDecl(
name='n',
type=ValueType(name='int'),
),
),
doc='Call strncpy, then set dst[n-1] = 0.',
)),
('mju_sigmoid',
FunctionDecl(
name='mju_sigmoid',
return_type=ValueType(name='mjtNum'),
parameters=(
FunctionParameterDecl(
name='x',
type=ValueType(name='mjtNum'),
),
),
doc='Sigmoid function over 0<=x<=1 using quintic polynomial.',
)),
('mjd_transitionFD',
FunctionDecl(
name='mjd_transitionFD',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='eps',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='flg_centered',
type=ValueType(name='mjtByte'),
),
FunctionParameterDecl(
name='A',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='B',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='C',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='D',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
),
            doc='Finite differenced transition matrices (control theory notation): d(x_next) = A*dx + B*du; d(sensor) = C*dx + D*du. required output matrix dimensions: A: (2*nv+na x 2*nv+na); B: (2*nv+na x nu); C: (nsensordata x 2*nv+na); D: (nsensordata x nu).',  # pylint: disable=line-too-long
)),
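    # Hedged usage sketch (assumes the Python bindings expose this function
    # with the same argument order; state dimension nx = 2*nv + na):
    #   nx, nu = 2*m.nv + m.na, m.nu
    #   A, B = np.zeros((nx, nx)), np.zeros((nx, nu))
    #   C, D = np.zeros((m.nsensordata, nx)), np.zeros((m.nsensordata, nu))
    #   mujoco.mjd_transitionFD(m, d, 1e-6, 1, A, B, C, D)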
('mjd_inverseFD',
FunctionDecl(
name='mjd_inverseFD',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='m',
type=PointerType(
inner_type=ValueType(name='mjModel', is_const=True),
),
),
FunctionParameterDecl(
name='d',
type=PointerType(
inner_type=ValueType(name='mjData'),
),
),
FunctionParameterDecl(
name='eps',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='flg_actuation',
type=ValueType(name='mjtByte'),
),
FunctionParameterDecl(
name='DfDq',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='DfDv',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='DfDa',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='DsDq',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='DsDv',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='DsDa',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
FunctionParameterDecl(
name='DmDq',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
),
),
            doc='Finite differenced Jacobians of (force, sensors) = mj_inverse(state, acceleration). All outputs are optional. Output dimensions (transposed w.r.t. control theory convention): DfDq: (nv x nv); DfDv: (nv x nv); DfDa: (nv x nv); DsDq: (nv x nsensordata); DsDv: (nv x nsensordata); DsDa: (nv x nsensordata); DmDq: (nv x nM). single-letter shortcuts: inputs: q=qpos, v=qvel, a=qacc; outputs: f=qfrc_inverse, s=sensordata, m=qM. notes: optionally computes mass matrix Jacobian DmDq; flg_actuation specifies whether to subtract qfrc_actuator from qfrc_inverse.',  # pylint: disable=line-too-long
)),
('mjd_subQuat',
FunctionDecl(
name='mjd_subQuat',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='qa',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(4,),
),
),
FunctionParameterDecl(
name='qb',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(4,),
),
),
FunctionParameterDecl(
name='Da',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(9,),
),
),
FunctionParameterDecl(
name='Db',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(9,),
),
),
),
doc='Derivatives of mju_subQuat.',
)),
('mjd_quatIntegrate',
FunctionDecl(
name='mjd_quatIntegrate',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='vel',
type=ArrayType(
inner_type=ValueType(name='mjtNum', is_const=True),
extents=(3,),
),
),
FunctionParameterDecl(
name='scale',
type=ValueType(name='mjtNum'),
),
FunctionParameterDecl(
name='Dquat',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(9,),
),
),
FunctionParameterDecl(
name='Dvel',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(9,),
),
),
FunctionParameterDecl(
name='Dscale',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
),
),
doc='Derivatives of mju_quatIntegrate.',
)),
('mjp_defaultPlugin',
FunctionDecl(
name='mjp_defaultPlugin',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='plugin',
type=PointerType(
inner_type=ValueType(name='mjpPlugin'),
),
),
),
doc='Set default plugin definition.',
)),
('mjp_registerPlugin',
FunctionDecl(
name='mjp_registerPlugin',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='plugin',
type=PointerType(
inner_type=ValueType(name='mjpPlugin', is_const=True),
),
),
),
            doc='Globally register a plugin. This function is thread-safe. If an identical mjpPlugin is already registered, this function does nothing. If a non-identical mjpPlugin with the same name is already registered, an mju_error is raised. Two mjpPlugins are considered identical if all member function pointers and numbers are equal, and the name and attribute strings are all identical; however, the char pointers to the strings need not be the same.',  # pylint: disable=line-too-long
)),
('mjp_pluginCount',
FunctionDecl(
name='mjp_pluginCount',
return_type=ValueType(name='int'),
parameters=(),
doc='Return the number of globally registered plugins.',
)),
('mjp_getPlugin',
FunctionDecl(
name='mjp_getPlugin',
return_type=PointerType(
inner_type=ValueType(name='mjpPlugin', is_const=True),
),
parameters=(
FunctionParameterDecl(
name='name',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
FunctionParameterDecl(
name='slot',
type=PointerType(
inner_type=ValueType(name='int'),
),
),
),
doc='Look up a plugin by name. If slot is not NULL, also write its registered slot number into it.', # pylint: disable=line-too-long
)),
('mjp_getPluginAtSlot',
FunctionDecl(
name='mjp_getPluginAtSlot',
return_type=PointerType(
inner_type=ValueType(name='mjpPlugin', is_const=True),
),
parameters=(
FunctionParameterDecl(
name='slot',
type=ValueType(name='int'),
),
),
doc='Look up a plugin by the registered slot number that was returned by mjp_registerPlugin.', # pylint: disable=line-too-long
)),
('mjp_defaultResourceProvider',
FunctionDecl(
name='mjp_defaultResourceProvider',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='provider',
type=PointerType(
inner_type=ValueType(name='mjpResourceProvider'),
),
),
),
doc='Set default resource provider definition.',
)),
('mjp_registerResourceProvider',
FunctionDecl(
name='mjp_registerResourceProvider',
return_type=ValueType(name='int'),
parameters=(
FunctionParameterDecl(
name='provider',
type=PointerType(
inner_type=ValueType(name='mjpResourceProvider', is_const=True), # pylint: disable=line-too-long
),
),
),
            doc='Globally register a resource provider in a thread-safe manner. The provider must have a prefix that is not a sub-prefix or super-prefix of any currently registered provider. This function returns a slot number > 0 on success.',  # pylint: disable=line-too-long
)),
('mjp_resourceProviderCount',
FunctionDecl(
name='mjp_resourceProviderCount',
return_type=ValueType(name='int'),
parameters=(),
doc='Return the number of globally registered resource providers.',
)),
('mjp_getResourceProvider',
FunctionDecl(
name='mjp_getResourceProvider',
return_type=PointerType(
inner_type=ValueType(name='mjpResourceProvider', is_const=True),
),
parameters=(
FunctionParameterDecl(
name='resource_name',
type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
),
doc='Return the resource provider with the prefix that matches against the resource name. If no match, return NULL.', # pylint: disable=line-too-long
)),
('mjp_getResourceProviderAtSlot',
FunctionDecl(
name='mjp_getResourceProviderAtSlot',
return_type=PointerType(
inner_type=ValueType(name='mjpResourceProvider', is_const=True),
),
parameters=(
FunctionParameterDecl(
name='slot',
type=ValueType(name='int'),
),
),
doc='Look up a resource provider by slot number returned by mjp_registerResourceProvider. If invalid slot number, return NULL.', # pylint: disable=line-too-long
)),
('mju_threadPoolCreate',
FunctionDecl(
name='mju_threadPoolCreate',
return_type=PointerType(
inner_type=ValueType(name='mjThreadPool'),
),
parameters=(
FunctionParameterDecl(
name='number_of_threads',
type=ValueType(name='size_t'),
),
),
doc='Create a thread pool with the specified number of threads running.', # pylint: disable=line-too-long
)),
('mju_threadPoolEnqueue',
FunctionDecl(
name='mju_threadPoolEnqueue',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='thread_pool',
type=PointerType(
inner_type=ValueType(name='mjThreadPool'),
),
),
FunctionParameterDecl(
name='task',
type=PointerType(
inner_type=ValueType(name='mjTask'),
),
),
),
doc='Enqueue a task in a thread pool.',
)),
('mju_threadPoolDestroy',
FunctionDecl(
name='mju_threadPoolDestroy',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='thread_pool',
type=PointerType(
inner_type=ValueType(name='mjThreadPool'),
),
),
),
doc='Destroy a thread pool.',
)),
('mju_defaultTask',
FunctionDecl(
name='mju_defaultTask',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='task',
type=PointerType(
inner_type=ValueType(name='mjTask'),
),
),
),
doc='Initialize an mjTask.',
)),
('mju_taskJoin',
FunctionDecl(
name='mju_taskJoin',
return_type=ValueType(name='void'),
parameters=(
FunctionParameterDecl(
name='task',
type=PointerType(
inner_type=ValueType(name='mjTask'),
),
),
),
doc='Wait for a task to complete.',
)),
])
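# A minimal consumer sketch appended for illustration (not part of the
# generated table). It assumes the mapping above is bound to a module-level
# name FUNCTIONS, mirroring ENUMS in enums.py, and relies on the .decl()
# method exercised in ast_nodes_test.py.
def _c_prototype(decl) -> str:
  """Reconstructs a C prototype string from a FunctionDecl."""
  params = ', '.join(p.type.decl(p.name) for p in decl.parameters)
  return f"{decl.return_type.decl(decl.name)}({params or 'void'});"

if __name__ == '__main__':
  # Expected shape: mjtNum mju_dot(const mjtNum * vec1, ...);
  print(_c_prototype(FUNCTIONS['mju_dot']))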
| mujoco-main | introspect/functions.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides information about MuJoCo API enums.
DO NOT EDIT. THIS FILE IS AUTOMATICALLY GENERATED.
"""
from typing import Mapping
from .ast_nodes import EnumDecl
ENUMS: Mapping[str, EnumDecl] = dict([
('mjtDisableBit',
EnumDecl(
name='mjtDisableBit',
declname='enum mjtDisableBit_',
values=dict([
('mjDSBL_CONSTRAINT', 1),
('mjDSBL_EQUALITY', 2),
('mjDSBL_FRICTIONLOSS', 4),
('mjDSBL_LIMIT', 8),
('mjDSBL_CONTACT', 16),
('mjDSBL_PASSIVE', 32),
('mjDSBL_GRAVITY', 64),
('mjDSBL_CLAMPCTRL', 128),
('mjDSBL_WARMSTART', 256),
('mjDSBL_FILTERPARENT', 512),
('mjDSBL_ACTUATION', 1024),
('mjDSBL_REFSAFE', 2048),
('mjDSBL_SENSOR', 4096),
('mjDSBL_MIDPHASE', 8192),
('mjDSBL_EULERDAMP', 16384),
('mjNDISABLE', 15),
]),
)),
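    # Usage note (illustrative): each mjtDisableBit value is a distinct power
    # of two, so flags combine with bitwise OR. A hedged bindings sketch:
    #   model.opt.disableflags |= 16  # mjDSBL_CONTACT
    # mjNDISABLE = 15 is the flag count, not a bitmask.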
('mjtEnableBit',
EnumDecl(
name='mjtEnableBit',
declname='enum mjtEnableBit_',
values=dict([
('mjENBL_OVERRIDE', 1),
('mjENBL_ENERGY', 2),
('mjENBL_FWDINV', 4),
('mjENBL_INVDISCRETE', 8),
('mjENBL_SENSORNOISE', 16),
('mjENBL_MULTICCD', 32),
('mjENBL_ISLAND', 64),
('mjNENABLE', 7),
]),
)),
('mjtJoint',
EnumDecl(
name='mjtJoint',
declname='enum mjtJoint_',
values=dict([
('mjJNT_FREE', 0),
('mjJNT_BALL', 1),
('mjJNT_SLIDE', 2),
('mjJNT_HINGE', 3),
]),
)),
('mjtGeom',
EnumDecl(
name='mjtGeom',
declname='enum mjtGeom_',
values=dict([
('mjGEOM_PLANE', 0),
('mjGEOM_HFIELD', 1),
('mjGEOM_SPHERE', 2),
('mjGEOM_CAPSULE', 3),
('mjGEOM_ELLIPSOID', 4),
('mjGEOM_CYLINDER', 5),
('mjGEOM_BOX', 6),
('mjGEOM_MESH', 7),
('mjGEOM_SDF', 8),
('mjNGEOMTYPES', 9),
('mjGEOM_ARROW', 100),
('mjGEOM_ARROW1', 101),
('mjGEOM_ARROW2', 102),
('mjGEOM_LINE', 103),
('mjGEOM_SKIN', 104),
('mjGEOM_LABEL', 105),
('mjGEOM_NONE', 1001),
]),
)),
('mjtCamLight',
EnumDecl(
name='mjtCamLight',
declname='enum mjtCamLight_',
values=dict([
('mjCAMLIGHT_FIXED', 0),
('mjCAMLIGHT_TRACK', 1),
('mjCAMLIGHT_TRACKCOM', 2),
('mjCAMLIGHT_TARGETBODY', 3),
('mjCAMLIGHT_TARGETBODYCOM', 4),
]),
)),
('mjtTexture',
EnumDecl(
name='mjtTexture',
declname='enum mjtTexture_',
values=dict([
('mjTEXTURE_2D', 0),
('mjTEXTURE_CUBE', 1),
('mjTEXTURE_SKYBOX', 2),
]),
)),
('mjtIntegrator',
EnumDecl(
name='mjtIntegrator',
declname='enum mjtIntegrator_',
values=dict([
('mjINT_EULER', 0),
('mjINT_RK4', 1),
('mjINT_IMPLICIT', 2),
('mjINT_IMPLICITFAST', 3),
]),
)),
('mjtCollision',
EnumDecl(
name='mjtCollision',
declname='enum mjtCollision_',
values=dict([
('mjCOL_ALL', 0),
('mjCOL_PAIR', 1),
('mjCOL_DYNAMIC', 2),
]),
)),
('mjtCone',
EnumDecl(
name='mjtCone',
declname='enum mjtCone_',
values=dict([
('mjCONE_PYRAMIDAL', 0),
('mjCONE_ELLIPTIC', 1),
]),
)),
('mjtJacobian',
EnumDecl(
name='mjtJacobian',
declname='enum mjtJacobian_',
values=dict([
('mjJAC_DENSE', 0),
('mjJAC_SPARSE', 1),
('mjJAC_AUTO', 2),
]),
)),
('mjtSolver',
EnumDecl(
name='mjtSolver',
declname='enum mjtSolver_',
values=dict([
('mjSOL_PGS', 0),
('mjSOL_CG', 1),
('mjSOL_NEWTON', 2),
]),
)),
('mjtEq',
EnumDecl(
name='mjtEq',
declname='enum mjtEq_',
values=dict([
('mjEQ_CONNECT', 0),
('mjEQ_WELD', 1),
('mjEQ_JOINT', 2),
('mjEQ_TENDON', 3),
('mjEQ_DISTANCE', 4),
]),
)),
('mjtWrap',
EnumDecl(
name='mjtWrap',
declname='enum mjtWrap_',
values=dict([
('mjWRAP_NONE', 0),
('mjWRAP_JOINT', 1),
('mjWRAP_PULLEY', 2),
('mjWRAP_SITE', 3),
('mjWRAP_SPHERE', 4),
('mjWRAP_CYLINDER', 5),
]),
)),
('mjtTrn',
EnumDecl(
name='mjtTrn',
declname='enum mjtTrn_',
values=dict([
('mjTRN_JOINT', 0),
('mjTRN_JOINTINPARENT', 1),
('mjTRN_SLIDERCRANK', 2),
('mjTRN_TENDON', 3),
('mjTRN_SITE', 4),
('mjTRN_BODY', 5),
('mjTRN_UNDEFINED', 1000),
]),
)),
('mjtDyn',
EnumDecl(
name='mjtDyn',
declname='enum mjtDyn_',
values=dict([
('mjDYN_NONE', 0),
('mjDYN_INTEGRATOR', 1),
('mjDYN_FILTER', 2),
('mjDYN_FILTEREXACT', 3),
('mjDYN_MUSCLE', 4),
('mjDYN_USER', 5),
]),
)),
('mjtGain',
EnumDecl(
name='mjtGain',
declname='enum mjtGain_',
values=dict([
('mjGAIN_FIXED', 0),
('mjGAIN_AFFINE', 1),
('mjGAIN_MUSCLE', 2),
('mjGAIN_USER', 3),
]),
)),
('mjtBias',
EnumDecl(
name='mjtBias',
declname='enum mjtBias_',
values=dict([
('mjBIAS_NONE', 0),
('mjBIAS_AFFINE', 1),
('mjBIAS_MUSCLE', 2),
('mjBIAS_USER', 3),
]),
)),
('mjtObj',
EnumDecl(
name='mjtObj',
declname='enum mjtObj_',
values=dict([
('mjOBJ_UNKNOWN', 0),
('mjOBJ_BODY', 1),
('mjOBJ_XBODY', 2),
('mjOBJ_JOINT', 3),
('mjOBJ_DOF', 4),
('mjOBJ_GEOM', 5),
('mjOBJ_SITE', 6),
('mjOBJ_CAMERA', 7),
('mjOBJ_LIGHT', 8),
('mjOBJ_MESH', 9),
('mjOBJ_SKIN', 10),
('mjOBJ_HFIELD', 11),
('mjOBJ_TEXTURE', 12),
('mjOBJ_MATERIAL', 13),
('mjOBJ_PAIR', 14),
('mjOBJ_EXCLUDE', 15),
('mjOBJ_EQUALITY', 16),
('mjOBJ_TENDON', 17),
('mjOBJ_ACTUATOR', 18),
('mjOBJ_SENSOR', 19),
('mjOBJ_NUMERIC', 20),
('mjOBJ_TEXT', 21),
('mjOBJ_TUPLE', 22),
('mjOBJ_KEY', 23),
('mjOBJ_PLUGIN', 24),
]),
)),
('mjtConstraint',
EnumDecl(
name='mjtConstraint',
declname='enum mjtConstraint_',
values=dict([
('mjCNSTR_EQUALITY', 0),
('mjCNSTR_FRICTION_DOF', 1),
('mjCNSTR_FRICTION_TENDON', 2),
('mjCNSTR_LIMIT_JOINT', 3),
('mjCNSTR_LIMIT_TENDON', 4),
('mjCNSTR_CONTACT_FRICTIONLESS', 5),
('mjCNSTR_CONTACT_PYRAMIDAL', 6),
('mjCNSTR_CONTACT_ELLIPTIC', 7),
]),
)),
('mjtConstraintState',
EnumDecl(
name='mjtConstraintState',
declname='enum mjtConstraintState_',
values=dict([
('mjCNSTRSTATE_SATISFIED', 0),
('mjCNSTRSTATE_QUADRATIC', 1),
('mjCNSTRSTATE_LINEARNEG', 2),
('mjCNSTRSTATE_LINEARPOS', 3),
('mjCNSTRSTATE_CONE', 4),
]),
)),
('mjtSensor',
EnumDecl(
name='mjtSensor',
declname='enum mjtSensor_',
values=dict([
('mjSENS_TOUCH', 0),
('mjSENS_ACCELEROMETER', 1),
('mjSENS_VELOCIMETER', 2),
('mjSENS_GYRO', 3),
('mjSENS_FORCE', 4),
('mjSENS_TORQUE', 5),
('mjSENS_MAGNETOMETER', 6),
('mjSENS_RANGEFINDER', 7),
('mjSENS_CAMPROJECTION', 8),
('mjSENS_JOINTPOS', 9),
('mjSENS_JOINTVEL', 10),
('mjSENS_TENDONPOS', 11),
('mjSENS_TENDONVEL', 12),
('mjSENS_ACTUATORPOS', 13),
('mjSENS_ACTUATORVEL', 14),
('mjSENS_ACTUATORFRC', 15),
('mjSENS_JOINTACTFRC', 16),
('mjSENS_BALLQUAT', 17),
('mjSENS_BALLANGVEL', 18),
('mjSENS_JOINTLIMITPOS', 19),
('mjSENS_JOINTLIMITVEL', 20),
('mjSENS_JOINTLIMITFRC', 21),
('mjSENS_TENDONLIMITPOS', 22),
('mjSENS_TENDONLIMITVEL', 23),
('mjSENS_TENDONLIMITFRC', 24),
('mjSENS_FRAMEPOS', 25),
('mjSENS_FRAMEQUAT', 26),
('mjSENS_FRAMEXAXIS', 27),
('mjSENS_FRAMEYAXIS', 28),
('mjSENS_FRAMEZAXIS', 29),
('mjSENS_FRAMELINVEL', 30),
('mjSENS_FRAMEANGVEL', 31),
('mjSENS_FRAMELINACC', 32),
('mjSENS_FRAMEANGACC', 33),
('mjSENS_SUBTREECOM', 34),
('mjSENS_SUBTREELINVEL', 35),
('mjSENS_SUBTREEANGMOM', 36),
('mjSENS_CLOCK', 37),
('mjSENS_PLUGIN', 38),
('mjSENS_USER', 39),
]),
)),
('mjtStage',
EnumDecl(
name='mjtStage',
declname='enum mjtStage_',
values=dict([
('mjSTAGE_NONE', 0),
('mjSTAGE_POS', 1),
('mjSTAGE_VEL', 2),
('mjSTAGE_ACC', 3),
]),
)),
('mjtDataType',
EnumDecl(
name='mjtDataType',
declname='enum mjtDataType_',
values=dict([
('mjDATATYPE_REAL', 0),
('mjDATATYPE_POSITIVE', 1),
('mjDATATYPE_AXIS', 2),
('mjDATATYPE_QUATERNION', 3),
]),
)),
('mjtLRMode',
EnumDecl(
name='mjtLRMode',
declname='enum mjtLRMode_',
values=dict([
('mjLRMODE_NONE', 0),
('mjLRMODE_MUSCLE', 1),
('mjLRMODE_MUSCLEUSER', 2),
('mjLRMODE_ALL', 3),
]),
)),
('mjtState',
EnumDecl(
name='mjtState',
declname='enum mjtState_',
values=dict([
('mjSTATE_TIME', 1),
('mjSTATE_QPOS', 2),
('mjSTATE_QVEL', 4),
('mjSTATE_ACT', 8),
('mjSTATE_WARMSTART', 16),
('mjSTATE_CTRL', 32),
('mjSTATE_QFRC_APPLIED', 64),
('mjSTATE_XFRC_APPLIED', 128),
('mjSTATE_MOCAP_POS', 256),
('mjSTATE_MOCAP_QUAT', 512),
('mjSTATE_USERDATA', 1024),
('mjSTATE_PLUGIN', 2048),
('mjNSTATE', 12),
('mjSTATE_PHYSICS', 14),
('mjSTATE_FULLPHYSICS', 2063),
('mjSTATE_USER', 2016),
('mjSTATE_INTEGRATION', 4095),
]),
)),
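    # Consistency note: the composite masks above are unions of the single
    # bits, e.g. mjSTATE_PHYSICS = QPOS|QVEL|ACT = 2+4+8 = 14,
    # mjSTATE_FULLPHYSICS = TIME|PHYSICS|PLUGIN = 1+14+2048 = 2063, and
    # mjSTATE_INTEGRATION = 4095 sets all 12 bits; mjNSTATE = 12 is the bit
    # count, not a mask.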
('mjtWarning',
EnumDecl(
name='mjtWarning',
declname='enum mjtWarning_',
values=dict([
('mjWARN_INERTIA', 0),
('mjWARN_CONTACTFULL', 1),
('mjWARN_CNSTRFULL', 2),
('mjWARN_VGEOMFULL', 3),
('mjWARN_BADQPOS', 4),
('mjWARN_BADQVEL', 5),
('mjWARN_BADQACC', 6),
('mjWARN_BADCTRL', 7),
('mjNWARNING', 8),
]),
)),
('mjtTimer',
EnumDecl(
name='mjtTimer',
declname='enum mjtTimer_',
values=dict([
('mjTIMER_STEP', 0),
('mjTIMER_FORWARD', 1),
('mjTIMER_INVERSE', 2),
('mjTIMER_POSITION', 3),
('mjTIMER_VELOCITY', 4),
('mjTIMER_ACTUATION', 5),
('mjTIMER_ACCELERATION', 6),
('mjTIMER_CONSTRAINT', 7),
('mjTIMER_POS_KINEMATICS', 8),
('mjTIMER_POS_INERTIA', 9),
('mjTIMER_POS_COLLISION', 10),
('mjTIMER_POS_MAKE', 11),
('mjTIMER_POS_PROJECT', 12),
('mjNTIMER', 13),
]),
)),
('mjtCatBit',
EnumDecl(
name='mjtCatBit',
declname='enum mjtCatBit_',
values=dict([
('mjCAT_STATIC', 1),
('mjCAT_DYNAMIC', 2),
('mjCAT_DECOR', 4),
('mjCAT_ALL', 7),
]),
)),
('mjtMouse',
EnumDecl(
name='mjtMouse',
declname='enum mjtMouse_',
values=dict([
('mjMOUSE_NONE', 0),
('mjMOUSE_ROTATE_V', 1),
('mjMOUSE_ROTATE_H', 2),
('mjMOUSE_MOVE_V', 3),
('mjMOUSE_MOVE_H', 4),
('mjMOUSE_ZOOM', 5),
('mjMOUSE_SELECT', 6),
]),
)),
('mjtPertBit',
EnumDecl(
name='mjtPertBit',
declname='enum mjtPertBit_',
values=dict([
('mjPERT_TRANSLATE', 1),
('mjPERT_ROTATE', 2),
]),
)),
('mjtCamera',
EnumDecl(
name='mjtCamera',
declname='enum mjtCamera_',
values=dict([
('mjCAMERA_FREE', 0),
('mjCAMERA_TRACKING', 1),
('mjCAMERA_FIXED', 2),
('mjCAMERA_USER', 3),
]),
)),
('mjtLabel',
EnumDecl(
name='mjtLabel',
declname='enum mjtLabel_',
values=dict([
('mjLABEL_NONE', 0),
('mjLABEL_BODY', 1),
('mjLABEL_JOINT', 2),
('mjLABEL_GEOM', 3),
('mjLABEL_SITE', 4),
('mjLABEL_CAMERA', 5),
('mjLABEL_LIGHT', 6),
('mjLABEL_TENDON', 7),
('mjLABEL_ACTUATOR', 8),
('mjLABEL_CONSTRAINT', 9),
('mjLABEL_SKIN', 10),
('mjLABEL_SELECTION', 11),
('mjLABEL_SELPNT', 12),
('mjLABEL_CONTACTPOINT', 13),
('mjLABEL_CONTACTFORCE', 14),
('mjLABEL_ISLAND', 15),
('mjNLABEL', 16),
]),
)),
('mjtFrame',
EnumDecl(
name='mjtFrame',
declname='enum mjtFrame_',
values=dict([
('mjFRAME_NONE', 0),
('mjFRAME_BODY', 1),
('mjFRAME_GEOM', 2),
('mjFRAME_SITE', 3),
('mjFRAME_CAMERA', 4),
('mjFRAME_LIGHT', 5),
('mjFRAME_CONTACT', 6),
('mjFRAME_WORLD', 7),
('mjNFRAME', 8),
]),
)),
('mjtVisFlag',
EnumDecl(
name='mjtVisFlag',
declname='enum mjtVisFlag_',
values=dict([
('mjVIS_CONVEXHULL', 0),
('mjVIS_TEXTURE', 1),
('mjVIS_JOINT', 2),
('mjVIS_CAMERA', 3),
('mjVIS_ACTUATOR', 4),
('mjVIS_ACTIVATION', 5),
('mjVIS_LIGHT', 6),
('mjVIS_TENDON', 7),
('mjVIS_RANGEFINDER', 8),
('mjVIS_CONSTRAINT', 9),
('mjVIS_INERTIA', 10),
('mjVIS_SCLINERTIA', 11),
('mjVIS_PERTFORCE', 12),
('mjVIS_PERTOBJ', 13),
('mjVIS_CONTACTPOINT', 14),
('mjVIS_ISLAND', 15),
('mjVIS_CONTACTFORCE', 16),
('mjVIS_CONTACTSPLIT', 17),
('mjVIS_TRANSPARENT', 18),
('mjVIS_AUTOCONNECT', 19),
('mjVIS_COM', 20),
('mjVIS_SELECT', 21),
('mjVIS_STATIC', 22),
('mjVIS_SKIN', 23),
('mjVIS_MIDPHASE', 24),
('mjVIS_MESHBVH', 25),
('mjVIS_SDFITER', 26),
('mjNVISFLAG', 27),
]),
)),
('mjtRndFlag',
EnumDecl(
name='mjtRndFlag',
declname='enum mjtRndFlag_',
values=dict([
('mjRND_SHADOW', 0),
('mjRND_WIREFRAME', 1),
('mjRND_REFLECTION', 2),
('mjRND_ADDITIVE', 3),
('mjRND_SKYBOX', 4),
('mjRND_FOG', 5),
('mjRND_HAZE', 6),
('mjRND_SEGMENT', 7),
('mjRND_IDCOLOR', 8),
('mjRND_CULL_FACE', 9),
('mjNRNDFLAG', 10),
]),
)),
('mjtStereo',
EnumDecl(
name='mjtStereo',
declname='enum mjtStereo_',
values=dict([
('mjSTEREO_NONE', 0),
('mjSTEREO_QUADBUFFERED', 1),
('mjSTEREO_SIDEBYSIDE', 2),
]),
)),
('mjtPluginCapabilityBit',
EnumDecl(
name='mjtPluginCapabilityBit',
declname='enum mjtPluginCapabilityBit_',
values=dict([
('mjPLUGIN_ACTUATOR', 1),
('mjPLUGIN_SENSOR', 2),
('mjPLUGIN_PASSIVE', 4),
('mjPLUGIN_SDF', 8),
]),
)),
('mjtGridPos',
EnumDecl(
name='mjtGridPos',
declname='enum mjtGridPos_',
values=dict([
('mjGRID_TOPLEFT', 0),
('mjGRID_TOPRIGHT', 1),
('mjGRID_BOTTOMLEFT', 2),
('mjGRID_BOTTOMRIGHT', 3),
]),
)),
('mjtFramebuffer',
EnumDecl(
name='mjtFramebuffer',
declname='enum mjtFramebuffer_',
values=dict([
('mjFB_WINDOW', 0),
('mjFB_OFFSCREEN', 1),
]),
)),
('mjtFontScale',
EnumDecl(
name='mjtFontScale',
declname='enum mjtFontScale_',
values=dict([
('mjFONTSCALE_50', 50),
('mjFONTSCALE_100', 100),
('mjFONTSCALE_150', 150),
('mjFONTSCALE_200', 200),
('mjFONTSCALE_250', 250),
('mjFONTSCALE_300', 300),
]),
)),
('mjtFont',
EnumDecl(
name='mjtFont',
declname='enum mjtFont_',
values=dict([
('mjFONT_NORMAL', 0),
('mjFONT_SHADOW', 1),
('mjFONT_BIG', 2),
]),
)),
('mjtTaskStatus',
EnumDecl(
name='mjtTaskStatus',
declname='enum mjtTaskStatus_',
values=dict([
('mjTASK_NEW', 0),
('mjTASK_QUEUED', 1),
('mjTASK_COMPLETED', 2),
]),
)),
('mjtButton',
EnumDecl(
name='mjtButton',
declname='enum mjtButton_',
values=dict([
('mjBUTTON_NONE', 0),
('mjBUTTON_LEFT', 1),
('mjBUTTON_RIGHT', 2),
('mjBUTTON_MIDDLE', 3),
]),
)),
('mjtEvent',
EnumDecl(
name='mjtEvent',
declname='enum mjtEvent_',
values=dict([
('mjEVENT_NONE', 0),
('mjEVENT_MOVE', 1),
('mjEVENT_PRESS', 2),
('mjEVENT_RELEASE', 3),
('mjEVENT_SCROLL', 4),
('mjEVENT_KEY', 5),
('mjEVENT_RESIZE', 6),
('mjEVENT_REDRAW', 7),
('mjEVENT_FILESDROP', 8),
]),
)),
('mjtItem',
EnumDecl(
name='mjtItem',
declname='enum mjtItem_',
values=dict([
('mjITEM_END', -2),
('mjITEM_SECTION', -1),
('mjITEM_SEPARATOR', 0),
('mjITEM_STATIC', 1),
('mjITEM_BUTTON', 2),
('mjITEM_CHECKINT', 3),
('mjITEM_CHECKBYTE', 4),
('mjITEM_RADIO', 5),
('mjITEM_RADIOLINE', 6),
('mjITEM_SELECT', 7),
('mjITEM_SLIDERINT', 8),
('mjITEM_SLIDERNUM', 9),
('mjITEM_EDITINT', 10),
('mjITEM_EDITNUM', 11),
('mjITEM_EDITFLOAT', 12),
('mjITEM_EDITTXT', 13),
('mjNITEM', 14),
]),
)),
])
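# A small consumer sketch appended for illustration (not part of the
# generated table): decode a disableflags bitmask into flag names using the
# metadata above.
def _decode_disable_flags(flags: int) -> list:
  values = ENUMS['mjtDisableBit'].values
  return [k for k, v in values.items() if k != 'mjNDISABLE' and flags & v]

if __name__ == '__main__':
  # 20 == 4 | 16, i.e. frictionloss and contact are disabled.
  print(_decode_disable_flags(20))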
| mujoco-main | introspect/enums.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for enums.py."""
from absl.testing import absltest
from . import enums
class EnumsTest(absltest.TestCase):
# "simple" enum that just starts at zero and increment by one.
def test_mjtJoint(self): # pylint: disable=invalid-name
enum_decl = enums.ENUMS['mjtJoint']
self.assertEqual(enum_decl.name, 'mjtJoint')
self.assertEqual(enum_decl.declname, 'enum mjtJoint_')
self.assertEqual(
tuple(enum_decl.values.items()), (('mjJNT_FREE', 0),
('mjJNT_BALL', 1),
('mjJNT_SLIDE', 2),
('mjJNT_HINGE', 3)))
# all values explicitly specified
def test_mjtEnableBit(self): # pylint: disable=invalid-name
enum_decl = enums.ENUMS['mjtEnableBit']
self.assertEqual(enum_decl.name, 'mjtEnableBit')
self.assertEqual(enum_decl.declname, 'enum mjtEnableBit_')
self.assertEqual(
tuple(enum_decl.values.items()), (('mjENBL_OVERRIDE', 1<<0),
('mjENBL_ENERGY', 1<<1),
('mjENBL_FWDINV', 1<<2),
('mjENBL_INVDISCRETE', 1<<3),
('mjENBL_SENSORNOISE', 1<<4),
('mjENBL_MULTICCD', 1<<5),
('mjENBL_ISLAND', 1<<6),
('mjNENABLE', 7)))
# values mostly increment by one with occasional overrides
def test_mjtGeom(self): # pylint: disable=invalid-name
enum_decl = enums.ENUMS['mjtGeom']
self.assertEqual(enum_decl.name, 'mjtGeom')
self.assertEqual(enum_decl.declname, 'enum mjtGeom_')
self.assertEqual(enum_decl.values['mjGEOM_PLANE'], 0)
self.assertEqual(enum_decl.values['mjGEOM_HFIELD'], 1)
self.assertEqual(enum_decl.values['mjGEOM_SPHERE'], 2)
# Skip a few...
self.assertEqual(enum_decl.values['mjGEOM_ARROW'], 100)
self.assertEqual(enum_decl.values['mjGEOM_ARROW1'], 101)
self.assertEqual(enum_decl.values['mjGEOM_ARROW2'], 102)
# Skip a few...
self.assertEqual(enum_decl.values['mjGEOM_NONE'], 1001)
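  # enum with explicitly-specified negative values (a sketch based on the
  # mjtItem declaration in enums.py; not part of the original suite)
  def test_mjtItem(self):  # pylint: disable=invalid-name
    enum_decl = enums.ENUMS['mjtItem']
    self.assertEqual(enum_decl.name, 'mjtItem')
    self.assertEqual(enum_decl.declname, 'enum mjtItem_')
    self.assertEqual(enum_decl.values['mjITEM_END'], -2)
    self.assertEqual(enum_decl.values['mjITEM_SECTION'], -1)
    self.assertEqual(enum_decl.values['mjNITEM'], 14)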
if __name__ == '__main__':
absltest.main()
| mujoco-main | introspect/enums_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for type_parsing.py."""
from absl.testing import absltest
from . import ast_nodes
from . import type_parsing
class TypeParsingTest(absltest.TestCase):
def test_parse_complex_type(self):
parsed_type = type_parsing.parse_type(
        'int unsigned volatile long const long'
        '(**const(*const restrict*[9])[7])[3][4]')
expected_type = ast_nodes.ArrayType(
extents=[9],
inner_type=ast_nodes.PointerType(
ast_nodes.PointerType(
is_const=True,
is_restrict=True,
inner_type=ast_nodes.ArrayType(
extents=[7],
inner_type=ast_nodes.PointerType(
is_const=True,
inner_type=ast_nodes.PointerType(
ast_nodes.ArrayType(
extents=(3, 4),
inner_type=ast_nodes.ValueType(
'int unsigned long long',
is_const=True, is_volatile=True)
)
)
)
)
)
)
)
self.assertEqual(parsed_type, expected_type)
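  # simple declarations, as a sketch: assumes parse_type returns the plain
  # ast_nodes values for undecorated types (not part of the original suite)
  def test_parse_simple_types(self):
    self.assertEqual(type_parsing.parse_type('int'),
                     ast_nodes.ValueType('int'))
    self.assertEqual(type_parsing.parse_type('mjtNum*'),
                     ast_nodes.PointerType(ast_nodes.ValueType('mjtNum')))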
if __name__ == '__main__':
absltest.main()
| mujoco-main | introspect/type_parsing_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ast_nodes.py."""
from absl.testing import absltest
from . import ast_nodes
class AstNodesTest(absltest.TestCase):
def test_value_type(self):
value_type = ast_nodes.ValueType('int')
self.assertEqual(str(value_type), 'int')
self.assertEqual(value_type.decl('var'), 'int var')
const_value_type = ast_nodes.ValueType('double', is_const=True)
self.assertEqual(str(const_value_type), 'const double')
self.assertEqual(const_value_type.decl('var2'), 'const double var2')
def test_pointer_type(self):
pointer_type = ast_nodes.PointerType(ast_nodes.ValueType('int'))
self.assertEqual(str(pointer_type), 'int *')
self.assertEqual(pointer_type.decl('var'), 'int * var')
const_pointer_type = ast_nodes.PointerType(
ast_nodes.ValueType('double'), is_const=True)
self.assertEqual(str(const_pointer_type), 'double * const')
self.assertEqual(const_pointer_type.decl('var2'), 'double * const var2')
pointer_to_const_type = ast_nodes.PointerType(
ast_nodes.ValueType('float', is_const=True))
self.assertEqual(str(pointer_to_const_type), 'const float *')
self.assertEqual(pointer_to_const_type.decl('var3'), 'const float * var3')
restrict_volatile_pointer_to_const_type = ast_nodes.PointerType(
ast_nodes.ValueType('char', is_const=True),
is_volatile=True, is_restrict=True)
self.assertEqual(str(restrict_volatile_pointer_to_const_type),
'const char * volatile restrict')
self.assertEqual(
restrict_volatile_pointer_to_const_type.decl('var4'),
'const char * volatile restrict var4')
pointer_to_array_type = ast_nodes.PointerType(
ast_nodes.ArrayType(ast_nodes.ValueType('long'), (3,)))
self.assertEqual(str(pointer_to_array_type), 'long (*)[3]')
self.assertEqual(pointer_to_array_type.decl('var5'), 'long (* var5)[3]')
const_pointer_to_array_type = ast_nodes.PointerType(
ast_nodes.ArrayType(ast_nodes.ValueType('unsigned int'), (4,)),
is_const=True)
self.assertEqual(
str(const_pointer_to_array_type), 'unsigned int (* const)[4]')
self.assertEqual(
const_pointer_to_array_type.decl('var6'),
'unsigned int (* const var6)[4]')
def test_array_type(self):
array_type = ast_nodes.ArrayType(ast_nodes.ValueType('int'), (4,))
self.assertEqual(str(array_type), 'int [4]')
self.assertEqual(array_type.decl('var'), 'int var[4]')
array_2d_type = ast_nodes.ArrayType(
ast_nodes.ValueType('double', is_const=True), (2, 3))
self.assertEqual(str(array_2d_type), 'const double [2][3]')
self.assertEqual(array_2d_type.decl('var2'), 'const double var2[2][3]')
array_to_pointer_type = ast_nodes.ArrayType(
ast_nodes.PointerType(ast_nodes.ValueType('char', is_const=True)), (5,))
self.assertEqual(str(array_to_pointer_type), 'const char * [5]')
self.assertEqual(array_to_pointer_type.decl('var3'), 'const char * var3[5]')
array_to_const_pointer_type = ast_nodes.ArrayType(
ast_nodes.PointerType(ast_nodes.ValueType('float'), is_const=True),
(7,))
self.assertEqual(str(array_to_const_pointer_type), 'float * const [7]')
self.assertEqual(
array_to_const_pointer_type.decl('var4'), 'float * const var4[7]')
def test_complex_type(self):
complex_type = ast_nodes.ArrayType(
extents=[9],
inner_type=ast_nodes.PointerType(
ast_nodes.PointerType(
is_const=True,
inner_type=ast_nodes.ArrayType(
extents=[7],
inner_type=ast_nodes.PointerType(
is_const=True,
inner_type=ast_nodes.PointerType(
ast_nodes.ArrayType(
extents=(3, 4),
inner_type=ast_nodes.ValueType(
'unsigned int', is_const=True)
)
)
)
)
)
)
)
self.assertEqual(str(complex_type),
'const unsigned int (* * const (* const * [9])[7])[3][4]')
self.assertEqual(
complex_type.decl('var'),
'const unsigned int (* * const (* const * var[9])[7])[3][4]')
def test_struct_decl(self):
struct = ast_nodes.StructDecl(
name='mystruct',
declname='struct mystruct_',
fields=[
ast_nodes.StructFieldDecl(
name='foo',
type=ast_nodes.ValueType('int'),
doc='',
)
],
)
self.assertEqual(struct.decl('var'), 'mystruct var')
def test_anonymous_struct_decl(self):
struct = ast_nodes.AnonymousStructDecl(
fields=[
ast_nodes.StructFieldDecl(
name='foo',
type=ast_nodes.ValueType('int'),
doc='',
),
ast_nodes.StructFieldDecl(
name='bar',
type=ast_nodes.ArrayType(
inner_type=ast_nodes.ValueType('float'), extents=(3,)
),
doc='',
),
],
)
self.assertEqual(str(struct), 'struct {int foo; float bar[3];}')
self.assertEqual(struct.decl('var'), 'struct {int foo; float bar[3];} var')
self.assertEqual(struct.fields[0].decltype, 'int')
self.assertEqual(struct.fields[1].decltype, 'float [3]')
def test_anonymous_union_decl(self):
union = ast_nodes.AnonymousUnionDecl(
fields=[
ast_nodes.StructFieldDecl(
name='foo',
type=ast_nodes.ValueType('int'),
doc='',
),
ast_nodes.StructFieldDecl(
name='bar',
type=ast_nodes.ArrayType(
inner_type=ast_nodes.ValueType('float'), extents=(3,)
),
doc='',
),
],
)
self.assertEqual(str(union), 'union {int foo; float bar[3];}')
self.assertEqual(union.decl('var'), 'union {int foo; float bar[3];} var')
if __name__ == '__main__':
absltest.main()
| mujoco-main | introspect/ast_nodes_test.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for structs.py."""
import re
from absl.testing import absltest
from . import ast_nodes
from . import structs
from . import type_parsing
class StructsTest(absltest.TestCase):
def test_mjData(self): # pylint: disable=invalid-name
struct_decl = structs.STRUCTS['mjData']
self.assertEqual(struct_decl.name, 'mjData')
self.assertEqual(struct_decl.declname, 'struct mjData_')
field_names = set()
for field in struct_decl.fields:
self.assertNotIn(field.name, field_names)
field_names.add(field.name)
if field.name == 'warning':
self.assertEqual(field.type,
type_parsing.parse_type('mjWarningStat[8]'))
self.assertEqual(field.doc, 'warning statistics')
elif field.name == 'qpos':
self.assertEqual(field.type, type_parsing.parse_type('mjtNum*'))
self.assertEqual(re.sub(r'\s+', ' ', field.doc), 'position (nq x 1)')
self.assertIn('warning', field_names)
self.assertIn('qpos', field_names)
def test_mjVisual(self): # pylint: disable=invalid-name
struct_decl = structs.STRUCTS['mjVisual']
self.assertEqual(struct_decl.name, 'mjVisual')
self.assertEqual(struct_decl.declname, 'struct mjVisual_')
outer_fields = set()
for outer_field in struct_decl.fields:
self.assertNotIn(outer_field.name, outer_fields)
outer_fields.add(outer_field.name)
self.assertIsInstance(outer_field.type, ast_nodes.AnonymousStructDecl)
inner_fields = set()
if outer_field.name == 'global':
for inner_field in outer_field.type.fields:
self.assertNotIn(inner_field.name, inner_fields)
inner_fields.add(inner_field.name)
if inner_field.name == 'ipd':
self.assertEqual(inner_field.type, type_parsing.parse_type('float'))
self.assertEqual(
                inner_field.doc, 'inter-pupillary distance for free camera'
)
elif inner_field.name == 'offwidth':
self.assertEqual(inner_field.type, type_parsing.parse_type('int'))
self.assertEqual(inner_field.doc, 'width of offscreen buffer')
self.assertIn('ipd', inner_fields)
self.assertIn('offwidth', inner_fields)
elif outer_field.name == 'headlight':
for inner_field in outer_field.type.fields:
self.assertNotIn(inner_field.name, inner_fields)
inner_fields.add(inner_field.name)
if inner_field.name in {'ambient', 'diffuse', 'specular'}:
self.assertEqual(inner_field.type,
type_parsing.parse_type('float[3]'))
self.assertEqual(inner_field.doc,
f'{inner_field.name} rgb (alpha=1)')
elif inner_field.name == 'active':
self.assertEqual(inner_field.type, type_parsing.parse_type('int'))
self.assertEqual(inner_field.doc, 'is headlight active')
self.assertIn('ambient', inner_fields)
self.assertIn('diffuse', inner_fields)
self.assertIn('specular', inner_fields)
self.assertIn('active', inner_fields)
self.assertIn('global', outer_fields)
self.assertIn('headlight', outer_fields)
def test_mjuiItem(self): # pylint: disable=invalid-name
struct_decl = structs.STRUCTS['mjuiItem']
self.assertEqual(struct_decl.name, 'mjuiItem')
self.assertEqual(struct_decl.declname, 'struct mjuiItem_')
found_anonymous_union = False
outer_fields = set()
for outer_field in struct_decl.fields:
if isinstance(outer_field, ast_nodes.AnonymousUnionDecl):
self.assertFalse(found_anonymous_union)
found_anonymous_union = True
inner_fields = set()
for inner_field in outer_field.fields:
self.assertNotIn(inner_field.name, inner_fields)
inner_fields.add(inner_field.name)
if inner_field.name == 'single':
self.assertEqual(inner_field.type,
type_parsing.parse_type('struct mjuiItemSingle_'))
self.assertEqual(inner_field.doc, 'check and button')
elif inner_field.name == 'multi':
self.assertEqual(inner_field.type,
type_parsing.parse_type('struct mjuiItemMulti_'))
self.assertEqual(inner_field.doc, 'static, radio and select')
self.assertIn('single', inner_fields)
self.assertIn('multi', inner_fields)
else:
self.assertNotIn(outer_field.name, outer_fields)
outer_fields.add(outer_field.name)
if outer_field.name == 'pdata':
self.assertEqual(outer_field.type, type_parsing.parse_type('void*'))
self.assertEqual(outer_field.doc, 'data pointer (type-specific)')
self.assertTrue(found_anonymous_union)
self.assertIn('pdata', outer_fields)
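  # scalar and fixed-size array fields, as a sketch based on the mjOption
  # declaration in structs.py (not part of the original suite)
  def test_mjOption(self):  # pylint: disable=invalid-name
    struct_decl = structs.STRUCTS['mjOption']
    self.assertEqual(struct_decl.name, 'mjOption')
    self.assertEqual(struct_decl.declname, 'struct mjOption_')
    fields = {field.name: field for field in struct_decl.fields}
    self.assertEqual(fields['timestep'].type,
                     type_parsing.parse_type('mjtNum'))
    self.assertEqual(fields['gravity'].type,
                     type_parsing.parse_type('mjtNum[3]'))
    self.assertEqual(fields['gravity'].doc, 'gravitational acceleration')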
if __name__ == '__main__':
absltest.main()
| mujoco-main | introspect/structs_test.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides information about MuJoCo API structs.
DO NOT EDIT. THIS FILE IS AUTOMATICALLY GENERATED.
"""
from typing import Mapping
from .ast_nodes import AnonymousStructDecl
from .ast_nodes import AnonymousUnionDecl
from .ast_nodes import ArrayType
from .ast_nodes import PointerType
from .ast_nodes import StructDecl
from .ast_nodes import StructFieldDecl
from .ast_nodes import ValueType
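# Usage sketch (comment only, since this file is generated): each entry maps a
# typedef name to a StructDecl whose fields carry parsed ast_nodes types, so a
# field's C declaration can be reconstructed from the introspection data, e.g.
#
#   field = next(f for f in STRUCTS['mjModel'].fields if f.name == 'qpos0')
#   field.type.decl(field.name)   # -> 'mjtNum * qpos0'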
STRUCTS: Mapping[str, StructDecl] = dict([
('mjLROpt',
StructDecl(
name='mjLROpt',
declname='struct mjLROpt_',
fields=(
StructFieldDecl(
name='mode',
type=ValueType(name='int'),
doc='which actuators to process (mjtLRMode)',
),
StructFieldDecl(
name='useexisting',
type=ValueType(name='int'),
doc='use existing length range if available',
),
StructFieldDecl(
name='uselimit',
type=ValueType(name='int'),
doc='use joint and tendon limits if available',
),
StructFieldDecl(
name='accel',
type=ValueType(name='mjtNum'),
doc='target acceleration used to compute force',
),
StructFieldDecl(
name='maxforce',
type=ValueType(name='mjtNum'),
doc='maximum force; 0: no limit',
),
StructFieldDecl(
name='timeconst',
type=ValueType(name='mjtNum'),
doc='time constant for velocity reduction; min 0.01',
),
StructFieldDecl(
name='timestep',
type=ValueType(name='mjtNum'),
doc='simulation timestep; 0: use mjOption.timestep',
),
StructFieldDecl(
name='inttotal',
type=ValueType(name='mjtNum'),
doc='total simulation time interval',
),
StructFieldDecl(
name='interval',
type=ValueType(name='mjtNum'),
doc='evaluation time interval (at the end)',
),
StructFieldDecl(
name='tolrange',
type=ValueType(name='mjtNum'),
doc='convergence tolerance (relative to range)',
),
),
)),
('mjVFS',
StructDecl(
name='mjVFS',
declname='struct mjVFS_',
fields=(
StructFieldDecl(
name='nfile',
type=ValueType(name='int'),
doc='number of files present',
),
StructFieldDecl(
name='filename',
type=ArrayType(
inner_type=ValueType(name='char'),
extents=(2000, 1000),
),
doc='file name without path',
),
StructFieldDecl(
name='filesize',
type=ArrayType(
inner_type=ValueType(name='size_t'),
extents=(2000,),
),
doc='file size in bytes',
),
StructFieldDecl(
name='filedata',
type=ArrayType(
inner_type=PointerType(
inner_type=ValueType(name='void'),
),
extents=(2000,),
),
doc='buffer with file data',
),
),
)),
('mjOption',
StructDecl(
name='mjOption',
declname='struct mjOption_',
fields=(
StructFieldDecl(
name='timestep',
type=ValueType(name='mjtNum'),
doc='timestep',
),
StructFieldDecl(
name='apirate',
type=ValueType(name='mjtNum'),
doc='update rate for remote API (Hz)',
),
StructFieldDecl(
name='impratio',
type=ValueType(name='mjtNum'),
doc='ratio of friction-to-normal contact impedance',
),
StructFieldDecl(
name='tolerance',
type=ValueType(name='mjtNum'),
doc='main solver tolerance',
),
StructFieldDecl(
name='ls_tolerance',
type=ValueType(name='mjtNum'),
doc='CG/Newton linesearch tolerance',
),
StructFieldDecl(
name='noslip_tolerance',
type=ValueType(name='mjtNum'),
doc='noslip solver tolerance',
),
StructFieldDecl(
name='mpr_tolerance',
type=ValueType(name='mjtNum'),
doc='MPR solver tolerance',
),
StructFieldDecl(
name='gravity',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
doc='gravitational acceleration',
),
StructFieldDecl(
name='wind',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
doc='wind (for lift, drag and viscosity)',
),
StructFieldDecl(
name='magnetic',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
doc='global magnetic flux',
),
StructFieldDecl(
name='density',
type=ValueType(name='mjtNum'),
doc='density of medium',
),
StructFieldDecl(
name='viscosity',
type=ValueType(name='mjtNum'),
doc='viscosity of medium',
),
StructFieldDecl(
name='o_margin',
type=ValueType(name='mjtNum'),
doc='margin',
),
StructFieldDecl(
name='o_solref',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(2,),
),
doc='solref',
),
StructFieldDecl(
name='o_solimp',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(5,),
),
doc='solimp',
),
StructFieldDecl(
name='integrator',
type=ValueType(name='int'),
doc='integration mode (mjtIntegrator)',
),
StructFieldDecl(
name='collision',
type=ValueType(name='int'),
doc='collision mode (mjtCollision)',
),
StructFieldDecl(
name='cone',
type=ValueType(name='int'),
doc='type of friction cone (mjtCone)',
),
StructFieldDecl(
name='jacobian',
type=ValueType(name='int'),
doc='type of Jacobian (mjtJacobian)',
),
StructFieldDecl(
name='solver',
type=ValueType(name='int'),
doc='solver algorithm (mjtSolver)',
),
StructFieldDecl(
name='iterations',
type=ValueType(name='int'),
doc='maximum number of main solver iterations',
),
StructFieldDecl(
name='ls_iterations',
type=ValueType(name='int'),
doc='maximum number of CG/Newton linesearch iterations',
),
StructFieldDecl(
name='noslip_iterations',
type=ValueType(name='int'),
doc='maximum number of noslip solver iterations',
),
StructFieldDecl(
name='mpr_iterations',
type=ValueType(name='int'),
doc='maximum number of MPR solver iterations',
),
StructFieldDecl(
name='disableflags',
type=ValueType(name='int'),
doc='bit flags for disabling standard features',
),
StructFieldDecl(
name='enableflags',
type=ValueType(name='int'),
doc='bit flags for enabling optional features',
),
StructFieldDecl(
name='sdf_initpoints',
type=ValueType(name='int'),
doc='number of starting points for gradient descent',
),
StructFieldDecl(
name='sdf_iterations',
type=ValueType(name='int'),
doc='max number of iterations for gradient descent',
),
),
)),
('mjVisual',
StructDecl(
name='mjVisual',
declname='struct mjVisual_',
fields=(
StructFieldDecl(
name='global',
type=AnonymousStructDecl(
fields=(
StructFieldDecl(
name='fovy',
type=ValueType(name='float'),
doc='y-field of view for free camera (degrees)',
),
StructFieldDecl(
name='ipd',
type=ValueType(name='float'),
                            doc='inter-pupillary distance for free camera',
),
StructFieldDecl(
name='azimuth',
type=ValueType(name='float'),
doc='initial azimuth of free camera (degrees)',
),
StructFieldDecl(
name='elevation',
type=ValueType(name='float'),
doc='initial elevation of free camera (degrees)',
),
StructFieldDecl(
name='linewidth',
type=ValueType(name='float'),
doc='line width for wireframe and ray rendering',
),
StructFieldDecl(
name='glow',
type=ValueType(name='float'),
doc='glow coefficient for selected body',
),
StructFieldDecl(
name='realtime',
type=ValueType(name='float'),
doc='initial real-time factor (1: real time)',
),
StructFieldDecl(
name='offwidth',
type=ValueType(name='int'),
doc='width of offscreen buffer',
),
StructFieldDecl(
name='offheight',
type=ValueType(name='int'),
doc='height of offscreen buffer',
),
StructFieldDecl(
name='ellipsoidinertia',
type=ValueType(name='int'),
doc='geom for inertia visualization (0: box, 1: ellipsoid)', # pylint: disable=line-too-long
),
),
),
doc='',
),
StructFieldDecl(
name='quality',
type=AnonymousStructDecl(
fields=(
StructFieldDecl(
name='shadowsize',
type=ValueType(name='int'),
doc='size of shadowmap texture',
),
StructFieldDecl(
name='offsamples',
type=ValueType(name='int'),
doc='number of multisamples for offscreen rendering', # pylint: disable=line-too-long
),
StructFieldDecl(
name='numslices',
type=ValueType(name='int'),
doc='number of slices for builtin geom drawing',
),
StructFieldDecl(
name='numstacks',
type=ValueType(name='int'),
doc='number of stacks for builtin geom drawing',
),
StructFieldDecl(
name='numquads',
type=ValueType(name='int'),
doc='number of quads for box rendering',
),
),
),
doc='',
),
StructFieldDecl(
name='headlight',
type=AnonymousStructDecl(
fields=(
StructFieldDecl(
name='ambient',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='ambient rgb (alpha=1)',
),
StructFieldDecl(
name='diffuse',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='diffuse rgb (alpha=1)',
),
StructFieldDecl(
name='specular',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='specular rgb (alpha=1)',
),
StructFieldDecl(
name='active',
type=ValueType(name='int'),
doc='is headlight active',
),
),
),
doc='',
),
StructFieldDecl(
name='map',
type=AnonymousStructDecl(
fields=(
StructFieldDecl(
name='stiffness',
type=ValueType(name='float'),
doc='mouse perturbation stiffness (space->force)',
),
StructFieldDecl(
name='stiffnessrot',
type=ValueType(name='float'),
doc='mouse perturbation stiffness (space->torque)',
),
StructFieldDecl(
name='force',
type=ValueType(name='float'),
doc='from force units to space units',
),
StructFieldDecl(
name='torque',
type=ValueType(name='float'),
doc='from torque units to space units',
),
StructFieldDecl(
name='alpha',
type=ValueType(name='float'),
doc='scale geom alphas when transparency is enabled', # pylint: disable=line-too-long
),
StructFieldDecl(
name='fogstart',
type=ValueType(name='float'),
doc='OpenGL fog starts at fogstart * mjModel.stat.extent', # pylint: disable=line-too-long
),
StructFieldDecl(
name='fogend',
type=ValueType(name='float'),
doc='OpenGL fog ends at fogend * mjModel.stat.extent', # pylint: disable=line-too-long
),
StructFieldDecl(
name='znear',
type=ValueType(name='float'),
doc='near clipping plane = znear * mjModel.stat.extent', # pylint: disable=line-too-long
),
StructFieldDecl(
name='zfar',
type=ValueType(name='float'),
doc='far clipping plane = zfar * mjModel.stat.extent', # pylint: disable=line-too-long
),
StructFieldDecl(
name='haze',
type=ValueType(name='float'),
doc='haze ratio',
),
StructFieldDecl(
name='shadowclip',
type=ValueType(name='float'),
doc='directional light: shadowclip * mjModel.stat.extent', # pylint: disable=line-too-long
),
StructFieldDecl(
name='shadowscale',
type=ValueType(name='float'),
doc='spot light: shadowscale * light.cutoff',
),
StructFieldDecl(
name='actuatortendon',
type=ValueType(name='float'),
doc='scale tendon width',
),
),
),
doc='',
),
StructFieldDecl(
name='scale',
type=AnonymousStructDecl(
fields=(
StructFieldDecl(
name='forcewidth',
type=ValueType(name='float'),
doc='width of force arrow',
),
StructFieldDecl(
name='contactwidth',
type=ValueType(name='float'),
doc='contact width',
),
StructFieldDecl(
name='contactheight',
type=ValueType(name='float'),
doc='contact height',
),
StructFieldDecl(
name='connect',
type=ValueType(name='float'),
doc='autoconnect capsule width',
),
StructFieldDecl(
name='com',
type=ValueType(name='float'),
doc='com radius',
),
StructFieldDecl(
name='camera',
type=ValueType(name='float'),
doc='camera object',
),
StructFieldDecl(
name='light',
type=ValueType(name='float'),
doc='light object',
),
StructFieldDecl(
name='selectpoint',
type=ValueType(name='float'),
doc='selection point',
),
StructFieldDecl(
name='jointlength',
type=ValueType(name='float'),
doc='joint length',
),
StructFieldDecl(
name='jointwidth',
type=ValueType(name='float'),
doc='joint width',
),
StructFieldDecl(
name='actuatorlength',
type=ValueType(name='float'),
doc='actuator length',
),
StructFieldDecl(
name='actuatorwidth',
type=ValueType(name='float'),
doc='actuator width',
),
StructFieldDecl(
name='framelength',
type=ValueType(name='float'),
doc='bodyframe axis length',
),
StructFieldDecl(
name='framewidth',
type=ValueType(name='float'),
doc='bodyframe axis width',
),
StructFieldDecl(
name='constraint',
type=ValueType(name='float'),
doc='constraint width',
),
StructFieldDecl(
name='slidercrank',
type=ValueType(name='float'),
doc='slidercrank width',
),
),
),
doc='',
),
StructFieldDecl(
name='rgba',
type=AnonymousStructDecl(
fields=(
StructFieldDecl(
name='fog',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='fog',
),
StructFieldDecl(
name='haze',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='haze',
),
StructFieldDecl(
name='force',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='external force',
),
StructFieldDecl(
name='inertia',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='inertia box',
),
StructFieldDecl(
name='joint',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='joint',
),
StructFieldDecl(
name='actuator',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='actuator, neutral',
),
StructFieldDecl(
name='actuatornegative',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='actuator, negative limit',
),
StructFieldDecl(
name='actuatorpositive',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='actuator, positive limit',
),
StructFieldDecl(
name='com',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='center of mass',
),
StructFieldDecl(
name='camera',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='camera object',
),
StructFieldDecl(
name='light',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='light object',
),
StructFieldDecl(
name='selectpoint',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='selection point',
),
StructFieldDecl(
name='connect',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='auto connect',
),
StructFieldDecl(
name='contactpoint',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='contact point',
),
StructFieldDecl(
name='contactforce',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='contact force',
),
StructFieldDecl(
name='contactfriction',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='contact friction force',
),
StructFieldDecl(
name='contacttorque',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='contact torque',
),
StructFieldDecl(
name='contactgap',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='contact point in gap',
),
StructFieldDecl(
name='rangefinder',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='rangefinder ray',
),
StructFieldDecl(
name='constraint',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='constraint',
),
StructFieldDecl(
name='slidercrank',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='slidercrank',
),
StructFieldDecl(
name='crankbroken',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='used when crank must be stretched/broken',
),
),
),
doc='',
),
),
)),
('mjStatistic',
StructDecl(
name='mjStatistic',
declname='struct mjStatistic_',
fields=(
StructFieldDecl(
name='meaninertia',
type=ValueType(name='mjtNum'),
doc='mean diagonal inertia',
),
StructFieldDecl(
name='meanmass',
type=ValueType(name='mjtNum'),
doc='mean body mass',
),
StructFieldDecl(
name='meansize',
type=ValueType(name='mjtNum'),
doc='mean body size',
),
StructFieldDecl(
name='extent',
type=ValueType(name='mjtNum'),
doc='spatial extent',
),
StructFieldDecl(
name='center',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
doc='center of model',
),
),
)),
('mjModel',
StructDecl(
name='mjModel',
declname='struct mjModel_',
fields=(
StructFieldDecl(
name='nq',
type=ValueType(name='int'),
doc='number of generalized coordinates = dim(qpos)',
),
StructFieldDecl(
name='nv',
type=ValueType(name='int'),
doc='number of degrees of freedom = dim(qvel)',
),
StructFieldDecl(
name='nu',
type=ValueType(name='int'),
doc='number of actuators/controls = dim(ctrl)',
),
StructFieldDecl(
name='na',
type=ValueType(name='int'),
doc='number of activation states = dim(act)',
),
StructFieldDecl(
name='nbody',
type=ValueType(name='int'),
doc='number of bodies',
),
StructFieldDecl(
name='nbvh',
type=ValueType(name='int'),
doc='number of bounding volumes in all bodies',
),
StructFieldDecl(
name='njnt',
type=ValueType(name='int'),
doc='number of joints',
),
StructFieldDecl(
name='ngeom',
type=ValueType(name='int'),
doc='number of geoms',
),
StructFieldDecl(
name='nsite',
type=ValueType(name='int'),
doc='number of sites',
),
StructFieldDecl(
name='ncam',
type=ValueType(name='int'),
doc='number of cameras',
),
StructFieldDecl(
name='nlight',
type=ValueType(name='int'),
doc='number of lights',
),
StructFieldDecl(
name='nmesh',
type=ValueType(name='int'),
doc='number of meshes',
),
StructFieldDecl(
name='nmeshvert',
type=ValueType(name='int'),
doc='number of vertices in all meshes',
),
StructFieldDecl(
name='nmeshnormal',
type=ValueType(name='int'),
doc='number of normals in all meshes',
),
StructFieldDecl(
name='nmeshtexcoord',
type=ValueType(name='int'),
doc='number of texcoords in all meshes',
),
StructFieldDecl(
name='nmeshface',
type=ValueType(name='int'),
doc='number of triangular faces in all meshes',
),
StructFieldDecl(
name='nmeshgraph',
type=ValueType(name='int'),
doc='number of ints in mesh auxiliary data',
),
StructFieldDecl(
name='nskin',
type=ValueType(name='int'),
doc='number of skins',
),
StructFieldDecl(
name='nskinvert',
type=ValueType(name='int'),
doc='number of vertices in all skins',
),
StructFieldDecl(
name='nskintexvert',
type=ValueType(name='int'),
                doc='number of vertices with texcoords in all skins',
),
StructFieldDecl(
name='nskinface',
type=ValueType(name='int'),
doc='number of triangular faces in all skins',
),
StructFieldDecl(
name='nskinbone',
type=ValueType(name='int'),
doc='number of bones in all skins',
),
StructFieldDecl(
name='nskinbonevert',
type=ValueType(name='int'),
doc='number of vertices in all skin bones',
),
StructFieldDecl(
name='nhfield',
type=ValueType(name='int'),
doc='number of heightfields',
),
StructFieldDecl(
name='nhfielddata',
type=ValueType(name='int'),
doc='number of data points in all heightfields',
),
StructFieldDecl(
name='ntex',
type=ValueType(name='int'),
doc='number of textures',
),
StructFieldDecl(
name='ntexdata',
type=ValueType(name='int'),
doc='number of bytes in texture rgb data',
),
StructFieldDecl(
name='nmat',
type=ValueType(name='int'),
doc='number of materials',
),
StructFieldDecl(
name='npair',
type=ValueType(name='int'),
doc='number of predefined geom pairs',
),
StructFieldDecl(
name='nexclude',
type=ValueType(name='int'),
doc='number of excluded geom pairs',
),
StructFieldDecl(
name='neq',
type=ValueType(name='int'),
doc='number of equality constraints',
),
StructFieldDecl(
name='ntendon',
type=ValueType(name='int'),
doc='number of tendons',
),
StructFieldDecl(
name='nwrap',
type=ValueType(name='int'),
doc='number of wrap objects in all tendon paths',
),
StructFieldDecl(
name='nsensor',
type=ValueType(name='int'),
doc='number of sensors',
),
StructFieldDecl(
name='nnumeric',
type=ValueType(name='int'),
doc='number of numeric custom fields',
),
StructFieldDecl(
name='nnumericdata',
type=ValueType(name='int'),
doc='number of mjtNums in all numeric fields',
),
StructFieldDecl(
name='ntext',
type=ValueType(name='int'),
doc='number of text custom fields',
),
StructFieldDecl(
name='ntextdata',
type=ValueType(name='int'),
doc='number of mjtBytes in all text fields',
),
StructFieldDecl(
name='ntuple',
type=ValueType(name='int'),
doc='number of tuple custom fields',
),
StructFieldDecl(
name='ntupledata',
type=ValueType(name='int'),
doc='number of objects in all tuple fields',
),
StructFieldDecl(
name='nkey',
type=ValueType(name='int'),
doc='number of keyframes',
),
StructFieldDecl(
name='nmocap',
type=ValueType(name='int'),
doc='number of mocap bodies',
),
StructFieldDecl(
name='nplugin',
type=ValueType(name='int'),
doc='number of plugin instances',
),
StructFieldDecl(
name='npluginattr',
type=ValueType(name='int'),
doc='number of chars in all plugin config attributes',
),
StructFieldDecl(
name='nuser_body',
type=ValueType(name='int'),
doc='number of mjtNums in body_user',
),
StructFieldDecl(
name='nuser_jnt',
type=ValueType(name='int'),
doc='number of mjtNums in jnt_user',
),
StructFieldDecl(
name='nuser_geom',
type=ValueType(name='int'),
doc='number of mjtNums in geom_user',
),
StructFieldDecl(
name='nuser_site',
type=ValueType(name='int'),
doc='number of mjtNums in site_user',
),
StructFieldDecl(
name='nuser_cam',
type=ValueType(name='int'),
doc='number of mjtNums in cam_user',
),
StructFieldDecl(
name='nuser_tendon',
type=ValueType(name='int'),
doc='number of mjtNums in tendon_user',
),
StructFieldDecl(
name='nuser_actuator',
type=ValueType(name='int'),
doc='number of mjtNums in actuator_user',
),
StructFieldDecl(
name='nuser_sensor',
type=ValueType(name='int'),
doc='number of mjtNums in sensor_user',
),
StructFieldDecl(
name='nnames',
type=ValueType(name='int'),
doc='number of chars in all names',
),
StructFieldDecl(
name='nnames_map',
type=ValueType(name='int'),
doc='number of slots in the names hash map',
),
StructFieldDecl(
name='nM',
type=ValueType(name='int'),
doc='number of non-zeros in sparse inertia matrix',
),
StructFieldDecl(
name='nD',
type=ValueType(name='int'),
doc='number of non-zeros in sparse dof-dof matrix',
),
StructFieldDecl(
name='nB',
type=ValueType(name='int'),
doc='number of non-zeros in sparse body-dof matrix',
),
StructFieldDecl(
name='ntree',
type=ValueType(name='int'),
doc='number of kinematic trees under world body',
),
StructFieldDecl(
name='nemax',
type=ValueType(name='int'),
doc='number of potential equality-constraint rows',
),
StructFieldDecl(
name='njmax',
type=ValueType(name='int'),
doc='number of available rows in constraint Jacobian',
),
StructFieldDecl(
name='nconmax',
type=ValueType(name='int'),
doc='number of potential contacts in contact list',
),
StructFieldDecl(
name='nuserdata',
type=ValueType(name='int'),
doc='number of extra fields in mjData',
),
StructFieldDecl(
name='nsensordata',
type=ValueType(name='int'),
doc='number of fields in sensor data vector',
),
StructFieldDecl(
name='npluginstate',
type=ValueType(name='int'),
doc='number of fields in plugin state vector',
),
StructFieldDecl(
name='narena',
type=ValueType(name='size_t'),
doc='number of bytes in the mjData arena (inclusive of stack)',
),
StructFieldDecl(
name='nbuffer',
type=ValueType(name='size_t'),
doc='number of bytes in buffer',
),
StructFieldDecl(
name='opt',
type=ValueType(name='mjOption'),
doc='physics options',
),
StructFieldDecl(
name='vis',
type=ValueType(name='mjVisual'),
doc='visualization options',
),
StructFieldDecl(
name='stat',
type=ValueType(name='mjStatistic'),
doc='model statistics',
),
StructFieldDecl(
name='buffer',
type=PointerType(
inner_type=ValueType(name='void'),
),
doc='main buffer; all pointers point in it (nbuffer)',
),
StructFieldDecl(
name='qpos0',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='qpos values at default pose (nq x 1)',
),
StructFieldDecl(
name='qpos_spring',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='reference pose for springs (nq x 1)',
),
StructFieldDecl(
name='body_parentid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc="id of body's parent (nbody x 1)",
),
StructFieldDecl(
name='body_rootid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='id of root above body (nbody x 1)',
),
StructFieldDecl(
name='body_weldid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='id of body that this body is welded to (nbody x 1)',
),
StructFieldDecl(
name='body_mocapid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='id of mocap data; -1: none (nbody x 1)',
),
StructFieldDecl(
name='body_jntnum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of joints for this body (nbody x 1)',
),
StructFieldDecl(
name='body_jntadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='start addr of joints; -1: no joints (nbody x 1)',
),
StructFieldDecl(
name='body_dofnum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of motion degrees of freedom (nbody x 1)',
),
StructFieldDecl(
name='body_dofadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='start addr of dofs; -1: no dofs (nbody x 1)',
),
StructFieldDecl(
name='body_treeid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc="id of body's kinematic tree; -1: static (nbody x 1)",
),
StructFieldDecl(
name='body_geomnum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of geoms (nbody x 1)',
),
StructFieldDecl(
name='body_geomadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='start addr of geoms; -1: no geoms (nbody x 1)',
),
StructFieldDecl(
name='body_simple',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='body is simple (has diagonal M) (nbody x 1)',
),
StructFieldDecl(
name='body_sameframe',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='inertial frame is same as body frame (nbody x 1)',
),
StructFieldDecl(
name='body_pos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='position offset rel. to parent body (nbody x 3)',
),
StructFieldDecl(
name='body_quat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='orientation offset rel. to parent body (nbody x 4)',
),
StructFieldDecl(
name='body_ipos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='local position of center of mass (nbody x 3)',
),
StructFieldDecl(
name='body_iquat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='local orientation of inertia ellipsoid (nbody x 4)',
),
StructFieldDecl(
name='body_mass',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='mass (nbody x 1)',
),
StructFieldDecl(
name='body_subtreemass',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='mass of subtree starting at this body (nbody x 1)',
),
StructFieldDecl(
name='body_inertia',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='diagonal inertia in ipos/iquat frame (nbody x 3)',
),
StructFieldDecl(
name='body_invweight0',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='mean inv inert in qpos0 (trn, rot) (nbody x 2)',
),
StructFieldDecl(
name='body_gravcomp',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='antigravity force, units of body weight (nbody x 1)',
),
StructFieldDecl(
name='body_user',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='user data (nbody x nuser_body)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='body_plugin',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='plugin instance id; -1: not in use (nbody x 1)',
),
StructFieldDecl(
name='body_bvhadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='address of bvh root (nbody x 1)',
),
StructFieldDecl(
name='body_bvhnum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of bounding volumes (nbody x 1)',
),
StructFieldDecl(
name='bvh_depth',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='depth in the bounding volume hierarchy (nbvh x 1)',
),
StructFieldDecl(
name='bvh_child',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='left and right children in tree (nbvh x 2)',
),
StructFieldDecl(
name='bvh_geomid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='geom id of the node; -1: non-leaf (nbvh x 1)',
),
StructFieldDecl(
name='bvh_aabb',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='bounding box of node (center, size) (nbvh x 6)',
),
StructFieldDecl(
name='jnt_type',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='type of joint (mjtJoint) (njnt x 1)',
),
StructFieldDecl(
name='jnt_qposadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc="start addr in 'qpos' for joint's data (njnt x 1)",
),
StructFieldDecl(
name='jnt_dofadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc="start addr in 'qvel' for joint's data (njnt x 1)",
),
StructFieldDecl(
name='jnt_bodyid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc="id of joint's body (njnt x 1)",
),
StructFieldDecl(
name='jnt_group',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='group for visibility (njnt x 1)',
),
StructFieldDecl(
name='jnt_limited',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='does joint have limits (njnt x 1)',
),
StructFieldDecl(
name='jnt_actfrclimited',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='does joint have actuator force limits (njnt x 1)',
),
StructFieldDecl(
name='jnt_solref',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='constraint solver reference: limit (njnt x mjNREF)',
),
StructFieldDecl(
name='jnt_solimp',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='constraint solver impedance: limit (njnt x mjNIMP)',
),
StructFieldDecl(
name='jnt_pos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='local anchor position (njnt x 3)',
),
StructFieldDecl(
name='jnt_axis',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='local joint axis (njnt x 3)',
),
StructFieldDecl(
name='jnt_stiffness',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='stiffness coefficient (njnt x 1)',
),
StructFieldDecl(
name='jnt_range',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='joint limits (njnt x 2)',
),
StructFieldDecl(
name='jnt_actfrcrange',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='range of total actuator force (njnt x 2)',
),
StructFieldDecl(
name='jnt_margin',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='min distance for limit detection (njnt x 1)',
),
StructFieldDecl(
name='jnt_user',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='user data (njnt x nuser_jnt)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='dof_bodyid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc="id of dof's body (nv x 1)",
),
StructFieldDecl(
name='dof_jntid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc="id of dof's joint (nv x 1)",
),
StructFieldDecl(
name='dof_parentid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc="id of dof's parent; -1: none (nv x 1)",
),
StructFieldDecl(
name='dof_treeid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc="id of dof's kinematic tree (nv x 1)",
),
StructFieldDecl(
name='dof_Madr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='dof address in M-diagonal (nv x 1)',
),
StructFieldDecl(
name='dof_simplenum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of consecutive simple dofs (nv x 1)',
),
StructFieldDecl(
name='dof_solref',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
                doc='constraint solver reference: frictionloss (nv x mjNREF)',
),
StructFieldDecl(
name='dof_solimp',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
                doc='constraint solver impedance: frictionloss (nv x mjNIMP)',
),
StructFieldDecl(
name='dof_frictionloss',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='dof friction loss (nv x 1)',
),
StructFieldDecl(
name='dof_armature',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='dof armature inertia/mass (nv x 1)',
),
StructFieldDecl(
name='dof_damping',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='damping coefficient (nv x 1)',
),
StructFieldDecl(
name='dof_invweight0',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='diag. inverse inertia in qpos0 (nv x 1)',
),
StructFieldDecl(
name='dof_M0',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='diag. inertia in qpos0 (nv x 1)',
),
StructFieldDecl(
name='geom_type',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='geometric type (mjtGeom) (ngeom x 1)',
),
StructFieldDecl(
name='geom_contype',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='geom contact type (ngeom x 1)',
),
StructFieldDecl(
name='geom_conaffinity',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='geom contact affinity (ngeom x 1)',
),
StructFieldDecl(
name='geom_condim',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='contact dimensionality (1, 3, 4, 6) (ngeom x 1)',
),
StructFieldDecl(
name='geom_bodyid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc="id of geom's body (ngeom x 1)",
),
StructFieldDecl(
name='geom_dataid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc="id of geom's mesh/hfield; -1: none (ngeom x 1)",
),
StructFieldDecl(
name='geom_matid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='material id for rendering; -1: none (ngeom x 1)',
),
StructFieldDecl(
name='geom_group',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='group for visibility (ngeom x 1)',
),
StructFieldDecl(
name='geom_priority',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='geom contact priority (ngeom x 1)',
),
StructFieldDecl(
name='geom_plugin',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='plugin instance id; -1: not in use (ngeom x 1)',
),
StructFieldDecl(
name='geom_sameframe',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='same as body frame (1) or iframe (2) (ngeom x 1)',
),
StructFieldDecl(
name='geom_solmix',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='mixing coef for solref/imp in geom pair (ngeom x 1)',
),
StructFieldDecl(
name='geom_solref',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='constraint solver reference: contact (ngeom x mjNREF)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='geom_solimp',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='constraint solver impedance: contact (ngeom x mjNIMP)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='geom_size',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='geom-specific size parameters (ngeom x 3)',
),
StructFieldDecl(
name='geom_aabb',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='bounding box, (center, size) (ngeom x 6)',
),
StructFieldDecl(
name='geom_rbound',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='radius of bounding sphere (ngeom x 1)',
),
StructFieldDecl(
name='geom_pos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='local position offset rel. to body (ngeom x 3)',
),
StructFieldDecl(
name='geom_quat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='local orientation offset rel. to body (ngeom x 4)',
),
StructFieldDecl(
name='geom_friction',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='friction for (slide, spin, roll) (ngeom x 3)',
),
StructFieldDecl(
name='geom_margin',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='detect contact if dist<margin(ngeom x 1)',
),
StructFieldDecl(
name='geom_gap',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='include in solver if dist<margin-gap (ngeom x 1)',
),
StructFieldDecl(
name='geom_fluid',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='fluid interaction parameters (ngeom x mjNFLUID)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='geom_user',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='user data (ngeom x nuser_geom)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='geom_rgba',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='rgba when material is omitted (ngeom x 4)',
),
StructFieldDecl(
name='site_type',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='geom type for rendering (mjtGeom) (nsite x 1)',
),
StructFieldDecl(
name='site_bodyid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc="id of site's body (nsite x 1)",
),
StructFieldDecl(
name='site_matid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='material id for rendering; -1: none (nsite x 1)',
),
StructFieldDecl(
name='site_group',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='group for visibility (nsite x 1)',
),
StructFieldDecl(
name='site_sameframe',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='same as body frame (1) or iframe (2) (nsite x 1)',
),
StructFieldDecl(
name='site_size',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='geom size for rendering (nsite x 3)',
),
StructFieldDecl(
name='site_pos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='local position offset rel. to body (nsite x 3)',
),
StructFieldDecl(
name='site_quat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='local orientation offset rel. to body (nsite x 4)',
),
StructFieldDecl(
name='site_user',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='user data (nsite x nuser_site)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='site_rgba',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='rgba when material is omitted (nsite x 4)',
),
StructFieldDecl(
name='cam_mode',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='camera tracking mode (mjtCamLight) (ncam x 1)',
),
StructFieldDecl(
name='cam_bodyid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc="id of camera's body (ncam x 1)",
),
StructFieldDecl(
name='cam_targetbodyid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='id of targeted body; -1: none (ncam x 1)',
),
StructFieldDecl(
name='cam_pos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='position rel. to body frame (ncam x 3)',
),
StructFieldDecl(
name='cam_quat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='orientation rel. to body frame (ncam x 4)',
),
StructFieldDecl(
name='cam_poscom0',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='global position rel. to sub-com in qpos0 (ncam x 3)',
),
StructFieldDecl(
name='cam_pos0',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='global position rel. to body in qpos0 (ncam x 3)',
),
StructFieldDecl(
name='cam_mat0',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='global orientation in qpos0 (ncam x 9)',
),
StructFieldDecl(
name='cam_resolution',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='[width, height] in pixels (ncam x 2)',
),
StructFieldDecl(
name='cam_fovy',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='y-field of view (deg) (ncam x 1)',
),
StructFieldDecl(
name='cam_ipd',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
                doc='inter-pupillary distance (ncam x 1)',
),
StructFieldDecl(
name='cam_user',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='user data (ncam x nuser_cam)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='light_mode',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='light tracking mode (mjtCamLight) (nlight x 1)',
),
StructFieldDecl(
name='light_bodyid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc="id of light's body (nlight x 1)",
),
StructFieldDecl(
name='light_targetbodyid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='id of targeted body; -1: none (nlight x 1)',
),
StructFieldDecl(
name='light_directional',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='directional light (nlight x 1)',
),
StructFieldDecl(
name='light_castshadow',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='does light cast shadows (nlight x 1)',
),
StructFieldDecl(
name='light_active',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='is light on (nlight x 1)',
),
StructFieldDecl(
name='light_pos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='position rel. to body frame (nlight x 3)',
),
StructFieldDecl(
name='light_dir',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='direction rel. to body frame (nlight x 3)',
),
StructFieldDecl(
name='light_poscom0',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='global position rel. to sub-com in qpos0 (nlight x 3)',
),
StructFieldDecl(
name='light_pos0',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='global position rel. to body in qpos0 (nlight x 3)',
),
StructFieldDecl(
name='light_dir0',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='global direction in qpos0 (nlight x 3)',
),
StructFieldDecl(
name='light_attenuation',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='OpenGL attenuation (quadratic model) (nlight x 3)',
),
StructFieldDecl(
name='light_cutoff',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='OpenGL cutoff (nlight x 1)',
),
StructFieldDecl(
name='light_exponent',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='OpenGL exponent (nlight x 1)',
),
StructFieldDecl(
name='light_ambient',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='ambient rgb (alpha=1) (nlight x 3)',
),
StructFieldDecl(
name='light_diffuse',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='diffuse rgb (alpha=1) (nlight x 3)',
),
StructFieldDecl(
name='light_specular',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='specular rgb (alpha=1) (nlight x 3)',
),
StructFieldDecl(
name='mesh_vertadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='first vertex address (nmesh x 1)',
),
StructFieldDecl(
name='mesh_vertnum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of vertices (nmesh x 1)',
),
StructFieldDecl(
name='mesh_faceadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='first face address (nmesh x 1)',
),
StructFieldDecl(
name='mesh_facenum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of faces (nmesh x 1)',
),
StructFieldDecl(
name='mesh_bvhadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='address of bvh root (nmesh x 1)',
),
StructFieldDecl(
name='mesh_bvhnum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of bvh (nmesh x 1)',
),
StructFieldDecl(
name='mesh_normaladr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='first normal address (nmesh x 1)',
),
StructFieldDecl(
name='mesh_normalnum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of normals (nmesh x 1)',
),
StructFieldDecl(
name='mesh_texcoordadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='texcoord data address; -1: no texcoord (nmesh x 1)',
),
StructFieldDecl(
name='mesh_texcoordnum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of texcoord (nmesh x 1)',
),
StructFieldDecl(
name='mesh_graphadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='graph data address; -1: no graph (nmesh x 1)',
),
StructFieldDecl(
name='mesh_pos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='translation applied to asset vertices (nmesh x 3)',
),
StructFieldDecl(
name='mesh_quat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='rotation applied to asset vertices (nmesh x 4)',
),
StructFieldDecl(
name='mesh_vert',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='vertex positions for all meshes (nmeshvert x 3)',
),
StructFieldDecl(
name='mesh_normal',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='normals for all meshes (nmeshnormal x 3)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='mesh_texcoord',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='vertex texcoords for all meshes (nmeshtexcoord x 2)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='mesh_face',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='vertex face data (nmeshface x 3)',
),
StructFieldDecl(
name='mesh_facenormal',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='normal face data (nmeshface x 3)',
),
StructFieldDecl(
name='mesh_facetexcoord',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='texture face data (nmeshface x 3)',
),
StructFieldDecl(
name='mesh_graph',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='convex graph data (nmeshgraph x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='skin_matid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='skin material id; -1: none (nskin x 1)',
),
StructFieldDecl(
name='skin_group',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='group for visibility (nskin x 1)',
),
StructFieldDecl(
name='skin_rgba',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='skin rgba (nskin x 4)',
),
StructFieldDecl(
name='skin_inflate',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='inflate skin in normal direction (nskin x 1)',
),
StructFieldDecl(
name='skin_vertadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='first vertex address (nskin x 1)',
),
StructFieldDecl(
name='skin_vertnum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of vertices (nskin x 1)',
),
StructFieldDecl(
name='skin_texcoordadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='texcoord data address; -1: no texcoord (nskin x 1)',
),
StructFieldDecl(
name='skin_faceadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='first face address (nskin x 1)',
),
StructFieldDecl(
name='skin_facenum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of faces (nskin x 1)',
),
StructFieldDecl(
name='skin_boneadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='first bone in skin (nskin x 1)',
),
StructFieldDecl(
name='skin_bonenum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of bones in skin (nskin x 1)',
),
StructFieldDecl(
name='skin_vert',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='vertex positions for all skin meshes (nskinvert x 3)',
),
StructFieldDecl(
name='skin_texcoord',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='vertex texcoords for all skin meshes (nskintexvert x 2)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='skin_face',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='triangle faces for all skin meshes (nskinface x 3)',
),
StructFieldDecl(
name='skin_bonevertadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='first vertex in each bone (nskinbone x 1)',
),
StructFieldDecl(
name='skin_bonevertnum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of vertices in each bone (nskinbone x 1)',
),
StructFieldDecl(
name='skin_bonebindpos',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='bind pos of each bone (nskinbone x 3)',
),
StructFieldDecl(
name='skin_bonebindquat',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='bind quat of each bone (nskinbone x 4)',
),
StructFieldDecl(
name='skin_bonebodyid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='body id of each bone (nskinbone x 1)',
),
StructFieldDecl(
name='skin_bonevertid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='mesh ids of vertices in each bone (nskinbonevert x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='skin_bonevertweight',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='weights of vertices in each bone (nskinbonevert x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='hfield_size',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='(x, y, z_top, z_bottom) (nhfield x 4)',
),
StructFieldDecl(
name='hfield_nrow',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of rows in grid (nhfield x 1)',
),
StructFieldDecl(
name='hfield_ncol',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of columns in grid (nhfield x 1)',
),
StructFieldDecl(
name='hfield_adr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='address in hfield_data (nhfield x 1)',
),
StructFieldDecl(
name='hfield_data',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='elevation data (nhfielddata x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='tex_type',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='texture type (mjtTexture) (ntex x 1)',
),
StructFieldDecl(
name='tex_height',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of rows in texture image (ntex x 1)',
),
StructFieldDecl(
name='tex_width',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of columns in texture image (ntex x 1)',
),
StructFieldDecl(
name='tex_adr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='address in rgb (ntex x 1)',
),
StructFieldDecl(
name='tex_rgb',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='rgb (alpha = 1) (ntexdata x 1)',
),
StructFieldDecl(
name='mat_texid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='texture id; -1: none (nmat x 1)',
),
StructFieldDecl(
name='mat_texuniform',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='make texture cube uniform (nmat x 1)',
),
StructFieldDecl(
name='mat_texrepeat',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='texture repetition for 2d mapping (nmat x 2)',
),
StructFieldDecl(
name='mat_emission',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='emission (x rgb) (nmat x 1)',
),
StructFieldDecl(
name='mat_specular',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='specular (x white) (nmat x 1)',
),
StructFieldDecl(
name='mat_shininess',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='shininess coef (nmat x 1)',
),
StructFieldDecl(
name='mat_reflectance',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='reflectance (0: disable) (nmat x 1)',
),
StructFieldDecl(
name='mat_rgba',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='rgba (nmat x 4)',
),
StructFieldDecl(
name='pair_dim',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='contact dimensionality (npair x 1)',
),
StructFieldDecl(
name='pair_geom1',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='id of geom1 (npair x 1)',
),
StructFieldDecl(
name='pair_geom2',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='id of geom2 (npair x 1)',
),
StructFieldDecl(
name='pair_signature',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='(body1+1)<<16 + body2+1 (npair x 1)',
),
StructFieldDecl(
name='pair_solref',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='solver reference: contact normal (npair x mjNREF)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='pair_solreffriction',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='solver reference: contact friction (npair x mjNREF)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='pair_solimp',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='solver impedance: contact (npair x mjNIMP)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='pair_margin',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='detect contact if dist<margin (npair x 1)',
),
StructFieldDecl(
name='pair_gap',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='include in solver if dist<margin-gap (npair x 1)',
),
StructFieldDecl(
name='pair_friction',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='tangent1, 2, spin, roll1, 2 (npair x 5)',
),
StructFieldDecl(
name='exclude_signature',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='(body1+1)<<16 + body2+1 (nexclude x 1)',
),
StructFieldDecl(
name='eq_type',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='constraint type (mjtEq) (neq x 1)',
),
StructFieldDecl(
name='eq_obj1id',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='id of object 1 (neq x 1)',
),
StructFieldDecl(
name='eq_obj2id',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='id of object 2 (neq x 1)',
),
StructFieldDecl(
name='eq_active',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='enable/disable constraint (neq x 1)',
),
StructFieldDecl(
name='eq_solref',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='constraint solver reference (neq x mjNREF)',
),
StructFieldDecl(
name='eq_solimp',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='constraint solver impedance (neq x mjNIMP)',
),
StructFieldDecl(
name='eq_data',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='numeric data for constraint (neq x mjNEQDATA)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='tendon_adr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc="address of first object in tendon's path (ntendon x 1)",
),
StructFieldDecl(
name='tendon_num',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc="number of objects in tendon's path (ntendon x 1)",
),
StructFieldDecl(
name='tendon_matid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='material id for rendering (ntendon x 1)',
),
StructFieldDecl(
name='tendon_group',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='group for visibility (ntendon x 1)',
),
StructFieldDecl(
name='tendon_limited',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='does tendon have length limits (ntendon x 1)',
),
StructFieldDecl(
name='tendon_width',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='width for rendering (ntendon x 1)',
),
StructFieldDecl(
name='tendon_solref_lim',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='constraint solver reference: limit (ntendon x mjNREF)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='tendon_solimp_lim',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='constraint solver impedance: limit (ntendon x mjNIMP)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='tendon_solref_fri',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='constraint solver reference: friction (ntendon x mjNREF)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='tendon_solimp_fri',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='constraint solver impedance: friction (ntendon x mjNIMP)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='tendon_range',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='tendon length limits (ntendon x 2)',
),
StructFieldDecl(
name='tendon_margin',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='min distance for limit detection (ntendon x 1)',
),
StructFieldDecl(
name='tendon_stiffness',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='stiffness coefficient (ntendon x 1)',
),
StructFieldDecl(
name='tendon_damping',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='damping coefficient (ntendon x 1)',
),
StructFieldDecl(
name='tendon_frictionloss',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='loss due to friction (ntendon x 1)',
),
StructFieldDecl(
name='tendon_lengthspring',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='spring resting length range (ntendon x 2)',
),
StructFieldDecl(
name='tendon_length0',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='tendon length in qpos0 (ntendon x 1)',
),
StructFieldDecl(
name='tendon_invweight0',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='inv. weight in qpos0 (ntendon x 1)',
),
StructFieldDecl(
name='tendon_user',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='user data (ntendon x nuser_tendon)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='tendon_rgba',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='rgba when material is omitted (ntendon x 4)',
),
StructFieldDecl(
name='wrap_type',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='wrap object type (mjtWrap) (nwrap x 1)',
),
StructFieldDecl(
name='wrap_objid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='object id: geom, site, joint (nwrap x 1)',
),
StructFieldDecl(
name='wrap_prm',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='divisor, joint coef, or site id (nwrap x 1)',
),
StructFieldDecl(
name='actuator_trntype',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='transmission type (mjtTrn) (nu x 1)',
),
StructFieldDecl(
name='actuator_dyntype',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='dynamics type (mjtDyn) (nu x 1)',
),
StructFieldDecl(
name='actuator_gaintype',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='gain type (mjtGain) (nu x 1)',
),
StructFieldDecl(
name='actuator_biastype',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='bias type (mjtBias) (nu x 1)',
),
StructFieldDecl(
name='actuator_trnid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='transmission id: joint, tendon, site (nu x 2)',
),
StructFieldDecl(
name='actuator_actadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='first activation address; -1: stateless (nu x 1)',
),
StructFieldDecl(
name='actuator_actnum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of activation variables (nu x 1)',
),
StructFieldDecl(
name='actuator_group',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='group for visibility (nu x 1)',
),
StructFieldDecl(
name='actuator_ctrllimited',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='is control limited (nu x 1)',
),
StructFieldDecl(
name='actuator_forcelimited',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='is force limited (nu x 1)',
),
StructFieldDecl(
name='actuator_actlimited',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='is activation limited (nu x 1)',
),
StructFieldDecl(
name='actuator_dynprm',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='dynamics parameters (nu x mjNDYN)',
),
StructFieldDecl(
name='actuator_gainprm',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='gain parameters (nu x mjNGAIN)',
),
StructFieldDecl(
name='actuator_biasprm',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='bias parameters (nu x mjNBIAS)',
),
StructFieldDecl(
name='actuator_actearly',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='step activation before force (nu x 1)',
),
StructFieldDecl(
name='actuator_ctrlrange',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='range of controls (nu x 2)',
),
StructFieldDecl(
name='actuator_forcerange',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='range of forces (nu x 2)',
),
StructFieldDecl(
name='actuator_actrange',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='range of activations (nu x 2)',
),
StructFieldDecl(
name='actuator_gear',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='scale length and transmitted force (nu x 6)',
),
StructFieldDecl(
name='actuator_cranklength',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='crank length for slider-crank (nu x 1)',
),
StructFieldDecl(
name='actuator_acc0',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='acceleration from unit force in qpos0 (nu x 1)',
),
StructFieldDecl(
name='actuator_length0',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='actuator length in qpos0 (nu x 1)',
),
StructFieldDecl(
name='actuator_lengthrange',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='feasible actuator length range (nu x 2)',
),
StructFieldDecl(
name='actuator_user',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='user data (nu x nuser_actuator)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='actuator_plugin',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='plugin instance id; -1: not a plugin (nu x 1)',
),
StructFieldDecl(
name='sensor_type',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='sensor type (mjtSensor) (nsensor x 1)',
),
StructFieldDecl(
name='sensor_datatype',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='numeric data type (mjtDataType) (nsensor x 1)',
),
StructFieldDecl(
name='sensor_needstage',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='required compute stage (mjtStage) (nsensor x 1)',
),
StructFieldDecl(
name='sensor_objtype',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='type of sensorized object (mjtObj) (nsensor x 1)',
),
StructFieldDecl(
name='sensor_objid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='id of sensorized object (nsensor x 1)',
),
StructFieldDecl(
name='sensor_reftype',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='type of reference frame (mjtObj) (nsensor x 1)',
),
StructFieldDecl(
name='sensor_refid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='id of reference frame; -1: global frame (nsensor x 1)',
),
StructFieldDecl(
name='sensor_dim',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of scalar outputs (nsensor x 1)',
),
StructFieldDecl(
name='sensor_adr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='address in sensor array (nsensor x 1)',
),
StructFieldDecl(
name='sensor_cutoff',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='cutoff for real and positive; 0: ignore (nsensor x 1)',
),
StructFieldDecl(
name='sensor_noise',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='noise standard deviation (nsensor x 1)',
),
StructFieldDecl(
name='sensor_user',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='user data (nsensor x nuser_sensor)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='sensor_plugin',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='plugin instance id; -1: not a plugin (nsensor x 1)',
),
StructFieldDecl(
name='plugin',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='globally registered plugin slot number (nplugin x 1)',
),
StructFieldDecl(
name='plugin_stateadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='address in the plugin state array (nplugin x 1)',
),
StructFieldDecl(
name='plugin_statenum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of states in the plugin instance (nplugin x 1)',
),
StructFieldDecl(
name='plugin_attr',
type=PointerType(
inner_type=ValueType(name='char'),
),
doc='config attributes of plugin instances (npluginattr x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='plugin_attradr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc="address to each instance's config attrib (nplugin x 1)",
),
StructFieldDecl(
name='numeric_adr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='address of field in numeric_data (nnumeric x 1)',
),
StructFieldDecl(
name='numeric_size',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='size of numeric field (nnumeric x 1)',
),
StructFieldDecl(
name='numeric_data',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='array of all numeric fields (nnumericdata x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='text_adr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='address of text in text_data (ntext x 1)',
),
StructFieldDecl(
name='text_size',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='size of text field (strlen+1) (ntext x 1)',
),
StructFieldDecl(
name='text_data',
type=PointerType(
inner_type=ValueType(name='char'),
),
doc='array of all text fields (0-terminated) (ntextdata x 1)',
),
StructFieldDecl(
name='tuple_adr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='address of first object in tuple data arrays (ntuple x 1)',
),
StructFieldDecl(
name='tuple_size',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of objects in tuple (ntuple x 1)',
),
StructFieldDecl(
name='tuple_objtype',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='array of object types in all tuples (ntupledata x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='tuple_objid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='array of object ids in all tuples (ntupledata x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='tuple_objprm',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='array of object params in all tuples (ntupledata x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='key_time',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='key time (nkey x 1)',
),
StructFieldDecl(
name='key_qpos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='key position (nkey x nq)',
),
StructFieldDecl(
name='key_qvel',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='key velocity (nkey x nv)',
),
StructFieldDecl(
name='key_act',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='key activation (nkey x na)',
),
StructFieldDecl(
name='key_mpos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='key mocap position (nkey x 3*nmocap)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='key_mquat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='key mocap quaternion (nkey x 4*nmocap)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='key_ctrl',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='key control (nkey x nu)',
),
StructFieldDecl(
name='name_bodyadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='body name pointers (nbody x 1)',
),
StructFieldDecl(
name='name_jntadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='joint name pointers (njnt x 1)',
),
StructFieldDecl(
name='name_geomadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='geom name pointers (ngeom x 1)',
),
StructFieldDecl(
name='name_siteadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='site name pointers (nsite x 1)',
),
StructFieldDecl(
name='name_camadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='camera name pointers (ncam x 1)',
),
StructFieldDecl(
name='name_lightadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='light name pointers (nlight x 1)',
),
StructFieldDecl(
name='name_meshadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='mesh name pointers (nmesh x 1)',
),
StructFieldDecl(
name='name_skinadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='skin name pointers (nskin x 1)',
),
StructFieldDecl(
name='name_hfieldadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='hfield name pointers (nhfield x 1)',
),
StructFieldDecl(
name='name_texadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='texture name pointers (ntex x 1)',
),
StructFieldDecl(
name='name_matadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='material name pointers (nmat x 1)',
),
StructFieldDecl(
name='name_pairadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='geom pair name pointers (npair x 1)',
),
StructFieldDecl(
name='name_excludeadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='exclude name pointers (nexclude x 1)',
),
StructFieldDecl(
name='name_eqadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='equality constraint name pointers (neq x 1)',
),
StructFieldDecl(
name='name_tendonadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='tendon name pointers (ntendon x 1)',
),
StructFieldDecl(
name='name_actuatoradr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='actuator name pointers (nu x 1)',
),
StructFieldDecl(
name='name_sensoradr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='sensor name pointers (nsensor x 1)',
),
StructFieldDecl(
name='name_numericadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='numeric name pointers (nnumeric x 1)',
),
StructFieldDecl(
name='name_textadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='text name pointers (ntext x 1)',
),
StructFieldDecl(
name='name_tupleadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='tuple name pointers (ntuple x 1)',
),
StructFieldDecl(
name='name_keyadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='keyframe name pointers (nkey x 1)',
),
StructFieldDecl(
name='name_pluginadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='plugin instance name pointers (nplugin x 1)',
),
StructFieldDecl(
name='names',
type=PointerType(
inner_type=ValueType(name='char'),
),
doc='names of all objects, 0-terminated (nnames x 1)',
),
StructFieldDecl(
name='names_map',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='internal hash map of names (nnames_map x 1)', # pylint: disable=line-too-long
),
),
)),
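# A minimal sketch of consuming the declaration that closes above (mjModel's
# field table), written as comments so the enclosing tuple literal stays
# valid. It assumes this tuple of (name, StructDecl) pairs is exposed as a
# mapping named STRUCTS; that binding name is an assumption, not confirmed
# by this excerpt:
#   model_decl = dict(STRUCTS)['mjModel']
#   pointer_fields = [f.name for f in model_decl.fields
#                     if isinstance(f.type, PointerType)]
#   docs = {f.name: f.doc for f in model_decl.fields}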
('mjContact',
StructDecl(
name='mjContact',
declname='struct mjContact_',
fields=(
StructFieldDecl(
name='dist',
type=ValueType(name='mjtNum'),
doc='distance between nearest points; neg: penetration',
),
StructFieldDecl(
name='pos',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
doc='position of contact point: midpoint between geoms',
),
StructFieldDecl(
name='frame',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(9,),
),
doc='normal is in [0-2]',
),
StructFieldDecl(
name='includemargin',
type=ValueType(name='mjtNum'),
doc='include if dist<includemargin=margin-gap',
),
StructFieldDecl(
name='friction',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(5,),
),
doc='tangent1, 2, spin, roll1, 2',
),
StructFieldDecl(
name='solref',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(2,),
),
doc='constraint solver reference, normal direction',
),
StructFieldDecl(
name='solreffriction',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(2,),
),
doc='constraint solver reference, friction directions',
),
StructFieldDecl(
name='solimp',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(5,),
),
doc='constraint solver impedance',
),
StructFieldDecl(
name='mu',
type=ValueType(name='mjtNum'),
doc='friction of regularized cone, set by mj_makeConstraint',
),
StructFieldDecl(
name='H',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(36,),
),
doc='cone Hessian, set by mj_updateConstraint',
),
StructFieldDecl(
name='dim',
type=ValueType(name='int'),
doc='contact space dimensionality: 1, 3, 4 or 6',
),
StructFieldDecl(
name='geom1',
type=ValueType(name='int'),
doc='id of geom 1',
),
StructFieldDecl(
name='geom2',
type=ValueType(name='int'),
doc='id of geom 2',
),
StructFieldDecl(
name='exclude',
type=ValueType(name='int'),
doc='0: include, 1: in gap, 2: fused, 3: no dofs',
),
StructFieldDecl(
name='efc_address',
type=ValueType(name='int'),
doc='address in efc; -1: not included',
),
),
)),
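# Illustrative (assumed) use of the mjContact layout above: frame stores the
# contact normal in elements [0-2], pointing from geom1 toward geom2, so for
# a ctypes- or NumPy-backed contact `con` (hypothetical variable):
#   normal = con.frame[0:3]   # unit contact normal
#   depth = -con.dist if con.dist < 0 else 0.0  # penetration depth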
('mjWarningStat',
StructDecl(
name='mjWarningStat',
declname='struct mjWarningStat_',
fields=(
StructFieldDecl(
name='lastinfo',
type=ValueType(name='int'),
doc='info from last warning',
),
StructFieldDecl(
name='number',
type=ValueType(name='int'),
doc='how many times was warning raised',
),
),
)),
('mjTimerStat',
StructDecl(
name='mjTimerStat',
declname='struct mjTimerStat_',
fields=(
StructFieldDecl(
name='duration',
type=ValueType(name='mjtNum'),
doc='cumulative duration',
),
StructFieldDecl(
name='number',
type=ValueType(name='int'),
doc='how many times was timer called',
),
),
)),
('mjSolverStat',
StructDecl(
name='mjSolverStat',
declname='struct mjSolverStat_',
fields=(
StructFieldDecl(
name='improvement',
type=ValueType(name='mjtNum'),
doc='cost reduction, scaled by 1/trace(M(qpos0))',
),
StructFieldDecl(
name='gradient',
type=ValueType(name='mjtNum'),
doc='gradient norm (primal only, scaled)',
),
StructFieldDecl(
name='lineslope',
type=ValueType(name='mjtNum'),
doc='slope in linesearch',
),
StructFieldDecl(
name='nactive',
type=ValueType(name='int'),
doc='number of active constraints',
),
StructFieldDecl(
name='nchange',
type=ValueType(name='int'),
doc='number of constraint state changes',
),
StructFieldDecl(
name='neval',
type=ValueType(name='int'),
doc='number of cost evaluations in line search',
),
StructFieldDecl(
name='nupdate',
type=ValueType(name='int'),
doc='number of Cholesky updates in line search',
),
),
)),
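# Note: mjWarningStat, mjTimerStat and mjSolverStat above are embedded by
# value in mjData's fixed-size arrays (warning[8], timer[13], solver[4000]);
# see the mjData declaration below.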
('mjData',
StructDecl(
name='mjData',
declname='struct mjData_',
fields=(
StructFieldDecl(
name='narena',
type=ValueType(name='size_t'),
doc='size of the arena in bytes (inclusive of the stack)',
),
StructFieldDecl(
name='nbuffer',
type=ValueType(name='size_t'),
doc='size of main buffer in bytes',
),
StructFieldDecl(
name='nplugin',
type=ValueType(name='int'),
doc='number of plugin instances',
),
StructFieldDecl(
name='pstack',
type=ValueType(name='size_t'),
doc='first available mjtNum address in stack',
),
StructFieldDecl(
name='pbase',
type=ValueType(name='size_t'),
doc='value of pstack when mj_markStack was last called',
),
StructFieldDecl(
name='parena',
type=ValueType(name='size_t'),
doc='first available byte in arena',
),
StructFieldDecl(
name='maxuse_stack',
type=ValueType(name='size_t'),
doc='maximum stack allocation',
),
StructFieldDecl(
name='maxuse_arena',
type=ValueType(name='size_t'),
doc='maximum arena allocation',
),
StructFieldDecl(
name='maxuse_con',
type=ValueType(name='int'),
doc='maximum number of contacts',
),
StructFieldDecl(
name='maxuse_efc',
type=ValueType(name='int'),
doc='maximum number of scalar constraints',
),
StructFieldDecl(
name='warning',
type=ArrayType(
inner_type=ValueType(name='mjWarningStat'),
extents=(8,),
),
doc='warning statistics',
),
StructFieldDecl(
name='timer',
type=ArrayType(
inner_type=ValueType(name='mjTimerStat'),
extents=(13,),
),
doc='timer statistics',
),
StructFieldDecl(
name='solver',
type=ArrayType(
inner_type=ValueType(name='mjSolverStat'),
extents=(4000,),
),
doc='solver statistics per island, per iteration',
),
StructFieldDecl(
name='solver_nisland',
type=ValueType(name='int'),
doc='number of islands processed by solver',
),
StructFieldDecl(
name='solver_niter',
type=ArrayType(
inner_type=ValueType(name='int'),
extents=(20,),
),
doc='number of solver iterations, per island',
),
StructFieldDecl(
name='solver_nnz',
type=ArrayType(
inner_type=ValueType(name='int'),
extents=(20,),
),
doc='number of non-zeros in Hessian or efc_AR, per island',
),
StructFieldDecl(
name='solver_fwdinv',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(2,),
),
doc='forward-inverse comparison: qfrc, efc',
),
StructFieldDecl(
name='nbodypair_broad',
type=ValueType(name='int'),
doc='number of body pairs in collision according to the broad-phase', # pylint: disable=line-too-long
),
StructFieldDecl(
name='nbodypair_narrow',
type=ValueType(name='int'),
doc='number of body pairs actually in collision in the narrow-phase', # pylint: disable=line-too-long
),
StructFieldDecl(
name='ngeompair_mid',
type=ValueType(name='int'),
doc='number of geom pairs in collision according to the mid-phase', # pylint: disable=line-too-long
),
StructFieldDecl(
name='ngeompair_narrow',
type=ValueType(name='int'),
doc='number of geom pairs actually in collision in the narrow-phase', # pylint: disable=line-too-long
),
StructFieldDecl(
name='ne',
type=ValueType(name='int'),
doc='number of equality constraints',
),
StructFieldDecl(
name='nf',
type=ValueType(name='int'),
doc='number of friction constraints',
),
StructFieldDecl(
name='nl',
type=ValueType(name='int'),
doc='number of limit constraints',
),
StructFieldDecl(
name='nefc',
type=ValueType(name='int'),
doc='number of constraints',
),
StructFieldDecl(
name='nnzJ',
type=ValueType(name='int'),
doc='number of non-zeros in constraint Jacobian',
),
StructFieldDecl(
name='ncon',
type=ValueType(name='int'),
doc='number of detected contacts',
),
StructFieldDecl(
name='nisland',
type=ValueType(name='int'),
doc='number of detected constraint islands',
),
StructFieldDecl(
name='time',
type=ValueType(name='mjtNum'),
doc='simulation time',
),
StructFieldDecl(
name='energy',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(2,),
),
doc='potential, kinetic energy',
),
StructFieldDecl(
name='buffer',
type=PointerType(
inner_type=ValueType(name='void'),
),
doc='main buffer; all pointers point in it (nbuffer bytes)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='arena',
type=PointerType(
inner_type=ValueType(name='void'),
),
doc='arena+stack buffer (nstack*sizeof(mjtNum) bytes)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='qpos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='position (nq x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='qvel',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='velocity (nv x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='act',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='actuator activation (na x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='qacc_warmstart',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='acceleration used for warmstart (nv x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='plugin_state',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='plugin state (npluginstate x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='ctrl',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='control (nu x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='qfrc_applied',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='applied generalized force (nv x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='xfrc_applied',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='applied Cartesian force/torque (nbody x 6)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='mocap_pos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='positions of mocap bodies (nmocap x 3)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='mocap_quat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='orientations of mocap bodies (nmocap x 4)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='qacc',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='acceleration (nv x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='act_dot',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='time-derivative of actuator activation (na x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='userdata',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='user data, not touched by engine (nuserdata x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='sensordata',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='sensor data array (nsensordata x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='plugin',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='copy of m->plugin, required for deletion (nplugin x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='plugin_data',
type=PointerType(
inner_type=ValueType(name='uintptr_t'),
),
doc='pointer to plugin-managed data structure (nplugin x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='xpos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='Cartesian position of body frame (nbody x 3)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='xquat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='Cartesian orientation of body frame (nbody x 4)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='xmat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='Cartesian orientation of body frame (nbody x 9)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='xipos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='Cartesian position of body com (nbody x 3)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='ximat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='Cartesian orientation of body inertia (nbody x 9)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='xanchor',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='Cartesian position of joint anchor (njnt x 3)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='xaxis',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='Cartesian joint axis (njnt x 3)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='geom_xpos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='Cartesian geom position (ngeom x 3)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='geom_xmat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='Cartesian geom orientation (ngeom x 9)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='site_xpos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='Cartesian site position (nsite x 3)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='site_xmat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='Cartesian site orientation (nsite x 9)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='cam_xpos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='Cartesian camera position (ncam x 3)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='cam_xmat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='Cartesian camera orientation (ncam x 9)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='light_xpos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='Cartesian light position (nlight x 3)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='light_xdir',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='Cartesian light direction (nlight x 3)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='subtree_com',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='center of mass of each subtree (nbody x 3)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='cdof',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='com-based motion axis of each dof (nv x 6)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='cinert',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='com-based body inertia and mass (nbody x 10)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='ten_wrapadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc="start address of tendon's path (ntendon x 1)", # pylint: disable=line-too-long
),
StructFieldDecl(
name='ten_wrapnum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of wrap points in path (ntendon x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='ten_J_rownnz',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of non-zeros in Jacobian row (ntendon x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='ten_J_rowadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='row start address in colind array (ntendon x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='ten_J_colind',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='column indices in sparse Jacobian (ntendon x nv)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='ten_length',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='tendon lengths (ntendon x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='ten_J',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='tendon Jacobian (ntendon x nv)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='wrap_obj',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='geom id; -1: site; -2: pulley (nwrap*2 x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='wrap_xpos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='Cartesian 3D points in all path (nwrap*2 x 3)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='actuator_length',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='actuator lengths (nu x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='actuator_moment',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='actuator moments (nu x nv)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='crb',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='com-based composite inertia and mass (nbody x 10)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='qM',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='total inertia (sparse) (nM x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='qLD',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc="L'*D*L factorization of M (sparse) (nM x 1)", # pylint: disable=line-too-long
),
StructFieldDecl(
name='qLDiagInv',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='1/diag(D) (nv x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='qLDiagSqrtInv',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='1/sqrt(diag(D)) (nv x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='bvh_active',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='volume has been added to collisions (nbvh x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='ten_velocity',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='tendon velocities (ntendon x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='actuator_velocity',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='actuator velocities (nu x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='cvel',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='com-based velocity [3D rot; 3D tran] (nbody x 6)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='cdof_dot',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='time-derivative of cdof (nv x 6)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='qfrc_bias',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='C(qpos,qvel) (nv x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='qfrc_passive',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='passive force (nv x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='subtree_linvel',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='linear velocity of subtree com (nbody x 3)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='subtree_angmom',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='angular momentum about subtree com (nbody x 3)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='qH',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc="L'*D*L factorization of modified M (nM x 1)", # pylint: disable=line-too-long
),
StructFieldDecl(
name='qHDiagInv',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='1/diag(D) of modified M (nv x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='D_rownnz',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='non-zeros in each row (nv x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='D_rowadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='address of each row in D_colind (nv x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='D_colind',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='column indices of non-zeros (nD x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='B_rownnz',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='non-zeros in each row (nbody x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='B_rowadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='address of each row in B_colind (nbody x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='B_colind',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='column indices of non-zeros (nB x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='qDeriv',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='d (passive + actuator - bias) / d qvel (nD x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='qLU',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='sparse LU of (qM - dt*qDeriv) (nD x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='actuator_force',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='actuator force in actuation space (nu x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='qfrc_actuator',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='actuator force (nv x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='qfrc_smooth',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='net unconstrained force (nv x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='qacc_smooth',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='unconstrained acceleration (nv x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='qfrc_constraint',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='constraint force (nv x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='qfrc_inverse',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc="net external force; should equal: (nv x 1)qfrc_applied + J'*xfrc_applied + qfrc_actuator", # pylint: disable=line-too-long
),
StructFieldDecl(
name='cacc',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='com-based acceleration (nbody x 6)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='cfrc_int',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='com-based interaction force with parent (nbody x 6)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='cfrc_ext',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='com-based external force on body (nbody x 6)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='contact',
type=PointerType(
inner_type=ValueType(name='mjContact'),
),
doc='list of all detected contacts (ncon x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_type',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='constraint type (mjtConstraint) (nefc x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_id',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='id of object of specified type (nefc x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_J_rownnz',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of non-zeros in constraint Jacobian row (nefc x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_J_rowadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='row start address in colind array (nefc x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_J_rowsuper',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of subsequent rows in supernode (nefc x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_J_colind',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='column indices in constraint Jacobian (nnzJ x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_JT_rownnz',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of non-zeros in constraint Jacobian row T (nv x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_JT_rowadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='row start address in colind array T (nv x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_JT_rowsuper',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of subsequent rows in supernode T (nv x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_JT_colind',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='column indices in constraint Jacobian T (nnzJ x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_J',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='constraint Jacobian (nnzJ x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_JT',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='constraint Jacobian transposed (nnzJ x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_pos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='constraint position (equality, contact) (nefc x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_margin',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='inclusion margin (contact) (nefc x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_frictionloss',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='frictionloss (friction) (nefc x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_diagApprox',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='approximation to diagonal of A (nefc x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_KBIP',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc="stiffness, damping, impedance, imp' (nefc x 4)", # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_D',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='constraint mass (nefc x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_R',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='inverse constraint mass (nefc x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='tendon_efcadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='first efc address involving tendon; -1: none (ntendon x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='dof_island',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='island id of this dof; -1: none (nv x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='island_dofnum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of dofs in island (nisland x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='island_dofadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='start address in island_dofind (nisland x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='island_dofind',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='island dof indices; -1: none (nv x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='dof_islandind',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='dof island indices; -1: none (nv x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_island',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='island id of this constraint (nefc x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='island_efcnum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of constraints in island (nisland x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='island_efcadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='start address in island_efcind (nisland x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='island_efcind',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='island constraint indices (nefc x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_AR_rownnz',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of non-zeros in AR (nefc x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_AR_rowadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='row start address in colind array (nefc x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_AR_colind',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='column indices in sparse AR (nefc x nefc)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_AR',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc="J*inv(M)*J' + R (nefc x nefc)", # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_vel',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='velocity in constraint space: J*qvel (nefc x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_aref',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='reference pseudo-acceleration (nefc x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_b',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='linear cost term: J*qacc_smooth - aref (nefc x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_force',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='constraint force in constraint space (nefc x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='efc_state',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='constraint state (mjtConstraintState) (nefc x 1)', # pylint: disable=line-too-long
),
StructFieldDecl(
name='threadpool',
type=ValueType(name='uintptr_t'),
doc='ThreadPool for multithreaded operations',
),
),
)),
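# Sketch of building a field-name -> doc lookup for mjData from the metadata
# above, under the same assumed STRUCTS binding as in the mjModel example:
#   data_decl = dict(STRUCTS)['mjData']
#   docs = {f.name: f.doc for f in data_decl.fields}
#   docs['qpos']  # -> 'position (nq x 1)'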
('mjvPerturb',
StructDecl(
name='mjvPerturb',
declname='struct mjvPerturb_',
fields=(
StructFieldDecl(
name='select',
type=ValueType(name='int'),
doc='selected body id; non-positive: none',
),
StructFieldDecl(
name='skinselect',
type=ValueType(name='int'),
doc='selected skin id; negative: none',
),
StructFieldDecl(
name='active',
type=ValueType(name='int'),
doc='perturbation bitmask (mjtPertBit)',
),
StructFieldDecl(
name='active2',
type=ValueType(name='int'),
doc='secondary perturbation bitmask (mjtPertBit)',
),
StructFieldDecl(
name='refpos',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
doc='reference position for selected object',
),
StructFieldDecl(
name='refquat',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(4,),
),
doc='reference orientation for selected object',
),
StructFieldDecl(
name='refselpos',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
doc='reference position for selection point',
),
StructFieldDecl(
name='localpos',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
doc='selection point in object coordinates',
),
StructFieldDecl(
name='localmass',
type=ValueType(name='mjtNum'),
doc='spatial inertia at selection point',
),
StructFieldDecl(
name='scale',
type=ValueType(name='mjtNum'),
doc='relative mouse motion-to-space scaling (set by initPerturb)', # pylint: disable=line-too-long
),
),
)),
('mjvCamera',
StructDecl(
name='mjvCamera',
declname='struct mjvCamera_',
fields=(
StructFieldDecl(
name='type',
type=ValueType(name='int'),
doc='camera type (mjtCamera)',
),
StructFieldDecl(
name='fixedcamid',
type=ValueType(name='int'),
doc='fixed camera id',
),
StructFieldDecl(
name='trackbodyid',
type=ValueType(name='int'),
doc='body id to track',
),
StructFieldDecl(
name='lookat',
type=ArrayType(
inner_type=ValueType(name='mjtNum'),
extents=(3,),
),
doc='lookat point',
),
StructFieldDecl(
name='distance',
type=ValueType(name='mjtNum'),
doc='distance to lookat point or tracked body',
),
StructFieldDecl(
name='azimuth',
type=ValueType(name='mjtNum'),
doc='camera azimuth (deg)',
),
StructFieldDecl(
name='elevation',
type=ValueType(name='mjtNum'),
doc='camera elevation (deg)',
),
),
)),
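# Hedged usage sketch for mjvCamera: a free camera is typically configured by
# writing lookat/distance/azimuth/elevation directly; attribute names below
# are assumed to mirror the fields declared above:
#   cam.lookat[:] = (0.0, 0.0, 0.5)
#   cam.distance, cam.azimuth, cam.elevation = 3.0, 90.0, -20.0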
('mjvGLCamera',
StructDecl(
name='mjvGLCamera',
declname='struct mjvGLCamera_',
fields=(
StructFieldDecl(
name='pos',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='position',
),
StructFieldDecl(
name='forward',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='forward direction',
),
StructFieldDecl(
name='up',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='up direction',
),
StructFieldDecl(
name='frustum_center',
type=ValueType(name='float'),
doc='hor. center (left,right set to match aspect)',
),
StructFieldDecl(
name='frustum_bottom',
type=ValueType(name='float'),
doc='bottom',
),
StructFieldDecl(
name='frustum_top',
type=ValueType(name='float'),
doc='top',
),
StructFieldDecl(
name='frustum_near',
type=ValueType(name='float'),
doc='near',
),
StructFieldDecl(
name='frustum_far',
type=ValueType(name='float'),
doc='far',
),
),
)),
('mjvGeom',
StructDecl(
name='mjvGeom',
declname='struct mjvGeom_',
fields=(
StructFieldDecl(
name='type',
type=ValueType(name='int'),
doc='geom type (mjtGeom)',
),
StructFieldDecl(
name='dataid',
type=ValueType(name='int'),
doc='mesh, hfield or plane id; -1: none',
),
StructFieldDecl(
name='objtype',
type=ValueType(name='int'),
doc='mujoco object type; mjOBJ_UNKNOWN for decor',
),
StructFieldDecl(
name='objid',
type=ValueType(name='int'),
doc='mujoco object id; -1 for decor',
),
StructFieldDecl(
name='category',
type=ValueType(name='int'),
doc='visual category',
),
StructFieldDecl(
name='texid',
type=ValueType(name='int'),
doc='texture id; -1: no texture',
),
StructFieldDecl(
name='texuniform',
type=ValueType(name='int'),
doc='uniform cube mapping',
),
StructFieldDecl(
name='texcoord',
type=ValueType(name='int'),
doc='mesh geom has texture coordinates',
),
StructFieldDecl(
name='segid',
type=ValueType(name='int'),
doc='segmentation id; -1: not shown',
),
StructFieldDecl(
name='texrepeat',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(2,),
),
doc='texture repetition for 2D mapping',
),
StructFieldDecl(
name='size',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='size parameters',
),
StructFieldDecl(
name='pos',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='Cartesian position',
),
StructFieldDecl(
name='mat',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(9,),
),
doc='Cartesian orientation',
),
StructFieldDecl(
name='rgba',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='color and transparency',
),
StructFieldDecl(
name='emission',
type=ValueType(name='float'),
doc='emission coef',
),
StructFieldDecl(
name='specular',
type=ValueType(name='float'),
doc='specular coef',
),
StructFieldDecl(
name='shininess',
type=ValueType(name='float'),
doc='shininess coef',
),
StructFieldDecl(
name='reflectance',
type=ValueType(name='float'),
doc='reflectance coef',
),
StructFieldDecl(
name='label',
type=ArrayType(
inner_type=ValueType(name='char'),
extents=(100,),
),
doc='text label',
),
StructFieldDecl(
name='camdist',
type=ValueType(name='float'),
doc='distance to camera (used by sorter)',
),
StructFieldDecl(
name='modelrbound',
type=ValueType(name='float'),
doc='geom rbound from model, 0 if not model geom',
),
StructFieldDecl(
name='transparent',
type=ValueType(name='mjtByte'),
doc='treat geom as transparent',
),
),
)),
('mjvLight',
StructDecl(
name='mjvLight',
declname='struct mjvLight_',
fields=(
StructFieldDecl(
name='pos',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='position rel. to body frame',
),
StructFieldDecl(
name='dir',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='direction rel. to body frame',
),
StructFieldDecl(
name='attenuation',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='OpenGL attenuation (quadratic model)',
),
StructFieldDecl(
name='cutoff',
type=ValueType(name='float'),
doc='OpenGL cutoff',
),
StructFieldDecl(
name='exponent',
type=ValueType(name='float'),
doc='OpenGL exponent',
),
StructFieldDecl(
name='ambient',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='ambient rgb (alpha=1)',
),
StructFieldDecl(
name='diffuse',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='diffuse rgb (alpha=1)',
),
StructFieldDecl(
name='specular',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='specular rgb (alpha=1)',
),
StructFieldDecl(
name='headlight',
type=ValueType(name='mjtByte'),
doc='headlight',
),
StructFieldDecl(
name='directional',
type=ValueType(name='mjtByte'),
doc='directional light',
),
StructFieldDecl(
name='castshadow',
type=ValueType(name='mjtByte'),
doc='does light cast shadows',
),
),
)),
('mjvOption',
StructDecl(
name='mjvOption',
declname='struct mjvOption_',
fields=(
StructFieldDecl(
name='label',
type=ValueType(name='int'),
doc='what objects to label (mjtLabel)',
),
StructFieldDecl(
name='frame',
type=ValueType(name='int'),
doc='which frame to show (mjtFrame)',
),
StructFieldDecl(
name='geomgroup',
type=ArrayType(
inner_type=ValueType(name='mjtByte'),
extents=(6,),
),
doc='geom visualization by group',
),
StructFieldDecl(
name='sitegroup',
type=ArrayType(
inner_type=ValueType(name='mjtByte'),
extents=(6,),
),
doc='site visualization by group',
),
StructFieldDecl(
name='jointgroup',
type=ArrayType(
inner_type=ValueType(name='mjtByte'),
extents=(6,),
),
doc='joint visualization by group',
),
StructFieldDecl(
name='tendongroup',
type=ArrayType(
inner_type=ValueType(name='mjtByte'),
extents=(6,),
),
doc='tendon visualization by group',
),
StructFieldDecl(
name='actuatorgroup',
type=ArrayType(
inner_type=ValueType(name='mjtByte'),
extents=(6,),
),
doc='actuator visualization by group',
),
StructFieldDecl(
name='skingroup',
type=ArrayType(
inner_type=ValueType(name='mjtByte'),
extents=(6,),
),
doc='skin visualization by group',
),
StructFieldDecl(
name='flags',
type=ArrayType(
inner_type=ValueType(name='mjtByte'),
extents=(27,),
),
doc='visualization flags (indexed by mjtVisFlag)',
),
StructFieldDecl(
name='bvh_depth',
type=ValueType(name='int'),
doc='depth of the bounding volume hierarchy to be visualized',
),
),
)),
('mjvScene',
StructDecl(
name='mjvScene',
declname='struct mjvScene_',
fields=(
StructFieldDecl(
name='maxgeom',
type=ValueType(name='int'),
doc='size of allocated geom buffer',
),
StructFieldDecl(
name='ngeom',
type=ValueType(name='int'),
doc='number of geoms currently in buffer',
),
StructFieldDecl(
name='geoms',
type=PointerType(
inner_type=ValueType(name='mjvGeom'),
),
doc='buffer for geoms (ngeom)',
),
StructFieldDecl(
name='geomorder',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='buffer for ordering geoms by distance to camera (ngeom)',
),
StructFieldDecl(
name='nskin',
type=ValueType(name='int'),
doc='number of skins',
),
StructFieldDecl(
name='skinfacenum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of faces in skin (nskin)',
),
StructFieldDecl(
name='skinvertadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='address of skin vertices (nskin)',
),
StructFieldDecl(
name='skinvertnum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='number of vertices in skin (nskin)',
),
StructFieldDecl(
name='skinvert',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='skin vertex data (nskin)',
),
StructFieldDecl(
name='skinnormal',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='skin normal data (nskin)',
),
StructFieldDecl(
name='nlight',
type=ValueType(name='int'),
doc='number of lights currently in buffer',
),
StructFieldDecl(
name='lights',
type=ArrayType(
inner_type=ValueType(name='mjvLight'),
extents=(100,),
),
doc='buffer for lights (nlight)',
),
StructFieldDecl(
name='camera',
type=ArrayType(
inner_type=ValueType(name='mjvGLCamera'),
extents=(2,),
),
doc='left and right camera',
),
StructFieldDecl(
name='enabletransform',
type=ValueType(name='mjtByte'),
doc='enable model transformation',
),
StructFieldDecl(
name='translate',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='model translation',
),
StructFieldDecl(
name='rotate',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='model quaternion rotation',
),
StructFieldDecl(
name='scale',
type=ValueType(name='float'),
doc='model scaling',
),
StructFieldDecl(
name='stereo',
type=ValueType(name='int'),
doc='stereoscopic rendering (mjtStereo)',
),
StructFieldDecl(
name='flags',
type=ArrayType(
inner_type=ValueType(name='mjtByte'),
extents=(10,),
),
doc='rendering flags (indexed by mjtRndFlag)',
),
StructFieldDecl(
name='framewidth',
type=ValueType(name='int'),
doc='frame pixel width; 0: disable framing',
),
StructFieldDecl(
name='framergb',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='frame color',
),
),
)),
('mjvFigure',
StructDecl(
name='mjvFigure',
declname='struct mjvFigure_',
fields=(
StructFieldDecl(
name='flg_legend',
type=ValueType(name='int'),
doc='show legend',
),
StructFieldDecl(
name='flg_ticklabel',
type=ArrayType(
inner_type=ValueType(name='int'),
extents=(2,),
),
doc='show grid tick labels (x,y)',
),
StructFieldDecl(
name='flg_extend',
type=ValueType(name='int'),
doc='automatically extend axis ranges to fit data',
),
StructFieldDecl(
name='flg_barplot',
type=ValueType(name='int'),
doc='isolated line segments (i.e. GL_LINES)',
),
StructFieldDecl(
name='flg_selection',
type=ValueType(name='int'),
doc='vertical selection line',
),
StructFieldDecl(
name='flg_symmetric',
type=ValueType(name='int'),
doc='symmetric y-axis',
),
StructFieldDecl(
name='linewidth',
type=ValueType(name='float'),
doc='line width',
),
StructFieldDecl(
name='gridwidth',
type=ValueType(name='float'),
doc='grid line width',
),
StructFieldDecl(
name='gridsize',
type=ArrayType(
inner_type=ValueType(name='int'),
extents=(2,),
),
doc='number of grid points in (x,y)',
),
StructFieldDecl(
name='gridrgb',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='grid line rgb',
),
StructFieldDecl(
name='figurergba',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='figure color and alpha',
),
StructFieldDecl(
name='panergba',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='pane color and alpha',
),
StructFieldDecl(
name='legendrgba',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='legend color and alpha',
),
StructFieldDecl(
name='textrgb',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='text color',
),
StructFieldDecl(
name='linergb',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(100, 3),
),
doc='line colors',
),
StructFieldDecl(
name='range',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(2, 2),
),
doc='axis ranges; (min>=max) automatic',
),
StructFieldDecl(
name='xformat',
type=ArrayType(
inner_type=ValueType(name='char'),
extents=(20,),
),
doc='x-tick label format for sprintf',
),
StructFieldDecl(
name='yformat',
type=ArrayType(
inner_type=ValueType(name='char'),
extents=(20,),
),
doc='y-tick label format for sprintf',
),
StructFieldDecl(
name='minwidth',
type=ArrayType(
inner_type=ValueType(name='char'),
extents=(20,),
),
doc='string used to determine min y-tick width',
),
StructFieldDecl(
name='title',
type=ArrayType(
inner_type=ValueType(name='char'),
extents=(1000,),
),
doc='figure title; subplots separated with 2+ spaces',
),
StructFieldDecl(
name='xlabel',
type=ArrayType(
inner_type=ValueType(name='char'),
extents=(100,),
),
doc='x-axis label',
),
StructFieldDecl(
name='linename',
type=ArrayType(
inner_type=ValueType(name='char'),
extents=(100, 100),
),
doc='line names for legend',
),
StructFieldDecl(
name='legendoffset',
type=ValueType(name='int'),
doc='number of lines to offset legend',
),
StructFieldDecl(
name='subplot',
type=ValueType(name='int'),
doc='selected subplot (for title rendering)',
),
StructFieldDecl(
name='highlight',
type=ArrayType(
inner_type=ValueType(name='int'),
extents=(2,),
),
doc='if point is in legend rect, highlight line',
),
StructFieldDecl(
name='highlightid',
type=ValueType(name='int'),
doc='if id>=0 and no point, highlight id',
),
StructFieldDecl(
name='selection',
type=ValueType(name='float'),
doc='selection line x-value',
),
StructFieldDecl(
name='linepnt',
type=ArrayType(
inner_type=ValueType(name='int'),
extents=(100,),
),
doc='number of points in line; (0) disable',
),
StructFieldDecl(
name='linedata',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(100, 2000),
),
doc='line data (x,y)',
),
StructFieldDecl(
name='xaxispixel',
type=ArrayType(
inner_type=ValueType(name='int'),
extents=(2,),
),
doc='range of x-axis in pixels',
),
StructFieldDecl(
name='yaxispixel',
type=ArrayType(
inner_type=ValueType(name='int'),
extents=(2,),
),
doc='range of y-axis in pixels',
),
StructFieldDecl(
name='xaxisdata',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(2,),
),
doc='range of x-axis in data units',
),
StructFieldDecl(
name='yaxisdata',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(2,),
),
doc='range of y-axis in data units',
),
),
)),
('mjvSceneState',
StructDecl(
name='mjvSceneState',
declname='struct mjvSceneState_',
fields=(
StructFieldDecl(
name='nbuffer',
type=ValueType(name='int'),
doc='size of the buffer in bytes',
),
StructFieldDecl(
name='buffer',
type=PointerType(
inner_type=ValueType(name='void'),
),
doc='heap-allocated memory for all arrays in this struct',
),
StructFieldDecl(
name='maxgeom',
type=ValueType(name='int'),
doc='maximum number of mjvGeom supported by this state object',
),
StructFieldDecl(
name='plugincache',
type=ValueType(name='mjvScene'),
doc='scratch space for vis geoms inserted by plugins',
),
StructFieldDecl(
name='model',
type=AnonymousStructDecl(
fields=(
StructFieldDecl(
name='nv',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='nu',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='na',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='nbody',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='nbvh',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='njnt',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='ngeom',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='nsite',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='ncam',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='nlight',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='nmesh',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='nskin',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='nskinvert',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='nskinface',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='nskinbone',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='nskinbonevert',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='nmat',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='neq',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='ntendon',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='ntree',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='nwrap',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='nsensor',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='nnames',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='nsensordata',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='opt',
type=ValueType(name='mjOption'),
doc='',
),
StructFieldDecl(
name='vis',
type=ValueType(name='mjVisual'),
doc='',
),
StructFieldDecl(
name='stat',
type=ValueType(name='mjStatistic'),
doc='',
),
StructFieldDecl(
name='body_parentid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='body_rootid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='body_weldid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='body_mocapid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='body_jntnum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='body_jntadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='body_dofnum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='body_dofadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='body_geomnum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='body_geomadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='body_iquat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='body_mass',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='body_inertia',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='body_bvhadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='body_bvhnum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='bvh_depth',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='bvh_child',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='bvh_geomid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='bvh_aabb',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='jnt_type',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='jnt_bodyid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='jnt_group',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='geom_type',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='geom_bodyid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='geom_contype',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='geom_conaffinity',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='geom_dataid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='geom_matid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='geom_group',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='geom_size',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='geom_aabb',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='geom_rbound',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='geom_rgba',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='',
),
StructFieldDecl(
name='site_type',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='site_bodyid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='site_matid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='site_group',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='site_size',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='site_rgba',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='',
),
StructFieldDecl(
name='cam_fovy',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='cam_ipd',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='light_directional',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='',
),
StructFieldDecl(
name='light_castshadow',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='',
),
StructFieldDecl(
name='light_active',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='',
),
StructFieldDecl(
name='light_attenuation',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='',
),
StructFieldDecl(
name='light_cutoff',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='',
),
StructFieldDecl(
name='light_exponent',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='',
),
StructFieldDecl(
name='light_ambient',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='',
),
StructFieldDecl(
name='light_diffuse',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='',
),
StructFieldDecl(
name='light_specular',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='',
),
StructFieldDecl(
name='mesh_bvhadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='mesh_bvhnum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='mesh_texcoordadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='mesh_graphadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='skin_matid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='skin_group',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='skin_rgba',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='',
),
StructFieldDecl(
name='skin_inflate',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='',
),
StructFieldDecl(
name='skin_vertadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='skin_vertnum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='skin_texcoordadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='skin_faceadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='skin_facenum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='skin_boneadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='skin_bonenum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='skin_vert',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='',
),
StructFieldDecl(
name='skin_face',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='skin_bonevertadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='skin_bonevertnum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='skin_bonebindpos',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='',
),
StructFieldDecl(
name='skin_bonebindquat',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='',
),
StructFieldDecl(
name='skin_bonebodyid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='skin_bonevertid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='skin_bonevertweight',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='',
),
StructFieldDecl(
name='mat_texid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='mat_texuniform',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='',
),
StructFieldDecl(
name='mat_texrepeat',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='',
),
StructFieldDecl(
name='mat_emission',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='',
),
StructFieldDecl(
name='mat_specular',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='',
),
StructFieldDecl(
name='mat_shininess',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='',
),
StructFieldDecl(
name='mat_reflectance',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='',
),
StructFieldDecl(
name='mat_rgba',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='',
),
StructFieldDecl(
name='eq_type',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='eq_obj1id',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='eq_obj2id',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='eq_active',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='',
),
StructFieldDecl(
name='eq_data',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='tendon_num',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='tendon_matid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='tendon_group',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='tendon_limited',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='',
),
StructFieldDecl(
name='tendon_width',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='tendon_range',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='tendon_stiffness',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='tendon_damping',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='tendon_frictionloss',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='tendon_lengthspring',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='tendon_rgba',
type=PointerType(
inner_type=ValueType(name='float'),
),
doc='',
),
StructFieldDecl(
name='actuator_trntype',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='actuator_dyntype',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='actuator_trnid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='actuator_actadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='actuator_actnum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='actuator_group',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='actuator_ctrllimited',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='',
),
StructFieldDecl(
name='actuator_actlimited',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='',
),
StructFieldDecl(
name='actuator_ctrlrange',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='actuator_actrange',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='actuator_cranklength',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='sensor_type',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='sensor_objid',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='sensor_adr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='name_bodyadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='name_jntadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='name_geomadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='name_siteadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='name_camadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='name_lightadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='name_eqadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='name_tendonadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='name_actuatoradr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='names',
type=PointerType(
inner_type=ValueType(name='char'),
),
doc='',
),
),
),
doc='',
),
StructFieldDecl(
name='data',
type=AnonymousStructDecl(
fields=(
StructFieldDecl(
name='warning',
type=ArrayType(
inner_type=ValueType(name='mjWarningStat'),
extents=(8,),
),
doc='',
),
StructFieldDecl(
name='nefc',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='ncon',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='nisland',
type=ValueType(name='int'),
doc='',
),
StructFieldDecl(
name='time',
type=ValueType(name='mjtNum'),
doc='',
),
StructFieldDecl(
name='act',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='ctrl',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='xfrc_applied',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='sensordata',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='xpos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='xquat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='xmat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='xipos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='ximat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='xanchor',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='xaxis',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='geom_xpos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='geom_xmat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='site_xpos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='site_xmat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='cam_xpos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='cam_xmat',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='light_xpos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='light_xdir',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='subtree_com',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='ten_wrapadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='ten_wrapnum',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='wrap_obj',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='ten_length',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='wrap_xpos',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
StructFieldDecl(
name='bvh_active',
type=PointerType(
inner_type=ValueType(name='mjtByte'),
),
doc='',
),
StructFieldDecl(
name='island_dofadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='island_dofind',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='dof_island',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='efc_island',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='tendon_efcadr',
type=PointerType(
inner_type=ValueType(name='int'),
),
doc='',
),
StructFieldDecl(
name='contact',
type=PointerType(
inner_type=ValueType(name='mjContact'),
),
doc='',
),
StructFieldDecl(
name='efc_force',
type=PointerType(
inner_type=ValueType(name='mjtNum'),
),
doc='',
),
),
),
doc='',
),
),
)),
('mjrRect',
StructDecl(
name='mjrRect',
declname='struct mjrRect_',
fields=(
StructFieldDecl(
name='left',
type=ValueType(name='int'),
doc='left (usually 0)',
),
StructFieldDecl(
name='bottom',
type=ValueType(name='int'),
doc='bottom (usually 0)',
),
StructFieldDecl(
name='width',
type=ValueType(name='int'),
doc='width (usually buffer width)',
),
StructFieldDecl(
name='height',
type=ValueType(name='int'),
doc='height (usually buffer height)',
),
),
)),
('mjrContext',
StructDecl(
name='mjrContext',
declname='struct mjrContext_',
fields=(
StructFieldDecl(
name='lineWidth',
type=ValueType(name='float'),
doc='line width for wireframe rendering',
),
StructFieldDecl(
name='shadowClip',
type=ValueType(name='float'),
doc='clipping radius for directional lights',
),
StructFieldDecl(
name='shadowScale',
type=ValueType(name='float'),
doc='fraction of light cutoff for spot lights',
),
StructFieldDecl(
name='fogStart',
type=ValueType(name='float'),
doc='fog start = stat.extent * vis.map.fogstart',
),
StructFieldDecl(
name='fogEnd',
type=ValueType(name='float'),
doc='fog end = stat.extent * vis.map.fogend',
),
StructFieldDecl(
name='fogRGBA',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(4,),
),
doc='fog rgba',
),
StructFieldDecl(
name='shadowSize',
type=ValueType(name='int'),
doc='size of shadow map texture',
),
StructFieldDecl(
name='offWidth',
type=ValueType(name='int'),
doc='width of offscreen buffer',
),
StructFieldDecl(
name='offHeight',
type=ValueType(name='int'),
doc='height of offscreen buffer',
),
StructFieldDecl(
name='offSamples',
type=ValueType(name='int'),
doc='number of offscreen buffer multisamples',
),
StructFieldDecl(
name='fontScale',
type=ValueType(name='int'),
doc='font scale',
),
StructFieldDecl(
name='auxWidth',
type=ArrayType(
inner_type=ValueType(name='int'),
extents=(10,),
),
doc='auxiliary buffer width',
),
StructFieldDecl(
name='auxHeight',
type=ArrayType(
inner_type=ValueType(name='int'),
extents=(10,),
),
doc='auxiliary buffer height',
),
StructFieldDecl(
name='auxSamples',
type=ArrayType(
inner_type=ValueType(name='int'),
extents=(10,),
),
doc='auxiliary buffer multisamples',
),
StructFieldDecl(
name='offFBO',
type=ValueType(name='unsigned int'),
doc='offscreen framebuffer object',
),
StructFieldDecl(
name='offFBO_r',
type=ValueType(name='unsigned int'),
doc='offscreen framebuffer for resolving multisamples',
),
StructFieldDecl(
name='offColor',
type=ValueType(name='unsigned int'),
doc='offscreen color buffer',
),
StructFieldDecl(
name='offColor_r',
type=ValueType(name='unsigned int'),
doc='offscreen color buffer for resolving multisamples',
),
StructFieldDecl(
name='offDepthStencil',
type=ValueType(name='unsigned int'),
doc='offscreen depth and stencil buffer',
),
StructFieldDecl(
name='offDepthStencil_r',
type=ValueType(name='unsigned int'),
doc='offscreen depth and stencil buffer for resolving multisamples', # pylint: disable=line-too-long
),
StructFieldDecl(
name='shadowFBO',
type=ValueType(name='unsigned int'),
doc='shadow map framebuffer object',
),
StructFieldDecl(
name='shadowTex',
type=ValueType(name='unsigned int'),
doc='shadow map texture',
),
StructFieldDecl(
name='auxFBO',
type=ArrayType(
inner_type=ValueType(name='unsigned int'),
extents=(10,),
),
doc='auxiliary framebuffer object',
),
StructFieldDecl(
name='auxFBO_r',
type=ArrayType(
inner_type=ValueType(name='unsigned int'),
extents=(10,),
),
doc='auxiliary framebuffer object for resolving',
),
StructFieldDecl(
name='auxColor',
type=ArrayType(
inner_type=ValueType(name='unsigned int'),
extents=(10,),
),
doc='auxiliary color buffer',
),
StructFieldDecl(
name='auxColor_r',
type=ArrayType(
inner_type=ValueType(name='unsigned int'),
extents=(10,),
),
doc='auxiliary color buffer for resolving',
),
StructFieldDecl(
name='ntexture',
type=ValueType(name='int'),
doc='number of allocated textures',
),
StructFieldDecl(
name='textureType',
type=ArrayType(
inner_type=ValueType(name='int'),
extents=(100,),
),
doc='type of texture (mjtTexture) (ntexture)',
),
StructFieldDecl(
name='texture',
type=ArrayType(
inner_type=ValueType(name='unsigned int'),
extents=(100,),
),
doc='texture names',
),
StructFieldDecl(
name='basePlane',
type=ValueType(name='unsigned int'),
doc='all planes from model',
),
StructFieldDecl(
name='baseMesh',
type=ValueType(name='unsigned int'),
doc='all meshes from model',
),
StructFieldDecl(
name='baseHField',
type=ValueType(name='unsigned int'),
doc='all hfields from model',
),
StructFieldDecl(
name='baseBuiltin',
type=ValueType(name='unsigned int'),
doc='all builtin geoms, with quality from model',
),
StructFieldDecl(
name='baseFontNormal',
type=ValueType(name='unsigned int'),
doc='normal font',
),
StructFieldDecl(
name='baseFontShadow',
type=ValueType(name='unsigned int'),
doc='shadow font',
),
StructFieldDecl(
name='baseFontBig',
type=ValueType(name='unsigned int'),
doc='big font',
),
StructFieldDecl(
name='rangePlane',
type=ValueType(name='int'),
doc='all planes from model',
),
StructFieldDecl(
name='rangeMesh',
type=ValueType(name='int'),
doc='all meshes from model',
),
StructFieldDecl(
name='rangeHField',
type=ValueType(name='int'),
doc='all hfields from model',
),
StructFieldDecl(
name='rangeBuiltin',
type=ValueType(name='int'),
doc='all builtin geoms, with quality from model',
),
StructFieldDecl(
name='rangeFont',
type=ValueType(name='int'),
doc='all characters in font',
),
StructFieldDecl(
name='nskin',
type=ValueType(name='int'),
doc='number of skins',
),
StructFieldDecl(
name='skinvertVBO',
type=PointerType(
inner_type=ValueType(name='unsigned int'),
),
doc='skin vertex position VBOs (nskin)',
),
StructFieldDecl(
name='skinnormalVBO',
type=PointerType(
inner_type=ValueType(name='unsigned int'),
),
doc='skin vertex normal VBOs (nskin)',
),
StructFieldDecl(
name='skintexcoordVBO',
type=PointerType(
inner_type=ValueType(name='unsigned int'),
),
doc='skin vertex texture coordinate VBOs (nskin)',
),
StructFieldDecl(
name='skinfaceVBO',
type=PointerType(
inner_type=ValueType(name='unsigned int'),
),
doc='skin face index VBOs (nskin)',
),
StructFieldDecl(
name='charWidth',
type=ArrayType(
inner_type=ValueType(name='int'),
extents=(127,),
),
doc='character widths: normal and shadow',
),
StructFieldDecl(
name='charWidthBig',
type=ArrayType(
inner_type=ValueType(name='int'),
extents=(127,),
),
doc='character widths: big',
),
StructFieldDecl(
name='charHeight',
type=ValueType(name='int'),
doc='character heights: normal and shadow',
),
StructFieldDecl(
name='charHeightBig',
type=ValueType(name='int'),
doc='character heights: big',
),
StructFieldDecl(
name='glInitialized',
type=ValueType(name='int'),
doc='is OpenGL initialized',
),
StructFieldDecl(
name='windowAvailable',
type=ValueType(name='int'),
doc='is default/window framebuffer available',
),
StructFieldDecl(
name='windowSamples',
type=ValueType(name='int'),
doc='number of samples for default/window framebuffer',
),
StructFieldDecl(
name='windowStereo',
type=ValueType(name='int'),
doc='is stereo available for default/window framebuffer',
),
StructFieldDecl(
name='windowDoublebuffer',
type=ValueType(name='int'),
doc='is default/window framebuffer double buffered',
),
StructFieldDecl(
name='currentBuffer',
type=ValueType(name='int'),
doc='currently active framebuffer: mjFB_WINDOW or mjFB_OFFSCREEN', # pylint: disable=line-too-long
),
StructFieldDecl(
name='readPixelFormat',
type=ValueType(name='int'),
doc='default color pixel format for mjr_readPixels',
),
),
)),
('mjThreadPool',
StructDecl(
name='mjThreadPool',
declname='struct mjThreadPool_',
fields=(
StructFieldDecl(
name='nworker',
type=ValueType(name='int'),
doc='number of workers in the pool',
),
),
)),
('mjTask',
StructDecl(
name='mjTask',
declname='struct mjTask_',
fields=(
StructFieldDecl(
name='func',
type=ValueType(name='mjfTask'),
doc='pointer to the function that implements the task',
),
StructFieldDecl(
name='args',
type=PointerType(
inner_type=ValueType(name='void'),
),
doc='arguments to func',
),
StructFieldDecl(
name='status',
type=ValueType(name='int', is_volatile=True),
doc='status of the task',
),
),
)),
('mjuiState',
StructDecl(
name='mjuiState',
declname='struct mjuiState_',
fields=(
StructFieldDecl(
name='nrect',
type=ValueType(name='int'),
doc='number of rectangles used',
),
StructFieldDecl(
name='rect',
type=ArrayType(
inner_type=ValueType(name='mjrRect'),
extents=(25,),
),
doc='rectangles (index 0: entire window)',
),
StructFieldDecl(
name='userdata',
type=PointerType(
inner_type=ValueType(name='void'),
),
doc='pointer to user data (for callbacks)',
),
StructFieldDecl(
name='type',
type=ValueType(name='int'),
doc='(type mjtEvent)',
),
StructFieldDecl(
name='left',
type=ValueType(name='int'),
doc='is left button down',
),
StructFieldDecl(
name='right',
type=ValueType(name='int'),
doc='is right button down',
),
StructFieldDecl(
name='middle',
type=ValueType(name='int'),
doc='is middle button down',
),
StructFieldDecl(
name='doubleclick',
type=ValueType(name='int'),
doc='is last press a double click',
),
StructFieldDecl(
name='button',
type=ValueType(name='int'),
doc='which button was pressed (mjtButton)',
),
StructFieldDecl(
name='buttontime',
type=ValueType(name='double'),
doc='time of last button press',
),
StructFieldDecl(
name='x',
type=ValueType(name='double'),
doc='x position',
),
StructFieldDecl(
name='y',
type=ValueType(name='double'),
doc='y position',
),
StructFieldDecl(
name='dx',
type=ValueType(name='double'),
doc='x displacement',
),
StructFieldDecl(
name='dy',
type=ValueType(name='double'),
doc='y displacement',
),
StructFieldDecl(
name='sx',
type=ValueType(name='double'),
doc='x scroll',
),
StructFieldDecl(
name='sy',
type=ValueType(name='double'),
doc='y scroll',
),
StructFieldDecl(
name='control',
type=ValueType(name='int'),
doc='is control down',
),
StructFieldDecl(
name='shift',
type=ValueType(name='int'),
doc='is shift down',
),
StructFieldDecl(
name='alt',
type=ValueType(name='int'),
doc='is alt down',
),
StructFieldDecl(
name='key',
type=ValueType(name='int'),
doc='which key was pressed',
),
StructFieldDecl(
name='keytime',
type=ValueType(name='double'),
doc='time of last key press',
),
StructFieldDecl(
name='mouserect',
type=ValueType(name='int'),
doc='which rectangle contains mouse',
),
StructFieldDecl(
name='dragrect',
type=ValueType(name='int'),
doc='which rectangle is dragged with mouse',
),
StructFieldDecl(
name='dragbutton',
type=ValueType(name='int'),
doc='which button started drag (mjtButton)',
),
StructFieldDecl(
name='dropcount',
type=ValueType(name='int'),
doc='number of files dropped',
),
StructFieldDecl(
name='droppaths',
type=PointerType(
inner_type=PointerType(
inner_type=ValueType(name='char', is_const=True),
),
),
doc='paths to files dropped',
),
),
)),
('mjuiThemeSpacing',
StructDecl(
name='mjuiThemeSpacing',
declname='struct mjuiThemeSpacing_',
fields=(
StructFieldDecl(
name='total',
type=ValueType(name='int'),
doc='total width',
),
StructFieldDecl(
name='scroll',
type=ValueType(name='int'),
doc='scrollbar width',
),
StructFieldDecl(
name='label',
type=ValueType(name='int'),
doc='label width',
),
StructFieldDecl(
name='section',
type=ValueType(name='int'),
doc='section gap',
),
StructFieldDecl(
name='itemside',
type=ValueType(name='int'),
doc='item side gap',
),
StructFieldDecl(
name='itemmid',
type=ValueType(name='int'),
doc='item middle gap',
),
StructFieldDecl(
name='itemver',
type=ValueType(name='int'),
doc='item vertical gap',
),
StructFieldDecl(
name='texthor',
type=ValueType(name='int'),
doc='text horizontal gap',
),
StructFieldDecl(
name='textver',
type=ValueType(name='int'),
doc='text vertical gap',
),
StructFieldDecl(
name='linescroll',
type=ValueType(name='int'),
doc='number of pixels to scroll',
),
StructFieldDecl(
name='samples',
type=ValueType(name='int'),
doc='number of multisamples',
),
),
)),
('mjuiThemeColor',
StructDecl(
name='mjuiThemeColor',
declname='struct mjuiThemeColor_',
fields=(
StructFieldDecl(
name='master',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='master background',
),
StructFieldDecl(
name='thumb',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='scrollbar thumb',
),
StructFieldDecl(
name='secttitle',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='section title',
),
StructFieldDecl(
name='sectfont',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='section font',
),
StructFieldDecl(
name='sectsymbol',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='section symbol',
),
StructFieldDecl(
name='sectpane',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='section pane',
),
StructFieldDecl(
name='shortcut',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='shortcut background',
),
StructFieldDecl(
name='fontactive',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='font active',
),
StructFieldDecl(
name='fontinactive',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='font inactive',
),
StructFieldDecl(
name='decorinactive',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='decor inactive',
),
StructFieldDecl(
name='decorinactive2',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='inactive slider color 2',
),
StructFieldDecl(
name='button',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='button',
),
StructFieldDecl(
name='check',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='check',
),
StructFieldDecl(
name='radio',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='radio',
),
StructFieldDecl(
name='select',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='select',
),
StructFieldDecl(
name='select2',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='select pane',
),
StructFieldDecl(
name='slider',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='slider',
),
StructFieldDecl(
name='slider2',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='slider color 2',
),
StructFieldDecl(
name='edit',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='edit',
),
StructFieldDecl(
name='edit2',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='edit invalid',
),
StructFieldDecl(
name='cursor',
type=ArrayType(
inner_type=ValueType(name='float'),
extents=(3,),
),
doc='edit cursor',
),
),
)),
('mjuiItem',
StructDecl(
name='mjuiItem',
declname='struct mjuiItem_',
fields=(
StructFieldDecl(
name='type',
type=ValueType(name='int'),
doc='type (mjtItem)',
),
StructFieldDecl(
name='name',
type=ArrayType(
inner_type=ValueType(name='char'),
extents=(40,),
),
doc='name',
),
StructFieldDecl(
name='state',
type=ValueType(name='int'),
doc='0: disable, 1: enable, 2+: use predicate',
),
StructFieldDecl(
name='pdata',
type=PointerType(
inner_type=ValueType(name='void'),
),
doc='data pointer (type-specific)',
),
StructFieldDecl(
name='sectionid',
type=ValueType(name='int'),
doc='id of section containing item',
),
StructFieldDecl(
name='itemid',
type=ValueType(name='int'),
doc='id of item within section',
),
AnonymousUnionDecl(
fields=(
StructFieldDecl(
name='single',
type=ValueType(name='struct mjuiItemSingle_'),
doc='check and button',
),
StructFieldDecl(
name='multi',
type=ValueType(name='struct mjuiItemMulti_'),
doc='static, radio and select',
),
StructFieldDecl(
name='slider',
type=ValueType(name='struct mjuiItemSlider_'),
doc='slider',
),
StructFieldDecl(
name='edit',
type=ValueType(name='struct mjuiItemEdit_'),
doc='edit',
),
),
),
StructFieldDecl(
name='rect',
type=ValueType(name='mjrRect'),
doc='rectangle occupied by item',
),
),
)),
('mjuiSection',
StructDecl(
name='mjuiSection',
declname='struct mjuiSection_',
fields=(
StructFieldDecl(
name='name',
type=ArrayType(
inner_type=ValueType(name='char'),
extents=(40,),
),
doc='name',
),
StructFieldDecl(
name='state',
type=ValueType(name='int'),
doc='0: closed, 1: open',
),
StructFieldDecl(
name='modifier',
type=ValueType(name='int'),
doc='0: none, 1: control, 2: shift; 4: alt',
),
StructFieldDecl(
name='shortcut',
type=ValueType(name='int'),
doc='shortcut key; 0: undefined',
),
StructFieldDecl(
name='nitem',
type=ValueType(name='int'),
doc='number of items in use',
),
StructFieldDecl(
name='item',
type=ArrayType(
inner_type=ValueType(name='mjuiItem'),
extents=(100,),
),
doc='preallocated array of items',
),
StructFieldDecl(
name='rtitle',
type=ValueType(name='mjrRect'),
doc='rectangle occupied by title',
),
StructFieldDecl(
name='rcontent',
type=ValueType(name='mjrRect'),
doc='rectangle occupied by content',
),
),
)),
('mjUI',
StructDecl(
name='mjUI',
declname='struct mjUI_',
fields=(
StructFieldDecl(
name='spacing',
type=ValueType(name='mjuiThemeSpacing'),
doc='UI theme spacing',
),
StructFieldDecl(
name='color',
type=ValueType(name='mjuiThemeColor'),
doc='UI theme color',
),
StructFieldDecl(
name='predicate',
type=ValueType(name='mjfItemEnable'),
doc='callback to set item state programmatically',
),
StructFieldDecl(
name='userdata',
type=PointerType(
inner_type=ValueType(name='void'),
),
doc='pointer to user data (passed to predicate)',
),
StructFieldDecl(
name='rectid',
type=ValueType(name='int'),
doc='index of this ui rectangle in mjuiState',
),
StructFieldDecl(
name='auxid',
type=ValueType(name='int'),
doc='aux buffer index of this ui',
),
StructFieldDecl(
name='radiocol',
type=ValueType(name='int'),
doc='number of radio columns (0 defaults to 2)',
),
StructFieldDecl(
name='width',
type=ValueType(name='int'),
doc='width',
),
StructFieldDecl(
name='height',
type=ValueType(name='int'),
doc='current height',
),
StructFieldDecl(
name='maxheight',
type=ValueType(name='int'),
doc='height when all sections open',
),
StructFieldDecl(
name='scroll',
type=ValueType(name='int'),
doc='scroll from top of UI',
),
StructFieldDecl(
name='mousesect',
type=ValueType(name='int'),
doc='0: none, -1: scroll, otherwise 1+section',
),
StructFieldDecl(
name='mouseitem',
type=ValueType(name='int'),
doc='item within section',
),
StructFieldDecl(
name='mousehelp',
type=ValueType(name='int'),
doc='help button down: print shortcuts',
),
StructFieldDecl(
name='editsect',
type=ValueType(name='int'),
doc='0: none, otherwise 1+section',
),
StructFieldDecl(
name='edititem',
type=ValueType(name='int'),
doc='item within section',
),
StructFieldDecl(
name='editcursor',
type=ValueType(name='int'),
doc='cursor position',
),
StructFieldDecl(
name='editscroll',
type=ValueType(name='int'),
doc='horizontal scroll',
),
StructFieldDecl(
name='edittext',
type=ArrayType(
inner_type=ValueType(name='char'),
extents=(300,),
),
doc='current text',
),
StructFieldDecl(
name='editchanged',
type=PointerType(
inner_type=ValueType(name='mjuiItem'),
),
doc='pointer to changed edit in last mjui_event',
),
StructFieldDecl(
name='nsect',
type=ValueType(name='int'),
doc='number of sections in use',
),
StructFieldDecl(
name='sect',
type=ArrayType(
inner_type=ValueType(name='mjuiSection'),
extents=(10,),
),
doc='preallocated array of sections',
),
),
)),
('mjuiDef',
StructDecl(
name='mjuiDef',
declname='struct mjuiDef_',
fields=(
StructFieldDecl(
name='type',
type=ValueType(name='int'),
doc='type (mjtItem); -1: section',
),
StructFieldDecl(
name='name',
type=ArrayType(
inner_type=ValueType(name='char'),
extents=(40,),
),
doc='name',
),
StructFieldDecl(
name='state',
type=ValueType(name='int'),
doc='state',
),
StructFieldDecl(
name='pdata',
type=PointerType(
inner_type=ValueType(name='void'),
),
doc='pointer to data',
),
StructFieldDecl(
name='other',
type=ArrayType(
inner_type=ValueType(name='char'),
extents=(300,),
),
doc='string with type-specific properties',
),
),
)),
])
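# Usage sketch (illustrative addition, not part of the original file). This
# module is a data table of C struct declarations. Assuming the mapping above
# is bound to a module-level name such as STRUCTS (a hypothetical name chosen
# by analogy with functions.FUNCTIONS, which the tests below look up), entries
# can be retrieved and rendered like so:
#   >>> decl = STRUCTS['mjvCamera']      # hypothetical binding name
#   >>> decl.fields[0].name
#   'type'
#   >>> decl.fields[0].type.decl('type')
#   'int type'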
| mujoco-main | introspect/structs.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for functions.py."""
from absl.testing import absltest
from . import ast_nodes
from . import functions
from . import type_parsing
class FunctionsTest(absltest.TestCase):
def test_mj_copyData(self): # pylint: disable=invalid-name
func_decl = functions.FUNCTIONS['mj_copyData']
self.assertEqual(func_decl.name, 'mj_copyData')
self.assertEqual(func_decl.return_type, type_parsing.parse_type('mjData*'))
self.assertEqual(
func_decl.parameters,
(ast_nodes.FunctionParameterDecl(
name='dest', type=type_parsing.parse_type('mjData*')),
ast_nodes.FunctionParameterDecl(
name='m', type=type_parsing.parse_type('const mjModel*')),
ast_nodes.FunctionParameterDecl(
name='src', type=type_parsing.parse_type('const mjData*'))))
self.assertEqual(
func_decl.doc, 'Copy mjData. '
'm is only required to contain the size fields from MJMODEL_INTS.')
def test_mju_transformSpatial(self): # pylint: disable=invalid-name
func_decl = functions.FUNCTIONS['mju_transformSpatial']
self.assertEqual(func_decl.name, 'mju_transformSpatial')
self.assertEqual(func_decl.return_type, type_parsing.parse_type('void'))
self.assertEqual(
func_decl.parameters,
(ast_nodes.FunctionParameterDecl(
name='res', type=type_parsing.parse_type('mjtNum[6]')),
ast_nodes.FunctionParameterDecl(
name='vec', type=type_parsing.parse_type('const mjtNum[6]')),
ast_nodes.FunctionParameterDecl(
name='flg_force', type=type_parsing.parse_type('int')),
ast_nodes.FunctionParameterDecl(
name='newpos', type=type_parsing.parse_type('const mjtNum[3]')),
ast_nodes.FunctionParameterDecl(
name='oldpos', type=type_parsing.parse_type('const mjtNum[3]')),
ast_nodes.FunctionParameterDecl(
name='rotnew2old',
type=type_parsing.parse_type('const mjtNum[9]'))))
self.assertEqual(
func_decl.doc, 'Coordinate transform of 6D motion or force vector in ' +
'rotation:translation format. rotnew2old is 3-by-3, ' +
'NULL means no rotation; flg_force specifies force or motion type.')
if __name__ == '__main__':
absltest.main()
| mujoco-main | introspect/functions_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes that roughly correspond to Clang AST node types."""
import collections
import dataclasses
import re
from typing import Dict, Optional, Sequence, Tuple, Union
# We are relying on Clang to do the actual source parsing and are only doing
# a little bit of extra parsing of function parameter type declarations here.
# These patterns are here for sanity checking rather than actual parsing.
VALID_TYPE_NAME_PATTERN = re.compile('(struct )?[A-Za-z_][A-Za-z0-9_]*')
C_INVALID_TYPE_NAMES = frozenset([
'auto', 'break', 'case', 'const', 'continue', 'default', 'do', 'else',
'enum', 'extern', 'for', 'goto', 'if', 'inline', 'register', 'restrict',
'return', 'sizeof', 'static', 'struct', 'switch', 'typedef', 'union',
'volatile', 'while', '_Alignas', '_Atomic', '_Generic', '_Imaginary',
'_Noreturn', '_Static_assert', '_Thread_local', '__attribute__', '_Pragma'])
def _is_valid_integral_type(type_str: str):
"""Checks if a string is a valid integral type."""
parts = re.split(r'\s+', type_str)
counter = collections.defaultdict(lambda: 0)
wildcard_counter = 0
for part in parts:
if part in ('signed', 'unsigned', 'short', 'long', 'int', 'char'):
counter[part] += 1
elif VALID_TYPE_NAME_PATTERN.fullmatch(part):
# a non-keyword can be a typedef for int
wildcard_counter += 1
else:
return False
if (counter['signed'] + counter['unsigned'] > 1 or
counter['short'] > 1 or counter['long'] > 2 or
(counter['short'] and counter['long']) or
((counter['short'] or counter['long']) and counter['char']) or
counter['char'] + counter['int'] + wildcard_counter > 1):
return False
else:
return True
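# Illustrative examples (not in the original source) of what the checker
# above accepts and rejects, traced through its counting logic:
#   >>> _is_valid_integral_type('unsigned long long')
#   True
#   >>> _is_valid_integral_type('mjtByte')   # one non-keyword typedef is OK
#   True
#   >>> _is_valid_integral_type('long char')  # short/long cannot combine with char
#   False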
@dataclasses.dataclass
class ValueType:
"""Represents a C type that is neither a pointer type nor an array type."""
name: str
is_const: bool = False
is_volatile: bool = False
def __init__(self, name: str, is_const: bool = False,
is_volatile: bool = False):
is_valid_type_name = (
name == 'void *(*)(void *)' or
VALID_TYPE_NAME_PATTERN.fullmatch(name) or
_is_valid_integral_type(name)) and name not in C_INVALID_TYPE_NAMES
if not is_valid_type_name:
raise ValueError(f'{name!r} is not a valid value type name')
self.name = name
self.is_const = is_const
self.is_volatile = is_volatile
def decl(self, name_or_decl: Optional[str] = None) -> str:
parts = []
if self.is_const:
parts.append('const')
if self.is_volatile:
parts.append('volatile')
parts.append(self.name)
if name_or_decl:
parts.append(name_or_decl)
return ' '.join(parts)
def __str__(self):
return self.decl()
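# Example (illustrative, not in the original source; the declared name is
# arbitrary): decl() renders a C declaration with qualifiers first:
#   >>> ValueType('mjtNum', is_const=True).decl('timestep')
#   'const mjtNum timestep'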
@dataclasses.dataclass
class ArrayType:
"""Represents a C array type."""
inner_type: Union[ValueType, 'PointerType']
extents: Tuple[int, ...]
def __init__(self, inner_type: Union[ValueType, 'PointerType'],
extents: Sequence[int]):
self.inner_type = inner_type
self.extents = tuple(extents)
@property
def _extents_str(self) -> str:
return ''.join(f'[{n}]' for n in self.extents)
def decl(self, name_or_decl: Optional[str] = None) -> str:
name_or_decl = name_or_decl or ''
return self.inner_type.decl(f'{name_or_decl}{self._extents_str}')
def __str__(self):
return self.decl()
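# Example (illustrative, not in the original source): extents are rendered
# left-to-right after the declared name, matching C array syntax:
#   >>> ArrayType(ValueType('float'), (100, 3)).decl('linergb')
#   'float linergb[100][3]'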
@dataclasses.dataclass
class PointerType:
"""Represents a C pointer type."""
inner_type: Union[ValueType, ArrayType, 'PointerType']
is_const: bool = False
is_volatile: bool = False
is_restrict: bool = False
def decl(self, name_or_decl: Optional[str] = None) -> str:
"""Creates a string that declares an object of this type."""
parts = ['*']
if self.is_const:
parts.append('const')
if self.is_volatile:
parts.append('volatile')
if self.is_restrict:
parts.append('restrict')
if name_or_decl:
parts.append(name_or_decl)
ptr_decl = ' '.join(parts)
if isinstance(self.inner_type, ArrayType):
ptr_decl = f'({ptr_decl})'
return self.inner_type.decl(ptr_decl)
def __str__(self):
return self.decl()
@dataclasses.dataclass
class FunctionParameterDecl:
"""Represents a parameter in a function declaration.
Note that according to C language rules, a function parameter of array
type undergoes array-to-pointer decay, and therefore appears as a pointer
parameter in an actual C AST. We retain the arrayness of a parameter here
since the array's extents are informative.
"""
name: str
type: Union[ValueType, ArrayType, PointerType]
def __str__(self):
return self.type.decl(self.name)
@property
def decltype(self) -> str:
return self.type.decl()
@dataclasses.dataclass
class FunctionDecl:
"""Represents a function declaration."""
name: str
return_type: Union[ValueType, ArrayType, PointerType]
parameters: Tuple[FunctionParameterDecl, ...]
doc: str
def __init__(self, name: str,
return_type: Union[ValueType, ArrayType, PointerType],
parameters: Sequence[FunctionParameterDecl],
doc: str):
self.name = name
self.return_type = return_type
self.parameters = tuple(parameters)
self.doc = doc
def __str__(self):
param_str = ', '.join(str(p) for p in self.parameters)
return f'{self.return_type} {self.name}({param_str})'
@property
def decltype(self) -> str:
param_str = ', '.join(str(p.decltype) for p in self.parameters)
return f'{self.return_type} ({param_str})'
class _EnumDeclValues(Dict[str, int]):
"""A dict with modified stringified representation.
The __repr__ method of this class adds a trailing comma to the list of values.
This is done as a hint for code formatters to place one item per line when
the stringified dict is used in generated Python code.
"""
def __repr__(self):
out = super().__repr__()
if self:
out = re.sub(r'\(\[(.+)\]\)\Z', r'([\1,])', out)
return re.sub(r'\A_EnumDeclValues', 'dict', out)
@dataclasses.dataclass
class EnumDecl:
"""Represents an enum declaration."""
name: str
declname: str
values: Dict[str, int]
def __init__(self, name: str, declname: str, values: Dict[str, int]):
self.name = name
self.declname = declname
self.values = _EnumDeclValues(values)
@dataclasses.dataclass
class StructFieldDecl:
"""Represents a field in a struct or union declaration."""
name: str
type: Union[
ValueType,
ArrayType,
PointerType,
'AnonymousStructDecl',
'AnonymousUnionDecl',
]
doc: str
def __str__(self):
return self.type.decl(self.name)
@property
def decltype(self) -> str:
return self.type.decl()
@dataclasses.dataclass
class AnonymousStructDecl:
"""Represents an anonymous struct declaration."""
fields: Tuple[Union[StructFieldDecl, 'AnonymousUnionDecl'], ...]
def __init__(self, fields: Sequence[StructFieldDecl]):
self.fields = tuple(fields)
def __str__(self):
return self.decl()
def _inner_decl(self):
return '; '.join(str(field) for field in self.fields) + ';'
def decl(self, name_or_decl: Optional[str] = None):
parts = ['struct', f'{{{self._inner_decl()}}}']
if name_or_decl:
parts.append(name_or_decl)
return ' '.join(parts)
class AnonymousUnionDecl(AnonymousStructDecl):
"""Represents an anonymous union declaration."""
def decl(self, name_or_decl: Optional[str] = None):
parts = ['union', f'{{{self._inner_decl()}}}']
if name_or_decl:
parts.append(name_or_decl)
return ' '.join(parts)
@dataclasses.dataclass
class StructDecl:
"""Represents a struct declaration."""
name: str
declname: str
fields: Tuple[Union[StructFieldDecl, AnonymousUnionDecl], ...]
def __init__(self, name: str,
declname: str,
fields: Sequence[Union[StructFieldDecl, AnonymousUnionDecl]]):
self.name = name
self.declname = declname
self.fields = tuple(fields)
def decl(self, name_or_decl: Optional[str] = None) -> str:
parts = [self.name]
if name_or_decl:
parts.append(name_or_decl)
return ' '.join(parts)
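# A minimal usage sketch (added for illustration; not part of the original
# module). Composing the node types above reproduces C declarator syntax,
# including the parentheses needed when a pointer points into an array:
if __name__ == '__main__':
  print(ValueType('int', is_const=True).decl('x'))  # -> const int x
  print(ArrayType(ValueType('float'), (2, 3)).decl('m'))  # -> float m[2][3]
  print(PointerType(inner_type=ArrayType(ValueType('double'), (3,))).decl('p'))
  # -> double (* p)[3]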
| mujoco-main | introspect/ast_nodes.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for parsing C type declarations."""
import collections
import re
from typing import Mapping, MutableSequence, Optional, Sequence, Tuple, Union
from . import ast_nodes
ARRAY_EXTENTS_PATTERN = re.compile(r'(\[[^\]]+\]\s*)+\Z')
ARRAY_N_PATTERN = re.compile(r'\[([^\]]+)\]')
STARTS_WITH_CONST_PATTERN = re.compile(r'\Aconst(?![A-Za-z0-9_])')
ENDS_WITH_CONST_PATTERN = re.compile(r'(?<![A-Za-z0-9_])const\Z')
def _parse_qualifiers(
type_name: str,
qualifiers: Sequence[str]) -> Tuple[str, Mapping[str, bool]]:
"""Separates qualifiers from the rest of the type name."""
parts = re.split(r'\s+', type_name)
counter = collections.defaultdict(int)
non_qualifiers = []
for part in parts:
if part in qualifiers:
counter[part] += 1
if counter[part] > 1:
raise ValueError(f'duplicate qualifier: {part!r}')
else:
non_qualifiers.append(part)
is_qualifier = dict()
for qualifier in qualifiers:
is_qualifier[f'is_{qualifier}'] = bool(counter[qualifier])
return ' '.join(non_qualifiers), is_qualifier
def _parse_maybe_array(
type_name: str, innermost_type: Optional[Union[ast_nodes.ValueType,
ast_nodes.PointerType]]
) -> Union[ast_nodes.ValueType, ast_nodes.PointerType, ast_nodes.ArrayType]:
"""Internal-only helper that parses a type that may be an array type."""
array_match = ARRAY_EXTENTS_PATTERN.search(type_name)
if array_match:
extents = tuple(
int(s.strip()) for s in ARRAY_N_PATTERN.findall(array_match.group(0)))
inner_type_str = type_name[:array_match.start()]
return ast_nodes.ArrayType(
inner_type=_parse_maybe_pointer(inner_type_str.strip(), innermost_type),
extents=extents)
else:
return _parse_maybe_pointer(type_name, innermost_type)
def _parse_maybe_pointer(
type_name: str, innermost_type: Optional[Union[ast_nodes.ValueType,
ast_nodes.PointerType]]
) -> Union[ast_nodes.ValueType, ast_nodes.PointerType, ast_nodes.ArrayType]:
"""Internal-only helper that parses a type that may be a pointer type."""
if type_name == 'void *(*)(void *)':
return ast_nodes.ValueType(name=type_name)
p = type_name.rfind('*')
if p != -1:
leftover, is_qualifier = _parse_qualifiers(
type_name[p + 1:].strip(), ('const', 'volatile', 'restrict'))
if leftover:
raise ValueError(f'invalid qualifier for pointer: {leftover!r}')
inner_type_str = type_name[:p].strip()
if inner_type_str:
inner_type = _parse_maybe_pointer(inner_type_str, innermost_type)
else:
assert innermost_type is not None
inner_type = innermost_type
return ast_nodes.PointerType(inner_type=inner_type, **is_qualifier)
else:
assert innermost_type is None # value type should be innermost
type_name, is_qualifier = _parse_qualifiers(
type_name.strip(), ('const', 'volatile'))
return ast_nodes.ValueType(name=type_name, **is_qualifier)
def _peel_nested_parens(input_str: str) -> MutableSequence[str]:
"""Extracts substrings from a string with nested parentheses.
The returned sequence starts from the substring enclosed in the innermost
parentheses and moves subsequently outwards. The contents of the inner
substrings are removed from the outer ones. For example, given the string
'lorem ipsum(dolor sit (consectetur adipiscing) amet)sed do eiusmod',
this function produces the sequence
['consectetur adipiscing', 'dolor sit amet', 'lorem ipsumsed do eiusmod'].
Args:
input_str: An input string consisting of zero or more nested
parentheses.
Returns:
A sequence of substrings, each enclosed within its respective parentheses.
See the description above for the precise details of the output.
"""
if input_str == 'void *(*)(void *)':
return ['void *(*)(void *)']
start = input_str.find('(')
end = input_str.rfind(')')
if start == -1 and end == -1:
return [input_str]
else:
# Assertions to be re-raised into a meaningful error by the caller.
assert start != -1 # '(' should be present if there is a ')'
assert end != -1 # ')' should be present if there is a '('
assert start < end # '(' should come before ')'
out = _peel_nested_parens(input_str[start + 1:end])
out.append(input_str[:start] + input_str[end + 1:])
return out
def parse_type(
type_name: str
) -> Union[ast_nodes.ValueType, ast_nodes.PointerType, ast_nodes.ArrayType]:
"""Parses a string that represents a C type into an AST node."""
try:
type_str_stack = _peel_nested_parens(type_name.strip())
except AssertionError as e:
raise ValueError(f'{type_name!r} contains incorrectly nested '
f'parentheses') from e
result = None
while type_str_stack:
try:
result = _parse_maybe_array(type_str_stack.pop(), result)
except AssertionError as e:
raise ValueError(f'invalid type name {type_name!r}') from e
assert result # hint for pytype that `result` isn't None
return result
def parse_function_return_type(
type_name: str
) -> Union[ast_nodes.ValueType, ast_nodes.PointerType, ast_nodes.ArrayType]:
return parse_type(type_name[:type_name.find('(')])
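# A hedged usage sketch (added for illustration; not part of the original
# module). These outcomes follow from the parsing rules above:
#   parse_type('const int')     -> ValueType with name='int', is_const=True
#   parse_type('mjtNum *')      -> PointerType wrapping ValueType('mjtNum')
#   parse_type('double [3][4]') -> ArrayType with extents (3, 4)
#   parse_type('char *(*)[2]')  -> pointer to an array of 2 char pointers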
| mujoco-main | introspect/type_parsing.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility for formatting AST node as Python code."""
import contextlib
import dataclasses
from typing import Any, Iterable, Mapping, Sequence
INDENT_WIDTH = 4
MAX_LINE_WIDTH = 80
SIMPLE_TYPES = frozenset([int, float, str, bool, bytes, type(None)])
def format_as_python_code(obj: Any) -> str:
"""Formats an AST node object as well-indented Python code."""
formatter = _Formatter()
formatter.add(obj)
return str(formatter)
def _is_all_simple(seq: Iterable[Any]) -> bool:
return all(type(obj) in SIMPLE_TYPES for obj in seq)
class _Formatter:
"""A helper for pretty-printing AST nodes as Python code."""
def __init__(self):
self._line_prefix = ''
self._lines = []
self._add_to_last_line = False
@contextlib.contextmanager
def _indent(self, width: int = INDENT_WIDTH):
self._line_prefix += ' ' * width
yield
self._line_prefix = self._line_prefix[:-width]
@contextlib.contextmanager
def _append_at_end(self, s):
yield
self._lines[-1] += s
def _add_line(self, line: str, no_break: bool = False):
if self._add_to_last_line:
self._lines[-1] += line
else:
self._lines.append(self._line_prefix + line)
self._add_to_last_line = no_break
def _add_dict(self, obj: Mapping[Any, Any]):
"""Adds a dict to the formatted output."""
self._add_line('dict([')
with self._indent():
for k, v in obj.items():
# Try to fit everything into a single line first.
if _is_all_simple((k, v)):
single_line = f'({k!r}, {v!r}),'
if len(self._line_prefix) + len(single_line) <= MAX_LINE_WIDTH:
self._add_line(single_line)
continue
self._add_line(f"('{k}',")
with self._append_at_end('),'):
with self._indent(1):
self.add(v)
self._add_line('])')
def _add_dataclass(self, obj: Any):
"""Adds a dataclass object to the formatted output."""
# Filter out default values.
kv_pairs = []
for k in dataclasses.fields(obj):
v = getattr(obj, k.name)
if v != k.default:
kv_pairs.append((k, v))
# Try to fit everything into a single line first.
if _is_all_simple(v for _, v in kv_pairs):
single_line = ', '.join(f'{k.name}={v!r}' for k, v in kv_pairs)
single_line = f'{obj.__class__.__name__}({single_line})'
if len(self._line_prefix) + len(single_line) <= MAX_LINE_WIDTH:
self._add_line(single_line)
return
self._add_line(obj.__class__.__name__ + '(')
with self._indent():
for k, v in kv_pairs:
self._add_line(k.name + '=', no_break=True)
with self._append_at_end(','):
self.add(v)
self._add_line(')')
def _add_sequence(self, obj: Sequence[Any]) -> None:
"""Adds a sequence to the formatted output."""
default_str = repr(obj)
open_token, close_token = default_str[0], default_str[-1]
# Try to fit everything into a single line first.
if _is_all_simple(obj):
single_line = (
f"{open_token}{', '.join(repr(o) for o in obj)}{close_token}")
if close_token == ')' and len(obj) == 1:
single_line = f'{single_line[:-1]},)'
if len(self._line_prefix) + len(single_line) <= MAX_LINE_WIDTH:
self._add_line(single_line)
return
self._add_line(open_token)
with self._indent():
for v in obj:
with self._append_at_end(','):
self.add(v)
self._add_line(close_token)
def add(self, obj: Any) -> None:
"""Adds an object to the formatted output."""
if _is_all_simple((obj,)):
self._add_line(repr(obj))
elif dataclasses.is_dataclass(obj):
self._add_dataclass(obj)
elif isinstance(obj, Mapping):
self._add_dict(obj)
elif isinstance(obj, Sequence):
self._add_sequence(obj)
else:
raise NotImplementedError
def __str__(self):
lines = []
for line in self._lines:
if len(line) > MAX_LINE_WIDTH:
lines.append(f'{line} # pylint: disable=line-too-long')
else:
lines.append(line)
return '\n'.join(lines)
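# A minimal sketch of the formatter in action (added for illustration; not
# part of the original module). `_Point` is a hypothetical dataclass:
if __name__ == '__main__':
  import dataclasses as _dc

  @_dc.dataclass
  class _Point:
    x: int = 0
    y: int = 0

  # Simple non-default fields fit within MAX_LINE_WIDTH, so the
  # single-line fast path is used:
  print(format_as_python_code(_Point(x=1, y=2)))  # -> _Point(x=1, y=2)
  print(format_as_python_code({'a': 1}))
  # prints:
  # dict([
  #     ('a', 1),
  # ])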
| mujoco-main | introspect/codegen/formatter.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates structs.py.
The JSON input can be generated via:
clang -Xclang -ast-dump=json -fsyntax-only -fparse-all-comments -x c mujoco.h
"""
import itertools
import json
import os
import re
from typing import Any, Mapping, Sequence, Union
from absl import app
from absl import flags
from introspect import ast_nodes
from introspect import type_parsing
from . import formatter
_JSON_PATH = flags.DEFINE_string(
'json_path', None,
'Path to the JSON file representing the Clang AST for mujoco.h')
ClangJsonNode = Mapping[str, Any]
_ANONYMOUS_KEY_PATTERN = re.compile(r'\d+:\d+(?=\))')
_EXCLUDED = (
'mjpPlugin',
'mjpPlugin_',
'mjpResourceProvider',
'mjpResourceProvider_',
'mjResource',
'mjResource_',
)
def traverse(node, visitor):
visitor.visit(node)
children = node.get('inner', [])
for child in children:
traverse(child, visitor)
class _AnonymousTypePlaceholder(ast_nodes.ValueType):
def __init__(self, anonymous_key: str):
self.name = anonymous_key
self.is_const = False
self.is_volatile = False
class MjStructVisitor:
"""A Clang AST JSON node visitor for MuJoCo API struct declarations."""
def __init__(self):
self._structs = {}
self._anonymous = {}
self._typedefs = {}
def _normalize_type(
self, declname: str
) -> Union[
ast_nodes.ValueType, ast_nodes.PointerType, ast_nodes.ArrayType]:
"""Resolves anonymous structs/unions and looks up existing typedefs."""
# Check for anonymous struct/union.
if '(unnamed ' in declname:
m = _ANONYMOUS_KEY_PATTERN.search(declname)
if not m:
raise RuntimeError(f'cannot parse anonymous key from {declname!r}')
return _AnonymousTypePlaceholder(m.group(0))
# Lookup typedef name and use it instead if one exists.
for k, v in self._typedefs.items():
if declname == v.declname:
return type_parsing.parse_type(k)
# No valid normalization, just parse the declname.
return type_parsing.parse_type(declname)
def _make_comment(self, node: ClangJsonNode) -> str:
"""Makes a comment string from a Clang AST FullComment node."""
kind = node.get('kind')
if kind == 'TextComment':
return node['text'].replace('\N{NO-BREAK SPACE}', ' ').strip()
else:
strings = []
for child in node['inner']:
strings.append(self._make_comment(child))
return ''.join(strings).strip()
def _make_field(
self, node: ClangJsonNode
) -> Union[ast_nodes.StructFieldDecl, _AnonymousTypePlaceholder]:
"""Makes a StructFieldDecl object from a Clang AST FieldDecl node."""
doc = ''
for child in node.get('inner', ()):
if child['kind'] == 'FullComment':
doc = self._make_comment(child)
if 'name' in node:
field_type = self._normalize_type(node['type']['qualType'])
return ast_nodes.StructFieldDecl(
name=node['name'], type=field_type, doc=doc)
else:
return _AnonymousTypePlaceholder(self._make_anonymous_key(node))
def _make_struct(
self, node: ClangJsonNode
) -> Union[ast_nodes.AnonymousStructDecl, ast_nodes.StructDecl]:
"""Makes a Decl object from a Clang AST RecordDecl node."""
name = f"{node['tagUsed']} {node['name']}" if 'name' in node else ''
fields = []
for child in node['inner']:
child_kind = child.get('kind')
if child_kind == 'FieldDecl':
fields.append(self._make_field(child))
if name:
return ast_nodes.StructDecl(name=name, declname=name, fields=fields)
elif node['tagUsed'] == 'union':
return ast_nodes.AnonymousUnionDecl(fields=fields)
else:
return ast_nodes.AnonymousStructDecl(fields=fields)
def _is_mujoco_type(self, node: ClangJsonNode) -> bool:
node_name = node.get('name', '')
included_from = os.path.basename(
node['loc'].get('includedFrom', {}).get('file', '')
)
return node_name not in _EXCLUDED and (
node_name.startswith('mj')
or included_from == 'mujoco.h'
or included_from.startswith('mj')
)
def _make_anonymous_key(self, node: ClangJsonNode) -> str:
line = node['loc']['line']
col = node['loc']['col']
return f'{line}:{col}'
def visit(self, node: ClangJsonNode) -> None:
"""Visits a JSON node."""
if node.get('kind') == 'RecordDecl' and self._is_mujoco_type(node):
struct_decl = self._make_struct(node)
if hasattr(struct_decl, 'name'):
self._structs[struct_decl.name] = struct_decl
else:
anonymous_key = self._make_anonymous_key(node)
if anonymous_key in self._anonymous:
raise RuntimeError(
f'duplicate key for anonymous struct: {anonymous_key}')
self._anonymous[anonymous_key] = struct_decl
elif (node.get('kind') == 'TypedefDecl' and
node['type']['qualType'].startswith('struct mj') and
node['name'] not in _EXCLUDED):
struct = self._structs[node['type']['qualType']]
self._typedefs[node['name']] = ast_nodes.StructDecl(
name=node['name'], declname=struct.declname, fields=struct.fields)
def resolve_all_anonymous(self) -> None:
"""Replaces anonymous struct placeholders with corresponding decl."""
for struct in itertools.chain(
self._structs.values(), self._typedefs.values()
):
fields = []
for field in struct.fields:
if isinstance(field, _AnonymousTypePlaceholder):
fields.append(self._anonymous[field.name])
elif isinstance(field.type, _AnonymousTypePlaceholder):
fields.append(
ast_nodes.StructFieldDecl(
name=field.name,
type=self._anonymous[field.type.name],
doc=field.doc,
)
)
else:
fields.append(field)
struct.fields = tuple(fields)
@property
def structs(self) -> Mapping[str, ast_nodes.StructDecl]:
return self._structs
@property
def typedefs(self) -> Mapping[str, ast_nodes.StructDecl]:
return self._typedefs
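# An illustrative sketch of the Clang JSON shape consumed above (the key
# names follow those referenced by this visitor; the concrete values are
# hypothetical):
#
#   {"kind": "RecordDecl", "tagUsed": "struct", "name": "mjExample",
#    "loc": {"line": 10, "col": 3},
#    "inner": [{"kind": "FieldDecl", "name": "nv",
#               "type": {"qualType": "int"}}]}
#
# A RecordDecl with no "name" is keyed by its "line:col" location via
# _make_anonymous_key, and the resulting placeholder is substituted back
# into the enclosing struct by resolve_all_anonymous().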
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
with open(_JSON_PATH.value, 'r', encoding='utf-8') as f:
root = json.load(f)
visitor = MjStructVisitor()
traverse(root, visitor)
visitor.resolve_all_anonymous()
structs_str = formatter.format_as_python_code(visitor.typedefs)
print(f'''
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides information about MuJoCo API structs.
DO NOT EDIT. THIS FILE IS AUTOMATICALLY GENERATED.
"""
from typing import Mapping
from .ast_nodes import AnonymousStructDecl
from .ast_nodes import AnonymousUnionDecl
from .ast_nodes import ArrayType
from .ast_nodes import PointerType
from .ast_nodes import StructDecl
from .ast_nodes import StructFieldDecl
from .ast_nodes import ValueType
STRUCTS: Mapping[str, StructDecl] = {structs_str}
'''.strip()) # `print` adds a trailing newline
if __name__ == '__main__':
app.run(main)
| mujoco-main | introspect/codegen/generate_structs.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates enums.py.
The JSON input can be generated via:
clang -Xclang -ast-dump=json -fsyntax-only -fparse-all-comments -x c mujoco.h
"""
import json
from typing import Any, Mapping, Sequence
from absl import app
from absl import flags
from introspect import ast_nodes
from . import formatter
_JSON_PATH = flags.DEFINE_string(
'json_path', None,
'Path to the JSON file representing the Clang AST for mujoco.h')
ClangJsonNode = Mapping[str, Any]
def traverse(node, visitor):
visitor.visit(node)
children = node.get('inner', [])
for child in children:
traverse(child, visitor)
class MjEnumVisitor:
"""A Clang AST JSON node visitor for MuJoCo API enum declarations."""
def __init__(self):
self._enums = {}
self._typedefs = {}
def _make_enum(self, node: ClangJsonNode) -> ast_nodes.EnumDecl:
"""Makes a EnumDecl from a Clang AST EnumDecl node."""
name = f"enum {node['name']}"
values = []
for child in node['inner']:
child_kind = child.get('kind')
if child_kind == 'EnumConstantDecl':
next_idx = values[-1][1] + 1 if values else 0
if 'inner' in child:
value = int(child['inner'][0].get('value', next_idx))
else:
value = next_idx
values.append((child['name'], value))
return ast_nodes.EnumDecl(name=name, declname=name, values=dict(values))
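# For example (hypothetical input), `enum mjtExample {A, B = 5, C};` yields
# values {'A': 0, 'B': 5, 'C': 6}: enumerators without an explicit constant
# continue counting from the previous value, starting at 0.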
def visit(self, node: ClangJsonNode) -> None:
if (node.get('kind') == 'EnumDecl' and
node.get('name', '').startswith('mj')):
enum_decl = self._make_enum(node)
self._enums[enum_decl.name] = enum_decl
elif (node.get('kind') == 'TypedefDecl' and
node['type']['qualType'].startswith('enum mj')):
enum = self._enums[node['type']['qualType']]
self._typedefs[node['name']] = ast_nodes.EnumDecl(
name=node['name'], declname=enum.declname, values=dict(enum.values))
@property
def enums(self) -> Mapping[str, ast_nodes.EnumDecl]:
return self._enums
@property
def typedefs(self) -> Mapping[str, ast_nodes.EnumDecl]:
return self._typedefs
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
with open(_JSON_PATH.value, 'r', encoding='utf-8') as f:
root = json.load(f)
visitor = MjEnumVisitor()
traverse(root, visitor)
enums_str = formatter.format_as_python_code(visitor.typedefs)
print(f'''
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides information about MuJoCo API enums.
DO NOT EDIT. THIS FILE IS AUTOMATICALLY GENERATED.
"""
from typing import Mapping
from .ast_nodes import EnumDecl
ENUMS: Mapping[str, EnumDecl] = {enums_str}
'''.strip()) # `print` adds a trailing newline
if __name__ == '__main__':
app.run(main)
| mujoco-main | introspect/codegen/generate_enums.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates functions.py.
The JSON input can be generated via:
clang -Xclang -ast-dump=json -fsyntax-only -fparse-all-comments -x c mujoco.h
"""
import json
from typing import Any, Mapping, Sequence
from absl import app
from absl import flags
from introspect import ast_nodes
from introspect import type_parsing
from . import formatter
_HEADER_PATH = flags.DEFINE_string(
'header_path', None, 'Path to the original mujoco.h')
_JSON_PATH = flags.DEFINE_string(
'json_path', None,
'Path to the JSON file representing the Clang AST for mujoco.h')
ClangJsonNode = Mapping[str, Any]
def traverse(node, visitor):
visitor.visit(node)
children = node.get('inner', [])
for child in children:
traverse(child, visitor)
class MjFunctionVisitor:
"""A Clang AST JSON node visitor for MuJoCo API function declarations."""
def __init__(self, raw_header):
self._raw_header = raw_header
self._functions = {}
def _make_function(self, node: ClangJsonNode) -> ast_nodes.FunctionDecl:
"""Makes a FunctionDecl from a Clang AST FunctionDecl node."""
name = node['name']
return_type = type_parsing.parse_function_return_type(
node['type']['qualType'])
parameters = []
comments = []
for child in node['inner']:
child_kind = child.get('kind')
if child_kind == 'ParmVarDecl':
parameters.append(self._make_parameter(child))
if child_kind == 'FullComment':
comments.append(self._make_comment(child))
comment = ' '.join(comments).strip()
return ast_nodes.FunctionDecl(
name=name, return_type=return_type, parameters=parameters, doc=comment)
def _make_parameter(
self, node: ClangJsonNode) -> ast_nodes.FunctionParameterDecl:
"""Makes a ParameterDecl from a Clang AST ParmVarDecl node."""
name = node['name']
type_name = node['type']['qualType']
# For pointer parameters, look up the original header to see if an
# array extent was declared there.
if type_name.endswith('*'):
decl_begin = node['range']['begin']['offset']
decl_end = node['range']['end']['offset'] + node['range']['end']['tokLen']
decl = self._raw_header[decl_begin:decl_end]
name_begin = node['loc']['offset'] - decl_begin
name_end = name_begin + node['loc']['tokLen']
type_name = decl[:name_begin] + decl[name_end:]
return ast_nodes.FunctionParameterDecl(
name=name, type=type_parsing.parse_type(type_name))
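# For instance (hypothetical declaration), for `void mjf_demo(mjtNum vec[3])`
# Clang reports the decayed parameter type 'mjtNum *', while slicing the raw
# header as above recovers 'mjtNum [3]', preserving the array extent.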
def _make_comment(self, node: ClangJsonNode) -> str:
"""Makes a comment string from a Clang AST FullComment node."""
kind = node.get('kind')
if kind == 'TextComment':
return node['text'].replace('\N{NO-BREAK SPACE}', ' ')
else:
strings = []
for child in node['inner']:
strings.append(self._make_comment(child))
return ''.join(strings)
def visit(self, node: ClangJsonNode) -> None:
if (node.get('kind') == 'FunctionDecl' and
node.get('name', '').startswith('mj')):
func_decl = self._make_function(node)
self._functions[func_decl.name] = func_decl
@property
def functions(self) -> Mapping[str, ast_nodes.FunctionDecl]:
return self._functions
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
with open(_JSON_PATH.value, 'r', encoding='utf-8') as f:
root = json.load(f)
with open(_HEADER_PATH.value, 'r') as f:
visitor = MjFunctionVisitor(f.read())
traverse(root, visitor)
functions_str = formatter.format_as_python_code(visitor.functions)
print(f'''
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides information about MuJoCo API functions.
DO NOT EDIT. THIS FILE IS AUTOMATICALLY GENERATED.
"""
from typing import Mapping
from .ast_nodes import ArrayType
from .ast_nodes import FunctionDecl
from .ast_nodes import FunctionParameterDecl
from .ast_nodes import PointerType
from .ast_nodes import ValueType
FUNCTIONS: Mapping[str, FunctionDecl] = {functions_str}
'''.strip()) # `print` adds a trailing newline
if __name__ == '__main__':
app.run(main)
| mujoco-main | introspect/codegen/generate_functions.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install script for MuJoCo."""
import fnmatch
import logging
import os
import platform
import random
import re
import shutil
import string
import subprocess
import sys
import sysconfig
import setuptools
from setuptools import find_packages
from setuptools import setup
from setuptools.command import build_ext
from setuptools.command import install_scripts
__version__ = '2.3.8'
MUJOCO_CMAKE = 'MUJOCO_CMAKE'
MUJOCO_CMAKE_ARGS = 'MUJOCO_CMAKE_ARGS'
MUJOCO_PATH = 'MUJOCO_PATH'
MUJOCO_PLUGIN_PATH = 'MUJOCO_PLUGIN_PATH'
EXT_PREFIX = 'mujoco.'
def get_long_description():
"""Creates a long description for the package from bundled markdown files."""
current_dir = os.path.dirname(__file__)
with open(os.path.join(current_dir, 'README.md')) as f:
description = f.read()
try:
with open(os.path.join(current_dir, 'LICENSES_THIRD_PARTY.md')) as f:
description = f'{description}\n{f.read()}'
except FileNotFoundError:
pass
return description
def get_mujoco_lib_pattern():
if platform.system() == 'Windows':
return 'mujoco.lib'
elif platform.system() == 'Darwin':
return 'libmujoco.*.dylib'
else:
return 'libmujoco.so.*'
def get_external_lib_patterns():
if platform.system() == 'Windows':
return ['mujoco.dll']
elif platform.system() == 'Darwin':
return ['libmujoco.*.dylib']
else:
return ['libmujoco.so.*']
def get_plugin_lib_patterns():
if platform.system() == 'Windows':
return ['*.dll']
elif platform.system() == 'Darwin':
return ['lib*.dylib']
else:
return ['lib*']
def start_and_end(iterable):
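"""Yields consecutive non-overlapping (first, second) pairs from an iterable."""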
it = iter(iterable)
while True:
try:
first = next(it)
second = next(it)
yield first, second
except StopIteration:
return
def tokenize_quoted_substr(input_string, quote_char, placeholders=None):
"""Replace quoted substrings with random text placeholders with no spaces."""
# Matches quote characters not proceded with a backslash.
pattern = re.compile(r'(?<!\\)' + quote_char)
quote_positions = [m.start() for m in pattern.finditer(input_string)]
if len(quote_positions) % 2:
raise ValueError(f'unbalanced quotes {quote_char}...{quote_char}')
output_string = ''
placeholders = placeholders if placeholders is not None else dict()
prev_end = -1
for start, end in start_and_end(quote_positions):
output_string += input_string[prev_end+1:start]
while True:
placeholder = ''.join(random.choices(string.ascii_lowercase, k=5))
if placeholder not in input_string and placeholder not in output_string:
break
output_string += placeholder
placeholders[placeholder] = input_string[start+1:end]
prev_end = end
output_string += input_string[prev_end+1:]
return output_string, placeholders
def parse_cmake_args_from_environ(env_var_name=MUJOCO_CMAKE_ARGS):
"""Parses CMake arguments from an environment variable."""
raw_args = os.environ.get(env_var_name, '').strip()
unquoted, placeholders = tokenize_quoted_substr(raw_args, '"')
unquoted, placeholders = tokenize_quoted_substr(unquoted, "'", placeholders)
parts = re.split(r'\s+', unquoted.strip())
out = []
for part in parts:
for k, v in placeholders.items():
part = part.replace(k, v)
part = part.replace('\\"', '"').replace("\\'", "'")
if part:
out.append(part)
return out
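# An illustrative sketch (not part of the original script): with the
# hypothetical environment value
#   MUJOCO_CMAKE_ARGS='-DCMAKE_PREFIX_PATH="/opt/my libs" -DFOO=ON'
# parse_cmake_args_from_environ() returns
#   ['-DCMAKE_PREFIX_PATH=/opt/my libs', '-DFOO=ON']
# i.e. quoted substrings survive whitespace splitting, with their quotes
# stripped.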
class CMakeExtension(setuptools.Extension):
"""A Python extension that has been prebuilt by CMake.
We do not want distutils to handle the build process for our extensions, so
we pass an empty list of sources to the super constructor.
"""
def __init__(self, name):
super().__init__(name, sources=[])
class BuildCMakeExtension(build_ext.build_ext):
"""Uses CMake to build extensions."""
def run(self):
self._is_apple = (platform.system() == 'Darwin')
(self._mujoco_library_path,
self._mujoco_include_path,
self._mujoco_plugins_path,
self._mujoco_framework_path) = self._find_mujoco()
self._configure_cmake()
for ext in self.extensions:
assert ext.name.startswith(EXT_PREFIX)
assert '.' not in ext.name[len(EXT_PREFIX):]
self.build_extension(ext)
self._copy_external_libraries()
self._copy_mujoco_headers()
self._copy_plugin_libraries()
if self._is_apple:
self._copy_mjpython()
def _find_mujoco(self):
if MUJOCO_PATH not in os.environ:
raise RuntimeError(
f'{MUJOCO_PATH} environment variable is not set')
if MUJOCO_PLUGIN_PATH not in os.environ:
raise RuntimeError(
f'{MUJOCO_PLUGIN_PATH} environment variable is not set')
library_path = None
include_path = None
plugin_path = os.environ[MUJOCO_PLUGIN_PATH]
for directory, subdirs, filenames in os.walk(os.environ[MUJOCO_PATH]):
if self._is_apple and 'mujoco.framework' in subdirs:
return (os.path.join(directory, 'mujoco.framework/Versions/A'),
os.path.join(directory, 'mujoco.framework/Headers'),
plugin_path,
directory)
if fnmatch.filter(filenames, get_mujoco_lib_pattern()):
library_path = directory
if os.path.exists(os.path.join(directory, 'mujoco/mujoco.h')):
include_path = directory
if library_path and include_path:
return library_path, include_path, plugin_path, None
raise RuntimeError('Cannot find MuJoCo library and/or include paths')
def _copy_external_libraries(self):
dst = os.path.dirname(self.get_ext_fullpath(self.extensions[0].name))
for directory, _, filenames in os.walk(os.environ[MUJOCO_PATH]):
for pattern in get_external_lib_patterns():
for filename in fnmatch.filter(filenames, pattern):
shutil.copyfile(os.path.join(directory, filename),
os.path.join(dst, filename))
def _copy_plugin_libraries(self):
dst = os.path.join(
os.path.dirname(self.get_ext_fullpath(self.extensions[0].name)),
'plugin')
os.makedirs(dst)
for directory, _, filenames in os.walk(self._mujoco_plugins_path):
for pattern in get_plugin_lib_patterns():
for filename in fnmatch.filter(filenames, pattern):
shutil.copyfile(os.path.join(directory, filename),
os.path.join(dst, filename))
def _copy_mujoco_headers(self):
dst = os.path.join(
os.path.dirname(self.get_ext_fullpath(self.extensions[0].name)),
'include/mujoco')
os.makedirs(dst)
for directory, _, filenames in os.walk(self._mujoco_include_path):
for filename in fnmatch.filter(filenames, '*.h'):
shutil.copyfile(os.path.join(directory, filename),
os.path.join(dst, filename))
def _copy_mjpython(self):
src_dir = os.path.join(os.path.dirname(__file__), 'mujoco/mjpython')
dst_contents_dir = os.path.join(
os.path.dirname(self.get_ext_fullpath(self.extensions[0].name)),
'MuJoCo (mjpython).app/Contents')
os.makedirs(dst_contents_dir)
shutil.copyfile(os.path.join(src_dir, 'Info.plist'),
os.path.join(dst_contents_dir, 'Info.plist'))
dst_bin_dir = os.path.join(dst_contents_dir, 'MacOS')
os.makedirs(dst_bin_dir)
shutil.copyfile(os.path.join(self.build_temp, 'mjpython'),
os.path.join(dst_bin_dir, 'mjpython'))
os.chmod(os.path.join(dst_bin_dir, 'mjpython'), 0o755)
dst_resources_dir = os.path.join(dst_contents_dir, 'Resources')
os.makedirs(dst_resources_dir)
shutil.copyfile(os.path.join(src_dir, 'mjpython.icns'),
os.path.join(dst_resources_dir, 'mjpython.icns'))
def _configure_cmake(self):
"""Check for CMake."""
cmake = os.environ.get(MUJOCO_CMAKE, 'cmake')
build_cfg = 'Debug' if self.debug else 'Release'
cmake_module_path = os.path.join(os.path.dirname(__file__), 'cmake')
cmake_args = [
f'-DPython3_ROOT_DIR:PATH={sys.prefix}',
f'-DPython3_EXECUTABLE:STRING={sys.executable}',
f'-DCMAKE_MODULE_PATH:PATH={cmake_module_path}',
f'-DCMAKE_BUILD_TYPE:STRING={build_cfg}',
f'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY:PATH={self.build_temp}',
f'-DCMAKE_INTERPROCEDURAL_OPTIMIZATION:BOOL={"OFF" if self.debug else "ON"}',
'-DCMAKE_Fortran_COMPILER:STRING=',
'-DBUILD_TESTING:BOOL=OFF',
]
if self._mujoco_framework_path is not None:
cmake_args.extend([
f'-DMUJOCO_FRAMEWORK_DIR:PATH={self._mujoco_framework_path}',
])
else:
cmake_args.extend([
f'-DMUJOCO_LIBRARY_DIR:PATH={self._mujoco_library_path}',
f'-DMUJOCO_INCLUDE_DIR:PATH={self._mujoco_include_path}',
])
if platform.system() != 'Windows':
cmake_args.extend([
f'-DPython3_LIBRARY={sysconfig.get_paths()["stdlib"]}',
f'-DPython3_INCLUDE_DIR={sysconfig.get_paths()["include"]}',
])
if platform.system() == 'Darwin' and os.environ.get('ARCHFLAGS'):
osx_archs = []
if '-arch x86_64' in os.environ['ARCHFLAGS']:
osx_archs.append('x86_64')
if '-arch arm64' in os.environ['ARCHFLAGS']:
osx_archs.append('arm64')
cmake_args.append(f'-DCMAKE_OSX_ARCHITECTURES={";".join(osx_archs)}')
cmake_args.extend(parse_cmake_args_from_environ())
os.makedirs(self.build_temp, exist_ok=True)
if platform.system() == 'Windows':
cmake_args = [arg.replace('\\', '/') for arg in cmake_args]
print('Configuring CMake with the following arguments:')
for arg in cmake_args:
print(f' {arg}')
subprocess.check_call(
[cmake] + cmake_args +
[os.path.join(os.path.dirname(__file__), 'mujoco')],
cwd=self.build_temp)
print('Building all extensions with CMake')
subprocess.check_call(
[cmake, '--build', '.', f'-j{os.cpu_count()}', '--config', build_cfg],
cwd=self.build_temp)
def build_extension(self, ext):
dest_path = self.get_ext_fullpath(ext.name)
build_path = os.path.join(self.build_temp, os.path.basename(dest_path))
shutil.copyfile(build_path, dest_path)
class InstallScripts(install_scripts.install_scripts):
"""Strips file extension from executable scripts whose names end in `.py`."""
def run(self):
super().run()
oldfiles = self.outfiles
files = set(oldfiles)
self.outfiles = []
for oldfile in oldfiles:
if oldfile.endswith('.py'):
newfile = oldfile[:-3]
else:
newfile = oldfile
renamed = False
if newfile not in files and not os.path.exists(newfile):
if not self.dry_run:
os.rename(oldfile, newfile)
renamed = True
if renamed:
logging.info(
'Renaming %s script to %s',
os.path.basename(oldfile),
os.path.basename(newfile),
)
self.outfiles.append(newfile)
files.remove(oldfile)
files.add(newfile)
else:
self.outfiles.append(oldfile)
def find_data_files(package_dir, patterns):
"""Recursively finds files whose names match the given shell patterns."""
paths = set()
for directory, _, filenames in os.walk(package_dir):
for pattern in patterns:
for filename in fnmatch.filter(filenames, pattern):
# NB: paths must be relative to the package directory.
relative_dirpath = os.path.relpath(directory, package_dir)
paths.add(os.path.join(relative_dirpath, filename))
return list(paths)
SETUP_KWARGS = dict(
name='mujoco',
version=__version__,
author='Google DeepMind',
author_email='[email protected]',
description='MuJoCo Physics Simulator',
long_description=get_long_description(),
long_description_content_type='text/markdown',
url='https://github.com/google-deepmind/mujoco',
license='Apache License 2.0',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Topic :: Scientific/Engineering',
],
cmdclass=dict(
build_ext=BuildCMakeExtension,
install_scripts=InstallScripts,
),
ext_modules=[
CMakeExtension('mujoco._callbacks'),
CMakeExtension('mujoco._constants'),
CMakeExtension('mujoco._enums'),
CMakeExtension('mujoco._errors'),
CMakeExtension('mujoco._functions'),
CMakeExtension('mujoco._render'),
CMakeExtension('mujoco._rollout'),
CMakeExtension('mujoco._simulate'),
CMakeExtension('mujoco._structs'),
],
python_requires='>=3.8',
install_requires=[
'absl-py',
'glfw',
'numpy',
'pyopengl',
],
tests_require=[
'absl-py',
'glfw',
'numpy',
'pyopengl',
],
test_suite='mujoco',
packages=find_packages(),
package_data={
'mujoco':
find_data_files(
package_dir='mujoco',
patterns=[
'libmujoco.*.dylib',
'libmujoco*.so.*',
'mujoco.dll',
'include/mujoco/*.h',
]),
},
)
if platform.system() == 'Darwin':
SETUP_KWARGS['scripts'] = ['mujoco/mjpython/mjpython.py']
setup(**SETUP_KWARGS)
| mujoco-main | python/setup.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Interactive GUI viewer for MuJoCo."""
import abc
import atexit
import contextlib
import math
import os
import queue
import sys
import threading
import time
from typing import Callable, Optional, Tuple, Union
import weakref
import glfw
import mujoco
from mujoco import _simulate
import numpy as np
if not glfw._glfw: # pylint: disable=protected-access
raise RuntimeError('GLFW dynamic library handle is not available')
else:
_simulate.set_glfw_dlhandle(glfw._glfw._handle) # pylint: disable=protected-access
# Logarithmically spaced realtime slow-down coefficients (percent).
PERCENT_REALTIME = (
100, 80, 66, 50, 40, 33, 25, 20, 16, 13,
10, 8, 6.6, 5, 4, 3.3, 2.5, 2, 1.6, 1.3,
1, 0.8, 0.66, 0.5, 0.4, 0.33, 0.25, 0.2, 0.16, 0.13,
0.1
)
# Maximum time mis-alignment before re-sync.
MAX_SYNC_MISALIGN = 0.1
# Fraction of refresh available for simulation.
SIM_REFRESH_FRACTION = 0.7
CallbackType = Callable[[mujoco.MjModel, mujoco.MjData], None]
LoaderType = Callable[[], Tuple[mujoco.MjModel, mujoco.MjData]]
KeyCallbackType = Callable[[int], None]
# Loader function that also returns a file path for the GUI to display.
_LoaderWithPathType = Callable[[], Tuple[mujoco.MjModel, mujoco.MjData, str]]
_InternalLoaderType = Union[LoaderType, _LoaderWithPathType]
_Simulate = _simulate.Simulate
class Handle:
"""A handle for interacting with a MuJoCo viewer."""
def __init__(
self,
sim: _Simulate,
scn: mujoco.MjvScene,
cam: mujoco.MjvCamera,
opt: mujoco.MjvOption,
pert: mujoco.MjvPerturb,
):
self._sim = weakref.ref(sim)
self._scn = scn
self._cam = cam
self._opt = opt
self._pert = pert
@property
def scn(self):
return self._scn
@property
def cam(self):
return self._cam
@property
def opt(self):
return self._opt
@property
def perturb(self):
return self._pert
def close(self):
sim = self._sim()
if sim is not None:
sim.exit()
def is_running(self) -> bool:
sim = self._sim()
if sim is not None:
return sim.exitrequest < 2
return False
def lock(self):
sim = self._sim()
if sim is not None:
return sim.lock()
return contextlib.nullcontext()
def sync(self):
sim = self._sim()
if sim is not None:
sim.sync() # locks internally
def update_hfield(self, hfieldid: int):
sim = self._sim()
if sim is not None:
sim.update_hfield(hfieldid) # locks internally and blocks until done
def update_mesh(self, meshid: int):
sim = self._sim()
if sim is not None:
sim.update_mesh(meshid) # locks internally and blocks until done
def update_texture(self, texid: int):
sim = self._sim()
if sim is not None:
sim.update_texture(texid) # locks internally and blocks until done
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
# Abstract base dispatcher class for systems that require UI calls to be made
# on a specific thread (e.g. macOS). This is subclassed by system-specific
# Python launcher (mjpython) to implement the required dispatching mechanism.
class _MjPythonBase(metaclass=abc.ABCMeta):
def launch_on_ui_thread(
self,
model: mujoco.MjModel,
data: mujoco.MjData,
handle_return: Optional['queue.Queue[Handle]'],
key_callback: Optional[KeyCallbackType],
):
pass
# When running under mjpython, the launcher initializes this object.
_MJPYTHON: Optional[_MjPythonBase] = None
def _file_loader(path: str) -> _LoaderWithPathType:
"""Loads an MJCF model from file path."""
def load(path=path) -> Tuple[mujoco.MjModel, mujoco.MjData, str]:
m = mujoco.MjModel.from_xml_path(path)
d = mujoco.MjData(m)
return m, d, path
return load
def _reload(
simulate: _Simulate, loader: _InternalLoaderType,
notify_loaded: Optional[Callable[[], None]] = None
) -> Optional[Tuple[mujoco.MjModel, mujoco.MjData]]:
"""Internal function for reloading a model in the viewer."""
try:
load_tuple = loader()
except Exception as e: # pylint: disable=broad-except
simulate.load_error = str(e)
else:
m, d = load_tuple[:2]
# If the loader does not raise an exception then we assume that it
# successfully created mjModel and mjData. This is specified in the type
# annotation, but we perform a runtime assertion here as well to prevent
# possible segmentation faults.
assert m is not None and d is not None
path = load_tuple[2] if len(load_tuple) == 3 else ''
simulate.load(m, d, path)
if notify_loaded:
notify_loaded()
return m, d
def _physics_loop(simulate: _Simulate, loader: Optional[_InternalLoaderType]):
"""Physics loop for the GUI, to be run in a separate thread."""
m: Optional[mujoco.MjModel] = None
d: Optional[mujoco.MjData] = None
ctrl_noise = np.array([])
reload = True
# CPU-sim synchronization point.
synccpu = 0.0
syncsim = 0.0
# Run until asked to exit.
while not simulate.exitrequest:
if simulate.droploadrequest:
simulate.droploadrequest = 0
loader = _file_loader(simulate.dropfilename)
reload = True
if simulate.uiloadrequest:
simulate.uiloadrequest_decrement()
reload = True
if reload and loader is not None:
result = _reload(simulate, loader)
if result is not None:
m, d = result
ctrl_noise = np.zeros((m.nu,))
reload = False
# Sleep for 1 ms or yield, to let main thread run.
if simulate.run != 0 and simulate.busywait != 0:
time.sleep(0)
else:
time.sleep(0.001)
with simulate.lock():
if m is not None:
assert d is not None
if simulate.run:
# Record CPU time at start of iteration.
startcpu = time.time()
elapsedcpu = startcpu - synccpu
elapsedsim = d.time - syncsim
# Inject noise.
if simulate.ctrl_noise_std != 0.0:
# Convert rate and scale to discrete time (Ornstein–Uhlenbeck).
rate = math.exp(-m.opt.timestep /
max(simulate.ctrl_noise_rate, mujoco.mjMINVAL))
scale = simulate.ctrl_noise_std * math.sqrt(1 - rate * rate)
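# With rate = exp(-dt / tau) and scale = std * sqrt(1 - rate**2), the
# discrete update below keeps the stationary standard deviation of the
# noise process equal to ctrl_noise_std.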
for i in range(m.nu):
# Update noise.
ctrl_noise[i] = (rate * ctrl_noise[i] +
scale * mujoco.mju_standardNormal(None))
# Apply noise.
d.ctrl[i] = ctrl_noise[i]
# Requested slow-down factor.
slowdown = 100 / PERCENT_REALTIME[simulate.real_time_index]
# Misalignment: distance from target sim time > MAX_SYNC_MISALIGN.
misaligned = abs(elapsedcpu / slowdown -
elapsedsim) > MAX_SYNC_MISALIGN
# Out-of-sync (for any reason): reset sync times, step.
if (elapsedsim < 0 or elapsedcpu < 0 or synccpu == 0 or misaligned or
simulate.speed_changed):
# Re-sync.
synccpu = startcpu
syncsim = d.time
simulate.speed_changed = False
# Run single step, let next iteration deal with timing.
mujoco.mj_step(m, d)
# In-sync: step until ahead of cpu.
else:
measured = False
prevsim = d.time
refreshtime = SIM_REFRESH_FRACTION / simulate.refresh_rate
# Step while sim lags behind CPU and within refreshtime.
while (((d.time - syncsim) * slowdown <
(time.time() - synccpu)) and
((time.time() - startcpu) < refreshtime)):
# Measure slowdown before first step.
if not measured and elapsedsim:
simulate.measured_slowdown = elapsedcpu / elapsedsim
measured = True
# Call mj_step.
mujoco.mj_step(m, d)
# Break if reset.
if d.time < prevsim:
break
else: # simulate.run is False: GUI is paused.
# Run mj_forward, to update rendering and joint sliders.
mujoco.mj_forward(m, d)
def _launch_internal(
model: Optional[mujoco.MjModel] = None,
data: Optional[mujoco.MjData] = None,
*,
run_physics_thread: bool,
loader: Optional[_InternalLoaderType] = None,
handle_return: Optional['queue.Queue[Handle]'] = None,
key_callback: Optional[KeyCallbackType] = None,
) -> None:
"""Internal API, so that the public API has more readable type annotations."""
if model is None and data is not None:
raise ValueError('mjData is specified but mjModel is not')
elif callable(model) and data is not None:
raise ValueError(
'mjData should not be specified when an mjModel loader is used')
elif loader is not None and model is not None:
raise ValueError('model and loader are both specified')
elif run_physics_thread and handle_return is not None:
raise ValueError('run_physics_thread and handle_return are both specified')
if loader is None and model is not None:
def _loader(m=model, d=data) -> Tuple[mujoco.MjModel, mujoco.MjData]:
if d is None:
d = mujoco.MjData(m)
return m, d
loader = _loader
if model and not run_physics_thread:
scn = mujoco.MjvScene(model, _Simulate.MAX_GEOM)
else:
scn = mujoco.MjvScene()
cam = mujoco.MjvCamera()
opt = mujoco.MjvOption()
pert = mujoco.MjvPerturb()
simulate = _Simulate(scn, cam, opt, pert, run_physics_thread, key_callback)
# Initialize GLFW if not using mjpython.
if _MJPYTHON is None:
if not glfw.init():
raise mujoco.FatalError('could not initialize GLFW')
atexit.register(glfw.terminate)
notify_loaded = None
if handle_return:
notify_loaded = (
lambda: handle_return.put_nowait(Handle(simulate, scn, cam, opt, pert)))
if run_physics_thread:
side_thread = threading.Thread(
target=_physics_loop, args=(simulate, loader))
else:
side_thread = threading.Thread(
target=_reload, args=(simulate, loader, notify_loaded))
def make_exit(simulate):
def exit_simulate():
simulate.exit()
return exit_simulate
exit_simulate = make_exit(simulate)
atexit.register(exit_simulate)
side_thread.start()
simulate.render_loop()
atexit.unregister(exit_simulate)
side_thread.join()
simulate.destroy()
def launch(model: Optional[mujoco.MjModel] = None,
data: Optional[mujoco.MjData] = None,
*,
loader: Optional[LoaderType] = None) -> None:
"""Launches the Simulate GUI."""
_launch_internal(
model, data, run_physics_thread=True, loader=loader)
def launch_from_path(path: str) -> None:
"""Launches the Simulate GUI from file path."""
_launch_internal(run_physics_thread=True, loader=_file_loader(path))
def launch_passive(
model: mujoco.MjModel,
data: mujoco.MjData,
*,
key_callback: Optional[KeyCallbackType] = None,
) -> Handle:
"""Launches a passive Simulate GUI without blocking the running thread."""
if not isinstance(model, mujoco.MjModel):
raise ValueError(f'`model` is not a mujoco.MjModel: got {model!r}')
if not isinstance(data, mujoco.MjData):
raise ValueError(f'`data` is not a mujoco.MjData: got {data!r}')
if key_callback is not None and not callable(key_callback):
raise ValueError(
f'`key_callback` is not callable: got {key_callback!r}')
mujoco.mj_forward(model, data)
handle_return = queue.Queue(1)
if sys.platform != 'darwin':
thread = threading.Thread(
target=_launch_internal,
args=(model, data),
kwargs=dict(
run_physics_thread=False,
handle_return=handle_return,
key_callback=key_callback,
),
)
thread.daemon = True
thread.start()
else:
if not isinstance(_MJPYTHON, _MjPythonBase):
raise RuntimeError(
'`launch_passive` requires that the Python script be run under '
'`mjpython` on macOS')
_MJPYTHON.launch_on_ui_thread(model, data, handle_return, key_callback)
return handle_return.get()
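# A minimal usage sketch for launch_passive (added for illustration; assumes
# an MJCF string `xml` is available, and `mjpython` when running on macOS):
#
#   m = mujoco.MjModel.from_xml_string(xml)
#   d = mujoco.MjData(m)
#   with launch_passive(m, d) as viewer:
#     while viewer.is_running():
#       mujoco.mj_step(m, d)
#       viewer.sync()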
if __name__ == '__main__':
# pylint: disable=g-bad-import-order
from absl import app # pylint: disable=g-import-not-at-top
from absl import flags # pylint: disable=g-import-not-at-top
_MJCF_PATH = flags.DEFINE_string('mjcf', None, 'Path to MJCF file.')
def main(argv) -> None:
del argv
if _MJCF_PATH.value is not None:
launch_from_path(os.path.expanduser(_MJCF_PATH.value))
else:
launch()
app.run(main)
| mujoco-main | python/mujoco/viewer.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines a renderer class for the MuJoCo Python native bindings."""
from typing import Optional, Union
from mujoco import _enums
from mujoco import _functions
from mujoco import _render
from mujoco import _structs
from mujoco import gl_context
import numpy as np
class Renderer:
"""Renders MuJoCo scenes."""
def __init__(
self,
model: _structs.MjModel,
height: int = 240,
width: int = 320,
max_geom: int = 10000
) -> None:
"""Initializes a new `Renderer`.
Args:
model: an MjModel instance.
height: image height in pixels.
width: image width in pixels.
max_geom: integer specifying the maximum number of geoms that can be
rendered in the same scene.
Raises:
ValueError: If `width` or `height` exceeds the dimensions of MuJoCo's
offscreen framebuffer.
"""
buffer_width = model.vis.global_.offwidth
buffer_height = model.vis.global_.offheight
if width > buffer_width:
raise ValueError(f"""
Image width {width} > framebuffer width {buffer_width}. Either reduce the image
width or specify a larger offscreen framebuffer in the model XML using the
clause:
<visual>
<global offwidth="my_width"/>
</visual>""".lstrip())
if height > buffer_height:
raise ValueError(f"""
Image height {height} > framebuffer height {buffer_height}. Either reduce the
image height or specify a larger offscreen framebuffer in the model XML using
the clause:
<visual>
<global offheight="my_height"/>
</visual>""".lstrip())
self._width = width
self._height = height
self._model = model
self._scene = _structs.MjvScene(model=model, maxgeom=max_geom)
self._scene_option = _structs.MjvOption()
self._rect = _render.MjrRect(0, 0, self._width, self._height)
# Create render contexts.
# TODO(nimrod): Figure out why pytype doesn't like gl_context.GLContext
self._gl_context = gl_context.GLContext(width, height) # type: ignore
self._gl_context.make_current()
self._mjr_context = _render.MjrContext(
model, _enums.mjtFontScale.mjFONTSCALE_150.value
)
_render.mjr_setBuffer(
_enums.mjtFramebuffer.mjFB_OFFSCREEN.value, self._mjr_context
)
# Default render flags.
self._depth_rendering = False
self._segmentation_rendering = False
@property
def model(self):
return self._model
@property
def scene(self) -> _structs.MjvScene:
return self._scene
@property
def height(self):
return self._height
@property
def width(self):
return self._width
def enable_depth_rendering(self):
self._segmentation_rendering = False
self._depth_rendering = True
def disable_depth_rendering(self):
self._depth_rendering = False
def enable_segmentation_rendering(self):
self._segmentation_rendering = True
self._depth_rendering = False
def disable_segmentation_rendering(self):
self._segmentation_rendering = False
def render(self, *, out: Optional[np.ndarray] = None) -> np.ndarray:
"""Renders the scene as a numpy array of pixel values.
Args:
out: Alternative output array in which to place the resulting pixels. It
        must have the same shape as the expected output, but the type will be
        cast if necessary. The expected shape depends on the value of
        `self._depth_rendering`: when `True`, we expect `out.shape == (height,
        width)`, and `out.shape == (height, width, 3)` when `False`.
    Returns:
      A numpy array holding the rendered image: `(H, W, 3)` RGB by default,
      `(H, W)` depth values when depth rendering is enabled, and `(H, W, 2)`
      `(objid, objtype)` pairs when segmentation rendering is enabled. If
      `out` is provided, a reference to `out` is returned for RGB rendering;
      depth and segmentation rendering allocate and return a new array.
"""
original_flags = self._scene.flags.copy()
if self._segmentation_rendering:
self._scene.flags[_enums.mjtRndFlag.mjRND_SEGMENT] = True
self._scene.flags[_enums.mjtRndFlag.mjRND_IDCOLOR] = True
self._gl_context.make_current()
if self._depth_rendering:
out_shape = (self._height, self._width)
out_dtype = np.float32
else:
out_shape = (self._height, self._width, 3)
out_dtype = np.uint8
if out is None:
out = np.empty(out_shape, dtype=out_dtype)
else:
if out.shape != out_shape:
raise ValueError(
f'Expected `out.shape == {out_shape}`. Got `out.shape={out.shape}`'
' instead. When using depth rendering, the out array should be of'
            ' shape `(height, width)`, and otherwise `(height, width, 3)`.'
f' Got `(self.height, self.width)={(self.height, self.width)}` and'
f' `self._depth_rendering={self._depth_rendering}`.'
)
# Render scene and read contents of RGB and depth buffers.
_render.mjr_render(self._rect, self._scene, self._mjr_context)
if self._depth_rendering:
_render.mjr_readPixels(None, out, self._rect, self._mjr_context)
# Get the distances to the near and far clipping planes.
extent = self._model.stat.extent
near = self._model.vis.map.znear * extent
far = self._model.vis.map.zfar * extent
# Convert from [0 1] to depth in units of length, see links below:
# http://stackoverflow.com/a/6657284/1461210
# https://www.khronos.org/opengl/wiki/Depth_Buffer_Precision
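      # For example (illustrative numbers): with near = 0.1 and far = 50.0, a
      # raw buffer value of 0.0 maps to 0.1 (the near plane) and a raw value
      # of 1.0 maps to 0.1 / (0.1 / 50.0) = 50.0 (the far plane).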
out = near / (1 - out * (1 - near / far))
elif self._segmentation_rendering:
_render.mjr_readPixels(out, None, self._rect, self._mjr_context)
# Convert 3-channel uint8 to 1-channel uint32.
image3 = out.astype(np.uint32)
segimage = (
image3[:, :, 0]
+ image3[:, :, 1] * (2**8)
+ image3[:, :, 2] * (2**16)
)
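      # For example (illustrative): an RGB pixel of (3, 1, 0) decodes to
      # segmentation id 3 + 1 * 2**8 + 0 * 2**16 = 259.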
# Remap segid to 2-channel (object ID, object type) pair.
# Seg ID 0 is background -- will be remapped to (-1, -1).
ngeoms = self._scene.ngeom
segid2output = np.full(
(ngeoms + 1, 2), fill_value=-1, dtype=np.int32
) # Seg id cannot be > ngeom + 1.
visible_geoms = [g for g in self._scene.geoms[:ngeoms] if g.segid != -1]
visible_segids = np.array([g.segid + 1 for g in visible_geoms], np.int32)
visible_objid = np.array([g.objid for g in visible_geoms], np.int32)
visible_objtype = np.array([g.objtype for g in visible_geoms], np.int32)
segid2output[visible_segids, 0] = visible_objid
segid2output[visible_segids, 1] = visible_objtype
out = segid2output[segimage]
# Reset scene flags.
np.copyto(self._scene.flags, original_flags)
else:
_render.mjr_readPixels(out, None, self._rect, self._mjr_context)
out[:] = np.flipud(out)
return out
def update_scene(
self,
data: _structs.MjData,
camera: Union[int, str, _structs.MjvCamera] = -1,
scene_option: Optional[_structs.MjvOption] = None
):
"""Updates geometry used for rendering.
Args:
data: An instance of `MjData`.
      camera: An instance of `MjvCamera`, a camera name (string), or a camera
        id (integer). Defaults to -1, the free camera.
      scene_option: A custom `MjvOption` instance to use to render
        the scene instead of the default.
    Raises:
      ValueError: If the camera id is out of range, or if the camera name does
        not exist.
"""
if not isinstance(camera, _structs.MjvCamera):
camera_id = camera
if isinstance(camera_id, str):
camera_id = _functions.mj_name2id(
self._model, _enums.mjtObj.mjOBJ_CAMERA.value, camera_id
)
if camera_id == -1:
raise ValueError(f'The camera "{camera}" does not exist.')
if camera_id < -1 or camera_id >= self._model.ncam:
raise ValueError(f'The camera id {camera_id} is out of'
f' range [-1, {self._model.ncam}).')
# Render camera.
camera = _structs.MjvCamera()
camera.fixedcamid = camera_id
# Defaults to mjCAMERA_FREE, otherwise mjCAMERA_FIXED refers to a
# camera explicitly defined in the model.
if camera_id == -1:
camera.type = _enums.mjtCamera.mjCAMERA_FREE
_functions.mjv_defaultFreeCamera(self._model, camera)
else:
camera.type = _enums.mjtCamera.mjCAMERA_FIXED
scene_option = scene_option or self._scene_option
_functions.mjv_updateScene(
self._model,
data,
scene_option,
None,
camera, _enums.mjtCatBit.mjCAT_ALL.value,
self._scene,
)
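# A minimal usage sketch (not part of the original module API): it assumes a
# working GL context; on headless machines an EGL or OSMesa backend may need
# to be selected via the MUJOCO_GL environment variable. The MJCF model and
# all names below are illustrative only.
if __name__ == '__main__':
  import mujoco  # assumes the installed `mujoco` package
  _EXAMPLE_XML = """
  <mujoco>
    <worldbody>
      <light pos="0 0 2"/>
      <geom type="sphere" size="0.1" rgba="1 0 0 1"/>
    </worldbody>
  </mujoco>
  """
  example_model = mujoco.MjModel.from_xml_string(_EXAMPLE_XML)
  example_data = mujoco.MjData(example_model)
  mujoco.mj_forward(example_model, example_data)
  example_renderer = Renderer(example_model, height=240, width=320)
  example_renderer.update_scene(example_data)  # default free camera (-1)
  rgb = example_renderer.render()  # uint8 array of shape (240, 320, 3)
  example_renderer.enable_depth_rendering()
  depth = example_renderer.render()  # float array of shape (240, 320)
  print(rgb.shape, depth.shape)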
| mujoco-main | python/mujoco/renderer.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the MuJoCo renderer."""
from absl.testing import absltest
from absl.testing import parameterized
import mujoco
import numpy as np
@absltest.skipUnless(hasattr(mujoco, 'GLContext'),
'MuJoCo rendering is disabled')
class MuJoCoRendererTest(parameterized.TestCase):
def test_renderer_unknown_camera_name(self):
xml = """
<mujoco>
<worldbody>
<camera name="a"/>
</worldbody>
</mujoco>
"""
model = mujoco.MjModel.from_xml_string(xml)
data = mujoco.MjData(model)
renderer = mujoco.Renderer(model, 50, 50)
mujoco.mj_forward(model, data)
with self.assertRaisesRegex(ValueError, r'camera "b" does not exist'):
renderer.update_scene(data, 'b')
def test_renderer_camera_under_range(self):
xml = """
<mujoco>
<worldbody>
<camera name="a"/>
</worldbody>
</mujoco>
"""
model = mujoco.MjModel.from_xml_string(xml)
data = mujoco.MjData(model)
renderer = mujoco.Renderer(model, 50, 50)
mujoco.mj_forward(model, data)
with self.assertRaisesRegex(ValueError, '-2 is out of range'):
renderer.update_scene(data, -2)
def test_renderer_camera_over_range(self):
xml = """
<mujoco>
<worldbody>
<camera name="a"/>
</worldbody>
</mujoco>
"""
model = mujoco.MjModel.from_xml_string(xml)
data = mujoco.MjData(model)
renderer = mujoco.Renderer(model, 50, 50)
mujoco.mj_forward(model, data)
with self.assertRaisesRegex(ValueError, '1 is out of range'):
renderer.update_scene(data, 1)
def test_renderer_renders_scene(self):
xml = """
<mujoco>
<worldbody>
<camera name="closeup" pos="0 -6 0" xyaxes="1 0 0 0 1 100"/>
<geom name="white_box" type="box" size="1 1 1" rgba="1 1 1 1"/>
</worldbody>
</mujoco>
"""
model = mujoco.MjModel.from_xml_string(xml)
data = mujoco.MjData(model)
renderer = mujoco.Renderer(model, 50, 50)
mujoco.mj_forward(model, data)
renderer.update_scene(data, 'closeup')
pixels = renderer.render().flatten()
not_all_black = False
    # The white box should yield at least one non-black pixel.
for pixel in pixels:
if pixel > 0:
not_all_black = True
break
self.assertTrue(not_all_black)
def test_renderer_output_without_out(self):
xml = """
<mujoco>
<worldbody>
<camera name="closeup" pos="0 -6 0" xyaxes="1 0 0 0 1 100"/>
<geom name="white_box" type="box" size="1 1 1" rgba="1 1 1 1"/>
</worldbody>
</mujoco>
"""
model = mujoco.MjModel.from_xml_string(xml)
data = mujoco.MjData(model)
mujoco.mj_forward(model, data)
renderer = mujoco.Renderer(model, 50, 50)
renderer.update_scene(data, 'closeup')
pixels = [renderer.render()]
colors = (
(1.0, 0.0, 0.0, 1.0),
(0.0, 1.0, 0.0, 1.0),
(0.0, 0.0, 1.0, 1.0),
)
for i, color in enumerate(colors):
model.geom_rgba[0, :] = color
mujoco.mj_forward(model, data)
renderer.update_scene(data, 'closeup')
pixels.append(renderer.render())
self.assertIsNot(pixels[-2], pixels[-1])
# Pixels should change over steps.
self.assertFalse((pixels[i + 1] == pixels[i]).all())
def test_renderer_output_with_out(self):
xml = """
<mujoco>
<worldbody>
<camera name="closeup" pos="0 -6 0" xyaxes="1 0 0 0 1 100"/>
<geom name="white_box" type="box" size="1 1 1" rgba="1 1 1 1"/>
</worldbody>
</mujoco>
"""
render_size = (50, 50)
render_out = np.zeros((*render_size, 3), np.uint8)
model = mujoco.MjModel.from_xml_string(xml)
data = mujoco.MjData(model)
mujoco.mj_forward(model, data)
renderer = mujoco.Renderer(model, *render_size)
renderer.update_scene(data, 'closeup')
self.assertTrue(np.all(render_out == 0))
pixels = renderer.render(out=render_out)
# Pixels should always refer to the same `render_out` array.
self.assertIs(pixels, render_out)
self.assertFalse(np.all(render_out == 0))
failing_render_size = (10, 10)
self.assertNotEqual(failing_render_size, render_size)
with self.assertRaises(ValueError):
pixels = renderer.render(
out=np.zeros((*failing_render_size, 3), np.uint8)
)
if __name__ == '__main__':
absltest.main()
| mujoco-main | python/mujoco/renderer_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python bindings for MuJoCo."""
import ctypes
import ctypes.util
import os
import platform
import subprocess
import warnings
_SYSTEM = platform.system()
if _SYSTEM == 'Windows':
ctypes.WinDLL(os.path.join(os.path.dirname(__file__), 'mujoco.dll'))
elif _SYSTEM == 'Darwin':
proc_translated = subprocess.run(
['sysctl', '-n', 'sysctl.proc_translated'], capture_output=True).stdout
try:
is_rosetta = bool(int(proc_translated))
except ValueError:
is_rosetta = False
if is_rosetta and platform.machine() == 'x86_64':
raise ImportError(
'You are running an x86_64 build of Python on an Apple Silicon '
'machine. This is not supported by MuJoCo. Please install and run a '
'native, arm64 build of Python.')
from mujoco._callbacks import *
from mujoco._constants import *
from mujoco._enums import *
from mujoco._errors import *
from mujoco._functions import *
from mujoco._render import *
from mujoco._structs import *
from mujoco.gl_context import *
from mujoco.renderer import Renderer
HEADERS_DIR = os.path.join(os.path.dirname(__file__), 'include/mujoco')
PLUGINS_DIR = os.path.join(os.path.dirname(__file__), 'plugin')
PLUGIN_HANDLES = []
def _load_all_bundled_plugins():
for directory, _, filenames in os.walk(PLUGINS_DIR):
for filename in filenames:
if os.path.splitext(filename)[-1] in [".dll", ".dylib", ".so"]:
PLUGIN_HANDLES.append(ctypes.CDLL(os.path.join(directory, filename)))
elif filename == "__init__.py":
pass
else:
warnings.warn('Ignoring non-library in plugin directory: '
f'{os.path.join(directory, filename)}', ImportWarning)
_load_all_bundled_plugins()
__version__ = mj_versionString() # pylint: disable=undefined-variable
| mujoco-main | python/mujoco/__init__.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Roll out open-loop trajectories from initial states, get subsequent states and sensor values."""
from mujoco import _rollout
import numpy as np
def rollout(model, data, initial_state=None, ctrl=None,
*, # require following arguments to be named
skip_checks=False,
nstate=None,
nstep=None,
initial_time=None,
initial_warmstart=None,
qfrc_applied=None,
xfrc_applied=None,
mocap=None,
state=None,
sensordata=None):
"""Roll out open-loop trajectories from initial states, get subsequent states and sensor values.
This function serves as a Python wrapper for the C++ functionality in
  `rollout.cc`; please see the documentation therein. This Python function will
infer `nstate` and `nstep`, tile input arguments with singleton dimensions,
and allocate output arguments if none are given.
"""
# don't infer nstate/nstep, don't support singleton expansion, don't allocate
# output arrays, just call rollout
if skip_checks:
_rollout.rollout(model, data, nstate, nstep, initial_state, initial_time,
initial_warmstart, ctrl, qfrc_applied, xfrc_applied, mocap,
state, sensordata)
return state, sensordata
# check types
if nstate and not isinstance(nstate, int):
raise ValueError('nstate must be an integer')
if nstep and not isinstance(nstep, int):
raise ValueError('nstep must be an integer')
_check_must_be_numeric(
initial_state=initial_state,
initial_time=initial_time,
initial_warmstart=initial_warmstart,
ctrl=ctrl,
qfrc_applied=qfrc_applied,
xfrc_applied=xfrc_applied,
mocap=mocap,
state=state,
sensordata=sensordata)
# check number of dimensions
_check_number_of_dimensions(2,
initial_state=initial_state,
initial_time=initial_time,
initial_warmstart=initial_warmstart)
_check_number_of_dimensions(3,
ctrl=ctrl,
qfrc_applied=qfrc_applied,
xfrc_applied=xfrc_applied,
mocap=mocap,
state=state,
sensordata=sensordata)
# ensure 2D, make contiguous, row-major (C ordering)
initial_state = _ensure_2d(initial_state)
initial_time = _ensure_2d(initial_time)
initial_warmstart = _ensure_2d(initial_warmstart)
# ensure 3D, make contiguous, row-major (C ordering)
ctrl = _ensure_3d(ctrl)
qfrc_applied = _ensure_3d(qfrc_applied)
xfrc_applied = _ensure_3d(xfrc_applied)
mocap = _ensure_3d(mocap)
state = _ensure_3d(state)
sensordata = _ensure_3d(sensordata)
# check trailing dimensions
_check_trailing_dimension(model.nq + model.nv + model.na,
initial_state=initial_state, state=state)
_check_trailing_dimension(1, initial_time=initial_time)
_check_trailing_dimension(model.nu, ctrl=ctrl)
_check_trailing_dimension(model.nv, qfrc_applied=qfrc_applied)
_check_trailing_dimension(model.nbody*6, xfrc_applied=xfrc_applied)
_check_trailing_dimension(model.nmocap*7, mocap=mocap)
_check_trailing_dimension(model.nsensordata, sensordata=sensordata)
# infer nstate, check for incompatibilities
nstate = _infer_dimension(0, nstate or 1,
initial_state=initial_state,
initial_time=initial_time,
initial_warmstart=initial_warmstart,
ctrl=ctrl,
qfrc_applied=qfrc_applied,
xfrc_applied=xfrc_applied,
mocap=mocap,
state=state,
sensordata=sensordata)
# infer nstep, check for incompatibilities
nstep = _infer_dimension(1, nstep or 1,
ctrl=ctrl,
qfrc_applied=qfrc_applied,
xfrc_applied=xfrc_applied,
mocap=mocap,
state=state,
sensordata=sensordata)
# tile input arrays if required (singleton expansion)
initial_state = _tile_if_required(initial_state, nstate)
initial_time = _tile_if_required(initial_time, nstate)
initial_warmstart = _tile_if_required(initial_warmstart, nstate)
ctrl = _tile_if_required(ctrl, nstate, nstep)
qfrc_applied = _tile_if_required(qfrc_applied, nstate, nstep)
xfrc_applied = _tile_if_required(xfrc_applied, nstate, nstep)
mocap = _tile_if_required(mocap, nstate, nstep)
# allocate output if not provided
if state is None:
state = np.empty((nstate, nstep, model.nq + model.nv + model.na))
if sensordata is None:
sensordata = np.empty((nstate, nstep, model.nsensordata))
# call rollout
_rollout.rollout(model, data, nstate, nstep, initial_state, initial_time,
initial_warmstart, ctrl, qfrc_applied, xfrc_applied, mocap,
state, sensordata)
# return squeezed outputs
return state.squeeze(), sensordata.squeeze()
def _check_must_be_numeric(**kwargs):
for key, value in kwargs.items():
if value is None:
continue
if not isinstance(value, np.ndarray) and not isinstance(value, float):
raise ValueError(f'{key} must be a numpy array or float')
def _check_number_of_dimensions(ndim, **kwargs):
for key, value in kwargs.items():
if value is None:
continue
if value.ndim > ndim:
raise ValueError(f'{key} can have at most {ndim} dimensions')
def _check_trailing_dimension(dim, **kwargs):
for key, value in kwargs.items():
if value is None:
continue
if value.shape[-1] != dim:
raise ValueError(f'trailing dimension of {key} must be {dim}, got {value.shape[-1]}')
def _ensure_2d(arg):
if arg is None:
return None
else:
return np.ascontiguousarray(np.atleast_2d(arg), dtype=np.float64)
def _ensure_3d(arg):
if arg is None:
return None
else:
    # np.atleast_3d adds both leading and trailing dims; we only want leading.
if arg.ndim == 0:
arg = arg[np.newaxis, np.newaxis, np.newaxis, ...]
elif arg.ndim == 1:
arg = arg[np.newaxis, np.newaxis, ...]
elif arg.ndim == 2:
arg = arg[np.newaxis, ...]
return np.ascontiguousarray(arg, dtype=np.float64)
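# For example (illustrative), _ensure_3d(np.zeros((5, 3))) returns a
# contiguous float64 array of shape (1, 5, 3): only leading batch dimensions
# are added.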
def _infer_dimension(dim, value, **kwargs):
for name, array in kwargs.items():
if array is None:
continue
if array.shape[dim] != value:
if value == 1:
value = array.shape[dim]
elif array.shape[dim] != 1:
raise ValueError(
f'dimension {dim} inferred as {value} but {name} has {array.shape[dim]}'
)
return value
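# For example (illustrative), with ctrl of shape (4, 1, nu) and state of shape
# (4, 3, nq + nv + na), _infer_dimension(1, 1, ctrl=ctrl, state=state) returns
# 3; ctrl's singleton time dimension is then expanded by _tile_if_required.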
def _tile_if_required(array, dim0, dim1=None):
if array is None:
return
reps = np.ones(array.ndim, dtype=int)
if array.shape[0] == 1:
reps[0] = dim0
if dim1 is not None and array.shape[1] == 1:
reps[1] = dim1
return np.tile(array, reps)
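# A minimal usage sketch (not part of the module API; the toy MJCF model and
# all names below are illustrative only):
if __name__ == '__main__':
  import mujoco
  _EXAMPLE_XML = """
  <mujoco>
    <worldbody>
      <body>
        <joint name="hinge"/>
        <geom type="sphere" size="0.1" mass="1"/>
      </body>
    </worldbody>
    <actuator>
      <motor joint="hinge"/>
    </actuator>
  </mujoco>
  """
  example_model = mujoco.MjModel.from_xml_string(_EXAMPLE_XML)
  example_data = mujoco.MjData(example_model)
  nstate_demo, nstep_demo = 2, 5
  example_initial_state = np.random.randn(
      nstate_demo, example_model.nq + example_model.nv + example_model.na)
  example_ctrl = np.random.randn(nstate_demo, nstep_demo, example_model.nu)
  example_state, example_sensordata = rollout(
      example_model, example_data, example_initial_state, example_ctrl)
  # example_state has shape (nstate, nstep, nq + nv + na): neither leading
  # dimension is singleton, so squeeze() leaves them intact.
  print(example_state.shape, example_sensordata.shape)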
| mujoco-main | python/mujoco/rollout.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tests for rollout function."""
from absl.testing import absltest
from absl.testing import parameterized
import mujoco
import numpy as np
import concurrent.futures
import threading
from mujoco import rollout
#--------------------------- models used for testing ---------------------------
TEST_XML = r"""
<mujoco>
<worldbody>
<light pos="0 0 2"/>
<geom type="plane" size="5 5 .1"/>
<body pos="0 0 .1">
<joint name="yaw" axis="0 0 1"/>
<joint name="pitch" axis="0 1 0"/>
<geom type="capsule" size=".02" fromto="0 0 0 1 0 0"/>
<geom type="box" pos="1 0 0" size=".1 .1 .1"/>
<site name="site" pos="1 0 0"/>
</body>
</worldbody>
<actuator>
<general joint="pitch" gainprm="100"/>
<general joint="yaw" dyntype="filter" dynprm="1" gainprm="100"/>
</actuator>
<sensor>
<accelerometer site="site"/>
</sensor>
</mujoco>
"""
TEST_XML_NO_SENSORS = r"""
<mujoco>
<worldbody>
<light pos="0 0 2"/>
<geom type="plane" size="5 5 .1"/>
<body pos="0 0 .1">
<joint name="yaw" axis="0 0 1"/>
<joint name="pitch" axis="0 1 0"/>
<geom type="capsule" size=".02" fromto="0 0 0 1 0 0"/>
<geom type="box" pos="1 0 0" size=".1 .1 .1"/>
<site name="site" pos="1 0 0"/>
</body>
</worldbody>
<actuator>
<general joint="pitch" gainprm="100"/>
<general joint="yaw" dyntype="filter" dynprm="1" gainprm="100"/>
</actuator>
</mujoco>
"""
TEST_XML_NO_ACTUATORS = r"""
<mujoco>
<worldbody>
<light pos="0 0 2"/>
<geom type="plane" size="5 5 .1"/>
<body pos="0 0 .1">
<joint name="yaw" axis="0 0 1"/>
<joint name="pitch" axis="0 1 0"/>
<geom type="capsule" size=".02" fromto="0 0 0 1 0 0"/>
<geom type="box" pos="1 0 0" size=".1 .1 .1"/>
<site name="site" pos="1 0 0"/>
</body>
</worldbody>
<sensor>
<accelerometer site="site"/>
</sensor>
</mujoco>
"""
TEST_XML_MOCAP = r"""
<mujoco>
<worldbody>
<body name="1" mocap="true">
</body>
<body name="2" mocap="true">
</body>
</worldbody>
<sensor>
<framepos objtype="xbody" objname="1"/>
<framequat objtype="xbody" objname="2"/>
</sensor>
</mujoco>
"""
TEST_XML_EMPTY = r"""
<mujoco>
</mujoco>
"""
ALL_MODELS = {'TEST_XML': TEST_XML,
'TEST_XML_NO_SENSORS': TEST_XML_NO_SENSORS,
'TEST_XML_NO_ACTUATORS': TEST_XML_NO_ACTUATORS,
'TEST_XML_EMPTY': TEST_XML_EMPTY}
#------------------------------- tests -----------------------------------------
class MuJoCoRolloutTest(parameterized.TestCase):
def setUp(self):
super().setUp()
np.random.seed(42)
#----------------------------- test basic operation
@parameterized.parameters(ALL_MODELS.keys())
def test_single_step(self, model_name):
model = mujoco.MjModel.from_xml_string(ALL_MODELS[model_name])
data = mujoco.MjData(model)
initial_state = np.random.randn(model.nq + model.nv + model.na)
ctrl = np.random.randn(model.nu)
state, sensordata = rollout.rollout(model, data, initial_state, ctrl)
mujoco.mj_resetData(model, data)
py_state, py_sensordata = step(model, data, initial_state, ctrl=ctrl)
np.testing.assert_array_equal(state, py_state)
np.testing.assert_array_equal(sensordata, py_sensordata)
@parameterized.parameters(ALL_MODELS.keys())
def test_single_rollout(self, model_name):
nstep = 3 # number of timesteps
model = mujoco.MjModel.from_xml_string(ALL_MODELS[model_name])
data = mujoco.MjData(model)
initial_state = np.random.randn(model.nq + model.nv + model.na)
ctrl = np.random.randn(nstep, model.nu)
state, sensordata = rollout.rollout(model, data, initial_state, ctrl)
py_state, py_sensordata = single_rollout(model, data, initial_state,
ctrl=ctrl)
np.testing.assert_array_equal(state, np.asarray(py_state))
np.testing.assert_array_equal(sensordata, np.asarray(py_sensordata))
@parameterized.parameters(ALL_MODELS.keys())
def test_multi_step(self, model_name):
model = mujoco.MjModel.from_xml_string(ALL_MODELS[model_name])
data = mujoco.MjData(model)
nstate = 5 # number of initial states
initial_state = np.random.randn(nstate, model.nq + model.nv + model.na)
ctrl = np.random.randn(nstate, 1, model.nu)
state, sensordata = rollout.rollout(model, data, initial_state, ctrl)
mujoco.mj_resetData(model, data)
py_state, py_sensordata = multi_rollout(model, data, initial_state,
ctrl=ctrl)
np.testing.assert_array_equal(state, py_state)
np.testing.assert_array_equal(sensordata, py_sensordata)
@parameterized.parameters(ALL_MODELS.keys())
def test_single_rollout_fixed_ctrl(self, model_name):
nstep = 3
model = mujoco.MjModel.from_xml_string(ALL_MODELS[model_name])
data = mujoco.MjData(model)
initial_state = np.random.randn(model.nq + model.nv + model.na)
ctrl = np.random.randn(model.nu)
state = np.empty((nstep, model.nq + model.nv + model.na))
sensordata = np.empty((nstep, model.nsensordata))
rollout.rollout(model, data, initial_state, ctrl,
state=state, sensordata=sensordata)
    ctrl = np.tile(ctrl, (nstep, 1))  # tile the fixed ctrl across all timesteps
py_state, py_sensordata = single_rollout(model, data, initial_state,
ctrl=ctrl)
np.testing.assert_array_equal(state, py_state)
np.testing.assert_array_equal(sensordata, py_sensordata)
@parameterized.parameters(ALL_MODELS.keys())
def test_multi_rollout(self, model_name):
model = mujoco.MjModel.from_xml_string(ALL_MODELS[model_name])
data = mujoco.MjData(model)
nstate = 2 # number of initial states
nstep = 3 # number of timesteps
initial_state = np.random.randn(nstate, model.nq + model.nv + model.na)
ctrl = np.random.randn(nstate, nstep, model.nu)
state, sensordata = rollout.rollout(model, data, initial_state, ctrl)
py_state, py_sensordata = multi_rollout(model, data, initial_state,
ctrl=ctrl)
    np.testing.assert_array_equal(state, py_state)
    np.testing.assert_array_equal(sensordata, py_sensordata)
@parameterized.parameters(ALL_MODELS.keys())
def test_multi_rollout_fixed_ctrl_infer_from_output(self, model_name):
model = mujoco.MjModel.from_xml_string(ALL_MODELS[model_name])
data = mujoco.MjData(model)
nstate = 2 # number of initial states
nstep = 3 # number of timesteps
initial_state = np.random.randn(nstate, model.nq + model.nv + model.na)
ctrl = np.random.randn(nstate, 1, model.nu) # 1 control in the time dimension
state = np.empty((nstate, nstep, model.nq + model.nv + model.na))
state, sensordata = rollout.rollout(model, data, initial_state, ctrl,
state=state)
ctrl = np.repeat(ctrl, nstep, axis=1)
py_state, py_sensordata = multi_rollout(model, data, initial_state,
ctrl=ctrl)
np.testing.assert_array_equal(state, py_state)
np.testing.assert_array_equal(sensordata, py_sensordata)
@parameterized.product(arg_nstep=[[3, 1, 1], [3, 3, 1], [3, 1, 3]],
model_name=list(ALL_MODELS.keys()))
def test_multi_rollout_multiple_inputs(self, arg_nstep, model_name):
model = mujoco.MjModel.from_xml_string(ALL_MODELS[model_name])
data = mujoco.MjData(model)
nstate = 4 # number of initial states
initial_state = np.random.randn(nstate, model.nq + model.nv + model.na)
# arg_nstep is the horizon for {ctrl, qfrc_applied, xfrc_applied}, respectively
ctrl = np.random.randn(nstate, arg_nstep[0], model.nu)
qfrc_applied = np.random.randn(nstate, arg_nstep[1], model.nv)
xfrc_applied = np.random.randn(nstate, arg_nstep[2], model.nbody*6)
state, sensordata = rollout.rollout(model, data, initial_state, ctrl,
qfrc_applied=qfrc_applied,
xfrc_applied=xfrc_applied)
# tile singleton arguments
nstep = max(arg_nstep)
if arg_nstep[0] == 1:
ctrl = np.repeat(ctrl, nstep, axis=1)
if arg_nstep[1] == 1:
qfrc_applied = np.repeat(qfrc_applied, nstep, axis=1)
if arg_nstep[2] == 1:
xfrc_applied = np.repeat(xfrc_applied, nstep, axis=1)
py_state, py_sensordata = multi_rollout(model, data, initial_state,
ctrl=ctrl,
qfrc_applied=qfrc_applied,
xfrc_applied=xfrc_applied)
np.testing.assert_array_equal(state, py_state)
np.testing.assert_array_equal(sensordata, py_sensordata)
#----------------------------- test threaded operation
def test_threading(self):
model = mujoco.MjModel.from_xml_string(TEST_XML)
num_workers = 32
nstate = 10000
nstep = 5
initial_state = np.random.randn(nstate, model.nq+model.nv+model.na)
state = np.zeros((nstate, nstep, model.nq+model.nv+model.na))
sensordata = np.zeros((nstate, nstep, model.nsensordata))
ctrl = np.random.randn(nstate, nstep, model.nu)
thread_local = threading.local()
def thread_initializer():
thread_local.data = mujoco.MjData(model)
def call_rollout(initial_state, ctrl, state):
rollout.rollout(model, thread_local.data, skip_checks=True,
nstate=initial_state.shape[0], nstep=nstep,
initial_state=initial_state, ctrl=ctrl, state=state)
n = initial_state.shape[0] // num_workers # integer division
chunks = [] # a list of tuples, one per worker
for i in range(num_workers-1):
chunks.append(
(initial_state[i*n:(i+1)*n], ctrl[i*n:(i+1)*n], state[i*n:(i+1)*n]))
# last chunk, absorbing the remainder:
chunks.append(
(initial_state[(num_workers-1)*n:], ctrl[(num_workers-1)*n:],
state[(num_workers-1)*n:]))
with concurrent.futures.ThreadPoolExecutor(
max_workers=num_workers, initializer=thread_initializer) as executor:
futures = []
for chunk in chunks:
futures.append(executor.submit(call_rollout, *chunk))
for future in concurrent.futures.as_completed(futures):
future.result()
data = mujoco.MjData(model)
py_state, py_sensordata = multi_rollout(model, data, initial_state,
ctrl=ctrl)
np.testing.assert_array_equal(state, py_state)
#----------------------------- test advanced operation
def test_time(self):
model = mujoco.MjModel.from_xml_string(TEST_XML)
data = mujoco.MjData(model)
nstate = 1
nstep = 3
initial_time = np.array([[2.]])
initial_state = np.random.randn(nstate, model.nq + model.nv + model.na)
ctrl = np.random.randn(nstate, nstep, model.nu)
state, sensordata = rollout.rollout(model, data, initial_state, ctrl,
initial_time=initial_time)
self.assertAlmostEqual(data.time, 2 + nstep*model.opt.timestep)
def test_warmstart(self):
model = mujoco.MjModel.from_xml_string(TEST_XML)
data = mujoco.MjData(model)
state0 = np.zeros(model.nq + model.nv + model.na)
ctrl = np.zeros(model.nu)
state1, _ = step(model, data, state0, ctrl=ctrl)
initial_warmstart = data.qacc_warmstart.copy()
state2, _ = step(model, data, state1, ctrl=ctrl)
state, _ = rollout.rollout(model, data, state1, ctrl)
    self.assertGreater(np.linalg.norm(state - state2), 0)
state, _ = rollout.rollout(model, data, state1, ctrl,
initial_warmstart=initial_warmstart)
np.testing.assert_array_equal(state, state2)
def test_mocap(self):
model = mujoco.MjModel.from_xml_string(TEST_XML_MOCAP)
data = mujoco.MjData(model)
initial_state = np.zeros(model.nq + model.nv + model.na)
pos1 = np.array((1., 2., 3.))
quat1 = np.array((1., 2., 3., 4.))
quat1 /= np.linalg.norm(quat1)
pos2 = np.array((2., 3., 4.))
quat2 = np.array((2., 3., 4., 5.))
quat2 /= np.linalg.norm(quat2)
mocap = np.hstack((pos1, quat1, pos2, quat2))
state, sensordata = rollout.rollout(model, data, initial_state, mocap=mocap)
np.testing.assert_array_almost_equal(sensordata[:3], pos1)
np.testing.assert_array_almost_equal(sensordata[3:], quat2)
#----------------------------- test correctness
def test_intercept_mj_errors(self):
model = mujoco.MjModel.from_xml_string(TEST_XML)
data = mujoco.MjData(model)
initial_state = np.zeros(model.nq + model.nv + model.na)
ctrl = np.zeros((3, model.nu))
model.opt.solver = 10 # invalid solver type
with self.assertRaisesWithLiteralMatch(
mujoco.FatalError, 'mj_fwdConstraint: unknown solver type 10'):
state, sensordata = rollout.rollout(model, data, initial_state, ctrl)
def test_invalid(self):
model = mujoco.MjModel.from_xml_string(TEST_XML)
data = mujoco.MjData(model)
initial_state = np.zeros(model.nq + model.nv + model.na)
ctrl = 'string'
with self.assertRaisesWithLiteralMatch(
ValueError, 'ctrl must be a numpy array or float'):
state, sensordata = rollout.rollout(model, data, initial_state, ctrl)
qfrc_applied = np.zeros((2, 3, 4, 5))
with self.assertRaisesWithLiteralMatch(
ValueError, 'qfrc_applied can have at most 3 dimensions'):
state, sensordata = rollout.rollout(model, data, initial_state,
qfrc_applied=qfrc_applied)
def test_bad_sizes(self):
model = mujoco.MjModel.from_xml_string(TEST_XML)
data = mujoco.MjData(model)
initial_state = np.random.randn(model.nq + model.nv + model.na+1)
with self.assertRaisesWithLiteralMatch(
ValueError, 'trailing dimension of initial_state must be 5, got 6'):
state, sensordata = rollout.rollout(model, data, initial_state)
initial_state = np.random.randn(model.nq + model.nv + model.na)
ctrl = np.random.randn(model.nu+1)
with self.assertRaisesWithLiteralMatch(
ValueError, 'trailing dimension of ctrl must be 2, got 3'):
state, sensordata = rollout.rollout(model, data, initial_state, ctrl)
ctrl = np.random.randn(2, model.nu)
qfrc_applied = np.random.randn(3, model.nv) # incompatible horizon
with self.assertRaisesWithLiteralMatch(
ValueError, 'dimension 1 inferred as 2 but qfrc_applied has 3'):
state, sensordata = rollout.rollout(model, data, initial_state, ctrl,
qfrc_applied=qfrc_applied)
def test_stateless(self):
model = mujoco.MjModel.from_xml_string(TEST_XML)
model.opt.disableflags |= mujoco.mjtDisableBit.mjDSBL_WARMSTART.value
data = mujoco.MjData(model)
# call step with a clean mjData
initial_state = np.random.randn(model.nq + model.nv + model.na)
ctrl = np.random.randn(model.nu)
state, sensordata = rollout.rollout(model, data, initial_state, ctrl)
# fill mjData with some debug value, see that we still get the same outputs
mujoco.mj_resetDataDebug(model, data, 255)
debug_state, debug_sensordata = rollout.rollout(model, data, initial_state,
ctrl)
np.testing.assert_array_equal(state, debug_state)
np.testing.assert_array_equal(sensordata, debug_sensordata)
#--------------- Python implementation of rollout functionality ----------------
def get_state(data):
return np.hstack((data.qpos, data.qvel, data.act))
def set_state(model, data, state):
data.qpos = state[:model.nq]
data.qvel = state[model.nq:model.nq+model.nv]
data.act = state[model.nq+model.nv:model.nq+model.nv+model.na]
def step(model, data, state, **kwargs):
if state is not None:
set_state(model, data, state)
for key, value in kwargs.items():
if value is not None:
setattr(data, key, np.reshape(value, getattr(data, key).shape))
mujoco.mj_step(model, data)
return (get_state(data), data.sensordata)
def single_rollout(model, data, initial_state, **kwargs):
arg_nstep = set([a.shape[0] for a in kwargs.values()])
assert len(arg_nstep) == 1 # nstep dimensions must match
nstep = arg_nstep.pop()
state = np.empty((nstep, model.nq + model.nv + model.na))
sensordata = np.empty((nstep, model.nsensordata))
mujoco.mj_resetData(model, data)
for t in range(nstep):
kwargs_t = {}
for key, value in kwargs.items():
kwargs_t[key] = value[0 if value.ndim == 1 else t]
state[t], sensordata[t] = step(model, data,
initial_state if t==0 else None,
**kwargs_t)
return state, sensordata
def multi_rollout(model, data, initial_state, **kwargs):
nstate = initial_state.shape[0]
arg_nstep = set([a.shape[1] for a in kwargs.values()])
assert len(arg_nstep) == 1 # nstep dimensions must match
nstep = arg_nstep.pop()
state = np.empty((nstate, nstep, model.nq + model.nv + model.na))
sensordata = np.empty((nstate, nstep, model.nsensordata))
for s in range(nstate):
kwargs_s = {key : value[s] for key, value in kwargs.items()}
state_s, sensordata_s = single_rollout(model, data, initial_state[s],
**kwargs_s)
state[s] = state_s
sensordata[s] = sensordata_s
return state.squeeze(), sensordata.squeeze()
if __name__ == '__main__':
absltest.main()
| mujoco-main | python/mujoco/rollout_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MuJoCo Python bindings."""
import contextlib
import copy
import pickle
import sys
from absl.testing import absltest
from absl.testing import parameterized
import mujoco
import numpy as np
TEST_XML = r"""
<mujoco model="test">
<compiler coordinate="local" angle="radian" eulerseq="xyz"/>
<option timestep="0.002" gravity="0 0 -9.81"/>
<visual>
<global fovy="50" />
<quality shadowsize="51" />
</visual>
<worldbody>
<geom name="myplane" type="plane" size="10 10 1"/>
<body name="mybox" pos="0 0 0.1">
<geom name="mybox" type="box" size="0.1 0.1 0.1" mass="0.25"/>
<freejoint name="myfree"/>
</body>
<body>
<inertial pos="0 0 0" mass="1" diaginertia="1 1 1"/>
<site pos="0 0 -1" name="mysite" type="sphere"/>
<joint name="myhinge" type="hinge" axis="0 1 0" damping="1"/>
</body>
<body>
<inertial pos="0 0 0" mass="1" diaginertia="1 1 1"/>
<joint name="myball" type="ball"/>
</body>
<body mocap="true" pos="42 0 42">
<geom type="sphere" size="0.1"/>
</body>
</worldbody>
<actuator>
<position name="myactuator" joint="myhinge"/>
</actuator>
<sensor>
<jointvel name="myjointvel" joint="myhinge"/>
<accelerometer name="myaccelerometer" site="mysite"/>
</sensor>
</mujoco>
"""
TEST_XML_SENSOR = r"""
<mujoco model="test">
<worldbody>
<geom name="myplane" type="plane" size="10 10 1"/>
</worldbody>
<sensor>
<user objtype="geom" objname="myplane"
datatype="real" needstage="vel" dim="1"/>
</sensor>
</mujoco>
"""
TEST_XML_PLUGIN = r"""
<mujoco model="test">
<extension>
<plugin plugin="mujoco.elasticity.cable"/>
</extension>
</mujoco>
"""
@contextlib.contextmanager
def temporary_callback(setter, callback):
setter(callback)
yield
setter(None)
class MuJoCoBindingsTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.model: mujoco.MjModel = mujoco.MjModel.from_xml_string(TEST_XML)
self.data = mujoco.MjData(self.model)
def test_load_xml_can_handle_name_clash(self):
xml_1 = r"""
<mujoco>
<worldbody>
<geom name="plane" type="plane" size="1 1 1"/>
<include file="model_.xml"/>
<include file="model__.xml"/>
</worldbody>
</mujoco>"""
xml_2 = rb"""<mujoco><geom name="box" type="box" size="1 1 1"/></mujoco>"""
xml_3 = rb"""<mujoco><geom name="ball" type="sphere" size="1"/></mujoco>"""
model = mujoco.MjModel.from_xml_string(
xml_1, {'model_.xml': xml_2, 'model__.xml': xml_3})
self.assertEqual(
mujoco.mj_name2id(model, mujoco.mjtObj.mjOBJ_GEOM, 'plane'), 0)
self.assertEqual(
mujoco.mj_name2id(model, mujoco.mjtObj.mjOBJ_GEOM, 'box'), 1)
self.assertEqual(
mujoco.mj_name2id(model, mujoco.mjtObj.mjOBJ_GEOM, 'ball'), 2)
def test_can_read_array(self):
np.testing.assert_array_equal(
self.model.body_pos,
[[0, 0, 0], [0, 0, 0.1], [0, 0, 0], [0, 0, 0], [42.0, 0, 42.0]])
def test_can_set_array(self):
self.data.qpos = 0.12345
np.testing.assert_array_equal(
self.data.qpos, [0.12345]*len(self.data.qpos))
def test_array_is_a_view(self):
qpos_ref = self.data.qpos
self.data.qpos = 0.789
np.testing.assert_array_equal(
qpos_ref, [0.789]*len(self.data.qpos))
  # This test is disabled on PyPy as it uses sys.getrefcount.
  # However, PyPy is not officially supported by MuJoCo.
@absltest.skipIf(sys.implementation.name == 'pypy',
reason='requires sys.getrefcount')
def test_array_keeps_struct_alive(self):
model = mujoco.MjModel.from_xml_string(TEST_XML)
qpos0 = model.qpos0
qpos_spring = model.qpos_spring
# This only fails reliably under ASAN, which detects heap-use-after-free.
# However, often the assertEqual is enough since the memory block is
# already reused between mjModel deallocation and the subsequent read.
qpos0[:] = 1
del model
self.assertEqual(qpos0[0], 1)
# When running under test coverage tools, the refcount of objects can be
# higher than normal. To take this into account, we first measure the
# refcount of a dummy object with no other referrer.
dummy = []
base_refcount = sys.getrefcount(dummy) - 1
# Here `base` is actually a PyCapsule that holds the raw mjModel* rather
# than the actual MjModel wrapper object itself.
capsule = qpos0.base
self.assertEqual(sys.getrefcount(capsule) - base_refcount, 3)
del qpos0
self.assertEqual(sys.getrefcount(capsule) - base_refcount, 2)
del qpos_spring
self.assertEqual(sys.getrefcount(capsule) - base_refcount, 1)
def test_named_indexing_actuator_ctrl(self):
actuator_id = mujoco.mj_name2id(
self.model, mujoco.mjtObj.mjOBJ_ACTUATOR, 'myactuator')
self.assertIs(self.data.actuator('myactuator'),
self.data.actuator(actuator_id))
self.assertIs(self.data.actuator('myactuator').ctrl,
self.data.actuator(actuator_id).ctrl)
self.assertEqual(self.data.actuator('myactuator').ctrl.shape, (1,))
# Test that the indexer is returning a view into the underlying struct.
ctrl_from_indexer = self.data.actuator('myactuator').ctrl
self.data.ctrl[actuator_id] = 5
np.testing.assert_array_equal(ctrl_from_indexer, [5])
self.data.actuator('myactuator').ctrl = 7
np.testing.assert_array_equal(self.data.ctrl[actuator_id], [7])
def test_named_indexing_invalid_names_in_model(self):
with self.assertRaisesRegex(
KeyError,
r"Invalid name 'badgeom'\. Valid names: \['mybox', 'myplane'\]"):
self.model.geom('badgeom')
def test_named_indexing_no_name_argument_in_model(self):
with self.assertRaisesRegex(
KeyError,
r"Invalid name ''\. Valid names: \['myball', 'myfree', 'myhinge'\]"):
self.model.joint()
def test_named_indexing_invalid_names_in_data(self):
with self.assertRaisesRegex(
KeyError,
r"Invalid name 'badgeom'\. Valid names: \['mybox', 'myplane'\]"):
self.data.geom('badgeom')
def test_named_indexing_no_name_argument_in_data(self):
with self.assertRaisesRegex(
KeyError,
r"Invalid name ''\. Valid names: \['myball', 'myfree', 'myhinge'\]"):
self.data.jnt()
def test_named_indexing_invalid_index_in_model(self):
with self.assertRaisesRegex(
IndexError, r'Invalid index 3\. Valid indices from 0 to 2'):
self.model.geom(3)
with self.assertRaisesRegex(
IndexError, r'Invalid index -1\. Valid indices from 0 to 2'):
self.model.geom(-1)
def test_named_indexing_invalid_index_in_data(self):
with self.assertRaisesRegex(
IndexError, r'Invalid index 3\. Valid indices from 0 to 2'):
self.data.geom(3)
with self.assertRaisesRegex(
IndexError, r'Invalid index -1\. Valid indices from 0 to 2'):
self.data.geom(-1)
def test_named_indexing_geom_size(self):
box_id = mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_GEOM, 'mybox')
self.assertIs(self.model.geom('mybox'), self.model.geom(box_id))
self.assertIs(self.model.geom('mybox').size, self.model.geom(box_id).size)
self.assertEqual(self.model.geom('mybox').size.shape, (3,))
# Test that the indexer is returning a view into the underlying struct.
size_from_indexer = self.model.geom('mybox').size
self.model.geom_size[box_id] = [7, 11, 13]
np.testing.assert_array_equal(size_from_indexer, [7, 11, 13])
self.model.geom('mybox').size = [5, 3, 2]
np.testing.assert_array_equal(self.model.geom_size[box_id], [5, 3, 2])
def test_named_indexing_geom_quat(self):
box_id = mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_GEOM, 'mybox')
self.assertIs(self.model.geom('mybox'), self.model.geom(box_id))
self.assertIs(self.model.geom('mybox').quat, self.model.geom(box_id).quat)
self.assertEqual(self.model.geom('mybox').quat.shape, (4,))
# Test that the indexer is returning a view into the underlying struct.
quat_from_indexer = self.model.geom('mybox').quat
self.model.geom_quat[box_id] = [5, 10, 15, 20]
np.testing.assert_array_equal(quat_from_indexer, [5, 10, 15, 20])
self.model.geom('mybox').quat = [12, 9, 6, 3]
np.testing.assert_array_equal(self.model.geom_quat[box_id], [12, 9, 6, 3])
def test_named_indexing_ragged_qpos(self):
balljoint_id = mujoco.mj_name2id(
self.model, mujoco.mjtObj.mjOBJ_JOINT, 'myball')
self.assertIs(self.data.joint('myball'), self.data.joint(balljoint_id))
self.assertIs(self.data.joint('myball').qpos,
self.data.joint(balljoint_id).qpos)
self.assertEqual(self.data.joint('myball').qpos.shape, (4,))
# Test that the indexer is returning a view into the underlying struct.
qpos_from_indexer = self.data.joint('myball').qpos
qpos_idx = self.model.jnt_qposadr[balljoint_id]
self.data.qpos[qpos_idx:qpos_idx+4] = [4, 5, 6, 7]
np.testing.assert_array_equal(qpos_from_indexer, [4, 5, 6, 7])
self.data.joint('myball').qpos = [9, 8, 7, 6]
np.testing.assert_array_equal(self.data.qpos[qpos_idx:qpos_idx+4],
[9, 8, 7, 6])
def test_named_indexing_ragged2d_cdof(self):
freejoint_id = mujoco.mj_name2id(
self.model, mujoco.mjtObj.mjOBJ_JOINT, 'myfree')
self.assertIs(self.data.joint('myfree'), self.data.joint(freejoint_id))
self.assertIs(self.data.joint('myfree').cdof,
self.data.joint(freejoint_id).cdof)
self.assertEqual(self.data.joint('myfree').cdof.shape, (6, 6))
# Test that the indexer is returning a view into the underlying struct.
cdof_from_indexer = self.data.joint('myfree').cdof
dof_idx = self.model.jnt_dofadr[freejoint_id]
self.data.cdof[dof_idx:dof_idx+6, :] = np.reshape(range(36), (6, 6))
np.testing.assert_array_equal(cdof_from_indexer,
np.reshape(range(36), (6, 6)))
self.data.joint('myfree').cdof = 42
np.testing.assert_array_equal(self.data.cdof[dof_idx:dof_idx+6], [[42]*6]*6)
def test_named_indexing_repr_in_data(self):
expected_repr = '''<_MjDataGeomViews
id: 1
name: 'mybox'
xmat: array([0., 0., 0., 0., 0., 0., 0., 0., 0.])
xpos: array([0., 0., 0.])
>'''
self.assertEqual(expected_repr, repr(self.data.geom('mybox')))
def test_named_indexing_body_repr_in_data(self):
view_repr = repr(self.data.body('mybox'))
self.assertStartsWith(view_repr, '<_MjDataBodyViews')
self.assertIn('xpos: array([0., 0., 0.])', view_repr)
self.assertEndsWith(view_repr, '>')
def test_named_indexing_repr_in_model(self):
view_repr = repr(self.model.geom('mybox'))
self.assertStartsWith(view_repr, '<_MjModelGeomViews')
self.assertIn('size: array([0.1, 0.1, 0.1])', view_repr)
self.assertEndsWith(view_repr, '>')
def test_addresses_differ_between_structs(self):
model2 = mujoco.MjModel.from_xml_string(TEST_XML)
data2 = mujoco.MjData(model2)
self.assertGreater(self.model._address, 0)
self.assertGreater(self.data._address, 0)
self.assertGreater(model2._address, 0)
self.assertGreater(data2._address, 0)
self.assertLen({self.model._address, self.data._address,
model2._address, data2._address}, 4)
def test_mjmodel_can_read_and_write_opt(self):
self.assertEqual(self.model.opt.timestep, 0.002)
np.testing.assert_array_equal(self.model.opt.gravity, [0, 0, -9.81])
opt = self.model.opt
self.model.opt.timestep = 0.001
self.assertEqual(opt.timestep, 0.001)
gravity = opt.gravity
self.model.opt.gravity[1] = 0.1
np.testing.assert_array_equal(gravity, [0, 0.1, -9.81])
self.model.opt.gravity = 0.2
np.testing.assert_array_equal(gravity, [0.2, 0.2, 0.2])
def test_mjmodel_can_read_and_write_stat(self):
self.assertNotEqual(self.model.stat.meanmass, 0)
stat = self.model.stat
self.model.stat.meanmass = 1.2
self.assertEqual(stat.meanmass, 1.2)
def test_mjmodel_can_read_and_write_vis(self):
self.assertEqual(self.model.vis.quality.shadowsize, 51)
self.model.vis.quality.shadowsize = 100
self.assertEqual(self.model.vis.quality.shadowsize, 100)
def test_mjmodel_can_access_names_directly(self):
# mjModel offers direct access to names array, to allow usecases other than
# id2name
model_name = str(self.model.names[0:self.model.names.find(b'\0')], 'utf-8')
self.assertEqual(model_name, 'test')
start_index = self.model.name_geomadr[0]
end_index = self.model.names.find(b'\0', start_index)
geom_name = str(self.model.names[start_index:end_index], 'utf-8')
self.assertEqual(geom_name, 'myplane')
def test_mjmodel_names_doesnt_copy(self):
names = self.model.names
self.assertIs(names, self.model.names)
def test_vis_global_exposed_as_global_(self):
self.assertEqual(self.model.vis.global_.fovy, 50)
self.model.vis.global_.fovy = 100
self.assertEqual(self.model.vis.global_.fovy, 100)
def test_mjoption_can_make_default(self):
opt = mujoco.MjOption()
self.assertEqual(opt.timestep, 0.002)
np.testing.assert_array_equal(opt.gravity, [0, 0, -9.81])
def test_mjoption_can_copy(self):
opt1 = mujoco.MjOption()
opt1.timestep = 0.001
opt1.gravity = 2
opt2 = copy.copy(opt1)
self.assertEqual(opt2.timestep, 0.001)
np.testing.assert_array_equal(opt2.gravity, [2, 2, 2])
# Make sure opt2 is actually a copy.
opt1.timestep = 0.005
opt1.gravity = 5
self.assertEqual(opt2.timestep, 0.001)
np.testing.assert_array_equal(opt2.gravity, [2, 2, 2])
def test_mjmodel_can_copy(self):
model_copy = copy.copy(self.model)
self.assertEqual(
mujoco.mj_id2name(model_copy, mujoco.mjtObj.mjOBJ_JOINT, 0),
'myfree')
self.assertEqual(
mujoco.mj_id2name(model_copy, mujoco.mjtObj.mjOBJ_GEOM, 0),
'myplane')
self.assertEqual(
mujoco.mj_id2name(model_copy, mujoco.mjtObj.mjOBJ_GEOM, 1),
'mybox')
# Make sure it's a copy.
self.model.geom_size[1] = 0.5
np.testing.assert_array_equal(self.model.geom_size[1], [0.5, 0.5, 0.5])
np.testing.assert_array_equal(model_copy.geom_size[1], [0.1, 0.1, 0.1])
def test_assets_array_filename_too_long(self):
# Longest allowed filename (excluding null byte)
limit = mujoco.mjMAXVFSNAME - 1
contents = b'<mujoco/>'
valid_filename = 'a' * limit
mujoco.MjModel.from_xml_path(valid_filename, {valid_filename: contents})
invalid_filename = 'a' * (limit + 1)
expected_message = (
f'Filename length 1000 exceeds 999 character limit: {invalid_filename}')
with self.assertRaisesWithLiteralMatch(ValueError, expected_message):
mujoco.MjModel.from_xml_path(invalid_filename,
{invalid_filename: contents})
def test_mjdata_can_copy(self):
self.data.qpos = [0, 0, 0.1*np.sqrt(2) - 0.001,
np.cos(np.pi/8), np.sin(np.pi/8), 0, 0, 0,
1, 0, 0, 0]
mujoco.mj_forward(self.model, self.data)
data_copy = copy.copy(self.data)
self.assertEqual(data_copy.ncon, 2)
# Make sure it's a copy.
mujoco.mj_resetData(self.model, self.data)
mujoco.mj_forward(self.model, self.data)
mujoco.mj_forward(self.model, data_copy)
self.assertEqual(self.data.ncon, 4)
self.assertEqual(data_copy.ncon, 2)
mujoco.mj_resetData(self.model, data_copy)
mujoco.mj_forward(self.model, data_copy)
self.assertEqual(data_copy.ncon, 4)
def test_mjdata_can_read_warning_array(self):
warnings = self.data.warning
self.assertLen(warnings, mujoco.mjtWarning.mjNWARNING)
self.data.qpos[0] = float('NaN')
mujoco.mj_checkPos(self.model, self.data)
self.assertEqual(warnings[mujoco.mjtWarning.mjWARN_BADQPOS].number, 1)
def test_mjcontact_can_copy(self):
mujoco.mj_forward(self.model, self.data)
contact_copy = []
for i in range(4):
contact_copy.append(copy.copy(self.data.contact[i]))
# Sort contacts in anticlockwise order
contact_copy = sorted(
contact_copy, key=lambda x: np.arctan2(x.pos[1], x.pos[0]))
np.testing.assert_allclose(contact_copy[0].pos[:2], [-0.1, -0.1])
np.testing.assert_allclose(contact_copy[1].pos[:2], [0.1, -0.1])
np.testing.assert_allclose(contact_copy[2].pos[:2], [0.1, 0.1])
np.testing.assert_allclose(contact_copy[3].pos[:2], [-0.1, 0.1])
# Make sure they're actually copies.
for i in range(4):
self.data.contact[i].pos[:2] = 55
np.testing.assert_allclose(self.data.contact[0].pos[:2], [55, 55])
np.testing.assert_allclose(self.data.contact[1].pos[:2], [55, 55])
np.testing.assert_allclose(self.data.contact[2].pos[:2], [55, 55])
np.testing.assert_allclose(self.data.contact[3].pos[:2], [55, 55])
np.testing.assert_allclose(contact_copy[0].pos[:2], [-0.1, -0.1])
np.testing.assert_allclose(contact_copy[1].pos[:2], [0.1, -0.1])
np.testing.assert_allclose(contact_copy[2].pos[:2], [0.1, 0.1])
np.testing.assert_allclose(contact_copy[3].pos[:2], [-0.1, 0.1])
def test_mj_step(self):
displacement = 0.25
self.data.qpos[2] += displacement
mujoco.mj_forward(self.model, self.data)
gravity = -self.model.opt.gravity[2]
expected_contact_time = np.sqrt(2 * displacement / gravity)
# Grab a reference to the contacts upfront so that we know that they're
# a view into mjData rather than a copy.
contact = self.data.contact
self.model.opt.timestep = 2**-9 # 0.001953125; allows exact comparisons
self.assertEqual(self.data.time, 0)
while self.data.time < expected_contact_time:
self.assertEqual(self.data.ncon, 0)
self.assertEmpty(self.data.efc_type)
self.assertTrue(self.data.efc_type.flags['OWNDATA'])
prev_time = self.data.time
mujoco.mj_step(self.model, self.data)
self.assertEqual(self.data.time, prev_time + self.model.opt.timestep)
mujoco.mj_forward(self.model, self.data)
self.assertEqual(self.data.ncon, 4)
self.assertLen(self.data.efc_type, 16)
self.assertFalse(self.data.efc_type.flags['OWNDATA'])
# Sort contacts in anticlockwise order
sorted_contact = sorted(
contact, key=lambda x: np.arctan2(x.pos[1], x.pos[0]))
np.testing.assert_allclose(sorted_contact[0].pos[:2], [-0.1, -0.1])
np.testing.assert_allclose(sorted_contact[1].pos[:2], [0.1, -0.1])
np.testing.assert_allclose(sorted_contact[2].pos[:2], [0.1, 0.1])
np.testing.assert_allclose(sorted_contact[3].pos[:2], [-0.1, 0.1])
mujoco.mj_resetData(self.model, self.data)
self.assertEqual(self.data.ncon, 0)
self.assertEmpty(self.data.efc_type)
self.assertTrue(self.data.efc_type.flags['OWNDATA'])
def test_mj_step_multiple(self):
self.model.opt.timestep = 2**-9 # 0.001953125; allows exact comparisons
self.assertEqual(self.data.time, 0)
for _ in range(10):
prev_time = self.data.time
mujoco.mj_step(self.model, self.data, nstep=7)
self.assertEqual(self.data.time, prev_time + 7 * self.model.opt.timestep)
self.assertIn('Optionally, repeat nstep times.', mujoco.mj_step.__doc__)
def test_mj_contact_list(self):
self.assertEmpty(self.data.contact)
expected_ncon = 4
mujoco.mj_forward(self.model, self.data)
self.assertLen(self.data.contact, expected_ncon)
expected_pos = []
for contact in self.data.contact:
expected_pos.append(np.random.uniform(size=3))
contact.pos = expected_pos[-1]
self.assertLen(expected_pos, expected_ncon)
np.testing.assert_array_equal(self.data.contact.pos, expected_pos)
expected_friction = []
for contact in self.data.contact:
expected_friction.append(np.random.uniform(size=5))
contact.friction = expected_friction[-1]
self.assertLen(expected_friction, expected_ncon)
np.testing.assert_array_equal(self.data.contact.friction, expected_friction)
expected_H = [] # pylint: disable=invalid-name
for contact in self.data.contact:
expected_H.append(np.random.uniform(size=36))
contact.H = expected_H[-1]
self.assertLen(expected_H, expected_ncon)
np.testing.assert_array_equal(self.data.contact.H, expected_H)
def test_realloc_con_efc(self):
self.assertEmpty(self.data.contact)
ncon = 9
nefc = 11
mujoco._functions._realloc_con_efc(self.data, ncon, nefc)
ncon = 13
nefc = 17
mujoco._functions._realloc_con_efc(self.data, ncon=ncon, nefc=nefc)
self.assertLen(self.data.contact, ncon)
self.assertEqual(self.data.efc_id.shape, (nefc,))
self.assertEqual(self.data.efc_KBIP.shape, (nefc, 4))
expected_error = 'insufficient arena memory available'
with self.assertRaisesWithLiteralMatch(mujoco.FatalError, expected_error):
mujoco._functions._realloc_con_efc(self.data, 100000000, 100000000)
self.assertEmpty(self.data.contact)
self.assertEmpty(self.data.efc_id)
def test_mj_struct_list_equality(self):
model2 = mujoco.MjModel.from_xml_string(TEST_XML)
data2 = mujoco.MjData(model2)
mujoco.mj_forward(self.model, self.data)
self.assertEqual(self.data.ncon, 4)
mujoco.mj_forward(model2, data2)
self.assertEqual(data2.ncon, 4)
self.assertEqual(data2.contact, self.data.contact)
self.data.qpos[3:7] = [np.cos(np.pi/8), np.sin(np.pi/8), 0, 0]
self.data.qpos[2] *= (np.sqrt(2) - 1) * 0.1 - 1e-6
mujoco.mj_forward(self.model, self.data)
self.assertEqual(self.data.ncon, 2)
self.assertNotEqual(data2.contact, self.data.contact)
# Check that we can compare slices of different lengths
self.assertNotEqual(data2.contact, self.data.contact)
    # Check that comparing objects of different types does not raise an error
self.assertNotEqual(self.data.contact, self.data.warning)
self.assertNotEqual(self.data.contact, 5)
@parameterized.named_parameters([
('MjOption', mujoco.MjOption, 'tolerance'),
('MjWarningStat', mujoco.MjWarningStat, 'number'),
('MjTimerStat', mujoco.MjTimerStat, 'number'),
('MjSolverStat', mujoco.MjSolverStat, 'neval'),
('MjContact', mujoco.MjContact, 'dist'),
('MjStatistic', mujoco.MjStatistic, 'extent'),
('MjLROpt', mujoco.MjLROpt, 'maxforce'),
('MjvPerturb', mujoco.MjvPerturb, 'select'),
('MjvCamera', mujoco.MjvCamera, 'fixedcamid'),
])
def test_mj_struct_equality(self, cls, attr):
struct = cls()
struct2 = cls()
setattr(struct, attr, 1)
self.assertNotEqual(struct, struct2)
setattr(struct2, attr, 1)
self.assertEqual(struct, struct2)
self.assertNotEqual(struct, 3)
self.assertNotEqual(struct, None)
# mutable structs shouldn't declare __hash__
with self.assertRaises(TypeError):
hash(struct)
def test_mj_struct_equality_array(self):
contact1 = mujoco.MjContact()
contact2 = mujoco.MjContact()
contact1.H[3] = 1
self.assertNotEqual(contact1, contact2)
contact2.H[3] = 1
self.assertEqual(contact1, contact2)
@parameterized.named_parameters([
('MjOption', mujoco.MjOption, 'tolerance'),
('MjWarningStat', mujoco.MjWarningStat, 'number'),
('MjTimerStat', mujoco.MjTimerStat, 'number'),
('MjSolverStat', mujoco.MjSolverStat, 'neval'),
('MjContact', mujoco.MjContact, 'dist'),
('MjStatistic', mujoco.MjStatistic, 'extent'),
('MjLROpt', mujoco.MjLROpt, 'maxforce'),
('MjvPerturb', mujoco.MjvPerturb, 'select'),
('MjvCamera', mujoco.MjvCamera, 'fixedcamid'),
])
def test_mj_struct_repr(self, cls, attr):
struct = cls()
setattr(struct, attr, 1)
representation = repr(struct)
self.assertStartsWith(representation, f'<{cls.__name__}')
self.assertIn(f'{attr}: 1', representation)
self.assertEqual(str(struct), repr(struct))
def test_mj_struct_repr_for_subclass(self):
class MjWarningStatSubclass(mujoco.MjWarningStat):
# ptr attribute could cause an infinite recursion, if the repr
# implementation simply looked at all attributes.
@property
def ptr(self):
return self
# repr should include name of subclass.
expected_repr = """<MjWarningStatSubclass
lastinfo: 0
number: 0
>"""
self.assertEqual(repr(MjWarningStatSubclass()), expected_repr)
def test_mju_rotVecQuat(self): # pylint: disable=invalid-name
vec = [1, 0, 0]
quat = [np.cos(np.pi/8), 0, 0, np.sin(np.pi/8)]
expected = np.array([1, 1, 0]) / np.sqrt(2)
# Check that the output argument works, and that the binding returns None.
res = np.empty(3, np.float64)
self.assertIsNone(mujoco.mju_rotVecQuat(res, vec, quat))
np.testing.assert_allclose(res, expected)
# Check that the function can be called via keyword arguments.
mujoco.mju_rotVecQuat(vec=vec, quat=quat, res=res)
np.testing.assert_allclose(res, expected)
# Check that the res argument must have the right size.
with self.assertRaises(TypeError):
mujoco.mju_rotVecQuat(np.zeros(4, np.float64), vec, quat)
# Check that the vec argument must have the right size.
with self.assertRaises(TypeError):
mujoco.mju_rotVecQuat(res, [1, 2, 3, 4], quat)
# Check that the quat argument must have the right size.
with self.assertRaises(TypeError):
mujoco.mju_rotVecQuat(res, vec, [1, 2, 3])
# The following check needs to be done with a fully initialized array,
# since pybind11 prints out the array's contents when generating TypeErrors.
# Using `np.empty` here results in msan errors.
# Check that the output argument must have the correct dtype.
with self.assertRaises(TypeError):
mujoco.mju_rotVecQuat(vec, quat, res=np.zeros(3, int))
def test_getsetstate(self): # pylint: disable=invalid-name
mujoco.mj_step(self.model, self.data)
# Test for invalid state spec
invalid_spec = 2**mujoco.mjtState.mjNSTATE.value
expected_message = (
f'mj_stateSize: invalid state spec {invalid_spec} >= 2^mjNSTATE'
)
with self.assertRaisesWithLiteralMatch(mujoco.FatalError, expected_message):
mujoco.mj_stateSize(self.model, invalid_spec)
spec = mujoco.mjtState.mjSTATE_INTEGRATION
size = mujoco.mj_stateSize(self.model, spec)
state_bad_size = np.empty(size + 1, np.float64)
    expected_message = 'state size should equal mj_stateSize(m, spec)'
with self.assertRaisesWithLiteralMatch(TypeError, expected_message):
mujoco.mj_getState(self.model, self.data, state_bad_size, spec)
# Get initial state.
state0 = np.empty(size, np.float64)
mujoco.mj_getState(self.model, self.data, state0, spec)
# Step, get next state.
mujoco.mj_step(self.model, self.data)
state1a = np.empty(size, np.float64)
mujoco.mj_getState(self.model, self.data, state1a, spec)
# Reset to initial state, step again, get state again.
mujoco.mj_setState(self.model, self.data, state0, spec)
mujoco.mj_step(self.model, self.data)
state1b = np.empty(size, np.float64)
mujoco.mj_getState(self.model, self.data, state1b, spec)
# Expect next states to be equal.
np.testing.assert_array_equal(state1a, state1b)
def test_mj_jacSite(self): # pylint: disable=invalid-name
mujoco.mj_forward(self.model, self.data)
site_id = mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_SITE, 'mysite')
# Call mj_jacSite with only jacp.
jacp = np.empty((3, 10), np.float64)
mujoco.mj_jacSite(self.model, self.data, jacp, None, site_id)
expected_jacp = np.array(
[[0, 0, 0, 0, 0, 0, -1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
np.testing.assert_array_equal(jacp, expected_jacp)
# Call mj_jacSite with only jacr.
jacr = np.empty((3, 10), np.float64)
mujoco.mj_jacSite(self.model, self.data, None, jacr, site_id)
expected_jacr = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
np.testing.assert_array_equal(jacr, expected_jacr)
# Call mj_jacSite with both jacp and jacr.
jacp[:] = 0
jacr[:] = 0
mujoco.mj_jacSite(self.model, self.data, jacp, jacr, site_id)
np.testing.assert_array_equal(jacp, expected_jacp)
np.testing.assert_array_equal(jacr, expected_jacr)
# Check that the jacp argument must have the right size.
with self.assertRaises(TypeError):
mujoco.mj_jacSite(
self.model, self.data, np.empty((3, 6), jacp.dtype), None, site_id)
# Check that the jacr argument must have the right size.
with self.assertRaises(TypeError):
mujoco.mj_jacSite(
self.model, self.data, None, np.empty((4, 7), jacr.dtype), site_id)
# The following two checks need to be done with fully initialized arrays,
# since pybind11 prints out the array's contents when generating TypeErrors.
# Using `np.empty` here results in msan errors.
# Check that the jacp argument must have the right dtype.
with self.assertRaises(TypeError):
mujoco.mj_jacSite(
self.model, self.data, np.zeros(jacp.shape, int), None, site_id)
# Check that the jacr argument must have the right dtype.
with self.assertRaises(TypeError):
mujoco.mj_jacSite(
self.model, self.data, None, np.zeros(jacr.shape, int), site_id)
def test_docstrings(self): # pylint: disable=invalid-name
self.assertEqual(
mujoco.mj_versionString.__doc__,
"""mj_versionString() -> str
Return the current version of MuJoCo as a null-terminated string.
""")
self.assertEqual(
mujoco.mj_Euler.__doc__,
"""mj_Euler(m: mujoco._structs.MjModel, d: mujoco._structs.MjData) -> None
Euler integrator, semi-implicit in velocity.
""")
def test_int_constant(self):
self.assertEqual(mujoco.mjMAXVFSNAME, 1000)
def test_float_constant(self):
self.assertEqual(mujoco.mjMAXVAL, 1e10)
self.assertEqual(mujoco.mjMINVAL, 1e-15)
def test_string_constants(self):
self.assertLen(mujoco.mjDISABLESTRING, mujoco.mjtDisableBit.mjNDISABLE)
self.assertLen(mujoco.mjENABLESTRING, mujoco.mjtEnableBit.mjNENABLE)
self.assertLen(mujoco.mjTIMERSTRING, mujoco.mjtTimer.mjNTIMER)
self.assertLen(mujoco.mjLABELSTRING, mujoco.mjtLabel.mjNLABEL)
self.assertLen(mujoco.mjFRAMESTRING, mujoco.mjtFrame.mjNFRAME)
self.assertLen(mujoco.mjVISSTRING, mujoco.mjtVisFlag.mjNVISFLAG)
self.assertLen(mujoco.mjRNDSTRING, mujoco.mjtRndFlag.mjNRNDFLAG)
self.assertEqual(mujoco.mjDISABLESTRING[11], 'Refsafe')
self.assertEqual(mujoco.mjVISSTRING[mujoco.mjtVisFlag.mjVIS_INERTIA],
('&Inertia', '0', 'I'))
def test_enum_values(self):
self.assertEqual(mujoco.mjtJoint.mjJNT_FREE, 0)
self.assertEqual(mujoco.mjtJoint.mjJNT_BALL, 1)
self.assertEqual(mujoco.mjtJoint.mjJNT_SLIDE, 2)
self.assertEqual(mujoco.mjtJoint.mjJNT_HINGE, 3)
self.assertEqual(mujoco.mjtEnableBit.mjENBL_OVERRIDE, 1<<0)
self.assertEqual(mujoco.mjtEnableBit.mjENBL_ENERGY, 1<<1)
self.assertEqual(mujoco.mjtEnableBit.mjENBL_FWDINV, 1<<2)
self.assertEqual(mujoco.mjtEnableBit.mjENBL_SENSORNOISE, 1<<4)
self.assertEqual(mujoco.mjtEnableBit.mjNENABLE, 7)
self.assertEqual(mujoco.mjtGeom.mjGEOM_PLANE, 0)
self.assertEqual(mujoco.mjtGeom.mjGEOM_HFIELD, 1)
self.assertEqual(mujoco.mjtGeom.mjGEOM_SPHERE, 2)
self.assertEqual(mujoco.mjtGeom.mjGEOM_ARROW, 100)
self.assertEqual(mujoco.mjtGeom.mjGEOM_ARROW1, 101)
self.assertEqual(mujoco.mjtGeom.mjGEOM_ARROW2, 102)
self.assertEqual(mujoco.mjtGeom.mjGEOM_NONE, 1001)
def test_enum_from_int(self):
self.assertEqual(mujoco.mjtJoint.mjJNT_FREE, mujoco.mjtJoint(0))
self.assertEqual(mujoco.mjtGeom.mjGEOM_ARROW, mujoco.mjtGeom(value=100))
with self.assertRaises(ValueError):
mujoco.mjtJoint(1000)
with self.assertRaises(ValueError):
mujoco.mjtJoint(-1)
def test_enum_as_index(self):
x = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k']
self.assertEqual(x[mujoco.mjtFrame.mjFRAME_WORLD], 'h')
self.assertEqual(
x[mujoco.mjtFrame.mjFRAME_GEOM:mujoco.mjtFrame.mjFRAME_CAMERA],
['c', 'd'])
def test_enum_ops(self):
# Note: when modifying this test, make sure the enum value is an odd number
# so that the division tests are correctly exercised.
self.assertEqual(mujoco.mjtFrame.mjFRAME_WORLD, 7)
self.assertEqual(mujoco.mjtFrame.mjFRAME_WORLD, 7.0)
self.assertEqual(7, mujoco.mjtFrame.mjFRAME_WORLD)
self.assertEqual(7.0, mujoco.mjtFrame.mjFRAME_WORLD)
self.assertEqual(mujoco.mjtFrame.mjFRAME_WORLD,
mujoco.mjtFrame.mjFRAME_WORLD)
self.assertNotEqual(mujoco.mjtFrame.mjFRAME_WORLD,
mujoco.mjtFrame.mjFRAME_NONE)
self.assertEqual(-mujoco.mjtFrame.mjFRAME_WORLD, -7)
self.assertIsInstance(-mujoco.mjtFrame.mjFRAME_WORLD, int)
self.assertEqual(mujoco.mjtFrame.mjFRAME_WORLD + 1, 8)
self.assertIsInstance(mujoco.mjtFrame.mjFRAME_WORLD + 1, int)
self.assertEqual(2 + mujoco.mjtFrame.mjFRAME_WORLD, 9)
self.assertIsInstance(2 + mujoco.mjtFrame.mjFRAME_WORLD, int)
self.assertEqual(mujoco.mjtFrame.mjFRAME_WORLD + 1.75, 8.75)
self.assertEqual(2.75 + mujoco.mjtFrame.mjFRAME_WORLD, 9.75)
self.assertEqual(mujoco.mjtFrame.mjFRAME_WORLD - 2, 5)
self.assertIsInstance(mujoco.mjtFrame.mjFRAME_WORLD - 2, int)
self.assertEqual(8 - mujoco.mjtFrame.mjFRAME_WORLD, 1)
self.assertIsInstance(8 - mujoco.mjtFrame.mjFRAME_WORLD, int)
self.assertEqual(mujoco.mjtFrame.mjFRAME_WORLD - 2.25, 4.75)
self.assertEqual(8.25 - mujoco.mjtFrame.mjFRAME_WORLD, 1.25)
self.assertEqual(mujoco.mjtFrame.mjFRAME_WORLD * 3, 21)
self.assertIsInstance(mujoco.mjtFrame.mjFRAME_WORLD * 3, int)
self.assertEqual(3 * mujoco.mjtFrame.mjFRAME_WORLD, 21)
self.assertIsInstance(3 * mujoco.mjtFrame.mjFRAME_WORLD, int)
self.assertEqual(mujoco.mjtFrame.mjFRAME_WORLD * 3.5, 24.5)
self.assertEqual(3.5 * mujoco.mjtFrame.mjFRAME_WORLD, 24.5)
self.assertEqual(mujoco.mjtFrame.mjFRAME_WORLD / 2, 3.5)
self.assertEqual(17.5 / mujoco.mjtFrame.mjFRAME_WORLD, 2.5)
self.assertEqual(mujoco.mjtFrame.mjFRAME_WORLD // 2, 3)
self.assertIsInstance(mujoco.mjtFrame.mjFRAME_WORLD // 2, int)
self.assertEqual(-mujoco.mjtFrame.mjFRAME_WORLD // 2, -4)
self.assertIsInstance(-mujoco.mjtFrame.mjFRAME_WORLD // 2, int)
self.assertEqual(20 // mujoco.mjtFrame.mjFRAME_WORLD, 2)
self.assertIsInstance(20 // mujoco.mjtFrame.mjFRAME_WORLD, int)
self.assertEqual(-20 // mujoco.mjtFrame.mjFRAME_WORLD, -3)
self.assertIsInstance(-20 // mujoco.mjtFrame.mjFRAME_WORLD, int)
self.assertEqual(mujoco.mjtFrame.mjFRAME_WORLD // 2.0, 3)
self.assertIsInstance(mujoco.mjtFrame.mjFRAME_WORLD // 2.0, float)
self.assertEqual(-mujoco.mjtFrame.mjFRAME_WORLD // 2.0, -4)
self.assertIsInstance(-mujoco.mjtFrame.mjFRAME_WORLD // 2.0, float)
self.assertEqual(20.0 // mujoco.mjtFrame.mjFRAME_WORLD, 2)
self.assertIsInstance(20.0 // mujoco.mjtFrame.mjFRAME_WORLD, float)
self.assertEqual(-20 // mujoco.mjtFrame.mjFRAME_WORLD, -3)
self.assertIsInstance(-20.0 // mujoco.mjtFrame.mjFRAME_WORLD, float)
self.assertEqual(mujoco.mjtFrame.mjFRAME_WORLD % 4, 3)
self.assertIsInstance(mujoco.mjtFrame.mjFRAME_WORLD % 4, int)
self.assertEqual(-mujoco.mjtFrame.mjFRAME_WORLD % -4, -3)
self.assertIsInstance(-mujoco.mjtFrame.mjFRAME_WORLD % -4, int)
self.assertEqual(-mujoco.mjtFrame.mjFRAME_WORLD % 4, 1)
self.assertIsInstance(-mujoco.mjtFrame.mjFRAME_WORLD % 4, int)
self.assertEqual(mujoco.mjtFrame.mjFRAME_WORLD % -4, -1)
self.assertIsInstance(mujoco.mjtFrame.mjFRAME_WORLD % -4, int)
self.assertEqual(9 % mujoco.mjtFrame.mjFRAME_WORLD, 2)
self.assertIsInstance(9 % mujoco.mjtFrame.mjFRAME_WORLD, int)
self.assertEqual(-9 % -mujoco.mjtFrame.mjFRAME_WORLD, -2)
self.assertIsInstance(-9 % -mujoco.mjtFrame.mjFRAME_WORLD, int)
self.assertEqual(-9 % mujoco.mjtFrame.mjFRAME_WORLD, 5)
self.assertIsInstance(-9 % mujoco.mjtFrame.mjFRAME_WORLD, int)
self.assertEqual(9 % -mujoco.mjtFrame.mjFRAME_WORLD, -5)
self.assertIsInstance(9 % -mujoco.mjtFrame.mjFRAME_WORLD, int)
with self.assertRaises(ZeroDivisionError):
_ = mujoco.mjtFrame.mjFRAME_WORLD / 0
with self.assertRaises(ZeroDivisionError):
_ = 1 / mujoco.mjtFrame.mjFRAME_NONE
with self.assertRaises(ZeroDivisionError):
_ = mujoco.mjtFrame.mjFRAME_WORLD // 0
with self.assertRaises(ZeroDivisionError):
_ = 1 // mujoco.mjtFrame.mjFRAME_NONE
with self.assertRaises(ZeroDivisionError):
_ = mujoco.mjtFrame.mjFRAME_WORLD % 0
with self.assertRaises(ZeroDivisionError):
_ = 1 % mujoco.mjtFrame.mjFRAME_NONE
self.assertEqual(
mujoco.mjtDisableBit.mjDSBL_GRAVITY | mujoco.mjtDisableBit.mjDSBL_LIMIT,
72)
self.assertEqual(mujoco.mjtDisableBit.mjDSBL_PASSIVE | 33, 33)
self.assertEqual(mujoco.mjtDisableBit.mjDSBL_PASSIVE & 33, 32)
self.assertEqual(mujoco.mjtDisableBit.mjDSBL_PASSIVE ^ 33, 1)
self.assertEqual(33 | mujoco.mjtDisableBit.mjDSBL_PASSIVE, 33)
self.assertEqual(33 & mujoco.mjtDisableBit.mjDSBL_PASSIVE, 32)
self.assertEqual(33 ^ mujoco.mjtDisableBit.mjDSBL_PASSIVE, 1)
self.assertEqual(mujoco.mjtDisableBit.mjDSBL_CLAMPCTRL << 1,
mujoco.mjtDisableBit.mjDSBL_WARMSTART)
self.assertEqual(mujoco.mjtDisableBit.mjDSBL_CLAMPCTRL >> 3,
mujoco.mjtDisableBit.mjDSBL_CONTACT)
def test_can_raise_error(self):
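    # Pretend the arena stack is exhausted so the next allocation fails.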
self.data.pstack = self.data.narena
with self.assertRaisesRegex(mujoco.FatalError,
r'\Amj_stackAlloc: insufficient memory:'):
mujoco.mj_forward(self.model, self.data)
def test_mjcb_time(self):
class CallCounter:
def __init__(self):
self.count = 0
def __call__(self):
self.count += 1
return self.count - 1
call_counter = CallCounter()
with temporary_callback(mujoco.set_mjcb_time, call_counter):
self.assertIs(mujoco.get_mjcb_time(), call_counter)
      # Check that the callback setter and getter aren't calling the function.
self.assertEqual(call_counter.count, 0)
mujoco.mj_forward(self.model, self.data)
self.assertGreater(call_counter.count, 0)
self.assertIsNone(mujoco.get_mjcb_time())
def test_mjcb_time_exception(self):
class TestError(RuntimeError):
pass
def raises_exception():
raise TestError('string', (1, 2, 3), {'a': 1, 'b': 2})
with temporary_callback(mujoco.set_mjcb_time, raises_exception):
with self.assertRaises(TestError) as e:
mujoco.mj_forward(self.model, self.data)
self.assertEqual(
e.exception.args, ('string', (1, 2, 3), {'a': 1, 'b': 2}))
# Should not raise now that we've cleared the callback.
mujoco.mj_forward(self.model, self.data)
def test_mjcb_time_wrong_return_type(self):
with temporary_callback(mujoco.set_mjcb_time, lambda: 'string'):
with self.assertRaisesWithLiteralMatch(
TypeError, 'mjcb_time callback did not return a number'):
mujoco.mj_forward(self.model, self.data)
def test_mjcb_time_not_callable(self):
with self.assertRaisesWithLiteralMatch(
TypeError, 'callback is not an Optional[Callable]'):
mujoco.set_mjcb_time(1)
def test_mjcb_sensor(self):
class SensorCallback:
def __init__(self, test, expected_model, expected_data):
self.test = test
self.expected_model = expected_model
self.expected_data = expected_data
self.count = 0
def __call__(self, m, d, stage):
self.test.assertIs(m, self.expected_model)
self.test.assertIs(d, self.expected_data)
self.test.assertEqual(stage, mujoco.mjtStage.mjSTAGE_VEL)
d.sensordata[0] = 17
self.count += 1
model_with_sensor = mujoco.MjModel.from_xml_string(TEST_XML_SENSOR)
data_with_sensor = mujoco.MjData(model_with_sensor)
sensor_callback = SensorCallback(self, model_with_sensor, data_with_sensor)
self.assertEqual(sensor_callback.count, 0)
with temporary_callback(mujoco.set_mjcb_sensor, sensor_callback):
mujoco.mj_forward(model_with_sensor, data_with_sensor)
self.assertEqual(sensor_callback.count, 1)
self.assertEqual(data_with_sensor.sensordata[0], 17)
  # This test is disabled on PyPy as it uses sys.getrefcount.
  # However, PyPy is not officially supported by MuJoCo.
@absltest.skipIf(sys.implementation.name == 'pypy',
reason='requires sys.getrefcount')
def test_mjcb_control_not_leak_memory(self):
model_instances = []
data_instances = []
for _ in range(10):
mujoco.set_mjcb_control(None)
model_instances.append(mujoco.MjModel.from_xml_string('<mujoco/>'))
data_instances.append(mujoco.MjData(model_instances[-1]))
mujoco.set_mjcb_control(lambda m, d: None)
mujoco.mj_step(model_instances[-1], data_instances[-1])
mujoco.set_mjcb_control(None)
while data_instances:
d = data_instances.pop()
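      # Two references expected: the local variable and getrefcount's argument.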
self.assertEqual(sys.getrefcount(d), 2)
while model_instances:
m = model_instances.pop()
self.assertEqual(sys.getrefcount(m), 2)
def test_can_initialize_mjv_structs(self):
self.assertIsInstance(mujoco.MjvScene(), mujoco.MjvScene)
self.assertIsInstance(mujoco.MjvCamera(), mujoco.MjvCamera)
self.assertIsInstance(mujoco.MjvGLCamera(), mujoco.MjvGLCamera)
self.assertIsInstance(mujoco.MjvGeom(), mujoco.MjvGeom)
self.assertIsInstance(mujoco.MjvLight(), mujoco.MjvLight)
self.assertIsInstance(mujoco.MjvOption(), mujoco.MjvOption)
self.assertIsInstance(mujoco.MjvScene(), mujoco.MjvScene)
self.assertIsInstance(mujoco.MjvScene(self.model, 100), mujoco.MjvScene)
self.assertIsInstance(mujoco.MjvFigure(), mujoco.MjvFigure)
def test_mjv_camera(self):
camera = mujoco.MjvCamera()
camera.type = mujoco.mjtCamera.mjCAMERA_TRACKING
# IDs should be integers
camera.fixedcamid = 2**31 - 1
self.assertEqual(camera.fixedcamid, 2**31 - 1)
with self.assertRaises(TypeError):
camera.fixedcamid = 0.5
def test_mjv_scene(self):
scene = mujoco.MjvScene(model=self.model, maxgeom=100)
# scene.geoms is a fixed-length tuple of length maxgeom.
self.assertEqual(scene.ngeom, 0)
self.assertEqual(scene.maxgeom, 100)
self.assertLen(scene.geoms, scene.maxgeom)
# When the scene is updated, geoms are added to the scene
# (ngeom is incremented)
mujoco.mj_forward(self.model, self.data)
mujoco.mjv_updateScene(self.model, self.data, mujoco.MjvOption(),
None, mujoco.MjvCamera(),
mujoco.mjtCatBit.mjCAT_ALL, scene)
self.assertGreater(scene.ngeom, 0)
def test_mjv_scene_without_model(self):
scene = mujoco.MjvScene()
self.assertEqual(scene.scale, 1.0)
self.assertEqual(scene.maxgeom, 0)
def test_mj_ray(self):
# mj_ray has tricky argument types
geomid = np.zeros(1, np.int32)
mujoco.mj_forward(self.model, self.data)
mujoco.mj_ray(self.model, self.data, [0, 0, 0], [0, 0, 1], None, 0, 0,
geomid)
mujoco.mj_ray(self.model, self.data, [0, 0, 0], [0, 0, 1],
[0, 0, 0, 0, 0, 0], 0, 0, geomid)
# Check that named arguments work
mujoco.mj_ray(
m=self.model,
d=self.data,
pnt=[0, 0, 0],
vec=[0, 0, 1],
geomgroup=None,
flg_static=0,
bodyexclude=0,
geomid=geomid)
def test_inverse_fd_none(self):
eps = 1e-6
flg_centered = 0
mujoco.mjd_inverseFD(self.model, self.data, eps, flg_centered,
None, None, None, None, None, None, None)
def test_inverse_fd(self):
eps = 1e-6
flg_centered = 0
df_dq = np.zeros((self.model.nv, self.model.nv))
df_dv = np.zeros((self.model.nv, self.model.nv))
df_da = np.zeros((self.model.nv, self.model.nv))
ds_dq = np.zeros((self.model.nv, self.model.nsensordata))
ds_dv = np.zeros((self.model.nv, self.model.nsensordata))
ds_da = np.zeros((self.model.nv, self.model.nsensordata))
dm_dq = np.zeros((self.model.nv, self.model.nM))
mujoco.mjd_inverseFD(self.model, self.data, eps, flg_centered,
df_dq, df_dv, df_da, ds_dq, ds_dv, ds_da, dm_dq)
self.assertGreater(np.linalg.norm(df_dq), eps)
self.assertGreater(np.linalg.norm(df_dv), eps)
self.assertGreater(np.linalg.norm(df_da), eps)
self.assertGreater(np.linalg.norm(ds_dq), eps)
self.assertGreater(np.linalg.norm(ds_dv), eps)
self.assertGreater(np.linalg.norm(ds_da), eps)
def test_mjd_sub_quat(self):
quat1 = np.array((0.2, 0.3, 0.3, 0.4))
quat2 = np.array((0.2, 0.3, 0.3, 0.4))
d1 = np.empty(9, np.float64)
d2 = np.empty(9, np.float64)
mujoco.mjd_subQuat(quat1, quat2, d1, d2)
  def test_mjd_quat_integrate(self):
scale = 0.1
vel = np.array((0.2, 0.3, 0.3))
d_quat = np.empty(9, np.float64)
d_vel = np.empty(9, np.float64)
d_h = np.empty(3, np.float64)
mujoco.mjd_quatIntegrate(vel, scale, d_quat, d_vel, d_h)
def test_banded(self):
n_total = 4
n_band = 1
n_dense = 1
dense = np.array([[1.0, 0, 0, 0.1],
[0, 2.0, 0, 0.2],
[0, 0, 3.0, 0.3],
[0.1, 0.2, 0.3, 4.0]])
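    # Band-dense storage: (n_total - n_dense) banded rows of n_band entries
    # each, followed by n_dense dense rows of n_total entries each.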
band = np.zeros(n_band*(n_total-n_dense) + n_dense*n_total)
mujoco.mju_dense2Band(band, dense, n_total, n_band, n_dense)
for i in range(4):
index = mujoco.mju_bandDiag(i, n_total, n_band, n_dense)
self.assertEqual(band[index], i+1)
dense2 = np.zeros((n_total, n_total))
flg_sym = 1
mujoco.mju_band2Dense(dense2, band, n_total, n_band, n_dense, flg_sym)
np.testing.assert_array_equal(dense, dense2)
vec = np.array([[2.0], [2.0], [3.0], [4.0]])
res = np.zeros_like(vec)
n_vec = 1
mujoco.mju_bandMulMatVec(res, band, vec,
n_total, n_band, n_dense, n_vec, flg_sym)
np.testing.assert_array_equal(res, dense @ vec)
diag_add = 0
diag_mul = 0
mujoco.mju_cholFactorBand(band, n_total, n_band, n_dense,
diag_add, diag_mul)
mujoco.mju_cholSolveBand(res, band, vec, n_total, n_band, n_dense)
np.testing.assert_almost_equal(res, np.linalg.solve(dense, vec))
def test_mju_box_qp(self):
n = 5
res = np.zeros(n)
r = np.zeros((n, n+7))
index = np.zeros(n, np.int32)
h = np.eye(n)
g = np.ones((n,))
lower = -np.ones((n,))
upper = np.ones((n,))
rank = mujoco.mju_boxQP(res, r, index, h, g, lower, upper)
self.assertGreater(rank, -1)
def test_mju_fill(self):
res = np.empty(3, np.float64)
mujoco.mju_fill(res, 1.5)
np.testing.assert_array_equal(res, np.full(3, 1.5))
def test_mju_eye(self):
eye4 = np.empty((4, 4), np.float64)
mujoco.mju_eye(eye4)
np.testing.assert_array_equal(eye4, np.eye(4))
def test_mju_symmetrize(self):
mat = np.linspace(0, 1, 16).reshape(4, 4)
res = np.empty((4, 4), np.float64)
mujoco.mju_symmetrize(res, mat)
np.testing.assert_array_equal(res, 0.5*(mat + mat.T))
def test_mju_clip(self):
self.assertEqual(mujoco.mju_clip(1.5, 1.0, 2.0), 1.5)
self.assertEqual(mujoco.mju_clip(1.5, 2.0, 3.0), 2.0)
self.assertEqual(mujoco.mju_clip(1.5, 0.0, 1.0), 1.0)
def test_mju_mul_vec_mat_vec(self):
vec1 = np.array([1., 2., 3.])
vec2 = np.array([3., 2., 1.])
mat = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
self.assertEqual(mujoco.mju_mulVecMatVec(vec1, mat, vec2), 204.)
@parameterized.product(flg_html=(False, True), flg_pad=(False, True))
def test_mj_printSchema(self, flg_html, flg_pad): # pylint: disable=invalid-name
# Make sure that mj_printSchema doesn't raise an exception
# (e.g. because the internal output buffer is too small)
self.assertIn('mujoco', mujoco.mj_printSchema(flg_html, flg_pad))
def test_pickle_mjdata(self):
mujoco.mj_step(self.model, self.data)
data2 = pickle.loads(pickle.dumps(self.data))
attr_to_compare = (
'time', 'qpos', 'qvel', 'qacc', 'xpos', 'mocap_pos',
'warning', 'energy'
)
self._assert_attributes_equal(data2, self.data, attr_to_compare)
for _ in range(10):
mujoco.mj_step(self.model, self.data)
mujoco.mj_step(self.model, data2)
self._assert_attributes_equal(data2, self.data, attr_to_compare)
def test_pickle_mjmodel(self):
model2 = pickle.loads(pickle.dumps(self.model))
attr_to_compare = (
'nq', 'nmat', 'body_pos', 'names',
)
self._assert_attributes_equal(model2, self.model, attr_to_compare)
def test_indexer_name_id(self):
xml = r"""
<mujoco>
<worldbody>
<geom name="mygeom" size="1" pos="0 0 1"/>
<geom size="2" pos="0 0 2"/>
<geom size="3" pos="0 0 3"/>
<geom name="myothergeom" size="4" pos="0 0 4"/>
<geom size="5" pos="0 0 5"/>
</worldbody>
</mujoco>
"""
model = mujoco.MjModel.from_xml_string(xml)
self.assertEqual(model.geom('mygeom').id, 0)
self.assertEqual(model.geom('myothergeom').id, 3)
self.assertEqual(model.geom(0).name, 'mygeom')
self.assertEqual(model.geom(1).name, '')
self.assertEqual(model.geom(2).name, '')
self.assertEqual(model.geom(3).name, 'myothergeom')
self.assertEqual(model.geom(4).name, '')
self.assertEqual(model.geom(0).size[0], 1)
self.assertEqual(model.geom(1).size[0], 2)
self.assertEqual(model.geom(2).size[0], 3)
self.assertEqual(model.geom(3).size[0], 4)
self.assertEqual(model.geom(4).size[0], 5)
data = mujoco.MjData(model)
mujoco.mj_forward(model, data)
self.assertEqual(data.geom('mygeom').id, 0)
self.assertEqual(data.geom('myothergeom').id, 3)
self.assertEqual(data.geom(0).name, 'mygeom')
self.assertEqual(data.geom(1).name, '')
self.assertEqual(data.geom(2).name, '')
self.assertEqual(data.geom(3).name, 'myothergeom')
self.assertEqual(data.geom(4).name, '')
self.assertEqual(data.geom(0).xpos[2], 1)
self.assertEqual(data.geom(1).xpos[2], 2)
self.assertEqual(data.geom(2).xpos[2], 3)
self.assertEqual(data.geom(3).xpos[2], 4)
self.assertEqual(data.geom(4).xpos[2], 5)
def _assert_attributes_equal(self, actual_obj, expected_obj, attr_to_compare):
for name in attr_to_compare:
actual_value = getattr(actual_obj, name)
expected_value = getattr(expected_obj, name)
try:
if isinstance(expected_value, np.ndarray):
np.testing.assert_array_equal(actual_value, expected_value)
else:
self.assertEqual(actual_value, expected_value)
except AssertionError as e:
self.fail("Attribute '{}' differs from expected value: {}".format(
name, str(e)))
def test_load_plugin(self):
mujoco.MjModel.from_xml_string(TEST_XML_PLUGIN)
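# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): the
# mj_getState/mj_setState round trip exercised by test_getsetstate above,
# written as a standalone helper. The inline XML is a hypothetical minimal
# model, not one of the TEST_XML fixtures.
def _state_roundtrip_sketch():
  model = mujoco.MjModel.from_xml_string(
      '<mujoco><worldbody><body><joint type="free"/>'
      '<geom size="0.1"/></body></worldbody></mujoco>')
  data = mujoco.MjData(model)
  spec = mujoco.mjtState.mjSTATE_INTEGRATION
  size = mujoco.mj_stateSize(model, spec)
  state = np.empty(size, np.float64)
  mujoco.mj_step(model, data)
  mujoco.mj_getState(model, data, state, spec)  # snapshot after one step
  mujoco.mj_step(model, data)
  mujoco.mj_setState(model, data, state, spec)  # rewind to the snapshot
  return state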
if __name__ == '__main__':
absltest.main()
| mujoco-main | python/mujoco/bindings_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extremely minimal test of mujoco.viewer that just tries to import it."""
from absl.testing import absltest
from mujoco import viewer
class ViewerTest(absltest.TestCase):
def test_launch_function_exists(self):
self.assertIsNotNone(viewer.launch)
if __name__ == '__main__':
absltest.main()
| mujoco-main | python/mujoco/viewer_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MuJoCo Python rendering."""
from absl.testing import absltest
import mujoco
import numpy as np
@absltest.skipUnless(hasattr(mujoco, 'GLContext'),
'MuJoCo rendering is disabled')
class MuJoCoRenderTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.gl = mujoco.GLContext(640, 480)
self.gl.make_current()
def tearDown(self):
super().tearDown()
del self.gl
def test_can_render(self):
"""Test that the bindings can successfully render a simple image.
This test sets up a basic MuJoCo rendering context similar to the example in
https://mujoco.readthedocs.io/en/latest/programming#visualization
It calls `mjr_rectangle` rather than `mjr_render` so that we can assert an
exact rendered image without needing golden data. The purpose of this test
is to ensure that the bindings can correctly return pixels in Python, rather
than to test MuJoCo's rendering pipeline itself.
"""
self.model = mujoco.MjModel.from_xml_string('<mujoco><worldbody/></mujoco>')
self.data = mujoco.MjData(self.model)
scene = mujoco.MjvScene(self.model, maxgeom=0)
mujoco.mjv_updateScene(
self.model, self.data, mujoco.MjvOption(), mujoco.MjvPerturb(),
mujoco.MjvCamera(), mujoco.mjtCatBit.mjCAT_ALL, scene)
context = mujoco.MjrContext(self.model, mujoco.mjtFontScale.mjFONTSCALE_150)
mujoco.mjr_setBuffer(mujoco.mjtFramebuffer.mjFB_OFFSCREEN, context)
# MuJoCo's default render buffer size is 640x480.
full_rect = mujoco.MjrRect(0, 0, 640, 480)
mujoco.mjr_rectangle(full_rect, 0, 0, 0, 1)
blue_rect = mujoco.MjrRect(56, 67, 234, 123)
mujoco.mjr_rectangle(blue_rect, 0, 0, 1, 1)
expected_upside_down_image = np.zeros((480, 640, 3), dtype=np.uint8)
expected_upside_down_image[67:67+123, 56:56+234, 2] = 255
upside_down_image = np.empty((480, 640, 3), dtype=np.uint8)
mujoco.mjr_readPixels(upside_down_image, None, full_rect, context)
np.testing.assert_array_equal(upside_down_image, expected_upside_down_image)
# Check that mjr_readPixels can accept a flattened array.
upside_down_image[:] = 0
mujoco.mjr_readPixels(
np.reshape(upside_down_image, -1), None, full_rect, context)
np.testing.assert_array_equal(upside_down_image, expected_upside_down_image)
context.free()
def test_safe_to_free_context_twice(self):
self.model = mujoco.MjModel.from_xml_string('<mujoco><worldbody/></mujoco>')
self.data = mujoco.MjData(self.model)
scene = mujoco.MjvScene(self.model, maxgeom=0)
mujoco.mjv_updateScene(
self.model, self.data, mujoco.MjvOption(), None,
mujoco.MjvCamera(), mujoco.mjtCatBit.mjCAT_ALL, scene)
context = mujoco.MjrContext(self.model, mujoco.mjtFontScale.mjFONTSCALE_150)
mujoco.mjr_setBuffer(mujoco.mjtFramebuffer.mjFB_OFFSCREEN, context)
context.free()
context.free()
def test_mjrrect_repr(self):
rect = mujoco.MjrRect(1, 2, 3, 4)
rect_repr = repr(rect)
self.assertIn('MjrRect', rect_repr)
self.assertIn('left: 1', rect_repr)
if __name__ == '__main__':
absltest.main()
| mujoco-main | python/mujoco/render_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exports GLContext for MuJoCo Python bindings."""
import os
import platform
# pylint: disable=g-import-not-at-top
_SYSTEM = platform.system()
_MUJOCO_GL = os.environ.get('MUJOCO_GL', '').lower().strip()
if _MUJOCO_GL not in ('disable', 'disabled', 'off', 'false', '0'):
  _VALID_MUJOCO_GL = ('enable', 'enabled', 'on', 'true', '1', 'glfw', '')
if _SYSTEM == 'Linux':
_VALID_MUJOCO_GL += ('glx', 'egl', 'osmesa')
elif _SYSTEM == 'Windows':
_VALID_MUJOCO_GL += ('wgl',)
elif _SYSTEM == 'Darwin':
_VALID_MUJOCO_GL += ('cgl',)
if _MUJOCO_GL not in _VALID_MUJOCO_GL:
raise RuntimeError(
f'invalid value for environment variable MUJOCO_GL: {_MUJOCO_GL}')
if _SYSTEM == 'Linux' and _MUJOCO_GL == 'osmesa':
from mujoco.osmesa import GLContext as _GLContext
GLContext = _GLContext
elif _SYSTEM == 'Linux' and _MUJOCO_GL == 'egl':
from mujoco.egl import GLContext as _GLContext
GLContext = _GLContext
elif _SYSTEM == 'Darwin':
from mujoco.cgl import GLContext as _GLContext
GLContext = _GLContext
else:
from mujoco.glfw import GLContext as _GLContext
GLContext = _GLContext
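# Illustrative sketch (not part of the original module): whichever backend
# branch above was taken, the selected GLContext class exposes the same
# minimal lifecycle.
def _example_context_lifecycle(max_width=640, max_height=480):
  ctx = GLContext(max_width, max_height)
  ctx.make_current()  # bind the context to the calling thread
  # ... issue MuJoCo mjr_* rendering calls here ...
  ctx.free()  # release driver resources when done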
| mujoco-main | python/mujoco/gl_context.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bindings for Apple CGL."""
import ctypes
import enum
_CGL = ctypes.CDLL('/System/Library/OpenGL.framework/OpenGL')
CGLContextObj = ctypes.c_void_p
CGLPixelFormatObj = ctypes.c_void_p
GLint = ctypes.c_int
_CGLChoosePixelFormat = _CGL.CGLChoosePixelFormat
_CGLChoosePixelFormat.argtypes = (
ctypes.POINTER(ctypes.c_int),
ctypes.POINTER(CGLPixelFormatObj),
ctypes.POINTER(GLint),
)
_CGLCreateContext = _CGL.CGLCreateContext
_CGLCreateContext.argtypes = (
CGLPixelFormatObj,
ctypes.c_int,
CGLContextObj,
)
_CGLErrorString = _CGL.CGLErrorString
_CGLErrorString.restype = ctypes.c_char_p
_CGLErrorString.argtypes = (ctypes.c_int,)
_CGLLockContext = _CGL.CGLLockContext
_CGLLockContext.argtypes = (CGLContextObj,)
_CGLReleaseContext = _CGL.CGLReleaseContext
_CGLReleaseContext.restype = None
_CGLReleaseContext.argtypes = (CGLContextObj,)
_CGLReleasePixelFormat = _CGL.CGLReleasePixelFormat
_CGLReleasePixelFormat.restype = None
_CGLReleasePixelFormat.argtypes = (CGLPixelFormatObj,)
_CGLSetCurrentContext = _CGL.CGLSetCurrentContext
_CGLSetCurrentContext.argtypes = (CGLContextObj,)
_CGLUnlockContext = _CGL.CGLUnlockContext
_CGLUnlockContext.argtypes = (CGLContextObj,)
# pylint: disable=invalid-name
class CGLOpenGLProfile(enum.IntEnum):
CGLOGLPVersion_Legacy = 0x1000 # renderer compatible with GL1.0
CGLOGLPVersion_3_2_Core = 0x3200 # renderer capable of GL3.2 or later
CGLOGLPVersion_GL3_Core = 0x3200 # renderer capable of GL3.2 or later
CGLOGLPVersion_GL4_Core = 0x4100 # renderer capable of GL4.1 or later
class CGLPixelFormatAttribute(enum.IntEnum):
"""CGLPixelFormatAttribute enum values."""
CGLPFAAllRenderers = 1 # choose from all available renderers
CGLPFATripleBuffer = 3 # choose a triple buffered pixel format
CGLPFADoubleBuffer = 5 # choose a double buffered pixel format
CGLPFAColorSize = 8 # number of color buffer bits
CGLPFAAlphaSize = 11 # number of alpha component bits
CGLPFADepthSize = 12 # number of depth buffer bits
CGLPFAStencilSize = 13 # number of stencil buffer bits
CGLPFAMinimumPolicy = 51 # never choose smaller buffers than requested
CGLPFAMaximumPolicy = 52 # choose largest buffers of type requested
CGLPFASampleBuffers = 55 # number of multi sample buffers
CGLPFASample = 56 # number of samples per multi sample buffer
CGLPFAColorFloat = 58 # color buffers store floating point pixels
CGLPFAMultisample = 59 # choose multisampling
CGLPFASupersample = 60 # choose supersampling
CGLPFASampleAlpha = 61 # request alpha filtering
CGLPFARendererID = 70 # request renderer by ID
CGLPFANoRecovery = 72 # disable all failure recovery systems
CGLPFAAccelerated = 73 # choose a hardware accelerated renderer
CGLPFAClosestPolicy = 74 # choose the closest color buffer to request
CGLPFABackingStore = 76 # back buffer contents are valid after swap
CGLPFABackingVolatile = 77 # back buffer contents are volatile after swap
CGLPFADisplayMask = 84 # mask limiting supported displays
CGLPFAAllowOfflineRenderers = 96 # show offline renderers in pixel formats
CGLPFAAcceleratedCompute = 97 # choose a hardware accelerated compute device
CGLPFAOpenGLProfile = 99 # specify an OpenGL Profile to use
CGLPFASupportsAutomaticGraphicsSwitching = 101 # responds to display changes
CGLPFAVirtualScreenCount = 128 # number of virtual screens in this format
# Note: the following attributes are deprecated in Core Profile
CGLPFAAuxBuffers = 7 # number of aux buffers
CGLPFAAccumSize = 14 # number of accum buffer bits
CGLPFAAuxDepthStencil = 57 # each aux buffer has its own depth stencil
CGLPFAStereo = 6
CGLPFAOffScreen = 53
CGLPFAWindow = 80
CGLPFACompliant = 83
CGLPFAPBuffer = 90
CGLPFARemotePBuffer = 91
CGLPFASingleRenderer = 71
CGLPFARobust = 75
CGLPFAMPSafe = 78
CGLPFAMultiScreen = 81
CGLPFAFullScreen = 54
# pylint: enable=invalid-name
class CGLError(RuntimeError): # pylint: disable=g-bad-exception-name
pass
def _make_checked(func):
def checked_func(*args):
err = func(*args)
if err:
raise CGLError(_CGLErrorString(err).decode())
return checked_func
CGLChoosePixelFormat = _make_checked(_CGLChoosePixelFormat)
CGLCreateContext = _make_checked(_CGLCreateContext)
CGLLockContext = _make_checked(_CGLLockContext)
CGLReleaseContext = _CGLReleaseContext
CGLReleasePixelFormat = _CGLReleasePixelFormat
CGLSetCurrentContext = _make_checked(_CGLSetCurrentContext)
CGLUnlockContext = _make_checked(_CGLUnlockContext)
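# Illustrative sketch (not part of the original bindings): the checked
# wrappers raise CGLError with Apple's error string instead of returning a
# raw CGL status code.
def _example_choose_accelerated_pixel_format():
  # Attribute list: one boolean attribute, terminated by 0.
  attrib_values = (CGLPixelFormatAttribute.CGLPFAAccelerated, 0)
  attribs = (ctypes.c_int * len(attrib_values))(*attrib_values)
  pix = CGLPixelFormatObj()
  npix = GLint()
  CGLChoosePixelFormat(attribs, ctypes.byref(pix), ctypes.byref(npix))
  CGLReleasePixelFormat(pix)  # release once done with the pixel format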
| mujoco-main | python/mujoco/cgl/cgl.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An Apple CGL context for offscreen rendering on macOS."""
import ctypes
from mujoco.cgl import cgl
_ATTRIB = cgl.CGLPixelFormatAttribute
_PROFILE = cgl.CGLOpenGLProfile
class GLContext:
"""An EGL context for headless accelerated OpenGL rendering on GPU devices."""
def __init__(self, max_width, max_height):
del max_width, max_height # unused
attrib_values = (
_ATTRIB.CGLPFAOpenGLProfile, _PROFILE.CGLOGLPVersion_Legacy,
_ATTRIB.CGLPFAColorSize, 24,
_ATTRIB.CGLPFAAlphaSize, 8,
_ATTRIB.CGLPFADepthSize, 24,
_ATTRIB.CGLPFAStencilSize, 8,
_ATTRIB.CGLPFAMultisample,
_ATTRIB.CGLPFASampleBuffers, 1,
_ATTRIB.CGLPFASample, 4,
_ATTRIB.CGLPFAAccelerated,
0,
)
attribs = (ctypes.c_int * len(attrib_values))(*attrib_values)
self._pix = cgl.CGLPixelFormatObj()
npix = cgl.GLint()
cgl.CGLChoosePixelFormat(
attribs, ctypes.byref(self._pix), ctypes.byref(npix)
)
self._context = cgl.CGLContextObj()
cgl.CGLCreateContext(self._pix, 0, ctypes.byref(self._context))
def make_current(self):
cgl.CGLSetCurrentContext(self._context)
cgl.CGLLockContext(self._context)
def free(self):
"""Frees resources associated with this context."""
if self._context:
cgl.CGLUnlockContext(self._context)
cgl.CGLSetCurrentContext(None)
cgl.CGLReleaseContext(self._context)
self._context = None
if self._pix:
cgl.CGLReleasePixelFormat(self._pix)
      self._pix = None
def __del__(self):
self.free()
| mujoco-main | python/mujoco/cgl/__init__.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code generator for function_traits.h."""
import keyword
from typing import Mapping, Sequence
from absl import app
from introspect import ast_nodes
from introspect import functions
FUNCTIONS: Mapping[str, ast_nodes.FunctionDecl] = functions.FUNCTIONS
def _sanitize_keyword(s: str) -> str:
if keyword.iskeyword(s):
return s + '_'
return s
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
struct_decls = []
for func in FUNCTIONS.values():
# Skip mju_error_{i,s} and mju_warning_{i,s} as these are not
# supported in the Python bindings, and Introspect currently
# doesn't support variadic functions.
if func.name.startswith('mju_error') or func.name == 'mju_warning':
continue
# Modify some parameter types.
parameters = []
modified = False
for p in func.parameters:
# Expose array parameters as pointer-to-arrays so that we can determine
# array extents in C++ templates.
if isinstance(p.type, ast_nodes.ArrayType):
parameters.append(ast_nodes.FunctionParameterDecl(
name=p.name, type=ast_nodes.PointerType(
ast_nodes.ArrayType(
inner_type=p.type.inner_type, extents=p.type.extents))))
modified = True
else:
parameters.append(p)
if modified:
func = ast_nodes.FunctionDecl(
name=func.name, return_type=func.return_type,
parameters=parameters, doc=func.doc)
getfunc = f'*reinterpret_cast<type*>(&::{func.name})'
else:
getfunc = f'::{func.name}'
param_names = ', '.join(
f'"{_sanitize_keyword(p.name)}"' for p in parameters
)
struct_decls.append(f"""
struct {func.name} {{
static constexpr char name[] = "{func.name}";
static constexpr char doc[] = "{func.doc}";
using type = {func.decltype};
static constexpr auto param_names = std::make_tuple({param_names});
MUJOCO_ALWAYS_INLINE static type& GetFunc() {{
return {getfunc};
}}
}};
""".strip())
all_structs = '\n\n'.join(struct_decls)
print(f"""
// Copyright 2022 DeepMind Technologies Limited
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MUJOCO_PYTHON_CODEGEN_FUNCTION_TRAITS_H_
#define MUJOCO_PYTHON_CODEGEN_FUNCTION_TRAITS_H_
#include <tuple>
#include <mujoco/mujoco.h>
#include "util/crossplatform.h"
namespace mujoco::python_traits {{
{all_structs}
}} // namespace mujoco::python_traits
#endif // MUJOCO_PYTHON_CODEGEN_FUNCTION_TRAITS_H_
""".lstrip())
if __name__ == '__main__':
app.run(main)
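# For reference, the emitted structs look roughly like the following (a
# hedged illustration; the exact text depends on the introspect database):
#
#   struct mj_step {
#     static constexpr char name[] = "mj_step";
#     static constexpr char doc[] = "Advance simulation...";
#     using type = void (const mjModel *, mjData *);
#     static constexpr auto param_names = std::make_tuple("m", "d");
#     MUJOCO_ALWAYS_INLINE static type& GetFunc() { return ::mj_step; }
#   };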
| mujoco-main | python/mujoco/codegen/generate_function_traits.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code generator for function_traits.h."""
from typing import Mapping, Sequence
from absl import app
from introspect import ast_nodes
from introspect import enums
ENUMS: Mapping[str, ast_nodes.EnumDecl] = enums.ENUMS
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
struct_decls = []
for enum in ENUMS.values():
value_decls = []
for k in enum.values:
value_decls.append(f'std::make_pair("{k}", ::{enum.name}::{k})')
if len(value_decls) < 2:
value_decls = ''.join(value_decls)
else:
value_decls = '\n ' + ',\n '.join(value_decls)
struct_decls.append(f"""
struct {enum.name} {{
static constexpr char name[] = "{enum.name}";
using type = ::{enum.name};
static constexpr auto values = std::array{{{value_decls}}};
}};
""".strip())
all_structs = '\n\n'.join(struct_decls)
all_enum_inits = '\n ' + '{},\n '.join(ENUMS.keys()) + '{}'
print(f"""
// Copyright 2022 DeepMind Technologies Limited
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MUJOCO_PYTHON_CODEGEN_ENUM_TRAITS_H_
#define MUJOCO_PYTHON_CODEGEN_ENUM_TRAITS_H_
#include <array>
#include <tuple>
#include <utility>
#include <mujoco/mujoco.h>
namespace mujoco::python_traits {{
{all_structs}
static constexpr auto kAllEnums = std::make_tuple({all_enum_inits});
}} // namespace mujoco::python_traits
#endif // MUJOCO_PYTHON_CODEGEN_ENUM_TRAITS_H_
""".lstrip())
if __name__ == '__main__':
app.run(main)
| mujoco-main | python/mujoco/codegen/generate_enum_traits.py |
# Copyright 2018 The dm_control Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An EGL context for headless accelerated OpenGL rendering on GPU devices."""
import atexit
import ctypes
import os
PYOPENGL_PLATFORM = os.environ.get('PYOPENGL_PLATFORM')
if not PYOPENGL_PLATFORM:
os.environ['PYOPENGL_PLATFORM'] = 'egl'
elif PYOPENGL_PLATFORM.lower() != 'egl':
  raise ImportError(
      'Cannot use EGL rendering platform. '
      'The PYOPENGL_PLATFORM environment variable is set to {!r} '
      '(should be either unset or \'egl\').'.format(PYOPENGL_PLATFORM))
from mujoco.egl import egl_ext as EGL
from OpenGL import error
def create_initialized_egl_device_display():
"""Creates an initialized EGL display directly on a device."""
all_devices = EGL.eglQueryDevicesEXT()
selected_device = os.environ.get('MUJOCO_EGL_DEVICE_ID', None)
if selected_device is None:
candidates = all_devices
else:
device_idx = int(selected_device)
if not 0 <= device_idx < len(all_devices):
raise RuntimeError(
f'The MUJOCO_EGL_DEVICE_ID environment variable must be an integer '
f'between 0 and {len(all_devices)-1} (inclusive), got {device_idx}.')
candidates = all_devices[device_idx:device_idx + 1]
for device in candidates:
display = EGL.eglGetPlatformDisplayEXT(
EGL.EGL_PLATFORM_DEVICE_EXT, device, None)
if display != EGL.EGL_NO_DISPLAY and EGL.eglGetError() == EGL.EGL_SUCCESS:
# `eglInitialize` may or may not raise an exception on failure depending
# on how PyOpenGL is configured. We therefore catch a `GLError` and also
# manually check the output of `eglGetError()` here.
try:
initialized = EGL.eglInitialize(display, None, None)
except error.GLError:
pass
else:
if initialized == EGL.EGL_TRUE and EGL.eglGetError() == EGL.EGL_SUCCESS:
return display
return EGL.EGL_NO_DISPLAY
EGL_DISPLAY = create_initialized_egl_device_display()
if EGL_DISPLAY == EGL.EGL_NO_DISPLAY:
  raise ImportError(
      'Cannot initialize an EGL device display. This likely means that your '
      'EGL driver does not support the PLATFORM_DEVICE extension, which is '
      'required for creating a headless rendering context.')
atexit.register(EGL.eglTerminate, EGL_DISPLAY)
EGL_ATTRIBUTES = (
EGL.EGL_RED_SIZE, 8,
EGL.EGL_GREEN_SIZE, 8,
EGL.EGL_BLUE_SIZE, 8,
EGL.EGL_ALPHA_SIZE, 8,
EGL.EGL_DEPTH_SIZE, 24,
EGL.EGL_STENCIL_SIZE, 8,
EGL.EGL_COLOR_BUFFER_TYPE, EGL.EGL_RGB_BUFFER,
EGL.EGL_SURFACE_TYPE, EGL.EGL_PBUFFER_BIT,
EGL.EGL_RENDERABLE_TYPE, EGL.EGL_OPENGL_BIT,
EGL.EGL_NONE
)
class GLContext:
"""An EGL context for headless accelerated OpenGL rendering on GPU devices."""
def __init__(self, max_width, max_height):
del max_width, max_height # unused
num_configs = ctypes.c_long()
config_size = 1
config = EGL.EGLConfig()
EGL.eglReleaseThread()
EGL.eglChooseConfig(
EGL_DISPLAY,
EGL_ATTRIBUTES,
ctypes.byref(config),
config_size,
num_configs)
if num_configs.value < 1:
raise RuntimeError(
'EGL failed to find a framebuffer configuration that matches the '
'desired attributes: {}'.format(EGL_ATTRIBUTES))
EGL.eglBindAPI(EGL.EGL_OPENGL_API)
self._context = EGL.eglCreateContext(
EGL_DISPLAY, config, EGL.EGL_NO_CONTEXT, None)
if not self._context:
raise RuntimeError('Cannot create an EGL context.')
def make_current(self):
if not EGL.eglMakeCurrent(
EGL_DISPLAY, EGL.EGL_NO_SURFACE, EGL.EGL_NO_SURFACE, self._context):
raise RuntimeError('Failed to make the EGL context current.')
def free(self):
"""Frees resources associated with this context."""
if self._context:
current_context = EGL.eglGetCurrentContext()
if current_context and self._context.address == current_context.address:
EGL.eglMakeCurrent(EGL_DISPLAY, EGL.EGL_NO_SURFACE,
EGL.EGL_NO_SURFACE, EGL.EGL_NO_CONTEXT)
EGL.eglDestroyContext(EGL_DISPLAY, self._context)
EGL.eglReleaseThread()
self._context = None
def __del__(self):
self.free()
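# Illustrative usage sketch (not part of the original module): because
# EGL_DISPLAY is created at import time, pinning rendering to a particular
# GPU must happen before importing this module, e.g.
#
#   import os
#   os.environ['MUJOCO_EGL_DEVICE_ID'] = '0'  # pick the first EGL device
#   from mujoco import egl
#   ctx = egl.GLContext(640, 480)
#   ctx.make_current()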
| mujoco-main | python/mujoco/egl/__init__.py |
# Copyright 2018 The dm_control Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extends OpenGL.EGL with definitions necessary for headless rendering."""
import ctypes
from OpenGL.platform import ctypesloader # pylint: disable=g-bad-import-order
try:
# Nvidia driver seems to need libOpenGL.so (as opposed to libGL.so)
# for multithreading to work properly. We load this in before everything else.
ctypesloader.loadLibrary(ctypes.cdll, 'OpenGL', mode=ctypes.RTLD_GLOBAL)
except OSError:
pass
# pylint: disable=g-import-not-at-top
from OpenGL import EGL
from OpenGL import error
# From the EGL_EXT_device_enumeration extension.
PFNEGLQUERYDEVICESEXTPROC = ctypes.CFUNCTYPE(
EGL.EGLBoolean,
EGL.EGLint,
ctypes.POINTER(EGL.EGLDeviceEXT),
ctypes.POINTER(EGL.EGLint),
)
try:
_eglQueryDevicesEXT = PFNEGLQUERYDEVICESEXTPROC( # pylint: disable=invalid-name
EGL.eglGetProcAddress('eglQueryDevicesEXT'))
except TypeError as e:
raise ImportError('eglQueryDevicesEXT is not available.') from e
# From the EGL_EXT_platform_device extension.
EGL_PLATFORM_DEVICE_EXT = 0x313F
PFNEGLGETPLATFORMDISPLAYEXTPROC = ctypes.CFUNCTYPE(
EGL.EGLDisplay, EGL.EGLenum, ctypes.c_void_p, ctypes.POINTER(EGL.EGLint))
try:
eglGetPlatformDisplayEXT = PFNEGLGETPLATFORMDISPLAYEXTPROC( # pylint: disable=invalid-name
EGL.eglGetProcAddress('eglGetPlatformDisplayEXT'))
except TypeError as e:
raise ImportError('eglGetPlatformDisplayEXT is not available.') from e
# Wrap raw _eglQueryDevicesEXT function into something more Pythonic.
def eglQueryDevicesEXT(max_devices=10): # pylint: disable=invalid-name
devices = (EGL.EGLDeviceEXT * max_devices)()
num_devices = EGL.EGLint()
success = _eglQueryDevicesEXT(max_devices, devices, num_devices)
if success == EGL.EGL_TRUE:
return [devices[i] for i in range(num_devices.value)]
else:
raise error.GLError(err=EGL.eglGetError(),
baseOperation=eglQueryDevicesEXT,
result=success)
# Expose everything from upstream so that
# we can use this as a drop-in replacement for OpenGL.EGL.
# pylint: disable=wildcard-import,g-bad-import-order
from OpenGL.EGL import *
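# Illustrative usage sketch (assumes a working EGL driver):
#
#   devices = eglQueryDevicesEXT(max_devices=4)
#   display = eglGetPlatformDisplayEXT(
#       EGL_PLATFORM_DEVICE_EXT, devices[0], None)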
| mujoco-main | python/mujoco/egl/egl_ext.py |
# Copyright 2018 The dm_control Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An OSMesa context for software-based OpenGL rendering."""
import os
PYOPENGL_PLATFORM = os.environ.get('PYOPENGL_PLATFORM')
if not PYOPENGL_PLATFORM:
os.environ['PYOPENGL_PLATFORM'] = 'osmesa'
elif PYOPENGL_PLATFORM.lower() != 'osmesa':
raise ImportError(
'Cannot use OSMesa rendering platform. '
'The PYOPENGL_PLATFORM environment variable is set to {!r} '
'(should be either unset or \'osmesa\').'
.format(PYOPENGL_PLATFORM))
# pylint: disable=g-import-not-at-top
from OpenGL import GL
from OpenGL import osmesa
from OpenGL.GL import arrays
_DEPTH_BITS = 24
_STENCIL_BITS = 8
_ACCUM_BITS = 0
class GLContext:
"""An OSMesa context for software-based OpenGL rendering."""
def __init__(self, max_width, max_height):
"""Initializes this OSMesa context."""
self._context = osmesa.OSMesaCreateContextExt(
osmesa.OSMESA_RGBA,
_DEPTH_BITS,
_STENCIL_BITS,
_ACCUM_BITS,
None, # sharelist
)
if not self._context:
raise RuntimeError('Failed to create OSMesa GL context.')
self._height = max_height
self._width = max_width
# Allocate a buffer to render into.
self._buffer = arrays.GLfloatArray.zeros((max_height, max_width, 4))
def make_current(self):
if self._context:
success = osmesa.OSMesaMakeCurrent(
self._context,
self._buffer,
GL.GL_FLOAT,
self._width,
self._height)
if not success:
raise RuntimeError('Failed to make OSMesa context current.')
def free(self):
"""Frees resources associated with this context."""
if self._context and self._context == osmesa.OSMesaGetCurrentContext():
osmesa.OSMesaMakeCurrent(None, None, GL.GL_FLOAT, 0, 0)
osmesa.OSMesaDestroyContext(self._context)
self._buffer = None
self._context = None
def __del__(self):
self.free()
| mujoco-main | python/mujoco/osmesa/__init__.py |
#!/usr/bin/env python
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python interpreter trampoline for macOS to support non-block Cocoa GUI.
This script executes a native binary that runs the CPython interpreter entry
point in a separate thread, thus leaving the macOS main thread free for Cocoa
GUI calls without blocking the user's Python script. In other words, Python's
idea of the "main thread" is different from the thread that holds the
com.apple.main-thread DispatchQueue.
"""
import ctypes
import importlib.util
import os
import platform
import sys
if platform.system() != 'Darwin':
raise RuntimeError('This script only works on macOS')
_NSGetExecutablePath = getattr(ctypes.CDLL(None), '_NSGetExecutablePath')
def get_executable_path():
c_path_size = ctypes.c_int32(0)
_NSGetExecutablePath(None, ctypes.byref(c_path_size))
c_path = (ctypes.c_char * c_path_size.value)()
_NSGetExecutablePath(ctypes.byref(c_path), ctypes.byref(c_path_size))
return c_path.value.decode()
def main(argv):
module_dir = os.path.dirname(importlib.util.find_spec('mujoco').origin)
os.environ['MJPYTHON_BIN'] = os.path.join(
module_dir, 'MuJoCo (mjpython).app/Contents/MacOS/mjpython')
# Conda doesn't create a separate shared library for Python.
# We instead use the Python binary itself, which can be dlopened just as well.
os.environ['MJPYTHON_LIBPYTHON'] = get_executable_path()
# argv[0] is currently the path to this script.
# Replace it with sys.executable to preserve e.g. virtualenv path.
argv[0] = sys.executable
os.execve(os.environ['MJPYTHON_BIN'], argv, os.environ)
if __name__ == '__main__':
main(sys.argv)
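# Conceptual sketch (the real mechanism lives in the native mjpython binary,
# not here): the trampoline behaves as if the Python entry point ran on a
# secondary thread while the process main thread serviced Cocoa, roughly:
#
#   import threading
#   py = threading.Thread(target=python_entry_point)  # hypothetical
#   py.start()
#   run_cocoa_event_loop()  # hypothetical; keeps com.apple.main-thread free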
| mujoco-main | python/mujoco/mjpython/mjpython.py |
# Copyright 2017 The dm_control Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An OpenGL context created via GLFW."""
import glfw
class GLContext:
"""An OpenGL context created via GLFW."""
def __init__(self, max_width, max_height):
glfw.init()
glfw.window_hint(glfw.VISIBLE, 0)
self._context = glfw.create_window(width=max_width, height=max_height,
title='Invisible window', monitor=None,
share=None)
def make_current(self):
glfw.make_context_current(self._context)
def free(self):
if self._context:
if glfw.get_current_context() == self._context:
glfw.make_context_current(None)
glfw.destroy_window(self._context)
self._context = None
def __del__(self):
self.free()
| mujoco-main | python/mujoco/glfw/__init__.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration file for the Sphinx documentation builder."""
import os
import sys
# -- Path setup --------------------------------------------------------------
#
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
sys.path.append(os.path.abspath('ext'))
from sphinxcontrib import katex # pylint: disable=g-import-not-at-top
from sphinxcontrib import youtube # pylint: disable=g-import-not-at-top,unused-import
# -- Project information -----------------------------------------------------
project = 'MuJoCo'
copyright = 'DeepMind Technologies Limited' # pylint: disable=redefined-builtin
author = 'Google DeepMind'
# -- General configuration ---------------------------------------------------
master_doc = 'index'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinxcontrib.katex',
'sphinxcontrib.youtube',
'sphinx_copybutton',
'sphinx_favicon',
'sphinx_reredirects',
'sphinx_toolbox.collapse',
'mujoco_include',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
'_build',
'Thumbs.db',
'.DS_Store',
'includes/*',
'APIreference/functions.rst',
'APIreference/functions_override.rst',
'XMLschema.rst',
]
redirects = {
# index.rst just contains the table of contents definition.
'index': 'overview.html',
}
rst_prolog = """
.. include:: /includes/macros.rst
.. include:: /includes/roles.rst
.. include:: <isonum.txt>
"""
# -- Options for autodoc -----------------------------------------------------
autodoc_default_options = {
'member-order': 'bysource',
'special-members': True,
'exclude-members': '__repr__, __str__, __weakref__',
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'furo'
html_title = 'MuJoCo Documentation'
html_logo = 'images/banner.svg'
SHARED_CSS_VARIABLES = {
'admonition-font-size': '1rem',
'admonition-title-font-size': '1rem',
'sidebar-item-font-size': '115%',
}
# font-stack--monospace used in code blocks, Inconsolata fits in 100 chars.
html_theme_options = {
'light_css_variables': {
'font-stack--monospace': 'Inconsolata,Consolas,ui-monospace,monospace',
'at-color': '#bc103e',
'at-val-color': '#bc103e',
'body-color': '#14234b',
'color-highlight-on-target': '#e5e8ed',
'primary-header-color': '#0053d6',
'row-odd-background-color': '#f0f3f7',
'rst-content-a-color': '#2980b9',
'secondary-header-color': '#123693',
'wy-menu-vertical-background-color': '#0053d6',
'wy-menu-vertical-color': 'white',
'wy-nav-side-background-color': '#0053d6',
},
'dark_css_variables': {
'at-color': '#ff95a6',
'at-val-color': '#ff95a6',
'body-color': '#14234b',
'color-admonition-background': '#1e1e21',
'color-highlight-on-target': '#3d4045',
'primary-header-color': '#a8caff',
'row-odd-background-color': '#222326',
'rst-content-a-color': '#2980b9',
'secondary-header-color': '#458dff',
'wy-menu-vertical-background-color': '#0053d6',
'wy-menu-vertical-color': 'white',
'wy-nav-side-background-color': '#0053d6',
},
}
for v in html_theme_options.values():
if isinstance(v, dict):
v.update(SHARED_CSS_VARIABLES)
pygments_style = 'default'
pygments_dark_style = 'monokai'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = [
'_static',
'css',
]
html_css_files = [
'theme_overrides.css',
]
favicons = [
{
'sizes': '16x16',
'href': 'favicons/favicon-16x16.png',
},
{
'sizes': '32x32',
'href': 'favicons/favicon-32x32.png',
},
{
'rel': 'apple-touch-icon',
'sizes': '180x180',
'href': 'favicons/favicon-180x180.png',
},
{
'sizes': '180x180',
'href': 'favicons/favicon-180x180.png',
},
{
'sizes': '192x192',
'href': 'favicons/favicon-192x192.png',
},
]
# -- Options for katex ------------------------------------------------------
# See: https://sphinxcontrib-katex.readthedocs.io/en/0.4.1/macros.html
latex_macros = r"""
\def \d #1{\operatorname{#1}}
"""
# Translate LaTeX macros to KaTeX and add to options for HTML builder
katex_macros = katex.latex_defs_to_katex_macros(latex_macros)
katex_options = 'macros: {' + katex_macros + '}'
# Add LaTeX macros for LATEX builder
latex_elements = {'preamble': latex_macros}
| mujoco-main | doc/conf.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sphinx extension for the mujoco-include directive."""
import header_reader
from sphinx.application import Sphinx
from sphinx.directives.code import LiteralInclude
from sphinx.util.console import red
_FILENAME = 'includes/references.h'
_ERROR_LINE = 16
class MujocoInclude(LiteralInclude):
"""Extension to LiteralInclude directive for MuJoCo."""
def run(self):
mujoco_api = self.env.app.config['mujoco_include_header']
token = self.arguments[0]
source = mujoco_api.get(token)
start_line = _ERROR_LINE
end_line = _ERROR_LINE
if source is None:
print(red(f'Warning: C reference \'{token}\' not found.'))
else:
start_line = source.start
end_line = source.end
# Config arguments and options for LiteralInclude.
self.arguments[0] = f'../{_FILENAME}'
self.options['language'] = 'C'
self.options['lines'] = f'{start_line}-{end_line}'
return list(LiteralInclude.run(self))
def setup(app: Sphinx) -> None:
api = {}
with open(_FILENAME, 'r') as file:
api = header_reader.read(file.readlines())
app.add_config_value('mujoco_include_header', api, '')
app.add_directive('mujoco-include', MujocoInclude)
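# Hedged usage sketch: in an .rst source the directive takes a single C token
# whose declaration is then extracted from includes/references.h, e.g.
# (`mj_step` serves as an illustrative token):
#
#   .. mujoco-include:: mj_step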
| mujoco-main | doc/ext/mujoco_include.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MuJoCo API header reader."""
from absl.testing import absltest
from absl.testing import parameterized
import header_reader
_EXAMPLE = """
//------- My favorite section --------
// My function
MJAPI void mj_function(int a, int b);
// My other function
// This one has multiple lines
MJAPI const char* mj_other_function(int a, int b,
const char* a);
typedef enum mjEnum_ {
mjVALUE1 // Some value
mjVALUE2 // Some other value
} mjEnum;
struct mjStruct_ {
int value1 // Some value
int value2 // Some other value
};
typedef struct mjStruct_ mjStruct;
// My favorite struct
typedef struct mjStruct2_ {
int value1 // Another value
int value2 // More more value
} mjStruct2;
MJAPI void mj_no_doc(int a, void* b);
//------------ MJAPI FUNCTIONS --------------
void mj_stripped(int a, int b,
int c);
"""
_API = header_reader.read([f'{line}\n' for line in _EXAMPLE.split('\n')])
class MuJoCoApiGeneratorTest(parameterized.TestCase):
def test_enums_line_numbers(self):
self.assertEqual(_API['mjEnum'].start, 12)
self.assertEqual(_API['mjEnum'].end, 15)
def test_structs_line_numbers(self):
self.assertEqual(_API['mjStruct'].start, 17)
self.assertEqual(_API['mjStruct'].end, 21)
def test_structs2_line_numbers(self):
self.assertEqual(_API['mjStruct2'].start, 23)
self.assertEqual(_API['mjStruct2'].end, 26)
def test_function_line_numbers(self):
self.assertEqual(_API['mj_function'].start, 5)
self.assertEqual(_API['mj_function'].end, 5)
def test_function_code(self):
self.assertEqual(_API['mj_function'].code,
'void mj_function(int a, int b);\n')
def test_function_section(self):
self.assertEqual(_API['mj_function'].section, 'My favorite section')
def test_function_doc(self):
self.assertEqual(_API['mj_function'].doc, 'My function\n')
def test_multi_line_doc(self):
self.assertEqual(_API['mj_other_function'].doc,
'My other function\nThis one has multiple lines\n')
def test_multi_line_function(self):
self.assertEqual(_API['mj_other_function'].start, 9)
self.assertEqual(_API['mj_other_function'].end, 10)
def test_no_doc_function(self):
self.assertEqual(_API['mj_no_doc'].start, 28)
self.assertEqual(_API['mj_no_doc'].end, 28)
def test_stripped_functions(self):
self.assertEqual(_API['mj_stripped'].start, 32)
if __name__ == '__main__':
absltest.main()
| mujoco-main | doc/ext/header_reader_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reads MuJoCo header files and generates a doc-friendly data structure."""
import dataclasses
import re
from typing import Optional, List, Dict
# Precompiled regex for matching a section.
_SECTION_REGEX = re.compile(r'^//-+ (?P<section>.+) -+$')
# Precompiled regex for matching a C function definition.
_FUNCTION_REGEX = re.compile(r'(?P<token>mj\w+)\s*\(')
# Precompiled regex for matching a C function ending.
_FUNCTION_ENDING_REGEX = re.compile(r'\);\s$')
# Precompiled regex for matching a C struct ending.
_STRUCT_END_REGEX_1 = re.compile(r'^typedef\s+struct\s+\w+\s+(?P<token>mj\w+);')
# Precompiled regex for matching a C struct ending (version 2).
_STRUCT_END_REGEX_2 = re.compile(r'^}\s+(?P<token>mj\w+);')
# Precompiled regex for matching a C enum ending.
_ENUM_END_REGEX = re.compile(r'^}\s+(?P<token>mj\w+);')
@dataclasses.dataclass
class ApiDefinition:
"""Defines a C reference parsed from a C header file."""
token: str
c_type: str
code: str
start: int
end: int
section: str
doc: str
class ApiState:
"""Internal state of the reader used for parsing header files."""
def __init__(self):
self.token = ''
self.section = ''
self.code = ''
self.doc = ''
self._state = None
self._start = 0
self._end = 0
@property
def state(self):
return self._state
def export_definition(self):
return ApiDefinition(self.token, self._state, self.code, self._start,
self._end, self.section, self.doc)
def start(self, state):
self._state = state
self._start = self._end
def iterate(self):
self._end += 1
def end(self):
self.token = ''
self._state = None
self.code = ''
self.doc = ''
def read(lines: List[str]) -> Dict[str, ApiDefinition]:
"""Reads header lines and returns a maps of ApiDefinition's."""
api = {}
stripped_functions = False
s = ApiState()
for line in lines:
s.iterate()
section = _find_section(line)
if section is not None:
if 'MJAPI FUNCTIONS' in section:
# Stripped functions do not begin with MJAPI, and must be under the
        # predefined section 'MJAPI FUNCTIONS'. This is because the docs don't
# include this prefix, and so we need to read such functions from the
# reference header.
stripped_functions = True
s.section = section
s.end()
continue
if s.state == 'DOC':
token = _find_function_start(line, stripped_functions)
if token is not None:
if stripped_functions:
s.code = f'{s.code}{line}'
else:
s.code = f'{s.code}{line[6:]}'
s.token = token
s.start('FUNCTION')
if _is_function_end(line):
api[token] = s.export_definition()
s.end()
continue
elif line.startswith('//'):
s.doc = f'{s.doc}{line[3:]}'
else:
s.end()
if s.state == 'FUNCTION':
if stripped_functions:
s.code = f'{s.code}{line}'
else:
s.code = f'{s.code}{line[6:]}'
if _is_function_end(line):
api[s.token] = s.export_definition()
s.end()
elif s.state == 'ENUM':
match = _ENUM_END_REGEX.search(line)
if match is not None:
s.code = f'{s.code}{line}'
s.token = match.group('token')
api[s.token] = s.export_definition()
s.end()
else:
s.code = f'{s.code}{line}'
elif s.state == 'STRUCT':
match = _STRUCT_END_REGEX_1.search(line)
if match is None:
match = _STRUCT_END_REGEX_2.search(line)
if match is not None:
s.code = f'{s.code}{line}'
s.token = match.group('token')
api[s.token] = s.export_definition()
s.end()
else:
s.code = f'{s.code}{line}'
elif s.state is None:
if line.startswith('typedef enum'):
s.start('ENUM')
s.code = f'{s.code}{line}'
if line.startswith('struct') or line.startswith('typedef struct'):
s.start('STRUCT')
s.code = f'{s.code}{line}'
if line.startswith('//'):
s.doc = f'{s.doc}{line[3:]}'
s.start('DOC')
token = _find_function_start(line, stripped_functions)
if token is not None:
if stripped_functions:
s.code = f'{s.code}{line}'
else:
s.code = f'{s.code}{line[6:]}'
s.token = token
s.start('FUNCTION')
if _is_function_end(line):
api[token] = s.export_definition()
s.end()
return api
def _find_section(line) -> Optional[str]:
match = _SECTION_REGEX.search(line)
if match is not None:
return match.group('section').strip()
return None
def _find_function_start(line, stripped) -> Optional[str]:
if (line.startswith('MJAPI') and 'extern' not in line) or stripped:
match = _FUNCTION_REGEX.search(line)
if match is not None:
return match.group('token')
return None
def _is_function_end(line):
match = _FUNCTION_ENDING_REGEX.search(line)
return match is not None
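def _example():  # pragma: no cover
  # Hedged sketch: parse a minimal two-line header and inspect the result.
  api = read(['// Advance simulation.\n',
              'MJAPI void mj_step(const mjModel* m, mjData* d);\n'])
  definition = api['mj_step']
  # `doc` carries the comment text; `start`/`end` are 1-based line numbers.
  print(definition.doc, definition.start, definition.end)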
| mujoco-main | doc/ext/header_reader.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the sorting network dataset."""
import os
from typing import Sequence, Tuple
from absl import app
from absl import flags
from absl import logging
import jraph
import networkx as nx
import numpy as np
import tqdm
from script_postcompute_eigedecomp import precalc_and_append
np.random.seed(42)
_OUT_PATH = flags.DEFINE_string('out_path', '~/distance',
'The path to write datasets to.')
_SHARD_SIZE = flags.DEFINE_integer(
    'shard_size', 10_000, 'The number of samples to store in each file.')
_WEIGHTED = flags.DEFINE_bool('weighted', False,
'If the dataset should contain weighted graphs.')
_CONNECTED = flags.DEFINE_bool(
'connected', True,
'If the dataset should contain only (weakly) connected graphs.')
_ACYCLIC = flags.DEFINE_bool(
'acyclic', False,
'If the dataset should contain only acyclic graphs.')
_TARGET = flags.DEFINE_enum(
'target', 'directed', ['directed', 'undirected', 'signed'],
'How the distance should be calculated.')
_N_TRAIN = flags.DEFINE_list(
'n_train', [16, 18], 'Range `(min, max+1)` of number of nodes for train')
_N_VALID = flags.DEFINE_list(
'n_valid', [18, 20], 'Range `(min, max+1)` of number of nodes for validation')
_N_TEST = flags.DEFINE_list(
'n_test', [20, 28], 'Range `(min, max+1)` of number of nodes for test')
maglap_configs = [
dict(k=16, k_excl=0, q=0.1,
q_absolute=False, norm_comps_sep=False,
sign_rotate=True, use_symmetric_norm=True),
dict(k=16, k_excl=0, q=0,
q_absolute=False, norm_comps_sep=False,
sign_rotate=True, use_symmetric_norm=True)
]
AVERAGE_DEGREE = {
'acyclic': (1, 1.5, 2, 2.5, 3),
'regular': (1, 1.5, 2)
}
def _random_er_graph(nb_nodes, p=0.5, directed=True, acyclic=False,
weighted=False, low=0.1, high=1.0):
"""Random Erdos-Renyi graph."""
mat = np.random.binomial(1, p, size=(nb_nodes, nb_nodes))
if not directed:
mat *= np.transpose(mat)
elif acyclic:
mat = np.triu(mat, k=1)
p = np.random.permutation(nb_nodes) # To allow nontrivial solutions
mat = mat[p, :][:, p]
if weighted:
weights = np.random.uniform(low=low, high=high, size=(nb_nodes, nb_nodes))
if not directed:
weights *= np.transpose(weights)
weights = np.sqrt(weights + 1e-3) # Add epsilon to protect underflow
mat = mat.astype(float) * weights
return mat
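# A hedged sanity sketch: with `acyclic=True` the permuted upper-triangular
# construction above must yield a DAG.
def _check_acyclic_sample():  # pragma: no cover
  adj = _random_er_graph(nb_nodes=8, p=0.5, acyclic=True)
  g = nx.from_numpy_array(adj, create_using=nx.DiGraph)
  assert nx.is_directed_acyclic_graph(g)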
def generate_sample(**random_er_graph_kwargs) -> Tuple[jraph.GraphsTuple,
np.ndarray]:
adj = _random_er_graph(**random_er_graph_kwargs)
G = nx.from_numpy_array(adj, create_using=nx.DiGraph)
if _CONNECTED.value:
G = G.subgraph(max(nx.weakly_connected_components(G), key=len))
adj = nx.to_numpy_array(G)
if _TARGET.value == 'undirected':
G = G.to_undirected()
elif _TARGET.value == 'signed':
    adj_signed = adj.copy()  # A real copy; `adj[:]` would only create a view.
adj_signed -= ~adj_signed.astype(bool) & adj_signed.T
G = nx.from_numpy_array(adj_signed, create_using=nx.DiGraph)
distances = nx.floyd_warshall_numpy(G)
senders, receivers = np.where(adj)
graph = jraph.GraphsTuple(
n_node=np.array([adj.shape[0]], dtype=np.int32),
n_edge=np.array(senders.shape, dtype=np.int32),
senders=senders,
receivers=receivers,
nodes=np.array([], dtype=np.float32),
edges=np.array([], dtype=np.float32),
globals=np.array([], dtype=np.float32))
  # `precalc_and_append` rewrites its argument's first element in place.
  graph = precalc_and_append([graph], maglap_configs)[0]
return graph, distances
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
assert len(_N_TRAIN.value) == 2, '`n_train` must be of length 2'
assert len(_N_VALID.value) == 2, '`n_valid` must be of length 2'
assert len(_N_TEST.value) == 2, '`n_test` must be of length 2'
n_train = tuple(int(v) for v in _N_TRAIN.value)
n_valid = tuple(int(v) for v in _N_VALID.value)
n_test = tuple(int(v) for v in _N_TEST.value)
  # Instructions with numbers of nodes to put in which dataset split and how
  # many graphs shall be generated per split
splits = [
# (split, nb_nodes_list, n_generation_trials)
('train', list(range(*n_train)), 400_000),
('valid', list(range(*n_valid)), 5_000),
('test', list(range(*n_test)), 20_000)
]
dataset_name = f'{n_train[0]}to{n_train[1] - 1}_{n_valid[0]}to{n_valid[1] - 1}_{n_test[0]}to{n_test[1] - 1}'
if _WEIGHTED.value or _CONNECTED.value or _ACYCLIC.value:
dataset_name += '_'
if _WEIGHTED.value:
dataset_name += 'w'
if _CONNECTED.value:
dataset_name += 'c'
if _ACYCLIC.value:
dataset_name += 'a'
if _TARGET.value == 'undirected':
dataset_name += '_u'
elif _TARGET.value == 'signed':
dataset_name += '_s'
base_path = os.path.join(_OUT_PATH.value, dataset_name)
os.makedirs(base_path, exist_ok=True)
average_degree = (
AVERAGE_DEGREE['acyclic'] if _ACYCLIC.value else AVERAGE_DEGREE['regular'])
id_ = 0
for split, nb_nodes_list, n_generation_trials in splits:
file_path = os.path.join(base_path, split)
os.makedirs(file_path, exist_ok=True)
sample_count = 0
buffer = []
start_id = id_
for trial in tqdm.tqdm(range(n_generation_trials), desc=split):
nb_nodes = np.random.choice(nb_nodes_list, 1).item()
deg = np.random.choice(average_degree, 1).item()
p = deg / (nb_nodes - 1)
graph, distances = generate_sample(
nb_nodes=nb_nodes, p=p, directed=True, acyclic=_ACYCLIC.value,
weighted=_WEIGHTED.value)
sample_count += 1
buffer.append((
graph, distances.flatten(), distances.flatten(), np.array(id_)))
id_ += 1
if len(buffer) >= _SHARD_SIZE.value or trial == n_generation_trials - 1:
file_name = os.path.join(file_path, f'{start_id}_{id_ - 1}.npz')
np.savez_compressed(file_name, data=np.array(buffer, dtype='object'))
logging.info('Wrote %d to %s', len(buffer), file_name)
buffer = []
start_id = id_
logging.info('Wrote %d instances in `%s`', sample_count, split)
if __name__ == '__main__':
app.run(main)
| digraph_transformer-main | script_generate_distance_np.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Naive implementation of MagNet (Zhang et al. 2021) using dense matrices."""
from typing import Any, Callable, Optional, Tuple, Union
from absl import logging
import haiku as hk
import jax
import jax.numpy as jnp
import jraph
# pylint: disable=g-bad-import-order
import layers
import utils
# Inline important classes and methods
CallArgs = layers.CallArgs
mlp, MLP = layers.mlp, layers.MLP
Tensor = utils.Tensor
def magnetic_laplacian(
graph: jraph.GraphsTuple,
q: float = 0.25,
q_absolute: bool = False,
    use_symmetric_norm: bool = False) -> Tensor:
  """Computes the dense magnetic Laplacian of an explicitly batched graph.
Args:
graph: the explicitly batched graph (i.e. nodes are of shape [b, n, d]).
q: Factor in magnetic laplacian. Default 0.25.
q_absolute: If true `q` will be used, otherwise `q / m_imag / 2`.
use_symmetric_norm: symmetric (True) or row normalization (False).
Returns:
    Tensor of shape [b, n, n] containing the magnetic Laplacian.
"""
batch, n_nodes = (next(iter(graph.nodes.items()))[1] if isinstance(
graph.nodes, dict) else graph.nodes).shape[:2]
assign = jax.vmap(lambda a, s, r, d: a.at[s, r].add(d[s]))
# Handle -1 padding
edges_padding_mask = graph.senders >= 0
adj = jnp.zeros((batch, n_nodes, n_nodes), dtype=jnp.float32)
adj = assign(adj, graph.senders, graph.receivers, edges_padding_mask)
adj = jnp.where(adj > 1, 1., adj)
transpose_idx = tuple(range(adj.ndim - 2)) + (adj.ndim - 1, adj.ndim - 2)
adj_transposed = adj.transpose(*transpose_idx)
symmetric_adj = adj + adj_transposed
symmetric_adj = jnp.where((adj != 0) & (adj_transposed != 0),
symmetric_adj / 2, symmetric_adj)
symmetric_deg = symmetric_adj.sum(-2)
if not q_absolute:
m_imag = (adj != adj_transposed).sum((-2, -1)) / 2
m_imag = jnp.where(m_imag > graph.n_node[..., 0], graph.n_node[..., 0],
m_imag)
q = q / jnp.where(m_imag > 0, m_imag, 1)
else:
q = jnp.full(batch, q)
theta = 1j * 2 * jnp.pi * q[..., None, None] * (adj - adj_transposed)
if use_symmetric_norm:
inv_deg = jnp.zeros((batch, n_nodes, n_nodes), dtype=jnp.float32)
inv_deg_diag = inv_deg.at[:, jnp.arange(n_nodes), jnp.arange(n_nodes)]
inv_deg = inv_deg_diag.set(jax.lax.rsqrt(jnp.clip(symmetric_deg, 1)))
laplacian = jnp.eye(
n_nodes) - (inv_deg @ symmetric_adj @ inv_deg) * jnp.exp(theta)
idx = jnp.tile(jnp.arange(n_nodes)[None, :], [batch, 1])
mask = idx < graph.n_node[:, :1]
mask = mask[..., None] * mask[..., None, :]
laplacian = mask * laplacian
else:
deg = jnp.zeros((batch, n_nodes, n_nodes), dtype=jnp.float32)
deg_diag = deg.at[:, jnp.arange(n_nodes), jnp.arange(n_nodes)]
deg = deg_diag.set(symmetric_deg)
laplacian = deg - symmetric_adj * jnp.exp(theta)
return laplacian
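# In matrix notation (a restatement of the construction above): with the
# symmetrized adjacency A_s and its degree matrix D_s,
#   L_q = D_s - A_s * exp(i 2*pi*q (A - A^T))                     (plain)
#   L_q = I - D_s^{-1/2} A_s D_s^{-1/2} * exp(i 2*pi*q (A - A^T)) (symmetric)
# which reduces to the combinatorial Laplacian for q = 0.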
def complex_relu(value):
mask = 1.0 * (value.real >= 0)
return mask * value
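# complex_relu gates each entry by the sign of its real part, i.e.
# sigma(z) = z if Re(z) >= 0 else 0, following MagNet (Zhang et al. 2021).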
class MagNet(hk.Module):
"""MagNet.
Attributes:
d_model: number of hidden dimensions.
activation: The activation function.
gnn_type: Either `gcn` or `gnn`
use_edge_attr: If True also the edge attributes are considered. Must be
True for `gnn_type=gcn`.
k_hop: Number of message passing steps.
mlp_layers: Number of layers in MLP (only relevant for `gnn_type=gnn`).
tightening_factor: The factor of dimensionality reduction for message
passing in contrast to `d_model`.
norm: The batch/layer norm.
concat: If True all intermediate node embeddings are concatenated and then
mapped to `d_model` in the output MLP.
    residual: If True, the GNN embeddings are added residually.
    bidirectional: If True, edges in both directions are considered (only
relevant for `gnn_type=gnn`).
q: potential of magnetic laplacian.
q_absolute: if False we use the graph specific normalizer.
name: Name of module.
**kwargs:
"""
def __init__(self,
d_model: int = 256,
activation: Callable[[Tensor], Tensor] = jax.nn.relu,
gnn_type='gcn',
use_edge_attr=True,
k_hop=2,
mlp_layers: int = 2,
tightening_factor: int = 1,
norm=None,
concat: bool = False,
residual: bool = True,
bidirectional: bool = True,
name: Optional[str] = None,
q: float = 0.25,
               q_absolute: bool = True,
**kwargs):
super().__init__(name=name)
self.d_model = d_model
self.mlp_layers = mlp_layers
self.tightening_factor = tightening_factor
self.activation = activation
self.gnn_type = gnn_type
self.use_edge_attr = use_edge_attr
self.k_hop = k_hop
self.norm = norm
self.concat = concat
self.residual = residual
self.bidirectional = bidirectional
self.q = q
self.q_absolute = q_absolute
if kwargs:
logging.info('GNN.__init__() received unexpected kwargs: %s', kwargs)
def _update_fn(self, features: Tensor, sender_features: Tensor,
receiver_features: Tensor, globals_: Any,
exclude_features: bool, name: str):
stack = [features, sender_features, receiver_features]
if exclude_features:
stack = stack[1:]
if not self.bidirectional:
stack = stack[:-1]
concat_features = jnp.concatenate(stack, axis=-1)
return mlp(
concat_features,
self.d_model // self.tightening_factor,
activation=self.activation,
with_norm=False,
final_activation=True,
n_layers=self.mlp_layers,
name=name)
def __call__(self,
graph: jraph.GraphsTuple,
call_args: CallArgs,
mask: Optional[Tensor] = None,
in_axes: int = 0,
**kwargs):
if self.k_hop == 0:
return graph
x = graph.nodes
maglap = magnetic_laplacian(
graph, q=self.q, q_absolute=self.q_absolute, use_symmetric_norm=True)
max_eigenvalue = jnp.linalg.eigvalsh(maglap).max()
t_0 = jnp.eye(x.shape[-2])
t_1 = 2 / max_eigenvalue * maglap - t_0[None, ...]
for _ in range(self.k_hop):
l_0 = hk.Linear(self.d_model)
x_0 = t_0 @ x
x_0 = l_0(x_0.real) + 1j * l_0(x_0.imag)
l_1 = hk.Linear(self.d_model, with_bias=False)
x_1 = t_1 @ x
x_1 = l_1(x_1.real) + 1j * l_1(x_1.imag)
x = complex_relu(x_0 + x_1)
graph = graph._replace(nodes=jnp.concatenate((x.real, x.imag), axis=-1))
return graph
| digraph_transformer-main | magnet.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
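"""Precomputes magnetic Laplacian eigendecompositions for dataset shards."""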
from functools import partial
import os
from multiprocessing import Pool
from typing import Sequence
import numpy as np
from absl import app
from absl import flags
from tqdm import tqdm
from utils import eigv_magnetic_laplacian_numba
_SPLITS = flags.DEFINE_list(
name="splits",
default=["train", "valid", "test"],
help="Data splits for which to precompute the eigenvectors.",
)
_DATA_ROOT = flags.DEFINE_string(
name="data_root",
default="data/sorting_network/7to11_12_13to16",
help="Data root for the new dataset that contains the eigenvectors."
)
_NUM_CPU = flags.DEFINE_integer(
name="num_cpu",
default=1,
help="Number of CPUs to use for the eigenvector calculation."
)
configs = [
dict(k=25, k_excl=0, q=0.25,
q_absolute=False, norm_comps_sep=False,
sign_rotate=True, use_symmetric_norm=True),
dict(k=25, k_excl=0, q=0,
q_absolute=False, norm_comps_sep=False,
sign_rotate=True, use_symmetric_norm=True)
]
def process_graph(graph_tuple, k, k_excl, q, q_absolute, norm_comps_sep,
sign_rotate, use_symmetric_norm, l2_norm=True):
"""Compute the first `k` maglap eigenvectors and values for a graph."""
n_node = graph_tuple.n_node[0]
eigenvalues = np.zeros(shape=(k), dtype=np.float32)
eigenvectors = np.zeros(
shape=(n_node, k), dtype=np.complex64)
n_eigv = min(k, n_node)
eigenvalues[:n_eigv], eigenvectors[:, :n_eigv], _ = eigv_magnetic_laplacian_numba(
senders=graph_tuple.senders.astype(np.int64),
receivers=graph_tuple.receivers.astype(np.int64),
n_node=np.array([graph_tuple.n_node[0], 0]),
padded_nodes_size=graph_tuple.n_node[0],
k=k,
k_excl=k_excl,
q=q,
q_absolute=q_absolute,
norm_comps_sep=norm_comps_sep,
l2_norm=l2_norm,
sign_rotate=sign_rotate,
use_symmetric_norm=use_symmetric_norm
)
if q == 0:
eigenvectors = eigenvectors.real
return eigenvalues, eigenvectors
def precalc_and_append(graph, configs):
precomputed = tuple(
(config, *process_graph(graph[0], **config)) for config in configs)
if not isinstance(graph[0].globals, dict):
graph[0] = graph[0]._replace(
globals={'eigendecomposition': precomputed})
else:
graph[0].globals['eigendecomposition'] = precomputed
return graph
def main(argv: Sequence[str]) -> None:
for split in _SPLITS.value:
base_path = os.path.join(_DATA_ROOT.value, split)
for file in tqdm(os.listdir(os.path.join(base_path))):
file_path = os.path.join(base_path, file)
buffer = list(np.load(file_path, allow_pickle=True)["data"])
with Pool(_NUM_CPU.value) as p:
buffer = p.map(partial(precalc_and_append, configs=configs), buffer)
np.savez_compressed(file_path, data=np.array(buffer, dtype='object'))
if __name__ == '__main__':
app.run(main)
| digraph_transformer-main | script_postcompute_eigedecomp.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graph Property Prediction Config oor OGB Code2 and Sorting Network Task."""
import collections
import dataclasses
from typing import Mapping, Optional
from jaxline import base_config
from ml_collections import config_dict
import presets
@dataclasses.dataclass
class Dataset:
"""For all important information that are dataset specific.
Attributes:
name: unique name of dataset. This name determines e.g. the behavior of how
the model encodes the data and how data is preprocessed.
path: relative storage location.
sequence_length: number of predictions in target sequence.
num_classes: number of classes per prediction in target sequence.
ast_depth: maximal distance from root node to leaf node that will be
considered (OGB specific).
num_nodetypes: number of node types (OGB specific).
num_nodeattributes: number of node attributes.
edge_df_types: number of edge types (OGB data-flow specific).
edge_ast_types: number of edge types (OGB data-flow specific).
num_train_samples: number of training samples (i.e., samples per epoch).
idx2vocab: maps id in target vocabulary to ids.
vocab2idx: maps id in target vocabulary to ids.
"""
name: str
path: str
sequence_length: int
num_classes: int
ast_depth: int
num_nodetypes: int
num_nodeattributes: int
edge_df_types: Optional[int]
edge_ast_types: Optional[int]
num_train_samples: int
idx2vocab: Optional[Mapping[int, str]]
vocab2idx: Optional[Mapping[str, int]]
# Standard OGB Code2 dataset
OGBG_CODE2 = Dataset('ogbg-code2', 'ogbg-code2', 5, 5002, 20, 98, 10030, None,
None, 407_976, None, None)
# Standard OGB Code2 dataset with edges only in one direction
OGBG_CODE2_NOREV = Dataset('ogbg-code2-norev', 'ogbg-code2-norev', 5, 5002, 20,
98, 10030, None, None, 407_976, None, None)
# Data-Flow centric OGB Code2 dataset
OGBG_CODE2_NOREV_DF = Dataset('ogbg-code2-norev-df', 'ogbg-code2-norev-df', 5,
5002, 20, 91, 11973, 12, 49, 407_976, None, None)
# Sorting network dataset
SORTING_NETWORKS = Dataset('sn', '7to11_12_13to16', 1, 2, -1, -1, 26, None,
None, 800_000, None, None)
# Playground tasks
## Adjacency dataset
ADJACENCY_C = Dataset('dist_adj', '16to17_18to19_20to27_c', -1, 2, -1, -1, -1, None,
None, 400_000, None, None)
ADJACENCY_CA = Dataset('dist_adj', '16to17_18to19_20to27_ca', -1, 2, -1, -1, -1, None,
None, 400_000, None, None)
ADJACENCY_C_U = Dataset('dist_adj', '16to17_18to19_20to27_c_u', -1, 2, -1, -1, -1, None,
None, 400_000, None, None)
## Connected dataset
CONNECTED_C = Dataset('dist_con', '16to17_18to19_20to27_c', -1, 2, -1, -1, -1, None,
None, 400_000, None, None)
CONNECTED_CA = Dataset('dist_con', '16to17_18to19_20to27_ca', -1, 2, -1, -1, -1, None,
                       None, 400_000, None, None)
CONNECTED_C_U = Dataset('dist_con', '16to17_18to19_20to27_c_u', -1, 2, -1, -1, -1, None,
                        None, 400_000, None, None)
## Distance dataset
DISTANCE_C = Dataset('dist', '16to63_64to71_72to83_c', -1, -1, -1, -1, -1, None,
None, 400_000, None, None)
DISTANCE_CA = Dataset('dist', '16to63_64to71_72to83_ca', -1, -1, -1, -1, -1, None,
None, 400_000, None, None)
DISTANCE_C_U = Dataset('dist_u', '16to63_64to71_72to83_c_u', -1, -1, -1, -1, -1, None,
None, 400_000, None, None)
DISTANCE_CA_U = Dataset('dist_u', '16to63_64to71_72to83_ca_u', -1, -1, -1, -1, -1, None,
None, 400_000, None, None)
# After init this should be equivalent to `jax.device_count()`. We used 8
# devices in our experiments with TPUs (OGB Code2).
train_devices = 1 # 8
datasets = {
'ogbg-code2': OGBG_CODE2,
'ogbg-code2-norev': OGBG_CODE2_NOREV,
'ogbg-code2-norev-df': OGBG_CODE2_NOREV_DF,
'sn-7to11-12-13to16': SORTING_NETWORKS,
'adj_c': ADJACENCY_C,
'adj_ca': ADJACENCY_CA,
'adj_c_u': ADJACENCY_C_U,
'con_c': CONNECTED_C,
'con_ca': CONNECTED_CA,
'con_c_u': CONNECTED_C_U,
'dist_c': DISTANCE_C,
'dist_ca': DISTANCE_CA,
'dist_c_u': DISTANCE_C_U,
'dist_ca_u': DISTANCE_CA_U
}
def get_config(preset=''):
"""Return config object for training."""
config = get_default_config()
# E.g. '/data/pretrained_models'
config.restore_path = config_dict.placeholder(str)
if preset:
unique_presets = list(collections.OrderedDict.fromkeys(preset.split(',')))
config = presets.apply_presets(config, unique_presets)
config.experiment_kwargs.config.dataset = datasets[
config.experiment_kwargs.config.dataset_name]
# Adjust for presets
effective_batch_size = config.experiment_kwargs.config.training.batch_size
if config.experiment_kwargs.config.dataset_config.do_bucket_by_size:
# Manually determined constant for OGB
effective_batch_size *= 3.5
steps_per_epoch = int(
config.experiment_kwargs.config.dataset.num_train_samples /
effective_batch_size / train_devices)
config.training_steps = config.epochs * steps_per_epoch
optimizer = config.experiment_kwargs.config.optimizer
k_accum = max(optimizer.accumulate_gradient_k, 1)
lr_schedule = optimizer.lr_schedule
lr_schedule.warmup_steps = int(
config.warmup_epochs * steps_per_epoch / k_accum)
lr_schedule.decay_steps = int(config.training_steps / k_accum)
batch_size = config.experiment_kwargs.config.training.batch_size
lr_schedule.init_value *= batch_size
lr_schedule.peak_value *= batch_size
lr_schedule.end_value *= batch_size
return config
def get_default_config():
"""Return config object for reproducing SAT on ogbn-code2."""
config = base_config.get_base_config()
training_batch_size = 48
eval_batch_size = 48
# Experiment config.
loss_kwargs = dict(only_punish_first_end_of_sequence_token=False)
posenc_config = dict(
posenc_type='',
exclude_canonical=False,
relative_positional_encodings=True,
# RW/PPR
do_also_reverse=True,
ppr_restart_probability=0.2,
random_walk_steps=3,
top_k_eigenvectors=10,
# MagLap
excl_k_eigenvectors=1,
concatenate_eigenvalues=False,
maglap_q=0.25,
maglap_q_absolute=True,
maglap_symmetric_norm=False,
maglap_transformer=False,
maglap_use_signnet=True,
maglap_use_gnn=False,
maglap_norm_comps_sep=False,
maglap_l2_norm=True,
maglap_dropout_p=0.,
maglap_sign_rotate=True,
maglap_net_type='default')
dataset_config = dict(
do_bucket_by_size=False,
bucket_boundaries=(255, 511), # For bucketing by size
bucket_batch_size_factors=(4, 2, 1), # For bucketing by size
exclude_control_flow_edges=True, # Only relevant with df graph
      exclude_next_syntax_edges=True,  # Only relevant with df graph
)
gnn_config = dict(
gnn_type='gcn',
k_hop=2,
mlp_layers=2, # Does not have any effect on the GCN
se='gnn',
tightening_factor=1,
use_edge_attr=True,
      bidirectional=False,  # has no effect on `gcn`
residual=False,
concat=True)
attention_config = dict(
num_heads=4,
dense_widening_factor=4,
with_bias=False,
dropout_p=0.2,
with_attn_dropout=True,
re_im_separate_projection=False)
encoder_config = dict(attribute_dropout=0.)
model_config = dict(
model_type='sat',
num_layers=4,
d_model=256,
global_readout='mean', # other options 'sum_n', 1 (func def node)
activation='relu',
batch_norm=False, # Use LayerNorm otherwise
deg_sensitive_residual=True,
attention_config=attention_config,
encoder_config=encoder_config,
loss_kwargs=loss_kwargs,
posenc_config=posenc_config,
with_pre_gnn=False,
gnn_config=gnn_config)
evaluation_config = dict(
eval_also_on_test=True,
batch_size=eval_batch_size,
max_number_of_instances=-1,
unk_offset=0.,
eos_offset=0.)
# Training loop config.
config.warmup_epochs = 2
config.epochs = 32
steps_per_epoch = int(405_000 / training_batch_size / train_devices)
config.training_steps = config.epochs * steps_per_epoch
optimizer_config = dict(
name='adamw',
optimizer_kwargs=dict(b1=.9, b2=.999, weight_decay=1e-6),
lr_schedule=dict(
warmup_steps=config.warmup_epochs,
decay_steps=config.training_steps,
init_value=0.,
peak_value=1e-4 / 32,
end_value=0.,
),
use_agc=False, # adaptive gradient clipping
accumulate_gradient_k=8,
agc_kwargs=dict(clipping=0.1),
)
config.log_train_data_interval = 120
config.log_tensors_interval = 120
config.save_checkpoint_interval = 600
config.save_initial_train_checkpoint = False
config.checkpoint_dir = '/tmp/checkpoint/digraph_transformer'
config.eval_specific_checkpoint_dir = ''
config.best_model_eval_metric = 'F1'
config.best_model_eval_metric_higher_is_better = True
config.wandb = config_dict.ConfigDict(dict(
project='digt-playground',
tags=tuple(),
settings=dict(code_dir='.')
))
config.experiment_kwargs = config_dict.ConfigDict(
dict(
config=dict(
# Must be set
data_root=config_dict.placeholder(str),
debug=False,
# Gets overwritten later
dataset_name='ogbg-code2',
pmap_axis='i',
optimizer=optimizer_config,
model=model_config,
dataset_config=dataset_config,
training=dict(batch_size=training_batch_size),
evaluation=evaluation_config)))
return config
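# Hedged usage sketch (illustrative): build the default config and inspect
# the derived schedule values.
def _example():  # pragma: no cover
  cfg = get_config()
  print(cfg.training_steps,
        cfg.experiment_kwargs.config.optimizer.lr_schedule.peak_value)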
| digraph_transformer-main | config.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains top level models as well as important custom components."""
import dataclasses
import functools
from typing import Any, Callable, Optional, Sequence, Tuple, Union
import warnings
from absl import logging
import haiku as hk
import jax
import jax.numpy as jnp
import jraph
import numpy as np
# pylint: disable=g-bad-import-order
import config
import layers
import magnet
import utils
from ogb_utils import ASTNodeEncoder
# Inline important classes and methods
CallArgs = layers.CallArgs
BatchNorm, LayerNorm = layers.BatchNorm, layers.LayerNorm
mlp, MLP = layers.mlp, layers.MLP
MultiHeadAttention = layers.MultiHeadAttention
GraphConvolution = layers.GraphConvolution
Tensor = utils.Tensor
softmax_cross_entropy_loss = utils.softmax_cross_entropy_loss
count_edges = utils.count_edges
exact_ppr, k_step_random_walk = utils.exact_ppr, utils.k_step_random_walk
svd_encodings = utils.svd_encodings
sinusoid_position_encoding = utils.sinusoid_position_encoding
@dataclasses.dataclass
class PositionalEncodingsParams:
"""Parameters for positional encodings."""
# Options: '', 'rw', 'ppr', 'maglap'
# Random Walk, Personalized Page Rank, Magnetic Laplacian
posenc_type: Optional[str] = None
# For absolute we use aggregation for global position according to Pan Li et
# al., 2020 or the real value of the (magnetic) laplacian eigenvectors
  # If using maglap, this implies complex queries and keys in attention.
relative_positional_encodings: bool = True
# Exclude AST depth in OGB or sinusoidal for sorting networks
exclude_canonical: bool = False
### 'rw', 'ppr'
# Not only consider the position in the direction of the graph
do_also_reverse: bool = True
# If using ppr/rw, the restart probability for the ppr
ppr_restart_probability: float = 0.2
  # If using rw, the number of random walk steps
  random_walk_steps: int = 3
  ### 'maglap'
  # If using maglap, the k eigenvectors to use starting from excl_k_eigenvectors
top_k_eigenvectors: int = 10
  # If using maglap, exclude the top-most excl_k_eigenvectors eigenvectors
excl_k_eigenvectors: int = 0
# If using maglap and true, also the eigenvalues are considered
concatenate_eigenvalues: bool = False
# If using maglap, the q factor for directionality
maglap_q: float = 0.25
# If using maglap, if true `q` will be used, otherwise `q / n_node`.
maglap_q_absolute: bool = True
# If using maglap, True for symmetric and otherwise row normalization
maglap_symmetric_norm: bool = False
# If using maglap, True for a transformer or False for the MLP SignNet
maglap_transformer: bool = False
# If using maglap and `maglap_transformer`= True, use gnn for raw eigenvec.
maglap_use_gnn: bool = False
# If using maglap and `maglap_transformer`= True and if true, real and imag
# components are separately normalized.
maglap_norm_comps_sep: bool = False
# If using maglap and False, we normalize the eigenvectors to span [0,1]
# (ignoring eigenvectors of very low magnitude)
maglap_l2_norm: bool = True
# To force the network to work also with a subset of vectors
maglap_dropout_p: float = 0.0
# If using maglap, we can either use a SignNet (True) approach or scale as
# well as rotate eigenvectors according to a convention.
maglap_use_signnet: bool = True
# Use the sign convention and rotation (i.e. the absolute largest real value
# is positive and phase shift starts at 0)
maglap_sign_rotate: bool = True
# Rotation invariant MagLapNet
maglap_net_type: str = 'signnet'
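# Hedged configuration sketch: Magnetic Laplacian encodings with the defaults
# above would be requested via, e.g.,
#   PositionalEncodingsParams(posenc_type='maglap', top_k_eigenvectors=10,
#                             maglap_q=0.25, relative_positional_encodings=False)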
@dataclasses.dataclass
class LossConfig:
"""Config for the loss."""
# Only punish first EOS token
only_punish_first_end_of_sequence_token: bool = False
class DataFlowASTEncoder(hk.Module):
"""Encodes the AST for our graph construction procedure."""
def __init__(self,
emb_dim,
num_nodetypes,
num_nodeattributes,
max_depth,
num_edge_df_types,
num_edge_ast_types,
attribute_dropout: float = 0.0,
name=None):
super().__init__(name=name)
self.emb_dim = emb_dim
self.num_nodetypes = num_nodetypes
self.num_nodeattributes = num_nodeattributes
self.max_depth = max_depth
self.num_edge_df_types = num_edge_df_types
self.num_edge_ast_types = num_edge_ast_types
self.attribute_dropout = attribute_dropout
def __call__(
self,
graph: jraph.GraphsTuple,
depth: Optional[Tensor] = None,
call_args: CallArgs = CallArgs(True)
) -> jraph.GraphsTuple:
nodes, edges = graph.nodes, graph.edges
node_type_encoder = hk.Embed(self.num_nodetypes, self.emb_dim)
node_attribute_encoder = hk.Embed(self.num_nodeattributes, self.emb_dim)
node_type = nodes[..., 0]
node_attribute = nodes[..., 1]
if call_args.is_training:
mask = hk.dropout(hk.next_rng_key(), self.attribute_dropout,
jnp.ones_like(node_attribute))
node_attribute = jnp.where(mask > 0, node_attribute,
self.num_nodeattributes - 1) # set to unknown
nodes = (
node_type_encoder(node_type) + node_attribute_encoder(node_attribute))
if depth is not None:
depth_encoder = hk.Embed(self.max_depth + 1, self.emb_dim)
depth = jnp.where(depth > self.max_depth, self.max_depth, depth)
nodes += depth_encoder(depth[..., 0])
edge_df_type_encoder = hk.Embed(self.num_edge_df_types, self.emb_dim)
edge_ast_type_encoder = hk.Embed(self.num_edge_ast_types, self.emb_dim)
edges = (
edge_df_type_encoder(edges['edge_type']) +
edge_ast_type_encoder(edges['edge_name']))
graph = graph._replace(nodes=nodes, edges=edges)
return graph
class SortingNetworkEncoder(hk.Module):
"""Encodes the Sorting Network graph."""
def __init__(self,
emb_dim: int,
encode_order: bool = True,
num_nodeattributes: int = 26,
name: Optional[str] = None):
super().__init__(name=name)
self.emb_dim = emb_dim
self.encode_order = encode_order
self.num_nodeattributes = num_nodeattributes
def __call__(
self,
graph: jraph.GraphsTuple,
depth: Any = None,
call_args: CallArgs = CallArgs(True)) -> jraph.GraphsTuple:
assert depth is None
nodes_int = graph.nodes.astype(jnp.int32)
argument_1 = sinusoid_position_encoding(
nodes_int[..., 1],
max_timescale=int(self.num_nodeattributes),
hidden_size=self.emb_dim // 2)
argument_2 = sinusoid_position_encoding(
nodes_int[..., 2],
max_timescale=int(self.num_nodeattributes),
hidden_size=self.emb_dim // 2)
nodes = jnp.concatenate((argument_1, argument_2), axis=-1)
nodes = hk.Linear(self.emb_dim)(nodes)
if self.encode_order:
node_id = sinusoid_position_encoding(nodes_int[..., 0], self.emb_dim)
nodes += node_id
graph = graph._replace(nodes=nodes)
return graph
class DistanceEncoder(hk.Module):
"""Encodes the Positional Encoding Playground graph."""
def __init__(self,
emb_dim: int,
encode_order: bool = True,
num_nodeattributes: int = 11,
name: Optional[str] = None):
super().__init__(name=name)
self.emb_dim = emb_dim
self.encode_order = encode_order
self.num_nodeattributes = num_nodeattributes
def __call__(
self,
graph: jraph.GraphsTuple,
depth: Any = None,
call_args: CallArgs = CallArgs(True)) -> jraph.GraphsTuple:
assert depth is None
nodes = jnp.zeros((*graph.nodes.shape, self.emb_dim), dtype=jnp.float32)
if self.encode_order:
node_id = sinusoid_position_encoding(nodes[..., 0], self.emb_dim)
nodes += node_id
graph = graph._replace(nodes=nodes)
return graph
class NaiveMagLapNet(hk.Module):
"""For the Magnetic Laplacian's or Combinatorial Laplacian's eigenvectors.
Args:
d_model_elem: Dimension to map each eigenvector.
d_model_aggr: Output dimension.
num_heads: Number of heads for optional attention.
n_layers: Number of layers for MLP/GNN.
dropout_p: Dropout for attention as well as eigenvector embeddings.
activation: Element-wise non-linearity.
concatenate_eigenvalues: If True also concatenate the eigenvalues.
norm: Optional norm.
name: Name of the layer.
"""
def __init__(
self,
d_model_aggr: int = 256,
name: Optional[str] = None,
*args,
**kwargs):
super().__init__(name=name)
self.d_model_aggr = d_model_aggr
def __call__(self, graph: jraph.GraphsTuple, eigenvalues: Tensor,
eigenvectors: Tensor, call_args: CallArgs,
mask: Optional[Tensor] = None) -> Tensor:
# Naive version
re = hk.Linear(self.d_model_aggr)(jnp.real(eigenvectors))
im = hk.Linear(self.d_model_aggr)(jnp.imag(eigenvectors))
return re + im
class MagLapNet(hk.Module):
"""For the Magnetic Laplacian's or Combinatorial Laplacian's eigenvectors.
Args:
d_model_elem: Dimension to map each eigenvector.
d_model_aggr: Output dimension.
num_heads: Number of heads for optional attention.
n_layers: Number of layers for MLP/GNN.
dropout_p: Dropout for attention as well as eigenvector embeddings.
activation: Element-wise non-linearity.
return_real_output: True for a real number (otherwise complex).
    consider_im_part: If True, also use the imaginary part of the eigenvectors.
use_signnet: If using the sign net idea f(v) + f(-v).
use_gnn: If True use GNN in signnet, otherwise MLP.
use_attention: If true apply attention between eigenvector embeddings for
same node.
concatenate_eigenvalues: If True also concatenate the eigenvalues.
norm: Optional norm.
name: Name of the layer.
"""
def __init__(self,
d_model_elem: int = 32,
d_model_aggr: int = 256,
num_heads: int = 4,
n_layers: int = 1,
dropout_p: float = 0.2,
activation: Callable[[Tensor], Tensor] = jax.nn.relu,
return_real_output: bool = True,
consider_im_part: bool = True,
use_signnet: bool = True,
use_gnn: bool = False,
use_attention: bool = False,
concatenate_eigenvalues: bool = False,
norm: Optional[Any] = None,
name: Optional[str] = None):
super().__init__(name=name)
self.concatenate_eigenvalues = concatenate_eigenvalues
self.consider_im_part = consider_im_part
self.use_signnet = use_signnet
self.use_gnn = use_gnn
self.use_attention = use_attention
self.num_heads = num_heads
self.dropout_p = dropout_p
self.norm = norm
if self.use_gnn:
self.element_gnn = GNN(
int(2 * d_model_elem) if self.consider_im_part else d_model_elem,
gnn_type='gnn',
k_hop=n_layers,
mlp_layers=n_layers,
activation=activation,
use_edge_attr=False,
concat=True,
residual=False,
name='re_element')
else:
self.element_mlp = MLP(
int(2 * d_model_elem) if self.consider_im_part else d_model_elem,
n_layers=n_layers,
activation=activation,
with_norm=False,
final_activation=True,
name='re_element')
self.re_aggregate_mlp = MLP(
d_model_aggr,
n_layers=n_layers,
activation=activation,
with_norm=False,
final_activation=True,
name='re_aggregate')
self.im_aggregate_mlp = None
if not return_real_output and self.consider_im_part:
self.im_aggregate_mlp = MLP(
d_model_aggr,
n_layers=n_layers,
activation=activation,
with_norm=False,
final_activation=True,
name='im_aggregate')
def __call__(self, graph: jraph.GraphsTuple, eigenvalues: Tensor,
eigenvectors: Tensor, call_args: CallArgs,
mask: Optional[Tensor] = None) -> Tensor:
padding_mask = (eigenvalues > 0)[..., None, :]
padding_mask = padding_mask.at[..., 0].set(True)
attn_padding_mask = padding_mask[..., None] & padding_mask[..., None, :]
trans_eig = jnp.real(eigenvectors)[..., None]
if self.consider_im_part and jnp.iscomplexobj(eigenvectors):
trans_eig_im = jnp.imag(eigenvectors)[..., None]
trans_eig = jnp.concatenate((trans_eig, trans_eig_im), axis=-1)
else:
if not self.use_signnet:
        # Similar to Dwivedi & Bresson (2021)
rand_sign_shape = (*trans_eig.shape[:-3], 1, *trans_eig.shape[-2:])
rand_sign = jax.random.rademacher(hk.next_rng_key(), rand_sign_shape)
trans_eig = rand_sign * trans_eig
# Lower impact of numerical issues, assumes `k_excl` = 0
trans_eig = trans_eig.at[..., 0, :].set(
jnp.absolute(trans_eig[..., 0, :]))
eigenvalues = eigenvalues.at[..., 0].set(0)
if self.use_gnn:
trans = self.element_gnn(
graph._replace(nodes=trans_eig, edges=None), call_args).nodes
if self.use_signnet:
trans_neg = self.element_gnn(
graph._replace(nodes=-trans_eig, edges=None), call_args).nodes
# assumes `k_excl` = 0
if self.consider_im_part and jnp.iscomplexobj(eigenvectors):
trans_neg = trans_neg.at[..., 0, :].set(0)
trans += trans_neg
else:
trans = self.element_mlp(trans_eig)
if self.use_signnet:
trans_neg = self.element_mlp(-trans_eig)
# assumes `k_excl` = 0
if self.consider_im_part and jnp.iscomplexobj(eigenvectors):
trans_neg = trans_neg.at[..., 0, :].set(0)
trans += trans_neg
if self.concatenate_eigenvalues:
eigenvalues_ = jnp.broadcast_to(eigenvalues[..., None, :],
trans.shape[:-1])
trans = jnp.concatenate((eigenvalues_[..., None], trans), axis=-1)
if self.use_attention:
if self.norm is not None:
trans = self.norm()(trans)
attn = MultiHeadAttention(
self.num_heads,
key_size=trans.shape[-1] // self.num_heads,
value_size=trans.shape[-1] // self.num_heads,
model_size=trans.shape[-1],
w_init=None,
dropout_p=self.dropout_p,
with_bias=False)
trans += attn(
trans,
trans,
trans,
mask=attn_padding_mask,
is_training=call_args.is_training)
padding_mask = padding_mask[..., None]
trans = trans * padding_mask
trans = trans.reshape(trans.shape[:-2] + (-1,))
if self.dropout_p and call_args.is_training:
trans = hk.dropout(hk.next_rng_key(), self.dropout_p, trans)
output = self.re_aggregate_mlp(trans)
if self.im_aggregate_mlp is None:
return output
output_im = self.im_aggregate_mlp(trans)
output = output + 1j * output_im
return output
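# Hedged shape note: given complex `eigenvectors` of shape [..., n, k] and
# `eigenvalues` of shape [..., k], the call above yields node features of
# shape [..., n, d_model_aggr], complex only if `return_real_output=False`
# and `consider_im_part=True`.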
class GNN(hk.Module):
"""Standard GNN that supersedes a GCN implementation os used by the Open Graph Benchmark Code2 dataset and a standard bidirectional GNN.
Attributes:
d_model: number of hidden dimensions.
activation: The activation function.
gnn_type: Either `gcn` or `gnn`
use_edge_attr: If True also the edge attributes are considered. Must be
True for `gnn_type=gcn`.
k_hop: Number of message passing steps.
mlp_layers: Number of layers in MLP (only relevant for `gnn_type=gnn`).
tightening_factor: The factor of dimensionality reduction for message
passing in contrast to `d_model`.
norm: The batch/layer norm.
concat: If True all intermediate node embeddings are concatenated and then
mapped to `d_model` in the output MLP.
    residual: If True, the GNN embeddings are added residually (only in
      effect when `tightening_factor` is 1).
bidirectional: If True, edges in both directions are considered (only
relevant for `gnn_type=gnn`).
name: Name of module.
**kwargs:
"""
def __init__(self,
d_model: int = 256,
activation: Callable[[Tensor], Tensor] = jax.nn.relu,
gnn_type='gcn',
use_edge_attr=True,
k_hop=2,
mlp_layers: int = 2,
tightening_factor: int = 1,
norm=None,
concat: bool = False,
residual: bool = True,
bidirectional: bool = True,
name: Optional[str] = None,
**kwargs):
super().__init__(name=name)
self.d_model = d_model
self.mlp_layers = mlp_layers
self.tightening_factor = tightening_factor
self.activation = activation
self.gnn_type = gnn_type
self.use_edge_attr = use_edge_attr
self.k_hop = k_hop
self.norm = norm
self.concat = concat
self.residual = residual
self.bidirectional = bidirectional
if kwargs:
logging.info('GNN.__init__() received unexpected kwargs: %s', kwargs)
def _layer(self,
idx: int) -> Callable[[jraph.GraphsTuple], jraph.GraphsTuple]:
if self.gnn_type == 'gcn':
assert self.use_edge_attr, 'For GCN we must include edge features'
def update_fn(x):
return hk.Linear(self.d_model // self.tightening_factor)(x)
backw_update_node_fn = backw_update_edge_fn = None
if self.bidirectional:
backw_update_node_fn = backw_update_edge_fn = update_fn
layer = GraphConvolution(
forw_update_node_fn=update_fn,
forw_update_edge_fn=update_fn,
backw_update_node_fn=backw_update_node_fn,
backw_update_edge_fn=backw_update_edge_fn,
activation=self.activation,
add_self_edges=False)
elif self.gnn_type == 'gnn':
layer = jraph.GraphNetwork(
update_edge_fn=functools.partial(
self._update_fn,
exclude_features=not self.use_edge_attr,
name=f'edge_mlp{idx}'),
update_node_fn=functools.partial(
self._update_fn, exclude_features=False, name=f'node_mlp{idx}'))
else:
raise ValueError(f'`gnn_type` {self.gnn_type} is not supported')
return layer
def _update_fn(self, features: Tensor, sender_features: Tensor,
receiver_features: Tensor, globals_: Any,
exclude_features: bool, name: str):
if self.bidirectional:
stack = [features, sender_features, receiver_features]
else:
stack = [features, sender_features + receiver_features]
if exclude_features:
stack = stack[1:]
concat_features = jnp.concatenate(stack, axis=-1)
return mlp(
concat_features,
self.d_model // self.tightening_factor,
activation=self.activation,
with_norm=False,
final_activation=True,
n_layers=self.mlp_layers,
name=name)
def __call__(self,
graph: jraph.GraphsTuple,
call_args: CallArgs,
mask: Optional[Tensor] = None,
in_axes: int = 0,
**kwargs):
if self.k_hop == 0:
return graph
nodes_list = [graph.nodes]
for idx in range(self.k_hop):
new_graph = jax.vmap(self._layer(idx), in_axes=in_axes)(graph)
if self.residual and self.tightening_factor == 1:
graph = graph._replace(
nodes=graph.nodes + new_graph.nodes,
edges=graph.edges + new_graph.edges,
)
else:
graph = new_graph
if self.concat:
nodes_list.append(graph.nodes)
if self.concat:
graph = graph._replace(nodes=jnp.concatenate(nodes_list, axis=-1))
if self.norm:
graph = graph._replace(nodes=self.norm()(graph.nodes, call_args, mask))
if self.concat or self.tightening_factor > 1:
graph = graph._replace(nodes=hk.Linear(self.d_model)(graph.nodes))
return graph
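# A minimal, self-contained sketch (illustrative, not part of the original
# model) of the k-hop message-passing pattern in `GNN.__call__` above, with a
# residual connection after every hop. It uses the public
# `jraph.GraphConvolution` instead of this repository's `GraphConvolution`;
# the function name is hypothetical, it assumes `graph.nodes` already has
# `d_model` features, and it must run inside an `hk.transform`.
def _example_k_hop_residual_gcn(graph: jraph.GraphsTuple,
                                d_model: int = 16,
                                k_hop: int = 2) -> jraph.GraphsTuple:
  for _ in range(k_hop):
    layer = jraph.GraphConvolution(
        update_node_fn=hk.Linear(d_model), add_self_edges=True)
    new_graph = layer(graph)
    # Residual connection, as applied above when `tightening_factor == 1`.
    graph = graph._replace(nodes=graph.nodes + jax.nn.relu(new_graph.nodes))
  return graph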
class TransformerEncoderLayer(hk.Module):
"""The transformer encoder layer.
Main differences to the common implementation is the option to apply a GNN to
query and key, as well as handling complex valued positional encodings.
Attributes:
    d_model: Number of hidden dimensions.
    num_heads: Number of heads in multi-head attention.
    dense_widening_factor: Factor by which to widen the MLP after attention.
    dropout_p: Dropout probability.
    with_attn_dropout: If True, dropout is also applied to the attention
      weights.
    with_bias: If True, the linear projections have a bias.
    re_im_separate_projection: Apply a joint (False) or separate (True)
      projection for complex-valued query and key.
    se: The structure extractor applied to query and key (either `gnn` or the
      empty string for none).
    norm: The batch/layer norm.
    pre_norm: If True, the norm is applied before attention (pre-norm)
      instead of after (post-norm).
activation: The activation function.
gnn_config: Config for GNN.
name: Name of the layer.
"""
def __init__( # pylint: disable=dangerous-default-value
self,
d_model=256,
num_heads=4,
dense_widening_factor=4,
dropout_p=0.2,
with_attn_dropout=True,
re_im_separate_projection=False,
with_bias=False,
activation=jax.nn.relu,
norm: Any = LayerNorm,
pre_norm: bool = False,
gnn_config=dict(),
name=None):
super().__init__(name=name)
self.d_model = d_model
self.num_heads = num_heads
self.dense_widening_factor = dense_widening_factor
self.dropout_p = dropout_p
self.with_bias = with_bias
self.re_im_separate_projection = re_im_separate_projection
if not isinstance(gnn_config, dict):
gnn_config = gnn_config.to_dict()
else:
gnn_config = gnn_config.copy()
self.se = gnn_config.pop('se', 'gnn')
self.norm = norm
self.pre_norm = pre_norm
self.activation = activation
if self.se == 'gnn':
self.se = GNN(self.d_model, self.activation, norm=self.norm, **gnn_config)
elif self.se:
raise ValueError(f'unexpected structure extractor value {self.se}')
self.linear_dm = functools.partial(hk.Linear, self.d_model)
self.linear_ff = functools.partial(
hk.Linear, int(self.d_model * self.dense_widening_factor))
self.attn = MultiHeadAttention(
self.num_heads,
key_size=self.d_model // self.num_heads,
value_size=self.d_model // self.num_heads,
model_size=self.d_model,
w_init=None,
dropout_p=self.dropout_p if with_attn_dropout else 0.,
with_bias=self.with_bias,
re_im_separate_projection=self.re_im_separate_projection)
def __call__(self,
graph: jraph.GraphsTuple,
call_args: CallArgs,
invnorm_degree: Optional[Tensor] = None,
posenc: Optional[Tensor] = None,
mask: Optional[Tensor] = None):
if mask is not None:
bn_mask = mask[..., :1]
else:
bn_mask = None
if self.pre_norm:
graph = graph._replace(nodes=self.norm()(graph.nodes, call_args, bn_mask))
value = graph.nodes
if posenc is not None and posenc.ndim <= value.ndim:
value = jnp.real(value + posenc)
graph = graph._replace(nodes=value)
if not self.se:
query = key = graph.nodes
else:
graph_se = self.se(graph, call_args, mask=bn_mask)
query = key = graph_se.nodes
logit_offset = None
if posenc is not None:
if posenc.ndim > query.ndim:
logit_offset = posenc
posenc = None
else:
query = key = query + posenc
attn_emb = self.attn(
query=query,
key=key,
value=value,
is_training=call_args.is_training,
logit_offset=logit_offset,
mask=mask)
if invnorm_degree is not None:
attn_emb = invnorm_degree[..., None] * attn_emb
if call_args.is_training:
attn_emb = hk.dropout(hk.next_rng_key(), self.dropout_p, attn_emb)
value = value + attn_emb
value = self.norm()(value, call_args, bn_mask)
fwd_emb = self.activation(self.linear_ff()(value))
if call_args.is_training:
fwd_emb = hk.dropout(hk.next_rng_key(), self.dropout_p, fwd_emb)
fwd_emb = self.linear_dm()(fwd_emb)
if call_args.is_training:
fwd_emb = hk.dropout(hk.next_rng_key(), self.dropout_p, fwd_emb)
value = value + fwd_emb
if not self.pre_norm:
value = self.norm()(value, call_args, bn_mask)
return graph._replace(nodes=value)
class GraphTransformerEncoder(hk.Module):
"""Wrapper for multiple encoder layers."""
def __init__(
self,
layer_sequence: Sequence[TransformerEncoderLayer],
name: Optional[str] = None,
):
super().__init__(name=name)
self.layer_sequence = layer_sequence
def __call__(self, x, *args, **kwargs):
output = x
for layer in self.layer_sequence:
output = layer(output, *args, **kwargs)
return output
class StructureAwareTransformer(hk.Module):
"""Our implementation/extension of the Structure Aware Transformer, ICML22.
Attributes:
    model_type: One of `sat`, `magnet`, or `gnn` (the fallback for other
      values).
num_class: Number of classes for graph classification.
    d_model: Number of hidden dimensions.
    input_embedder: An embedder for the raw graph's features.
    num_layers: Number of encoder layers.
    gnn_config: Config for the GNN.
    attention_config: Config for the attention.
    max_seq_len: Number of predictions to be made per graph.
global_readout: how to aggregate the node embeddings. One of `1`, `cls`,
`mean`, `sum_n`.
activation: The activation function.
batch_norm: If True use BatchNorm, otherwise use LayerNorm.
deg_sensitive_residual: If True normalize residual by node degree.
with_pre_gnn: If True apply a GNN before the transformer.
posenc_config: Configuration of positional encodings.
loss_config: Configuration for loss.
    eps: A small constant for numerical stability.
pmap_axis: Relevant for hk.BatchNorm to work within `pmap`.
name: Name of the layer.
"""
# pylint: disable=dangerous-default-value
def __init__(self,
model_type: str,
num_class: int,
d_model: int,
input_embedder: Optional[Union[ASTNodeEncoder,
DataFlowASTEncoder]] = None,
num_layers=4,
gnn_config=dict(),
attention_config=dict(),
max_seq_len=5,
global_readout='mean',
activation: str = 'relu',
batch_norm: bool = False,
deg_sensitive_residual: bool = True,
with_pre_gnn: bool = False,
posenc_config=PositionalEncodingsParams(),
loss_config=LossConfig(),
eps: float = 1e-7,
pmap_axis: Optional[str] = None,
name: Optional[str] = None):
super().__init__(name=name)
self.d_model = d_model
self.deg_sensitive_residual = deg_sensitive_residual
self.eps = eps
self.embedding = input_embedder
self.global_readout = global_readout
self.posenc_config = posenc_config
self.loss_config = loss_config
self.max_seq_len = max_seq_len
self.num_class = num_class
self.activation = self.activation_fn(activation)
if batch_norm:
self.norm = functools.partial(
BatchNorm,
create_scale=True,
create_offset=True,
decay_rate=0.9,
eps=1e-5,
cross_replica_axis=pmap_axis)
else:
self.norm = functools.partial(LayerNorm, axis=-1, eps=1e-5)
if model_type == 'sat':
layer = functools.partial(
TransformerEncoderLayer,
d_model=d_model,
activation=self.activation,
gnn_config=gnn_config,
norm=self.norm,
**attention_config)
self.encoder = GraphTransformerEncoder(
[layer() for idx in range(num_layers)])
elif model_type == 'magnet':
self.encoder = magnet.MagNet(
d_model, activation=self.activation, q=posenc_config.maglap_q,
q_absolute=posenc_config.maglap_q_absolute, **gnn_config)
else:
warnings.warn(f'Falling back to `gnn` model (given type {model_type})')
self.encoder = GNN(d_model, activation=self.activation, **gnn_config)
self.pre_gnn = None
if with_pre_gnn:
# pylint: disable=unused-argument
def update_edge_fn(features: Tensor, sender_features: Tensor,
receiver_features: Tensor, globals_: Any) -> Tensor:
return mlp(
features,
self.d_model,
activation=self.activation,
with_norm=False,
final_activation=True,
n_layers=1,
name='pre_gnn_update_edge_fn')
def update_node_fn(features: Tensor, sender_features: Tensor,
receiver_features: Tensor, globals_: Any) -> Tensor:
concat_features = jnp.concatenate(
[features, sender_features, receiver_features], axis=-1)
return mlp(
concat_features,
self.d_model,
activation=self.activation,
with_norm=False,
final_activation=True,
n_layers=1,
name='pre_gnn_update_node_fn')
self.pre_gnn = jraph.GraphNetwork(
update_edge_fn=update_edge_fn, update_node_fn=update_node_fn)
if self.posenc_config.posenc_type == 'maglap':
use_rel_posenc = self.posenc_config.relative_positional_encodings
if self.posenc_config.maglap_net_type == 'naive':
self.maglap_net = NaiveMagLapNet(self.d_model)
else:
self.maglap_net = MagLapNet(
self.d_model // 32,
self.d_model,
num_heads=attention_config.get('num_heads', 4),
n_layers=1,
dropout_p=self.posenc_config.maglap_dropout_p,
concatenate_eigenvalues=self.posenc_config.concatenate_eigenvalues,
consider_im_part=self.posenc_config.maglap_q != 0.,
activation=self.activation,
use_signnet=self.posenc_config.maglap_use_signnet,
use_gnn=self.posenc_config.maglap_use_gnn,
use_attention=self.posenc_config.maglap_transformer,
norm=self.norm if self.posenc_config.maglap_transformer else None,
return_real_output=not use_rel_posenc)
def activation_fn(self, activation: str):
"""Get activation function from name."""
if activation == 'relu':
return jax.nn.relu
elif activation == 'gelu':
return jax.nn.gelu
else:
raise ValueError(f'unexpected activation {activation}')
def positional_encodings(
self,
graph: jraph.GraphsTuple,
eigenvalues: Optional[Tensor] = None,
eigenvectors: Optional[Tensor] = None,
call_args: CallArgs = CallArgs(True),
mask: Optional[Tensor] = None
) -> Tuple[jraph.GraphsTuple, Optional[Tensor]]:
"""Adds the positional encodings."""
if not self.posenc_config.posenc_type:
return graph, None
if self.posenc_config.posenc_type == 'ppr':
ppr = exact_ppr(
graph, restart_p=self.posenc_config.ppr_restart_probability)
if self.posenc_config.do_also_reverse:
backward_ppr = exact_ppr(
graph,
restart_p=self.posenc_config.ppr_restart_probability,
reverse=True)
posenc = jnp.stack((ppr, backward_ppr), axis=-1)
else:
posenc = ppr[..., None]
elif self.posenc_config.posenc_type == 'rw':
posenc = k_step_random_walk(
graph,
k=self.posenc_config.random_walk_steps,
ppr_restart_p=self.posenc_config.ppr_restart_probability)
if self.posenc_config.do_also_reverse:
backward_rw = k_step_random_walk(
graph,
k=self.posenc_config.random_walk_steps,
ppr_restart_p=self.posenc_config.ppr_restart_probability,
reverse=True)
posenc = jnp.concatenate((posenc, backward_rw), axis=-1)
elif self.posenc_config.posenc_type == 'svd':
posenc = svd_encodings(graph, rank=self.posenc_config.top_k_eigenvectors)
      # Random sign flips, similar to Hussain (2022)
rand_sign_shape = (*posenc.shape[:-2], 1, *posenc.shape[-1:])
rand_sign = jax.random.rademacher(hk.next_rng_key(), rand_sign_shape)
posenc = rand_sign * posenc
posenc = mlp(
posenc,
self.d_model,
activation=self.activation,
n_layers=1,
with_norm=False,
final_activation=True,
name='posenc_mlp')
graph = graph._replace(nodes=graph.nodes + posenc)
      # SVD encodings are always added absolutely, so nothing is passed on
posenc = None
elif self.posenc_config.posenc_type == 'maglap':
# We might have already loaded the eigenvalues, -vectors
if eigenvalues is None or eigenvectors is None:
raise RuntimeError('Eigenvectors and eigenvalues were not provided.')
posenc = self.maglap_net(
graph, eigenvalues, eigenvectors, call_args=call_args, mask=mask)
if not self.posenc_config.relative_positional_encodings:
graph = graph._replace(nodes=graph.nodes + posenc)
# Otherwise, we interpret the encoding as relative
posenc = None
else:
raise ValueError(
f'unexpected positional encoding type {self.posenc_config.posenc_type}'
)
if (not self.posenc_config.relative_positional_encodings and
self.posenc_config.posenc_type in ['ppr', 'rw']):
posenc = hk.Linear(posenc.shape[-1], name='posenc_linear')(posenc)
posenc = (self.activation(posenc) * mask[..., None]).sum(axis=-3)
posenc = mlp(
posenc,
self.d_model,
activation=self.activation,
n_layers=1,
with_norm=False,
final_activation=True,
name='posenc_mlp')
graph = graph._replace(nodes=graph.nodes + posenc)
# Otherwise, we interpret the encoding as relative
posenc = None
return graph, posenc
def readout(self, graph: jraph.GraphsTuple) -> Tensor:
"""For the aggregate prediction over for the graph."""
if self.global_readout == 1 or self.global_readout == '1':
return graph.nodes[..., 1, :]
elif self.global_readout == 'cls':
return graph.nodes[jnp.arange(graph.n_node.shape[0]),
graph.n_node[..., 0].astype(jnp.int32) - 1]
batch, n_node = graph.nodes.shape[:2]
indices = jnp.tile(jnp.arange(n_node), (batch, 1))
padding_mask = indices < graph.n_node[:, 0, None]
if self.global_readout == 'mean':
output = (graph.nodes * padding_mask[..., None]).sum(-2)
output /= (graph.n_node[:, 0, None] + self.eps)
elif self.global_readout == 'sum_n':
output = (graph.nodes * padding_mask[..., None]).sum(-2)
n = graph.n_node[:, 0, None]
output = jnp.concatenate((output, n), axis=-1)
return output
def get_padding_mask(self, graph: jraph.GraphsTuple) -> Tensor:
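    """Returns a [batch, n, n] mask that is True for pairs of real nodes.

    Illustrative example: for a padded batch where graph.nodes has shape
    [1, 4, d] and graph.n_node[:, 0] == [2], the mask is
      [[[1, 1, 0, 0],
        [1, 1, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0]]]
    so attention and the norms only consider the two real nodes.
    """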
batch, n_node = graph.nodes.shape[:2]
indices = jnp.tile(jnp.arange(n_node), (batch, 1))
padding_mask = indices < graph.n_node[:, 0, None]
padding_mask = padding_mask[:, :, None] * padding_mask[:, None, :]
return padding_mask
def __call__(self,
graph: jraph.GraphsTuple,
call_args: CallArgs = CallArgs(False)):
node_depth = None
if 'node_depth' in graph.nodes and not self.posenc_config.exclude_canonical:
node_depth = graph.nodes['node_depth']
eigenvalues = eigenvectors = None
if 'eigenvalues' in graph.nodes and 'eigenvectors' in graph.nodes:
eigenvalues = graph.nodes['eigenvalues']
eigenvectors = graph.nodes['eigenvectors']
    # Either nodes['node_feat'] or `nodes` itself represents the features
if 'node_feat' in graph.nodes:
graph = graph._replace(nodes=graph.nodes['node_feat'])
if self.deg_sensitive_residual:
sender_degree = count_edges(graph.senders, n_nodes=graph.nodes.shape[1])
invnorm_degree = jax.lax.rsqrt((sender_degree + 1.))
else:
invnorm_degree = None
if self.embedding is not None:
graph = self.embedding(graph, node_depth, call_args)
if self.pre_gnn is not None:
graph = graph._replace(nodes=jax.vmap(self.pre_gnn)(graph).nodes)
padding_mask = self.get_padding_mask(graph)
graph, posenc = self.positional_encodings(graph, eigenvalues, eigenvectors,
call_args, mask=padding_mask)
if self.global_readout == 'cls':
cls_token = hk.get_parameter('cls_token', (1, self.d_model), jnp.float32,
hk.initializers.RandomNormal())
n_node = graph.n_node.astype(jnp.int32)
nodes = graph.nodes.at[jnp.arange(graph.nodes.shape[0]),
n_node[..., 0].astype(jnp.int32)].set(cls_token)
n_node = n_node.at[..., 0].add(1)
n_node = n_node.at[..., 1].add(-1)
graph = graph._replace(nodes=nodes, n_node=n_node)
# New padding mask due to added node
padding_mask = self.get_padding_mask(graph)
graph = self.encoder(
graph,
call_args=call_args,
posenc=posenc,
invnorm_degree=invnorm_degree,
mask=padding_mask)
output = self.readout(graph)
if self.num_class <= 0 or self.max_seq_len <= 0:
lead = len(graph.nodes.shape[:-2]) * (1,)
nodes = graph.nodes.shape[-2]
x_senders = jnp.tile(graph.nodes[..., :, None, :], lead + (1, nodes, 1))
x_receivers = jnp.tile(graph.nodes[..., None, :, :], lead + (nodes, 1, 1))
x_global = jnp.tile(output[..., None, None, :], lead + (nodes, nodes, 1))
x = jnp.concatenate((x_senders, x_receivers, x_global), axis=-1)
x = MLP(self.d_model, n_layers=2, activation=self.activation,
with_norm=True, final_activation=True, name='adj_mlp')(x)
dim_out = 1 if self.num_class <= 0 else self.num_class
output = hk.Linear(dim_out, name='adj_out')(x)
return output
classifier = hk.Linear(self.num_class * self.max_seq_len)
prediction = classifier(output).reshape(
list(output.shape[:-1]) + [self.max_seq_len, self.num_class])
return prediction
def loss(self, graph: jraph.GraphsTuple, is_training=True):
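    """Runs the model and returns a tuple of loss and raw predictions."""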
prediction = self.__call__(
graph._replace(globals=None), call_args=CallArgs(is_training))
if self.num_class > 0 and self.max_seq_len > 0:
loss = softmax_cross_entropy_loss(
prediction, graph.globals['target'][..., 0, :].astype(jnp.int32),
self.num_class,
self.loss_config.only_punish_first_end_of_sequence_token)
else:
if isinstance(graph.nodes, dict):
graph = graph._replace(nodes=graph.nodes['node_feat'])
target = graph.globals['target']
mask = target >= 0
if self.num_class > 0:
target = jnp.where(mask, target, 0)
targets_one_hot = jax.nn.one_hot(target, self.num_class)
logits = jax.nn.log_softmax(prediction, axis=-1)
loss = -jnp.sum(targets_one_hot * logits, axis=-1)
else:
prediction = jax.nn.softplus(prediction[..., 0])
loss = (prediction - target) ** 2
loss = (loss * mask).sum(axis=(-2, -1)) / mask.sum(axis=(-2, -1))
return loss, prediction
def get_model( # pylint: disable=dangerous-default-value
dataset: config.Dataset,
# For batch norm
pmap_axis: str,
model_type: str = 'sat',
num_layers=4,
d_model=256,
activation: str = 'relu',
batch_norm: bool = False,
deg_sensitive_residual: bool = True,
with_pre_gnn: bool = False,
encoder_config=dict(),
loss_kwargs=dict(),
posenc_config=dict(),
attention_config=dict(),
gnn_config=dict(),
global_readout='mean',
**kwargs):
"""Creates the model for the given configuration."""
if kwargs:
logging.info('get_model() received kwargs: %s', kwargs)
posenc_config = PositionalEncodingsParams(**posenc_config)
if '-df' in dataset.name:
input_encoder = DataFlowASTEncoder(d_model, dataset.num_nodetypes,
dataset.num_nodeattributes,
dataset.ast_depth, dataset.edge_df_types,
dataset.edge_ast_types, **encoder_config)
elif dataset.name.startswith('sn'):
input_encoder = SortingNetworkEncoder(
d_model,
num_nodeattributes=dataset.num_nodeattributes,
encode_order=not posenc_config.exclude_canonical)
elif dataset.name.startswith('dist'):
input_encoder = DistanceEncoder(
d_model, encode_order=not posenc_config.exclude_canonical)
else:
edge_dim = d_model if gnn_config.get('residual', False) else d_model // 2
input_encoder = ASTNodeEncoder(
d_model,
dataset.num_nodetypes,
dataset.num_nodeattributes,
dataset.ast_depth,
edge_dim=edge_dim)
model = StructureAwareTransformer(
model_type,
dataset.num_classes,
d_model,
input_encoder,
attention_config=attention_config,
num_layers=num_layers,
max_seq_len=dataset.sequence_length,
gnn_config=gnn_config,
pmap_axis=pmap_axis,
with_pre_gnn=with_pre_gnn,
deg_sensitive_residual=deg_sensitive_residual,
posenc_config=posenc_config,
loss_config=LossConfig(**loss_kwargs),
global_readout=global_readout,
activation=activation,
batch_norm=batch_norm,
**kwargs)
return model
| digraph_transformer-main | models.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All the important shortcuts to ease configuration of experiments."""
from typing import Text, List
from ml_collections import config_dict
def apply_presets(config: config_dict.ConfigDict,
unique_presets: List[Text]) -> config_dict.ConfigDict:
"""Applies the defined presets."""
all_presets = {
# Select dataset
'norevogb': norevogb_preset,
'dfogb': dfogb_preset,
'sn': sorting_network_preset,
'adj': adjacency_preset,
'adja': adjacency_acyclic_preset,
'adju': adjacency_undirected_preset,
'con': is_connected_preset,
'cona': is_connected_acyclic_preset,
'conu': is_connected_undirected_preset,
'dist': distance_preset,
'dista': distance_acyclic_preset,
'distu': distance_undirected_preset,
'distau': distance_acyclic_undirected_preset,
# Basic architecture and positional encodings
'bignn': bignn_preset,
'pprpos': ppr_posenc_preset,
'rwpos': random_walk_posenc_preset,
'maglappos': magnetic_laplacian_preset,
'lappos': laplacian_preset,
'bbs': bucket_by_size_preset
}
for preset in unique_presets:
if preset in all_presets:
all_presets[preset](config)
else:
raise ValueError(f'Invalid preset value `{preset}`')
return config
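# Illustrative usage (the config object comes from the experiment's jaxline
# config and defines many more fields than shown here). Presets are applied
# left to right, so dataset presets should precede presets that read
# `dataset_name`:
#
#   config = apply_presets(config, ['dfogb', 'bignn', 'maglappos', 'bbs'])
#
# Here `maglappos` inspects `config.experiment_kwargs.config.dataset_name`
# (set by `dfogb`) to select dataset-specific hyperparameters.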
def norevogb_preset(config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Params for the vanilla OGB (without reversed edges)."""
config_ = config.experiment_kwargs.config
config_.dataset_name = 'ogbg-code2-norev'
# General hyperparams (With tuning batch size 56)
config_.optimizer.use_agc = True
config_.optimizer.optimizer_kwargs.weight_decay = 6e-5
config_.optimizer.optimizer_kwargs.b1 = 0.9
config_.optimizer.optimizer_kwargs.b2 = 0.95
config_.optimizer.lr_schedule.peak_value = 3e-3 / 56
config_.optimizer.lr_schedule.init_value = 5e-6 / 56
config_.optimizer.lr_schedule.end_value = 6e-9 / 56
config_.optimizer.agc_kwargs.clipping = 0.05
# Model params
config_.model.attention_config.with_attn_dropout = False
config_.model.activation = 'gelu'
config_.model.attention_config.with_bias = True
config_.model.global_readout = 'cls'
config_.model.encoder_config.attribute_dropout = 0.15
config_.model.attention_config.dropout_p = 0.18
config_.model.gnn_config.bidirectional = True
config_.model.gnn_config.k_hop = 3
config_.model.gnn_config.mlp_layers = 1
# Offset
config_.evaluation.unk_offset = 0.75
config_.evaluation.eos_offset = 0.3
return config
def dfogb_preset(config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Params for the data-flow centric OGB (without reversed edges)."""
config_ = config.experiment_kwargs.config
config_.dataset_name = 'ogbg-code2-norev-df'
config_.dataset_config.exclude_control_flow_edges = False
config_.dataset_config.exclude_next_syntax_edges = True
# General hyperparams (With tuning batch size 56)
config_.optimizer.use_agc = True
config_.optimizer.optimizer_kwargs.weight_decay = 7.5e-5
config_.optimizer.optimizer_kwargs.b1 = 0.75
config_.optimizer.optimizer_kwargs.b2 = 0.935
config_.optimizer.lr_schedule.peak_value = 3e-3 / 56
config_.optimizer.lr_schedule.init_value = 4.5e-6 / 56
config_.optimizer.lr_schedule.end_value = 6.5e-9 / 56
config_.optimizer.agc_kwargs.clipping = 0.1
# Model params
config_.model.attention_config.with_attn_dropout = False
config_.model.activation = 'gelu'
config_.model.attention_config.with_bias = True
config_.model.global_readout = 'cls'
config_.model.encoder_config.attribute_dropout = 0.15
config_.model.attention_config.dropout_p = 0.185
config_.model.gnn_config.bidirectional = True
config_.model.gnn_config.k_hop = 3
config_.model.gnn_config.mlp_layers = 1
# offset
config_.evaluation.unk_offset = 0.75
config_.evaluation.eos_offset = 0.45
# Adaptions to save memory in comparison to TPU setup
config_.optimizer.accumulate_gradient_k = 12
config_.training.batch_size = 32
# config_.evaluation.batch_size = 32
return config
def sorting_network_preset(
config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Params for the sorting network dataset."""
config = dfogb_preset(config)
# Reverse adaptions to save memory in comparison to TPU setup
config.experiment_kwargs.config.optimizer.accumulate_gradient_k = 8
config.experiment_kwargs.config.training.batch_size = 48
# config.experiment_kwargs.config.evaluation.batch_size = 48
config.epochs = 15
config.experiment_kwargs.config.dataset_name = 'sn-7to11-12-13to16'
config.experiment_kwargs.config.model.gnn_config.use_edge_attr = False
config.experiment_kwargs.config.evaluation.max_number_of_instances = 40_000
config.best_model_eval_metric = 'accuracy'
config.experiment_kwargs.config.optimizer.agc_kwargs.clipping = 0.075
config.experiment_kwargs.config.optimizer.optimizer_kwargs.weight_decay = 6e-5
config.experiment_kwargs.config.optimizer.optimizer_kwargs.b1 = 0.7
config.experiment_kwargs.config.optimizer.optimizer_kwargs.b2 = 0.9
config.experiment_kwargs.config.optimizer.lr_schedule.peak_value = 4e-4 / 48
config.experiment_kwargs.config.optimizer.lr_schedule.init_value = 2e-6 / 48
config.experiment_kwargs.config.optimizer.lr_schedule.end_value = 2e-9 / 48
config.experiment_kwargs.config.model.deg_sensitive_residual = False
config.experiment_kwargs.config.model.posenc_config.exclude_canonical = True
# offset
config.experiment_kwargs.config.evaluation.unk_offset = 0
config.experiment_kwargs.config.evaluation.eos_offset = 0
return config
def adjacency_preset(
config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Params for the sorting network dataset."""
config = sorting_network_preset(config)
config.save_checkpoint_interval = 240
config.epochs = 15 # Due to the more aggressive bucketing, this is roughly 30
config_ = config.experiment_kwargs.config
config_.dataset_name = 'adj_c'
config_.evaluation.max_number_of_instances = -1
config.best_model_eval_metric = 'f1'
config_.optimizer.accumulate_gradient_k = 1
config_.model.posenc_config.exclude_canonical = True
# config_.model.global_readout = 'sum_n'
config_.dataset_config.bucket_boundaries = (127, 255, 511)
config_.dataset_config.bucket_batch_size_factors = (8, 4, 2, 1)
# Offset - will be auto tuned
# config_.training.batch_size = 24
config_.evaluation.unk_offset = 0.0
config_.evaluation.eos_offset = 0.0
config_.evaluation.batch_size = 128
# config_.evaluation.batch_size = 24
return config
def adjacency_acyclic_preset(
config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Params for the sorting network dataset."""
config = adjacency_preset(config)
config.experiment_kwargs.config.dataset_name = 'adj_ca'
return config
def adjacency_undirected_preset(
config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Params for the sorting network dataset."""
config = adjacency_preset(config)
config.experiment_kwargs.config.dataset_name = 'adj_c_u'
return config
def is_connected_preset(
config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Params for the sorting network dataset."""
config = adjacency_preset(config)
config.experiment_kwargs.config.dataset_name = 'con_c'
return config
def is_connected_acyclic_preset(
config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Params for the sorting network dataset."""
config = is_connected_preset(config)
config.experiment_kwargs.config.dataset_name = 'con_ca'
return config
def is_connected_undirected_preset(
config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Params for the sorting network dataset."""
config = is_connected_preset(config)
config.experiment_kwargs.config.dataset_name = 'con_c_u'
return config
def distance_preset(
config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Params for the sorting network dataset."""
config = adjacency_preset(config)
config.save_checkpoint_interval = 600
config_ = config.experiment_kwargs.config
config_.dataset_name = 'dist_c'
config_.evaluation.batch_size = 64
config.best_model_eval_metric = 'rmse'
config.best_model_eval_metric_higher_is_better = False
return config
def distance_acyclic_preset(
config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Params for the sorting network dataset."""
config = distance_preset(config)
config.experiment_kwargs.config.dataset_name = 'dist_ca'
return config
def distance_undirected_preset(
config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Params for the sorting network dataset."""
config = distance_preset(config)
config.experiment_kwargs.config.dataset_name = 'dist_c_u'
return config
def distance_acyclic_undirected_preset(
config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Params for the sorting network dataset."""
config = distance_preset(config)
config.experiment_kwargs.config.dataset_name = 'dist_ca_u'
return config
def bignn_preset(config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Params for the bidirectional GNN."""
config_ = config.experiment_kwargs.config.model.gnn_config
config_.gnn_type = 'gnn'
config_.k_hop = 3
config_.se = 'gnn'
config_.mlp_layers = 1
config_.tightening_factor = 2
config_.bidirectional = True
config_.concat = True
config_.residual = True
return config
def ppr_posenc_preset(config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Personalized Page Rank positional encodings."""
exclude_canonical = 'ogb' not in config.experiment_kwargs.config.dataset_name
posenc_config = config_dict.ConfigDict(
dict(
posenc_type='ppr',
do_also_reverse=True,
ppr_restart_probability=0.1,
relative_positional_encodings=True,
exclude_canonical=exclude_canonical,
random_walk_steps=-1))
config.experiment_kwargs.config.model.posenc_config = posenc_config
return config
def random_walk_posenc_preset(
config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Random Walk positional encodings."""
exclude_canonical = 'ogb' not in config.experiment_kwargs.config.dataset_name
posenc_config = config_dict.ConfigDict(
dict(
posenc_type='rw',
do_also_reverse=True,
ppr_restart_probability=0.05,
relative_positional_encodings=False,
exclude_canonical=exclude_canonical,
random_walk_steps=3))
config.experiment_kwargs.config.model.posenc_config = posenc_config
return config
def magnetic_laplacian_preset(
config: config_dict.ConfigDict) -> config_dict.ConfigDict:
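  """Magnetic Laplacian positional encodings."""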
config_ = config.experiment_kwargs.config
posenc_config = config_.model.posenc_config
posenc_config.posenc_type = 'maglap'
# posenc_config.exclude_canonical = False
posenc_config.relative_positional_encodings = False
posenc_config.top_k_eigenvectors = 15
posenc_config.maglap_q = 0.1
posenc_config.maglap_q_absolute = False
posenc_config.excl_k_eigenvectors = 0
posenc_config.concatenate_eigenvalues = True
posenc_config.maglap_symmetric_norm = True
posenc_config.maglap_transformer = True
posenc_config.maglap_use_signnet = True
posenc_config.maglap_use_gnn = True
posenc_config.maglap_norm_comps_sep = False
posenc_config.maglap_l2_norm = True
posenc_config.maglap_dropout_p = 0.10
posenc_config.maglap_sign_rotate = False
if 'sn-' in config.experiment_kwargs.config.dataset_name:
config_.optimizer.use_agc = True
config_.optimizer.optimizer_kwargs.weight_decay = 6e-5
config_.optimizer.optimizer_kwargs.b1 = 0.6
config_.optimizer.optimizer_kwargs.b2 = 0.9
config_.optimizer.lr_schedule.peak_value = 5e-4 / 48
config_.optimizer.lr_schedule.init_value = 1.5e-7 / 48
config_.optimizer.lr_schedule.end_value = 1.5e-9 / 48
config_.optimizer.agc_kwargs.clipping = 0.005
posenc_config.top_k_eigenvectors = 25
posenc_config.relative_positional_encodings = False
posenc_config.maglap_q = 0.25
posenc_config.maglap_q_absolute = False
posenc_config.maglap_transformer = True
posenc_config.maglap_use_gnn = False
posenc_config.maglap_norm_comps_sep = False
posenc_config.maglap_l2_norm = True
posenc_config.maglap_dropout_p = 0.15
posenc_config.maglap_use_signnet = False
posenc_config.maglap_sign_rotate = True
if ('adj' in config.experiment_kwargs.config.dataset_name or
'con' in config.experiment_kwargs.config.dataset_name or
'dist' in config.experiment_kwargs.config.dataset_name):
posenc_config.top_k_eigenvectors = 16
config_.model.global_readout = 'sum_n' # Avoid adding a new virtual node
posenc_config.maglap_use_gnn = False
posenc_config.maglap_l2_norm = True
posenc_config.maglap_sign_rotate = True
posenc_config.maglap_use_signnet = False
return config
def laplacian_preset(config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Regular Laplacian positional encodings."""
config = magnetic_laplacian_preset(config)
config_ = config.experiment_kwargs.config
config_.model.posenc_config.maglap_q = 0
config_.model.posenc_config.relative_positional_encodings = False
return config
def bucket_by_size_preset(
config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""To batch graphs of similar size together."""
config.experiment_kwargs.config.dataset_config.do_bucket_by_size = True
return config
def model_size_preset(config: config_dict.ConfigDict,
layers: int) -> config_dict.ConfigDict:
"""To adjust model size and batch size accordingly."""
config_ = config.experiment_kwargs.config
inflate_factor = layers / config_.model.num_layers
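  # E.g. (illustrative) layers=8 with num_layers=4 gives inflate_factor=2:
  # the batch size and the number of attention heads are halved while the
  # number of layers is doubled.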
config_.training.batch_size = int(config_.training.batch_size /
inflate_factor)
config_.model.attention_config.num_heads = int(
config_.model.attention_config.num_heads / inflate_factor)
  config_.model.num_layers = int(config_.model.num_layers * inflate_factor)
return config
| digraph_transformer-main | presets.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset utility functions for the graph property prediction task."""
import functools
import multiprocessing
import os
from typing import Any, Dict, Optional, Sequence
from absl import logging
import jax
import jax.numpy as jnp
import jraph
import numba
import numpy as np
import tensorflow as tf
# pylint: disable=g-bad-import-order
import config
import utils
# This param should be chosen to be reasonable for the pipeline/hardware
NUM_THREADS_EIGENVECTORS = 96
@jax.curry(jax.tree_map)
def _downcast_ints(x):
if x.dtype == tf.int64:
return tf.cast(x, tf.int32)
return x
@jax.curry(jax.tree_map)
def _make_writable(x):
if not x.flags["WRITEABLE"]:
return np.array(x)
return x
def _add_target_to_globals(dataset_config: config.Dataset,
graph: jraph.GraphsTuple, target: tf.Tensor,
target_raw: Optional[tf.Tensor],
graph_index: Optional[int], is_training: bool):
"""Adds the labels to globals of the graph for convenience."""
def set_global_shape(x):
if dataset_config.sequence_length < 0:
return x
return tf.ensure_shape(x, [1, dataset_config.sequence_length])
globals_dict = {
"target": set_global_shape(tf.expand_dims(target, axis=0)),
**(graph.globals if isinstance(graph.globals, dict) else {})
}
if graph_index is not None:
globals_dict["graph_index"] = tf.expand_dims(graph_index, axis=0)
if not is_training and target_raw is not None:
globals_dict["target_raw"] = tf.expand_dims(target_raw, axis=0)
return graph._replace(globals=globals_dict)
def _pad_to_next_power_of_two(graph: jraph.GraphsTuple,
cfg: config.Dataset) -> jraph.GraphsTuple:
"""Pads GraphsTuple nodes and edges to next power of two."""
graph = _make_writable(graph)
n_node_max = graph.n_node.max()
n_edge_max = graph.n_edge.max()
n_node = np.power(2, np.ceil(np.log2(n_node_max + 1)))
n_edge = np.power(2, np.ceil(np.log2(n_edge_max + 1)))
# Handle a trailing dimension of size 1
if len(graph.n_edge.shape) == 3 and graph.n_edge.shape[-1] == 1:
graph = graph._replace(n_edge=graph.n_edge[..., 0])
batch = graph.n_node.shape[0]
pad_n_node = int(n_node - n_node_max)
pad_n_edge = int(n_edge - n_edge_max)
pad_n_empty_graph = 1
def pad(leaf, n_pad: int):
padding = np.zeros(
(
batch,
n_pad,
) + leaf.shape[2:], dtype=leaf.dtype)
padded = np.concatenate([leaf, padding], axis=1)
return padded
if cfg.name.startswith('dist'):
def tree_nodes_pad(nodes):
if nodes.shape[-1] == 0:
return pad(nodes, n_pad=int(n_node))
return pad(nodes, n_pad=pad_n_node)
tree_edges_pad = functools.partial(pad, n_pad=int(n_edge))
def tree_globs_pad(globals_):
if globals_.shape[-1] == 1:
return pad(globals_, n_pad=pad_n_empty_graph)
indices = jnp.tile(jnp.arange(n_node), (batch, 1))
padding_mask = indices < graph.n_node[:, 0, None]
padding_mask = padding_mask[:, :, None] * padding_mask[:, None, :]
if cfg.num_classes > 0 and not cfg.name.endswith('con'):
# adj = np.zeros((batch, int(n_node), int(n_node)), dtype=np.int32)
adj = np.full((batch, int(n_node), int(n_node)), -1, dtype=np.int32)
adj[padding_mask] = 0
batch_idx = np.arange(batch)[:, None]
batch_idx = np.tile(batch_idx, (1, graph.senders.shape[-1]))
adj[batch_idx, graph.senders, graph.receivers] = 1
adj[:, jnp.arange(int(n_node)), jnp.arange(int(n_node))] = -1
return adj
else:
sq_globals = np.full((batch, int(n_node), int(n_node)), -1.)
for idx_batch, nb_nodes in enumerate(graph.n_node[:, 0]):
idx = np.arange(nb_nodes)
idx = np.stack(np.meshgrid(idx, idx)).reshape((2, -1))
sq_globals[idx_batch, idx[0], idx[1]] = globals_[
idx_batch, 0, :int(nb_nodes ** 2)]
sq_globals[:, np.arange(int(n_node)), np.arange(int(n_node))] = -1
if cfg.name.endswith('con'):
sq_globals[~np.isfinite(sq_globals)] = 0
sq_globals[sq_globals > 0] = 1
else:
sq_globals[~np.isfinite(sq_globals)] = -1
return sq_globals
else:
tree_nodes_pad = functools.partial(pad, n_pad=pad_n_node)
tree_edges_pad = functools.partial(pad, n_pad=pad_n_edge)
def tree_globs_pad(globals_):
return pad(globals_, n_pad=pad_n_empty_graph)
# Correct zero padding of senders and receivers
edge_pad_idx = np.tile(
np.arange(graph.senders.shape[1]),
(graph.senders.shape[0], 1)) >= graph.n_edge
senders = graph.senders
senders[edge_pad_idx] = -1
receivers = graph.receivers
receivers[edge_pad_idx] = -1
  # Only OGB has edge features, while Sorting Networks does not
edges = graph.edges
if isinstance(edges, dict) or (edges.shape[-1] > 1):
edges = jax.tree_map(tree_edges_pad, edges)
padded_graph = jraph.GraphsTuple(
n_node=np.concatenate([graph.n_node, n_node - graph.n_node], axis=1),
n_edge=np.concatenate([graph.n_edge, n_edge - graph.n_edge], axis=1),
nodes=jax.tree_map(tree_nodes_pad, graph.nodes),
edges=edges,
globals=jax.tree_map(tree_globs_pad, graph.globals),
senders=np.concatenate(
[senders, np.full([batch, pad_n_edge], -1, dtype=np.int32)], axis=1),
receivers=np.concatenate(
[receivers,
np.full([batch, pad_n_edge], -1, dtype=np.int32)], axis=1))
return padded_graph
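# Hedged aside (illustrative helper, not used by the pipeline): padding node
# and edge counts to the next power of two bounds the number of distinct
# shapes that reach jax.jit, so at most O(log(max_size)) recompilations can
# occur. The helper below isolates the size computation used above.
def _example_next_power_of_two(n: int) -> int:
  """Smallest power of two strictly greater than n, e.g. 6 -> 8 and 8 -> 16."""
  return int(np.power(2, np.ceil(np.log2(n + 1))))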
def build_dataset_iterator(dataset_config: config.Dataset,
ds: tf.data.Dataset,
batch_size: int,
debug: bool = False,
do_bucket_by_size: bool = False,
bucket_boundaries: Sequence[int] = (255, 511),
bucket_batch_size_factors: Sequence[int] = (
4, 2, 1),
is_training: bool = True,
max_token_length: int = 1023,
max_number_of_instances: int = -1,
exclude_control_flow_edges: bool = True,
exclude_next_syntax_edges: bool = False,
num_parallel_batchers: Optional[int] = None,
posenc_config: Optional[Dict[str, Any]] = None):
"""Creates a dataset generator and does the important preprocessing steps."""
num_local_devices = jax.local_device_count()
if debug:
max_items_to_read_from_dataset = int(num_local_devices * batch_size)
prefetch_buffer_size = 1
shuffle_buffer_size = 1
num_parallel_batchers = 1
drop_remainder = False
else:
max_items_to_read_from_dataset = -1 # < 0 means no limit.
prefetch_buffer_size = 64
# It can take a while to fill the shuffle buffer with k fold splits.
shuffle_buffer_size = int(1e6)
if is_training and num_parallel_batchers is None:
num_parallel_batchers = 4
drop_remainder = is_training
ds = ds.filter(
lambda graph, *args: tf.math.reduce_any(graph.n_node < max_token_length))
if is_training:
logging.info("Shard dataset %d / %d",
jax.process_index() + 1, jax.process_count())
ds = ds.shard(jax.process_count(), jax.process_index())
ds = ds.take(max_items_to_read_from_dataset)
ds = ds.cache()
if is_training:
ds = ds.shuffle(shuffle_buffer_size)
# Only take a random subset (must be after shuffle)
if max_number_of_instances > 0:
ds = ds.take(max_number_of_instances)
def map_fn(graph, target, target_raw=None, graph_index=None):
graph = _add_target_to_globals(dataset_config, graph, target, target_raw,
graph_index, is_training)
graph = _downcast_ints(graph)
if "-raw" in dataset_config.name:
graph.nodes["node_feat"] = graph.nodes["node_feat_orig"]
if "-df" in dataset_config.name:
mask = tf.ones_like(graph.edges["edge_type"], dtype=tf.bool)
if exclude_control_flow_edges:
# These edges are our data-flow centric control flow edges
mask = graph.edges["edge_type"] != 1
if exclude_next_syntax_edges:
# These edges are the original control flow edges of python_graphs
mask = mask & (graph.edges["edge_type"] != 9)
edges = jax.tree_util.tree_map(lambda x: tf.boolean_mask(x, mask, axis=0),
graph.edges)
senders = tf.boolean_mask(graph.senders, mask[:, 0], axis=0)
receivers = tf.boolean_mask(graph.receivers, mask[:, 0], axis=0)
n_dropped_edges = tf.reduce_sum(
tf.cast(tf.math.logical_not(mask), tf.int32))
n_edge = graph.n_edge - n_dropped_edges
graph = graph._replace(
edges=edges, senders=senders, receivers=receivers, n_edge=n_edge)
return graph
ds = ds.map(map_fn, num_parallel_calls=tf.data.AUTOTUNE)
### Explicit static batching due to self-attention over nodes. ###
if not do_bucket_by_size or not is_training:
ds = ds.padded_batch(
num_local_devices * batch_size, drop_remainder=drop_remainder)
else:
full_batch_size = num_local_devices * batch_size
ds = ds.bucket_by_sequence_length(
lambda graph: tf.reduce_max(graph.n_node),
bucket_boundaries=bucket_boundaries,
# This is a rather pessimistic linear scaling
bucket_batch_sizes=[
factor * full_batch_size for factor in bucket_batch_size_factors
],
drop_remainder=drop_remainder)
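    # Worked example (illustrative): with bucket_boundaries=(255, 511) and
    # bucket_batch_size_factors=(4, 2, 1), graphs with fewer than 255 nodes
    # are batched with 4x `full_batch_size`, graphs with up to 510 nodes with
    # 2x, and all larger graphs with 1x, keeping the number of node tokens
    # per batch roughly constant. This linear scaling is pessimistic given
    # the quadratic memory of self-attention.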
if is_training:
ds = ds.repeat()
ds = ds.prefetch(prefetch_buffer_size) # tf.data.experimental.AUTOTUNE)
def reshape(leaf):
return leaf.reshape((num_local_devices,
leaf.shape[0] // num_local_devices) + leaf.shape[1:])
calc_eigenvals_and_vecs = (
posenc_config is not None and
posenc_config.get("posenc_type", "") == "maglap")
if calc_eigenvals_and_vecs:
eigv_magnetic_config = dict(
k=posenc_config.get("top_k_eigenvectors", 5),
k_excl=posenc_config.get("excl_k_eigenvectors", 1),
q=posenc_config.get("maglap_q", 0.25),
q_absolute=posenc_config.get("maglap_q_absolute", True),
use_symmetric_norm=posenc_config.get("maglap_symmetric_norm", False),
norm_comps_sep=posenc_config.get("maglap_norm_comps_sep", False),
sign_rotate=posenc_config.get("maglap_sign_rotate", True))
eigv_magnetic_laplacian = functools.partial(
utils.eigv_magnetic_laplacian_numba_batch, **eigv_magnetic_config)
if "-df" in dataset_config.name:
eigv_magnetic_config['l2_norm'] = posenc_config.get("maglap_l2_norm",
True)
eigv_magnetic_config['exclude_cfg'] = exclude_control_flow_edges
eigv_magnetic_config['exclude_ns'] = exclude_next_syntax_edges
numba.set_num_threads(
min(NUM_THREADS_EIGENVECTORS, numba.get_num_threads()))
logging.info("Numba uses %d threads for eigenvector calculation",
numba.get_num_threads())
logging.info("Number of cores %d", multiprocessing.cpu_count())
for sample in ds.as_numpy_iterator():
# Precomputed eigenvectors need to be added prior to final padding
eigenvalues, eigenvectors = None, None
if 'eigendecomposition' in sample.globals:
eigendecomposition = sample.globals.pop('eigendecomposition')
if calc_eigenvals_and_vecs:
for entry, val, vec in eigendecomposition:
# Assuming that the elements are always in the same order
entry = jax.tree_map(lambda x: x[0], entry)
if all([k in entry and np.isclose(v, entry[k])
for k, v in eigv_magnetic_config.items()]):
eigenvalues, eigenvectors = val, vec
if eigenvectors is not None:
if not isinstance(sample.nodes, dict):
sample = sample._replace(nodes={"node_feat": sample.nodes})
sample.nodes["eigenvectors"] = eigenvectors
    # Doing this preprocessing here is not optimal; however, it is a simple
    # approach to maneuver around some limitations of TFDS etc.
sample = _pad_to_next_power_of_two(sample, dataset_config)
if calc_eigenvals_and_vecs and eigenvalues is None:
logging.debug("Start eigenvalues and vectors calculation")
try:
eigenvalues, eigenvectors = eigv_magnetic_laplacian(
sample.senders, sample.receivers, sample.n_node)
except:
logging.warning("MagLap calculation error in %s",
str(sample.globals["graph_index"]))
raise
eigenvalues = eigenvalues.astype(np.float32)
if np.iscomplexobj(eigenvectors):
eigenvectors = eigenvectors.astype(np.complex64)
else:
eigenvectors = eigenvectors.astype(np.float32)
if not isinstance(sample.nodes, dict):
sample = sample._replace(nodes={"node_feat": sample.nodes})
sample.nodes["eigenvectors"] = eigenvectors
# This is not accurate, but globals are treated differently
sample.nodes["eigenvalues"] = eigenvalues
logging.debug("Finished eigenvalues and vectors calculation")
if calc_eigenvals_and_vecs:
# This is not accurate, but globals are treated differently later on
sample.nodes["eigenvalues"] = eigenvalues
sample = jax.tree_map(
lambda x: jnp.array(x) if x.dtype != object else x, sample)
if jax.device_count() > 1 and is_training:
sample = jax.tree_map(reshape, sample)
yield sample
def dataset_generator(path: str, split: str):
"""Loads the data from a folder stored as `npz` files, assuming the data is located in `$path/$split`.
"""
base_path = os.path.join(path, split)
for file in os.listdir(base_path):
if not file.endswith('.npz') or 'meta' in file:
continue
for instance in np.load(
os.path.join(base_path, file), allow_pickle=True)["data"]:
yield tuple(instance)
| digraph_transformer-main | dataset_utils.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the sorting network dataset."""
import os
from typing import List, Sequence, Tuple, Union
from absl import app
from absl import flags
from absl import logging
import jraph
import numba
import numpy as np
import tqdm
from script_postcompute_eigedecomp import precalc_and_append
np.random.seed(42)
_OUT_PATH = flags.DEFINE_string('out_path', '~/data/sorting_network',
'The path to write datasets to.')
_SHARD_SIZE = flags.DEFINE_integer(
    'shard_size', 10_000, 'The number of instances to store in each file.')
_REVERSE = flags.DEFINE_boolean('reverse', True,
'If true also reverse test samples')
# Specifies which sequence lengths go into which split and how many sorting
# networks shall be generated (each network yields 2 samples for train and 3
# for valid/test, due to the additional reversed network)
SPLITS = [
# (split, seq_lens, n_generation_trials)
('train', [7, 8, 9, 10, 11], 400_000),
('valid', [12], 20_000),
('test', [13, 14, 15, 16], 20_000)
]
DATASET_NAME = '7to11_12_13to16'
# How many random sorting networks are sampled in parallel (multithreaded)
BATCH_SIZE = 500
# Upper limit on operators
MAX_OPERATORS = 512
maglap_configs = [
dict(k=25, k_excl=0, q=0.25,
q_absolute=False, norm_comps_sep=False,
sign_rotate=True, use_symmetric_norm=True),
dict(k=25, k_excl=0, q=0,
q_absolute=False, norm_comps_sep=False,
sign_rotate=True, use_symmetric_norm=True)
]
@numba.jit(nopython=False)
def get_test_cases(seq_len: int) -> np.ndarray:
"""Generates all possible 0-1 perturbations (aka truth table)."""
true_false = np.array((True, False))
return np.stack(np.meshgrid(*((true_false,) * seq_len))).reshape(seq_len, -1)
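# For example (illustrative), get_test_cases(2) returns the 2x4 truth table
#   [[ True, False,  True, False],
#    [ True,  True, False, False]]
# whose columns enumerate all 2**2 boolean inputs. By the zero-one principle,
# a comparator network sorts every input sequence iff it sorts all 0-1 test
# cases; `generate_sorting_network` below exploits this and stops once only
# the seq_len + 1 sorted "staircase" cases remain.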
@numba.jit(nopython=False)
def generate_sorting_network(seq_len: int, max_operators: int = 512):
"""Generates a valid sorting network."""
test_cases = get_test_cases(seq_len)
operators = []
last_operator = (-1, -1)
unsorted_locations = np.arange(seq_len)
while True:
i, j = sorted(np.random.choice(unsorted_locations, size=2, replace=False))
if (i, j) == last_operator:
continue
if i not in unsorted_locations and j not in unsorted_locations:
continue
last_operator = (i, j)
operators.append((i, j))
test_cases[(i, j), :] = np.sort(test_cases[(i, j), :], axis=0)
test_cases = np.unique(test_cases, axis=1)
unsorted_locations_ = np.arange(seq_len)[(np.sort(test_cases, axis=0) !=
test_cases).any(1)]
    # Sometimes numba has issues with tracking variables through loops
unsorted_locations = unsorted_locations_
if test_cases.shape[1] == seq_len + 1:
return True, operators, test_cases
if len(operators) == max_operators:
return False, operators, test_cases
@numba.jit(nopython=False)
def test_network(operators, seq_len):
test_cases = get_test_cases(seq_len)
for i, j in operators.astype(np.int64):
test_cases[(i, j), :] = np.sort(test_cases[(i, j), :], axis=0)
test_cases = np.unique(test_cases, axis=1)
if test_cases.shape[1] == seq_len + 1:
return True, test_cases
return False, test_cases
def operators_to_graphstuple(operators: Union[List[Tuple[int, int]],
np.ndarray],
seq_len: int) -> jraph.GraphsTuple:
"""Converts the list of "operators" to a jraph.graphstuple."""
num_nodes = len(operators)
senders = np.zeros(int(2 * len(operators)) - seq_len, dtype=np.int32)
receivers = np.zeros(int(2 * len(operators)) - seq_len, dtype=np.int32)
# Node features: (order_idx, location_i, location_j)
nodes = np.zeros((num_nodes, 3), dtype=np.float32)
loc = {i: -1 for i in range(seq_len)}
edge_idx = 0
for idx, (i, j) in enumerate(operators):
# Add edges
if loc[i] >= 0:
senders[edge_idx] = loc[i]
receivers[edge_idx] = idx
edge_idx += 1
if loc[j] >= 0:
senders[edge_idx] = loc[j]
receivers[edge_idx] = idx
edge_idx += 1
# Update node features
nodes[idx, 0] = idx
nodes[idx, 1] = i
nodes[idx, 2] = j
# Update mapping from location to node
loc[i] = idx
loc[j] = idx
return jraph.GraphsTuple(
n_node=np.array([num_nodes], dtype=np.int32),
n_edge=np.array(senders.shape, dtype=np.int32),
senders=senders,
receivers=receivers,
nodes=dict(node_feat=nodes),
edges=np.array([], dtype=np.float32),
globals=np.array([], dtype=np.float32))
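# Illustrative usage: the classic 3-wire sorting network
# [(0, 1), (1, 2), (0, 1)] becomes a graph with one node per comparator and
# 2 * 3 - 3 = 3 edges (0->1 and 1->2 via wire 1, 0->2 via wire 0):
#
#   graph = operators_to_graphstuple([(0, 1), (1, 2), (0, 1)], seq_len=3)
#   assert int(graph.n_node[0]) == 3 and graph.senders.shape == (3,)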
def generate_sorting_network_batch(
seq_len: int,
batch: int,
max_operators: int = 512
) -> List[jraph.GraphsTuple]:
"""Generates batch graphs in parallel."""
graph_tuples = []
for _ in numba.prange(batch):
success, operators, _ = generate_sorting_network(
seq_len, max_operators=max_operators)
if success:
graph_tuples.append(operators_to_graphstuple(operators, seq_len))
graph_tuples[-1] = precalc_and_append(graph_tuples[-1], maglap_configs)
return graph_tuples
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
base_path = os.path.join(_OUT_PATH.value, DATASET_NAME)
os.makedirs(base_path, exist_ok=True)
id_ = 0
for split, seq_lens, n_generation_trials in SPLITS:
file_path = os.path.join(base_path, split)
os.makedirs(file_path, exist_ok=True)
sample_count = 0
buffer = []
start_id = id_
n_batches = n_generation_trials // BATCH_SIZE
for batch_idx in tqdm.tqdm(range(n_batches), desc=split):
seq_len = np.random.choice(seq_lens, 1).item()
graphs = generate_sorting_network_batch(
seq_len,
BATCH_SIZE,
MAX_OPERATORS)
sample_count += (3 if _REVERSE.value and split != 'train' else
2) * len(graphs)
for graph in graphs:
buffer.append(
(graph, np.array([True]), np.array([True]), np.array(id_)))
id_ += 1
# Remove last operation to generate an incorrect sorting network
graph_ = jraph.GraphsTuple(
nodes=dict(node_feat=graph.nodes['node_feat'][:-1]),
edges=np.array([], dtype=np.float32),
          # It is very unlikely that the last operation still operates on inputs
senders=graph.senders[:-2],
receivers=graph.receivers[:-2],
n_node=graph.n_node - 1,
n_edge=graph.n_edge - 2,
globals=np.array([], dtype=np.float32))
buffer.append(
(graph_, np.array([False]), np.array([False]), np.array(id_)))
id_ += 1
if _REVERSE.value and split != 'train':
operators = graph.nodes['node_feat'][::-1, 1:]
is_correct, _ = test_network(operators, seq_len)
graph_ = operators_to_graphstuple(operators, seq_len)
buffer.append((graph_, np.array([is_correct]),
np.array([is_correct]), np.array(id_)))
id_ += 1
if len(buffer) >= _SHARD_SIZE.value or batch_idx == n_batches - 1:
file_name = os.path.join(file_path, f'{start_id}_{id_ - 1}.npz')
np.savez_compressed(file_name, data=np.array(buffer, dtype='object'))
logging.info('Wrote %d to %s', len(buffer), file_name)
buffer = []
start_id = id_
logging.info('Wrote %d instances in `%s`', sample_count, split)
if __name__ == '__main__':
app.run(main)
| digraph_transformer-main | script_generate_sorting_network_np.py |
# MIT License
# Copyright (c) 2019 OGB Team
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Adapted to jax, source: https://github.com/snap-stanford/ogb/blob/c8efe8ec99d11279c80f2bcdbe1567675c1c5666/examples/graphproppred/code2/utils.py and converted to jax.
"""
# pylint: skip-file
import os
import haiku as hk
import jax.numpy as jnp
import jraph
import numpy as np
import pandas as pd
from sklearn import metrics
roc_auc_score, average_precision_score = metrics.roc_auc_score, metrics.average_precision_score
class ASTNodeEncoder(hk.Module):
"""Adapted from OGB.
Input:
    x: default node feature. The first and second columns represent node
      type and node attributes.
depth: The depth of the node in the AST.
Output:
emb_dim-dimensional vector
"""
def __init__(self,
emb_dim: int,
num_nodetypes: int,
num_nodeattributes: int,
max_depth: int,
edge_dim: int,
dtype=jnp.float32,
name=None):
    super().__init__(name=name)
self.emb_dim = emb_dim
self.num_nodetypes = num_nodetypes
self.num_nodeattributes = num_nodeattributes
self.max_depth = max_depth
self.edge_dim = edge_dim
self.dtype = dtype
def __call__(self, graph: jraph.GraphsTuple, depth: int, *args, **kwargs):
x = graph.nodes
type_encoder = hk.get_parameter(
'type_encoder',
shape=[self.num_nodetypes, self.emb_dim],
dtype=self.dtype,
init=hk.initializers.RandomNormal())
attribute_encoder = hk.get_parameter(
'attribute_encoder',
shape=[self.num_nodeattributes, self.emb_dim],
dtype=self.dtype,
init=hk.initializers.RandomNormal())
nodes = type_encoder[x[..., 0]] + attribute_encoder[x[..., 1]]
if depth is not None:
depth_encoder = hk.get_parameter(
'depth_encoder',
shape=[self.max_depth + 1, self.emb_dim],
dtype=self.dtype,
init=hk.initializers.RandomNormal())
depth = jnp.where(depth > self.max_depth, self.max_depth, depth)
nodes = nodes + depth_encoder[depth[..., 0]]
edges = hk.Linear(self.edge_dim, with_bias=False)(graph.edges)
graph = graph._replace(nodes=nodes, edges=edges)
return graph
def get_vocab_mapping(seq_list, num_vocab):
"""Adapted from OGB.
Input:
seq_list: a list of sequences
num_vocab: vocabulary size
Output:
vocab2idx:
A dictionary that maps vocabulary into integer index.
      Additionally, we also index '__UNK__' and '__EOS__'
'__UNK__' : out-of-vocabulary term
'__EOS__' : end-of-sentence
idx2vocab:
A list that maps idx to actual vocabulary.
"""
vocab_cnt = {}
vocab_list = []
for seq in seq_list:
for w in seq:
if w in vocab_cnt:
vocab_cnt[w] += 1
else:
vocab_cnt[w] = 1
vocab_list.append(w)
cnt_list = np.array([vocab_cnt[w] for w in vocab_list])
topvocab = np.argsort(-cnt_list, kind='stable')[:num_vocab]
print('Coverage of top {} vocabulary:'.format(num_vocab))
print(float(np.sum(cnt_list[topvocab])) / np.sum(cnt_list))
vocab2idx = {
vocab_list[vocab_idx]: idx for idx, vocab_idx in enumerate(topvocab)
}
idx2vocab = [vocab_list[vocab_idx] for vocab_idx in topvocab]
# print(topvocab)
# print([vocab_list[v] for v in topvocab[:10]])
# print([vocab_list[v] for v in topvocab[-10:]])
vocab2idx['__UNK__'] = num_vocab
idx2vocab.append('__UNK__')
vocab2idx['__EOS__'] = num_vocab + 1
idx2vocab.append('__EOS__')
# test the correspondence between vocab2idx and idx2vocab
for idx, vocab in enumerate(idx2vocab):
assert (idx == vocab2idx[vocab])
# test that the idx of '__EOS__' is len(idx2vocab) - 1.
# This fact will be used in decode_arr_to_seq, when finding __EOS__
assert (vocab2idx['__EOS__'] == len(idx2vocab) - 1)
return vocab2idx, idx2vocab
def augment_edge(edge_index, node_is_attributed, reverse: bool = True):
"""Adapted from OGB.
Input:
edge_index: senders and receivers stacked
node_is_attributed: node attributes
reverse: if true also reverse edges are added
Output:
data (edges are augmented in the following ways):
data.edge_index: Added next-token edge. The inverse edges were
also added.
data.edge_attr (torch.Long):
            data.edge_attr[:,0]: whether it is an AST edge (0) or a
                next-token edge (1)
data.edge_attr[:,1]: whether it is original direction (0) or
inverse direction (1)
"""
##### AST edge
edge_index_ast = edge_index
edge_attr_ast = np.zeros((edge_index_ast.shape[1], 2))
##### Inverse AST edge
if reverse:
edge_index_ast_inverse = np.stack([edge_index_ast[1], edge_index_ast[0]],
axis=0)
edge_attr_ast_inverse = [
np.zeros((edge_index_ast_inverse.shape[1], 1)),
np.ones((edge_index_ast_inverse.shape[1], 1))
]
edge_attr_ast_inverse = np.concatenate(edge_attr_ast_inverse, axis=1)
##### Next-token edge
## Obtain attributed nodes and get their indices in dfs order
# attributed_node_idx = torch.where(data.node_is_attributed.view(-1,) == 1)[0]
# attributed_node_idx_in_dfs_order = attributed_node_idx[torch.argsort(data.node_dfs_order[attributed_node_idx].view(-1,))]
## Since the nodes are already sorted in dfs ordering in our case, we can just do the following.
attributed_node_idx_in_dfs_order = np.where(node_is_attributed[:, 0] == 1)[0]
## build next token edge
# Given: attributed_node_idx_in_dfs_order
# [1, 3, 4, 5, 8, 9, 12]
# Output:
# [[1, 3, 4, 5, 8, 9]
# [3, 4, 5, 8, 9, 12]
edge_index_nextoken = [
attributed_node_idx_in_dfs_order[:-1],
attributed_node_idx_in_dfs_order[1:]
]
edge_index_nextoken = np.stack(edge_index_nextoken, axis=0)
edge_attr_nextoken = [
np.ones((edge_index_nextoken.shape[1], 1)),
np.zeros((edge_index_nextoken.shape[1], 1))
]
edge_attr_nextoken = np.concatenate(edge_attr_nextoken, axis=1)
##### Inverse next-token edge
if reverse:
edge_index_nextoken_inverse = np.stack(
[edge_index_nextoken[1], edge_index_nextoken[0]], axis=0)
edge_attr_nextoken_inverse = np.ones((edge_index_nextoken.shape[1], 2))
edge_index = [
edge_index_ast, edge_index_ast_inverse, edge_index_nextoken,
edge_index_nextoken_inverse
]
edge_index = np.concatenate(edge_index, axis=1)
edge_attr = [
edge_attr_ast, edge_attr_ast_inverse, edge_attr_nextoken,
edge_attr_nextoken_inverse
]
edge_attr = np.concatenate(edge_attr, axis=0)
else:
edge_index = np.concatenate([edge_index_ast, edge_index_nextoken], axis=1)
edge_attr = np.concatenate([edge_attr_ast, edge_attr_nextoken], axis=0)
return edge_index, edge_attr
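# Illustrative usage sketch (added for documentation): a toy AST with three
# nodes and two AST edges; shapes follow the conventions in this file
# (`edge_index` is [2, num_edges], `node_is_attributed` is [num_nodes, 1]).
def _example_augment_edge():
  """Two AST edges plus one next-token edge (1 -> 2); attrs of shape [3, 2]."""
  edge_index = np.array([[0, 0], [1, 2]])  # edges 0 -> 1 and 0 -> 2
  node_is_attributed = np.array([[0], [1], [1]])  # nodes 1 and 2 carry tokens
  edge_index_aug, edge_attr = augment_edge(
      edge_index, node_is_attributed, reverse=False)
  assert edge_index_aug.shape == (2, 3) and edge_attr.shape == (3, 2)
  return edge_index_aug, edge_attr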
def encode_y_to_arr(seq, vocab2idx, max_seq_len):
"""Adapted from OGB.
Input:
    seq: a list of words (the target sequence)
  Output: y_arr, the encoded target sequence (jnp.array)
"""
y_arr = encode_seq_to_arr(seq, vocab2idx, max_seq_len)
return y_arr
def encode_seq_to_arr(seq, vocab2idx, max_seq_len):
"""Adapted from OGB.
Input:
seq: A list of words
  Output: y_arr, the padded and encoded sequence (jnp.array)
"""
padded_seq = (
seq[:max_seq_len] + ['__EOS__'] * max(0, max_seq_len - len(seq)))
augmented_seq = [
vocab2idx[w] if w in vocab2idx else vocab2idx['__UNK__']
for w in padded_seq
]
return jnp.array(augmented_seq)
def decode_arr_to_seq(arr, idx2vocab):
"""Adapted from OGB.
  Input: a 1d jnp.array (y_arr). Output: a sequence of words.
IMPORTANT: we now filter for the unknown token to avoid inflating the FPs
"""
# find the position of __EOS__ (the last vocab in idx2vocab)
eos_idx_list = jnp.nonzero(arr == len(idx2vocab) - 1)[0]
if len(eos_idx_list) > 0:
    clipped_arr = arr[:jnp.min(eos_idx_list)]  # clip at the smallest __EOS__
  else:
    clipped_arr = arr
  # Otherwise the UNK tokens are counted as a False Positive!
  clipped_arr = clipped_arr[clipped_arr != len(idx2vocab) - 2]
  return list(map(lambda x: idx2vocab[x], clipped_arr))
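# Illustrative roundtrip sketch (added for documentation; the toy vocabulary
# below is a made-up example, following `get_vocab_mapping` above).
def _example_encode_decode_roundtrip():
  """Encodes a sequence and decodes it back, dropping __UNK__ and padding."""
  vocab2idx = {'add': 0, 'two': 1, '__UNK__': 2, '__EOS__': 3}
  idx2vocab = ['add', 'two', '__UNK__', '__EOS__']
  arr = encode_seq_to_arr(['add', 'oov'], vocab2idx, max_seq_len=4)
  # 'oov' maps to __UNK__ and the sequence is padded: arr == [0, 2, 3, 3].
  decoded = decode_arr_to_seq(arr, idx2vocab)
  assert decoded == ['add']  # decoding stops at __EOS__ and filters __UNK__
  return decoded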
| digraph_transformer-main | ogb_utils.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A jaxline experiment for predicting the correctness of sorting networks or for graph property prediction on OGB Code2 dataset.
https://ogb.stanford.edu/docs/graphprop/
"""
import datetime
import functools
import os
import threading
import traceback
from typing import Dict, NamedTuple, Optional, Tuple
from absl import app
from absl import flags
from absl import logging
import chex
import dill
import haiku as hk
import jax
import jax.numpy as jnp
from jaxline import experiment
from jaxline import platform
from jaxline import utils as jutils
import jraph
import numpy as np
from ogb.graphproppred import GraphPropPredDataset, Evaluator
import optax
import tensorflow as tf
import tree
# pylint: disable=g-bad-import-order
import dataset_utils
import models
import ogb_utils
from utils import tp_fn_fp, prec_rec_f1
try:
import wandb
except ImportError:
wandb = None
hk.experimental.profiler_name_scopes(enabled=True)
jax.config.parse_flags_with_absl()
FLAGS = flags.FLAGS
class _Predictions(NamedTuple):
predictions: np.ndarray
indices: np.ndarray
target: Optional[np.ndarray]
def _sort_predictions_by_indices(predictions: _Predictions):
sorted_order = np.argsort(predictions.indices)
return _Predictions(
predictions=predictions.predictions[sorted_order],
indices=predictions.indices[sorted_order],
target=(None if predictions.target is None
else predictions.target[sorted_order]))
def _disable_gpu_for_tf():
tf.config.set_visible_devices([], "GPU") # Hide local GPUs.
os.environ["CUDA_VISIBLE_DEVICES"] = ""
def maybe_get_first(xs):
if jax.device_count() == 1:
return xs
return jutils.get_first(xs)
def maybe_bcast_local_devices(xs):
if jax.device_count() == 1:
return xs
return jutils.bcast_local_devices(xs)
class Experiment(experiment.AbstractExperiment):
"""OGB Graph Property Prediction GraphNet experiment."""
# Holds a map from object properties that will be checkpointed to their name
# within a checkpoint. Currently it is assumed that these are all sharded
# device arrays.
CHECKPOINT_ATTRS = {
"_params": "params",
"_opt_state": "opt_state",
"_network_state": "network_state"
}
NON_BROADCAST_CHECKPOINT_ATTRS = {
# Predictions are written by the evaluator and hence are
# present only in 'best' ckpt.
"_test_predictions": "test_predictions",
"_valid_predictions": "valid_predictions",
}
def __init__(self, mode, init_rng, config):
"""Initializes experiment."""
super(Experiment, self).__init__(mode=mode, init_rng=init_rng)
self.mode = mode
self.init_rng = init_rng
self.config = config
self.dataset_config = config.dataset
self._test_predictions: _Predictions = None
self._valid_predictions: _Predictions = None
if mode not in ("train", "eval", "train_eval_multithreaded"):
raise ValueError(f"Invalid mode {mode}.")
self.loss = None
self.forward = None
# Needed for checkpoint restore.
self._params = None
self._network_state = None
self._opt_state = None
# For eval on ogb
self.ogb_targets = None
self.ogb_evaluator = None
# _ _
# | |_ _ __ __ _(_)_ __
# | __| "__/ _` | | "_ \
# | |_| | | (_| | | | | |
# \__|_| \__,_|_|_| |_|
#
def _loss(self, *graph) -> chex.ArrayTree:
assert self.mode == "train"
graph = jraph.GraphsTuple(*graph)
model_instance = models.get_model(self.config.dataset,
self.config.pmap_axis,
**self.config.model)
loss, prediction = model_instance.loss(graph)
target = graph.globals["target"]
scalars = {"loss": loss.mean()}
if self.dataset_config.num_classes > 0:
prediction = jnp.argmax(prediction, axis=-1)
if self.dataset_config.name.startswith("dist"):
mask = target >= 0
else:
target = target[..., 0, :]
mask = jnp.ones_like(target, dtype=jnp.bool_)
accuracy = ((prediction == target) & mask).sum() / mask.sum()
scalars["accuracy"] = accuracy
if self.dataset_config.num_classes == 2:
scalars["tp"], scalars["fn"], scalars["fp"] = tp_fn_fp(
prediction, target, mask)
scalars["precision"], scalars["recall"], scalars["f1"] = prec_rec_f1(
scalars["tp"], scalars["fn"], scalars["fp"]
)
return loss.sum(), scalars
def _grads_stats(self,
grads: chex.ArrayTree,
divisor=1) -> Dict[str, jnp.DeviceArray]:
def stack(x: chex.ArrayTree) -> jnp.DeviceArray:
return jnp.array(jax.tree_util.tree_leaves(x))
return {
"gradient_mean":
jnp.mean(stack(jax.tree_map(jnp.mean, grads))) / divisor,
"gradient_absmean":
jnp.mean(stack(jax.tree_map(lambda x: jnp.abs(x).mean(), grads))) /
divisor,
"gradient_min":
jnp.min(stack(jax.tree_map(jnp.min, grads))) / divisor,
"gradient_max":
jnp.max(stack(jax.tree_map(jnp.max, grads))) / divisor,
}
def _update_parameters(self, params, network_state, opt_state, global_step,
rng, graph):
"""Updates parameters."""
def get_loss(params, network_state, rng, *graph):
(loss, scalars), network_state = self.loss.apply(params, network_state,
rng, *graph)
loss = loss.mean()
return loss, (scalars, network_state)
grad_loss_fn = jax.grad(get_loss, has_aux=True)
out = grad_loss_fn(params, network_state, rng, *graph)
grads, (scalars, network_state) = out
if jax.device_count() > 1:
grads = jax.lax.psum(grads, axis_name=self.config.pmap_axis)
updates, opt_state = self.optimizer.update(grads, opt_state, params)
params = optax.apply_updates(params, updates)
scalars.update(self._grads_stats(grads, 1))
return params, network_state, opt_state, scalars
def _train_init(self):
self.loss = hk.transform_with_state(self._loss)
self._train_input = jutils.py_prefetch(
lambda: self._build_numpy_dataset_iterator("train"), buffer_size=5)
init_stacked_graphs = next(self._train_input)
if jax.device_count() > 1:
init_key = jutils.bcast_local_devices(self.init_rng)
p_init = jax.pmap(self.loss.init, axis_name=self.config.pmap_axis)
else:
init_key = self.init_rng
p_init = self.loss.init
self._params, self._network_state = p_init(init_key, *init_stacked_graphs)
# Learning rate scheduling.
lr_schedule = optax.warmup_cosine_decay_schedule(
**self.config.optimizer.lr_schedule)
# @optax.inject_hyperparams
def build_optimizer(learning_rate, kwargs):
optimizer = getattr(optax, self.config.optimizer.name)(
learning_rate=learning_rate, **kwargs)
if not self.config.optimizer.use_agc:
return optimizer
else:
return optax.chain(
optimizer,
optax.adaptive_grad_clip(**self.config.optimizer.agc_kwargs),
)
self.optimizer = build_optimizer(lr_schedule,
self.config.optimizer.optimizer_kwargs)
if self.config.optimizer.accumulate_gradient_k > 1:
g_k = self.config.optimizer.accumulate_gradient_k
self.optimizer = optax.MultiSteps(
self.optimizer, use_grad_mean=False, every_k_schedule=g_k)
if jax.device_count() > 1:
self._opt_state = jax.pmap(self.optimizer.init)(self._params)
self.update_parameters = jax.pmap(
self._update_parameters,
axis_name=self.config.pmap_axis,
donate_argnums=(0, 1, 2))
else:
self._opt_state = jax.jit(self.optimizer.init)(self._params)
self.update_parameters = jax.jit(
self._update_parameters,
donate_argnums=(0, 1, 2))
def step(self, global_step, rng, **unused_args):
"""See base class."""
if self.loss is None:
self._train_init()
graph = next(self._train_input)
if jax.device_count() == 1:
rng = rng[0]
out = self.update_parameters(self._params, self._network_state,
self._opt_state, global_step, rng, graph)
(self._params, self._network_state, self._opt_state, scalars) = out
scalars = maybe_get_first(scalars)
scalars = jax.tree_map(lambda x: x.item(), scalars)
scalars["local_device_count"] = jax.local_device_count()
scalars["device_count"] = jax.device_count()
scalars["process_count"] = jax.process_count()
scalars["process_index"] = jax.process_index()
return scalars
def _build_numpy_dataset_iterator(self, split: str):
"""See base class."""
batch_size = (
self.config.training.batch_size
if self.mode == "train" else self.config.evaluation.batch_size)
max_number_of_instances = -1
if split != "train":
max_number_of_instances = self.config.evaluation.max_number_of_instances
path = os.path.join(self.config.data_root, self.dataset_config.path)
data_generator = functools.partial(dataset_utils.dataset_generator, path,
split)
example = next(data_generator())
signature_from_example = tree.map_structure(_numpy_to_tensor_spec, example)
dataset = tf.data.Dataset.from_generator(
data_generator, output_signature=signature_from_example)
return dataset_utils.build_dataset_iterator(
self.dataset_config,
dataset,
batch_size=batch_size,
debug=self.config.debug,
is_training=self.mode == "train",
posenc_config=self.config.model.posenc_config,
max_number_of_instances=max_number_of_instances,
**self.config.dataset_config)
# _
# _____ ____ _| |
# / _ \ \ / / _` | |
# | __/\ V / (_| | |
# \___| \_/ \__,_|_|
#
def _forward(self, *graph) -> Tuple[np.ndarray, np.ndarray]:
assert "eval" in self.mode
graph = jraph.GraphsTuple(*graph)
model_instance = models.get_model(self.dataset_config,
self.config.pmap_axis,
**self.config.model)
loss, prediction = model_instance.loss(graph, is_training=False)
return loss, prediction
def _eval_init(self):
self.forward = hk.transform_with_state(self._forward)
self.eval_apply = jax.jit(self.forward.apply)
def _ogb_performance_metrics(self, loss: np.ndarray, prediction: np.ndarray,
target: np.ndarray, target_raw: np.ndarray):
"""Creates unnormalised values for accumulation."""
prediction_ = np.argmax(prediction, axis=-1)
accuracy = prediction_ == target
values = {"accuracy": accuracy.sum(), "loss": loss.sum()}
counts = {"accuracy": accuracy.size, "loss": loss.size}
scalars = {"values": values, "counts": counts}
if self.dataset_config.idx2vocab is None:
meta = np.load(os.path.join(self.config.data_root,
self.config.dataset.path, 'meta.npz'),
allow_pickle=True)['data'].item()
self.dataset_config.idx2vocab = meta['idx2vocab']
self.dataset_config.vocab2idx = meta['vocab2idx']
arr_to_seq = functools.partial(
ogb_utils.decode_arr_to_seq, idx2vocab=self.dataset_config.idx2vocab)
seq_pred = [arr_to_seq(seq) for seq in prediction_]
seq_ref = [[el for el in seq if el]
for seq in target_raw[:, 0].astype("U")]
return scalars, seq_ref, seq_pred
def _sn_performance_metrics(self, loss: np.ndarray, prediction: np.ndarray,
target: np.ndarray, seq_len: int):
"""Creates unnormalised values for accumulation."""
prediction_ = np.argmax(prediction, axis=-1)
target = target[..., 0, :]
accuracy = prediction_ == target
accuracy_sum = accuracy.sum()
values = {
"accuracy": accuracy_sum,
f"accuracy_{seq_len}": accuracy_sum,
"loss": loss.sum()
}
tp, fn, fp = tp_fn_fp(prediction_, target)
values["tp"] = values[f"tp_{seq_len}"] = tp
values["fn"] = values[f"fn_{seq_len}"] = fn
values["fp"] = values[f"fp_{seq_len}"] = fp
counts = {
"tp": 0,
"fn": 0,
"fp": 0,
f"tp_{seq_len}": 0,
f"fn_{seq_len}": 0,
f"fp_{seq_len}": 0,
"accuracy": accuracy.size,
f"accuracy_{seq_len}": accuracy.size,
"loss": loss.size
}
scalars = {"values": values, "counts": counts}
return scalars, target, []
def _dist_performance_metrics(self, loss: np.ndarray, prediction: np.ndarray,
target: np.ndarray, skip=True):
"""Creates unnormalised values for accumulation."""
if skip:
return {"values": {}, "counts": {}}, target, loss
mask = target >= 0
if self.dataset_config.num_classes > 0:
prediction_ = np.argmax(prediction, axis=-1)
accuracy = (prediction_ == target) & mask
accuracy_sum = accuracy.sum()
values = {"loss": loss.sum(), "accuracy": accuracy_sum}
values["tp"], values["fn"], values["fp"] = tp_fn_fp(
prediction_, target, mask)
counts = {
"tp": 0,
"fn": 0,
"fp": 0,
"loss": loss.size,
"accuracy": mask.sum(),
}
nb_nodes = mask[..., 0].sum(-1) + 1
for nb in np.unique(nb_nodes):
inst_mask = nb_nodes == nb
values[f"loss_{nb}"] = loss[inst_mask].sum()
values[f"accuracy_{nb}"] = accuracy[inst_mask].sum()
values[f"tp_{nb}"], values[f"fn_{nb}"], values[f"fp_{nb}"] = tp_fn_fp(
prediction_[inst_mask], target[inst_mask], mask[inst_mask])
counts.update({
f"tp_{nb}": 0,
f"fn_{nb}": 0,
f"fp_{nb}": 0,
f"loss_{nb}": inst_mask.sum(),
f"accuracy_{nb}": mask[inst_mask].sum(),
})
else:
values = {
"loss": loss.sum(),
}
counts = {
"loss": loss.size,
}
nb_nodes = mask[..., 0].sum(-1) + 1
for nb in np.unique(nb_nodes):
inst_mask = nb_nodes == nb
values.update({
f"loss_{nb}": loss[inst_mask].sum(),
})
counts.update({
f"loss_{nb}": inst_mask.sum(),
})
scalars = {"values": values, "counts": counts}
return scalars, target, []
# return scalars, target.flatten(), prediction[..., 0].flatten()
def _get_prediction(self, params, state, rng,
graph) -> Tuple[np.ndarray, np.ndarray]:
"""Returns predictions for all the graphs in the dataset split."""
model_output, _ = self.eval_apply(params, state, rng, *graph)
return model_output
def _sum_agg_two_level_struct_with_default(self, structure, default=0):
"""Two level version of `tree.map_structure(lambda *l: sum(l), *all_scalars)` that handles missing keys.
"""
accum = {}
for element in structure:
for ckey, container in element.items():
if ckey not in accum:
accum[ckey] = {}
for vkey, values in container.items():
if vkey not in accum[ckey]:
accum[ckey][vkey] = default
accum[ckey][vkey] += values
return accum
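  # Illustrative sketch (added for documentation): unlike
  # `tree.map_structure(lambda *l: sum(l), *all_scalars)`, missing keys
  # default to 0 instead of raising, e.g.
  #   [{"values": {"loss": 1.0}},
  #    {"values": {"loss": 2.0}, "counts": {"loss": 4}}]
  # accumulates to {"values": {"loss": 3.0}, "counts": {"loss": 4}}.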
  def _get_predictions(self, params, state, rng, m2_offset, m1_offset, split):
graph_iterator = jutils.py_prefetch(
lambda: self._build_numpy_dataset_iterator(split), buffer_size=5)
all_scalars = []
all_seq_refs = []
all_seq_preds = []
predictions = []
graph_indices = []
for i, graph in enumerate(graph_iterator):
# Since jax does not support strings we cannot pass it to the model
target_raw = None
if "target_raw" in graph.globals:
target_raw = graph.globals["target_raw"]
del graph.globals["target_raw"]
loss, prediction = self._get_prediction(params, state, rng, graph)
if self.dataset_config.num_classes > 0:
prediction = jax.nn.softmax(prediction, axis=-1)
        prediction = prediction.at[..., -2].add(-m2_offset)
prediction = prediction.at[..., -1].add(-m1_offset)
if "target" in graph.globals and not jnp.isnan(
graph.globals["target"]).any():
if self.dataset_config.name.startswith("dist"):
scalars, seq_ref, seq_pred = self._dist_performance_metrics(
loss, prediction, graph.globals["target"])
elif self.dataset_config.name.startswith("sn"):
scalars, seq_ref, seq_pred = self._sn_performance_metrics(
loss, prediction, graph.globals["target"],
int(graph.nodes["node_feat"][..., 1:3].max().item()) + 1)
else:
scalars, seq_ref, seq_pred = self._ogb_performance_metrics(
loss, prediction, graph.globals["target"][..., 0, :], target_raw)
all_scalars.append(scalars)
all_seq_refs.extend(seq_ref)
all_seq_preds.extend(seq_pred)
predictions.append(prediction)
graph_indices.append(graph.globals["graph_index"][:, 0])
if i % 50 == 0:
logging.info("Generated predictions for %d batches so far", i + 1)
if "dist" in self.dataset_config.name:
with jax.default_device(jax.devices("cpu")[0]):
all_scalars = [
self._dist_performance_metrics(
np.stack(all_seq_preds), np.concatenate(predictions),
np.stack(all_seq_refs), skip=False)[0]
]
target = None
if isinstance(all_seq_refs[0], jnp.ndarray):
target = np.stack(all_seq_refs)
predictions = _sort_predictions_by_indices(
_Predictions(predictions=np.concatenate(predictions),
indices=np.concatenate(graph_indices),
target=target))
if all_scalars:
# Sum over graphs in the dataset.
accum_scalars = self._sum_agg_two_level_struct_with_default(all_scalars)
scalars = tree.map_structure(lambda x, y: x / max(y, 1),
accum_scalars["values"],
accum_scalars["counts"])
if "ogbg-code2" in self.dataset_config.name:
if self.ogb_targets is None:
ds = GraphPropPredDataset("ogbg-code2", root=self.config.data_root)
self.ogb_targets = {}
for split_ in ["valid", "test"]:
self.ogb_targets[split_] = {
id_: ds[id_][1] for id_ in ds.get_idx_split()[split_]}
if self.ogb_evaluator is None:
self.ogb_evaluator = Evaluator("ogbg-code2")
# De-bias filtering out large graphs (> 1023 nodes)
indices = np.concatenate(graph_indices)
seq_pred = {
id_: prediction for id_, prediction in zip(indices, all_seq_preds)
}
seq_pred = [seq_pred[id_] if id_ in seq_pred else []
for id_ in self.ogb_targets[split].keys()]
scalars.update(
self.ogb_evaluator.eval({
"seq_ref": list(self.ogb_targets[split].values()),
"seq_pred": seq_pred
}))
elif (("dist" in self.dataset_config.name
and self.dataset_config.num_classes == 2)
or self.dataset_config.name.startswith("sn")):
scalars["precision"], scalars["recall"], scalars["f1"] = prec_rec_f1(
scalars["tp"], scalars["fn"], scalars["fp"]
)
for nb in np.unique([
int(k.split('_')[-1]) for k in scalars.keys() if '_' in k]):
prec_rec_f1_ = prec_rec_f1(
scalars[f"tp_{nb}"], scalars[f"fn_{nb}"], scalars[f"fp_{nb}"]
)
scalars.update({
f"precision_{nb}": prec_rec_f1_[0],
f"recall_{nb}": prec_rec_f1_[1],
f"f1_{nb}": prec_rec_f1_[2]
})
elif ("dist" in self.dataset_config.name
and self.dataset_config.num_classes < 0):
with jax.default_device(jax.devices("cpu")[0]):
scalars["rmse"] = jnp.sqrt(scalars["loss"])
for nb in np.unique([
int(k.split('_')[-1]) for k in scalars.keys() if '_' in k]):
scalars[f"rmse_{nb}"] = jnp.sqrt(scalars[f"loss_{nb}"])
scalars["local_device_count"] = jax.local_device_count()
scalars["device_count"] = jax.device_count()
scalars["process_count"] = jax.process_count()
scalars["process_index"] = jax.process_index()
else:
scalars = {}
return predictions, scalars
def evaluate(self, global_step, rng, **unused_kwargs):
"""See base class."""
if self.forward is None:
self._eval_init()
global_step = maybe_get_first(global_step).item()
params = maybe_get_first(self._params)
state = maybe_get_first(self._network_state)
if rng.ndim == 2:
rng = rng[0]
params = jax.tree_map(lambda x: x.copy(), params)
m2_offset = self.config.evaluation.unk_offset
m1_offset = self.config.evaluation.eos_offset
self._valid_predictions, scalars = self._get_predictions(
params, state, rng, m2_offset, m1_offset, "valid")
scalars["num_valid_predictions"] = len(self._valid_predictions.predictions)
if self.config.evaluation.eval_also_on_test:
self._test_predictions, test_scalars = self._get_predictions(
params, state, rng, m2_offset, m1_offset, "test")
scalars["num_test_predictions"] = len(self._test_predictions.predictions)
scalars.update({f"test_{k}": v for k, v in test_scalars.items()})
scalars = jax.tree_map(
lambda x: x.item() if isinstance(x, jnp.ndarray) else x, scalars)
logging.info("evaluate() - global_step: %d, %s", global_step, scalars)
return scalars
def _numpy_to_tensor_spec(arr: np.ndarray) -> tf.TensorSpec:
if not isinstance(arr, np.ndarray) and not isinstance(arr, jnp.ndarray):
return tf.TensorSpec([],
dtype=tf.int32 if isinstance(arr, int) else tf.float32)
elif arr.shape:
return tf.TensorSpec((None,) + arr.shape[1:], arr.dtype)
else:
return tf.TensorSpec([], arr.dtype)
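# Illustrative sketch (added for documentation) of `_numpy_to_tensor_spec`:
# Python scalars become rank-0 specs (int -> tf.int32, everything else
# tf.float32), while the leading (variable-size) dimension of arrays is
# replaced by `None`, e.g.
#   _numpy_to_tensor_spec(np.zeros((7, 3), np.float32))
#     -> tf.TensorSpec(shape=(None, 3), dtype=tf.float32)
#   _numpy_to_tensor_spec(5) -> tf.TensorSpec(shape=[], dtype=tf.int32)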
def _get_step_date_label(global_step: int):
# Date removing microseconds.
date_str = datetime.datetime.now().isoformat().split(".")[0]
return f"step_{global_step}_{date_str}"
def _restore_state_to_in_memory_checkpointer(restore_path):
"""Initializes experiment state from a checkpoint."""
# Load pretrained experiment state.
python_state_path = os.path.join(restore_path, "checkpoint.dill")
with open(python_state_path, "rb") as f:
pretrained_state = dill.load(f)
logging.info("Restored checkpoint from %s", python_state_path)
# Assign state to a dummy experiment instance for the in-memory checkpointer,
# broadcasting to devices.
dummy_experiment = Experiment(
mode="train", init_rng=0, config=FLAGS.config.experiment_kwargs.config)
for attribute, key in Experiment.CHECKPOINT_ATTRS.items():
setattr(dummy_experiment, attribute,
maybe_bcast_local_devices(pretrained_state[key]))
jaxline_state = dict(
global_step=pretrained_state["global_step"],
experiment_module=dummy_experiment)
snapshot = jutils.SnapshotNT(0, jaxline_state)
# Finally, seed the jaxline `jutils.InMemoryCheckpointer` global dict.
jutils.GLOBAL_CHECKPOINT_DICT["latest"] = jutils.CheckpointNT(
threading.local(), [snapshot])
def _save_state_from_in_memory_checkpointer(
save_path, experiment_class: experiment.AbstractExperiment):
"""Saves experiment state to a checkpoint."""
logging.info("Saving model.")
for checkpoint_name, checkpoint in jutils.GLOBAL_CHECKPOINT_DICT.items():
if not checkpoint.history:
logging.info('Nothing to save in "%s"', checkpoint_name)
continue
for entry in reversed(checkpoint.history):
try:
pickle_nest = entry.pickle_nest
global_step = pickle_nest["global_step"]
state_dict = {"global_step": global_step}
for attribute, key in experiment_class.CHECKPOINT_ATTRS.items():
state_dict[key] = maybe_get_first(
getattr(pickle_nest["experiment_module"], attribute))
save_dir = os.path.join(save_path, checkpoint_name,
_get_step_date_label(global_step))
python_state_path = os.path.join(save_dir, "checkpoint.dill")
os.makedirs(save_dir, exist_ok=True)
with open(python_state_path, "wb") as f:
dill.dump(state_dict, f)
logging.info('Saved %s checkpoint to %s',
checkpoint_name, python_state_path)
break
except Exception as e:
        logging.exception(e)
def main(argv, experiment_class: experiment.AbstractExperiment):
# Maybe restore a model.
restore_path = FLAGS.config.restore_path
if restore_path:
_restore_state_to_in_memory_checkpointer(restore_path)
# Maybe save a model.
save_dir = os.path.join(FLAGS.config.checkpoint_dir, "models")
os.makedirs(save_dir, exist_ok=True)
if FLAGS.config.one_off_evaluate:
# No need to save checkpoint in this case.
def save_model_fn(): return None
else:
save_model_fn = functools.partial(_save_state_from_in_memory_checkpointer,
save_dir, experiment_class)
try:
platform.main(experiment_class, argv)
  except Exception:  # pylint: disable=broad-except
logging.error(traceback.format_exc())
finally:
save_model_fn() # Save at the end of training or in case of exception.
if __name__ == "__main__":
_disable_gpu_for_tf()
flags.mark_flag_as_required("config")
# pytype: disable=wrong-arg-types
app.run(lambda argv: main(argv, Experiment))
| digraph_transformer-main | experiment.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Misc utils covering helper function and positional encodings."""
import functools
from typing import Optional, Tuple, Union
import jax
import jax.numpy as jnp
import jraph
import numba
import numpy as np
# set the threading layer before any parallel target compilation
numba.config.THREADING_LAYER = 'safe'
numba.set_num_threads(max(int(3 / 4 * numba.get_num_threads()), 1))
Tensor = Union[np.ndarray, jnp.DeviceArray]
# Constant required for numerical reasons
EPS = 1e-8
def tp_fn_fp(prediction, target, mask=None):
if mask is None:
mask = jnp.ones_like(target)
tp = ((prediction == target) & (prediction == 1) & mask).sum()
fn = ((prediction != target) & (prediction == 0) & mask).sum()
fp = ((prediction != target) & (prediction == 1) & mask).sum()
return tp, fn, fp
def prec_rec_f1(tp, fn, fp):
precision = tp / jnp.clip(tp + fp, a_min=1)
recall = tp / jnp.clip(tp + fn, a_min=1)
f1 = 2 * precision * recall / jnp.clip(precision + recall, a_min=1)
return precision, recall, f1
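# Illustrative sketch (added for documentation; the binary predictions and
# targets below are made-up example values).
def _example_prec_rec_f1():
  """Tiny sanity check for `tp_fn_fp` and `prec_rec_f1`."""
  prediction = jnp.array([1, 0, 1, 1])
  target = jnp.array([1, 0, 0, 1])
  tp, fn, fp = tp_fn_fp(prediction, target)  # tp=2, fn=0, fp=1
  precision, recall, f1 = prec_rec_f1(tp, fn, fp)
  # precision = 2/3, recall = 1.0, f1 = 2 * (2/3) * 1 / (5/3) = 0.8
  return precision, recall, f1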
def softmax_cross_entropy_loss(
logits: Tensor,
targets: Tensor,
n_classes: int,
only_punish_first_end_of_sequence_token: bool = False) -> jnp.DeviceArray:
"""Calculation of softmax loss for sequence of predictions/tokens."""
targets_one_hot = jax.nn.one_hot(targets, n_classes)
logits = jax.nn.log_softmax(logits)
elem_loss = -jnp.sum(targets_one_hot * logits, axis=-1)
if not only_punish_first_end_of_sequence_token:
return jnp.mean(elem_loss, axis=-1)
mask = jnp.cumsum(targets == n_classes - 1, axis=-1) < 2
elem_loss *= mask
return jnp.sum(elem_loss, axis=-1) / jnp.sum(mask, axis=-1)
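# Illustrative sketch (added for documentation; logits and targets below are
# made-up example values).
def _example_softmax_cross_entropy_loss():
  """Loss for one length-2 sequence with 3 classes and uniform logits."""
  logits = jnp.zeros((1, 2, 3))  # [batch, seq_len, n_classes]
  targets = jnp.array([[0, 2]])
  # With uniform logits every token contributes log(3), so the per-sequence
  # mean is log(3) ~= 1.0986.
  return softmax_cross_entropy_loss(logits, targets, n_classes=3)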
def count_edges(idx, n_nodes):
segment_sum = functools.partial(
jraph.segment_sum,
data=jnp.ones(1, dtype=jnp.int32),
num_segments=n_nodes)
return jax.vmap(segment_sum)(segment_ids=idx)
def dense_random_walk_matrix(graph: jraph.GraphsTuple,
reverse: bool = False) -> Tensor:
"""Returns the dense random walk matrix `A D^(-1)`.
Args:
graph: the explicitly batched graph (i.e. nodes are of shape [b, n, d]).
    reverse: If True the graph is reversed. Default False.
Returns:
tensor of shape [b, n, n] containing the random walk probabilities
"""
batch, n_nodes = graph.nodes.shape[:2]
if reverse:
senders = graph.receivers
receivers = graph.senders
else:
senders = graph.senders
receivers = graph.receivers
deg = count_edges(senders, n_nodes)
inv_deg = jnp.where(deg < 1, 0., 1. / deg)
adj = jnp.zeros((batch, n_nodes, n_nodes), dtype=jnp.float32)
assign = jax.vmap(lambda a, s, r, d: a.at[s, r].add(d[s]))
adj = assign(adj, senders, receivers, inv_deg)
# Once implemented swap next line with: adj = jnp.fill_diagonal(adj, deg < 1)
adj = adj.at[:, jnp.arange(n_nodes), jnp.arange(n_nodes)].add(deg < 1)
return adj
def k_step_random_walk(graph: jraph.GraphsTuple,
k: int = 3,
ppr_restart_p: Optional[float] = None,
reverse: bool = False) -> Tensor:
"""Returns the random walk matrices for k' in {1, ..., k} `I (A D^(-1))^k'`.
Args:
graph: the explicitly batched graph (i.e. nodes are of shape [b, n, d]).
k: number of random walk steps.
ppr_restart_p: if set, also the ppr is returned at `k + 1`-th dimension.
    reverse: If True the graph is reversed. Default False.
Returns:
tensor of shape [b, n, n, k {+1}] containing the random walk probabilities
"""
transition_probabilities = dense_random_walk_matrix(graph, reverse)
rw_probabilities = transition_probabilities
output = [rw_probabilities]
for _ in range(k - 1):
rw_probabilities = rw_probabilities @ transition_probabilities
output.append(rw_probabilities)
if ppr_restart_p:
output.append(exact_ppr_from_trans(transition_probabilities, ppr_restart_p))
output = jnp.stack(output, axis=-1)
return output
def exact_ppr(graph: jraph.GraphsTuple,
restart_p: float = 0.2,
reverse: bool = False) -> Tensor:
"""Calculates the personalized page rank via matrix inversion.
Args:
graph: the explicitly batched graph (i.e. nodes are of shape [b, n, d]).
restart_p: the personalized page rank restart probability. Default 0.2.
    reverse: If True the graph is reversed. Default False.
Returns:
tensor of shape [b, n, n] containing the random walk probabilities
"""
assert restart_p >= 0 and restart_p <= 1, 'Restart prob. must be in [0, 1]'
transition_probabilities = dense_random_walk_matrix(graph, reverse)
return exact_ppr_from_trans(transition_probabilities, restart_p)
def exact_ppr_from_trans(transition_prob: Tensor,
restart_p: float = 0.2) -> Tensor:
"""Calculates the personalized page rank via matrix inversion.
Args:
transition_prob: tensor of shape [b, n, n] containing transition
probabilities.
restart_p: the personalized page rank restart probability. Default 0.2.
Returns:
tensor of shape [b, n, n] containing the random walk probabilities
"""
n_nodes = transition_prob.shape[-1]
rw_matrix = jnp.eye(n_nodes) + (restart_p - 1) * transition_prob
return restart_p * jnp.linalg.inv(rw_matrix)
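# Illustrative sketch (added for documentation): the closed form above is
# `ppr = restart_p * (I - (1 - restart_p) * T)^{-1}`; the toy transition
# matrix below is a made-up example.
def _example_exact_ppr_from_trans():
  """PPR for a batched [1, 2, 2] two-node cycle; each row sums to 1."""
  transition = jnp.array([[[0., 1.], [1., 0.]]])  # row-stochastic, 0 <-> 1
  return exact_ppr_from_trans(transition, restart_p=0.2)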
def svd_encodings(graph: jraph.GraphsTuple, rank: int) -> Tensor:
"""SVD encodings following Hussain et al., Global Self-Attention as
a Replacement for Graph Convolution, KDD 2022.
Args:
graph (jraph.GraphsTuple): to obtain the adjacency matrix.
rank (int): for low rank approximation.
Returns:
Tensor: positional encodings.
"""
batch, n_nodes = graph.nodes.shape[:2]
senders = graph.senders
receivers = graph.receivers
adj = jnp.zeros((batch, n_nodes, n_nodes), dtype=jnp.float32)
assign = jax.vmap(lambda a, s, r, d: a.at[s, r].add(d[s]))
adj = assign(adj, senders, receivers, jnp.ones_like(senders))
U, S, Vh = jax.lax.linalg.svd(adj)
V = jnp.conjugate(jnp.transpose(Vh, axes=(0, 2, 1)))
UV = jnp.stack((U, V), axis=-2)
S = S[..., :rank]
UV = UV[..., :rank]
UV = UV * jnp.sqrt(S)[:, None, None, :]
return UV.reshape(adj.shape[:-1] + (-1,))
# Necessary to work around numba's limitations with specifying axis in norm
# and broadcasting in parallel loops.
@numba.njit('float64[:, :](float64[:, :])', parallel=False)
def _norm_2d_along_first_dim_and_broadcast(array):
"""Equivalent to `linalg.norm(array, axis=0)[None, :] * ones_like(array)`."""
output = np.zeros(array.shape, dtype=array.dtype)
for i in numba.prange(array.shape[-1]):
output[:, i] = np.linalg.norm(array[:, i])
return output
# Necessary to work around numba's limitations with specifying axis in norm
# and broadcasting in parallel loops.
@numba.njit('float64[:, :](float64[:, :])', parallel=False)
def _max_2d_along_first_dim_and_broadcast(array):
"""Equivalent to `array.max(0)[None, :] * ones_like(array)`."""
output = np.zeros(array.shape, dtype=array.dtype)
for i in numba.prange(array.shape[-1]):
output[:, i] = array[:, i].max()
return output
@numba.njit([
'Tuple((float64[::1], complex128[:, :], complex128[:, ::1]))(int64[:], ' +
'int64[:], int64[:], int64, int64, int64, float64, b1, b1, b1, b1, b1)'
])
def eigv_magnetic_laplacian_numba(
senders: np.ndarray, receivers: np.ndarray, n_node: np.ndarray,
padded_nodes_size: int, k: int, k_excl: int, q: float, q_absolute: bool,
norm_comps_sep: bool, l2_norm: bool, sign_rotate: bool,
use_symmetric_norm: bool) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""k *complex* eigenvectors of the smallest k eigenvectors of the magnetic laplacian.
Args:
senders: Origin of the edges of shape [m].
receivers: Target of the edges of shape [m].
n_node: array shape [2]
padded_nodes_size: int the number of nodes including padding.
k: Returns top k eigenvectors.
k_excl: The top (trivial) eigenvalues / -vectors to exclude.
q: Factor in magnetic laplacian. Default 0.25.
    q_absolute: If true `q` will be used as is, otherwise `q / m_imag`, where
      `m_imag` is the number of unidirectional edges (capped at the number of
      nodes).
norm_comps_sep: If true first imaginary part is separately normalized.
l2_norm: If true we use l2 normalization and otherwise the abs max value.
sign_rotate: If true we decide on the sign based on max real values and
rotate the imaginary part.
use_symmetric_norm: symmetric (True) or row normalization (False).
Returns:
array of shape [<= k] containing the k eigenvalues.
array of shape [n, <= k] containing the k eigenvectors.
array of shape [n, n] the laplacian.
"""
# Handle -1 padding
edges_padding_mask = senders >= 0
adj = np.zeros(int(padded_nodes_size * padded_nodes_size), dtype=np.float64)
linear_index = receivers + (senders * padded_nodes_size).astype(senders.dtype)
adj[linear_index] = edges_padding_mask.astype(adj.dtype)
adj = adj.reshape(padded_nodes_size, padded_nodes_size)
# TODO(simongeisler): maybe also allow weighted matrices etc.
adj = np.where(adj > 1, 1, adj)
symmetric_adj = adj + adj.T
symmetric_adj = np.where((adj != 0) & (adj.T != 0), symmetric_adj / 2,
symmetric_adj)
symmetric_deg = symmetric_adj.sum(-2)
if not q_absolute:
m_imag = (adj != adj.T).sum() / 2
m_imag = min(m_imag, n_node[0])
q = q / (m_imag if m_imag > 0 else 1)
theta = 1j * 2 * np.pi * q * (adj - adj.T)
if use_symmetric_norm:
inv_deg = np.zeros((padded_nodes_size, padded_nodes_size), dtype=np.float64)
np.fill_diagonal(
inv_deg, 1. / np.sqrt(np.where(symmetric_deg < 1, 1, symmetric_deg)))
eye = np.eye(padded_nodes_size)
inv_deg = inv_deg.astype(adj.dtype)
deg = inv_deg @ symmetric_adj.astype(adj.dtype) @ inv_deg
laplacian = eye - deg * np.exp(theta)
mask = np.arange(padded_nodes_size) < n_node[:1]
mask = np.expand_dims(mask, -1) & np.expand_dims(mask, 0)
laplacian = mask.astype(adj.dtype) * laplacian
else:
deg = np.zeros((padded_nodes_size, padded_nodes_size), dtype=np.float64)
np.fill_diagonal(deg, symmetric_deg)
laplacian = deg - symmetric_adj * np.exp(theta)
if q == 0:
laplacian_r = np.real(laplacian)
assert (laplacian_r == laplacian_r.T).all()
# Avoid rounding errors of any sort
eigenvalues, eigenvectors = np.linalg.eigh(laplacian_r)
eigenvalues = eigenvalues[..., k_excl:k_excl + k]
eigenvectors = eigenvectors[..., :, k_excl:k_excl + k]
return eigenvalues.real, eigenvectors.astype(np.complex128), laplacian
eigenvalues, eigenvectors = np.linalg.eigh(laplacian)
eigenvalues = eigenvalues[..., k_excl:k_excl + k]
eigenvectors = eigenvectors[..., k_excl:k_excl + k]
if sign_rotate:
sign = np.zeros((eigenvectors.shape[1],), dtype=eigenvectors.dtype)
for i in range(eigenvectors.shape[1]):
argmax_i = np.abs(eigenvectors[:, i].real).argmax()
sign[i] = np.sign(eigenvectors[argmax_i, i].real)
eigenvectors = np.expand_dims(sign, 0) * eigenvectors
argmax_imag_0 = eigenvectors[:, 0].imag.argmax()
rotation = np.angle(eigenvectors[argmax_imag_0:argmax_imag_0 + 1])
eigenvectors = eigenvectors * np.exp(-1j * rotation)
if norm_comps_sep:
# Only scale eigenvectors that seems to be more than numerical errors
eps = EPS / np.sqrt(eigenvectors.shape[0])
if l2_norm:
scale_real = _norm_2d_along_first_dim_and_broadcast(np.real(eigenvectors))
real = np.real(eigenvectors) / scale_real
else:
scale_real = _max_2d_along_first_dim_and_broadcast(
np.abs(np.real(eigenvectors)))
real = np.real(eigenvectors) / scale_real
scale_mask = np.abs(
np.real(eigenvectors)).sum(0) / eigenvectors.shape[0] > eps
eigenvectors[:, scale_mask] = (
real[:, scale_mask] + 1j * np.imag(eigenvectors)[:, scale_mask])
if l2_norm:
scale_imag = _norm_2d_along_first_dim_and_broadcast(np.imag(eigenvectors))
imag = np.imag(eigenvectors) / scale_imag
else:
scale_imag = _max_2d_along_first_dim_and_broadcast(
np.abs(np.imag(eigenvectors)))
imag = np.imag(eigenvectors) / scale_imag
scale_mask = np.abs(
np.imag(eigenvectors)).sum(0) / eigenvectors.shape[0] > eps
eigenvectors[:, scale_mask] = (
np.real(eigenvectors)[:, scale_mask] + 1j * imag[:, scale_mask])
elif not l2_norm:
scale = _max_2d_along_first_dim_and_broadcast(np.absolute(eigenvectors))
eigenvectors = eigenvectors / scale
return eigenvalues.real, eigenvectors, laplacian
_eigv_magnetic_laplacian_numba_parallel_signature = [
'Tuple((float64[:, :], complex128[:, :, :]))(int64[:, :], ' +
'int64[:, :], int64[:, :], int64, int64, int64, float64, b1, b1, b1, b1, b1)'
]
@numba.njit(_eigv_magnetic_laplacian_numba_parallel_signature, parallel=True)
def eigv_magnetic_laplacian_numba_parallel(
senders: np.ndarray,
receivers: np.ndarray,
n_node: np.ndarray,
batch_size: int,
k: int,
k_excl: int,
q: float,
q_absolute: bool,
norm_comps_sep: bool,
l2_norm: bool,
sign_rotate: bool,
use_symmetric_norm: bool,
# ) -> Tuple[List[np.ndarray], List[np.ndarray]]:
) -> Tuple[np.ndarray, np.ndarray]:
"""k *complex* eigenvectors of the smallest k eigenvectors of the magnetic laplacian.
Args:
senders: Origin of the edges of shape [b, m].
receivers: Target of the edges of shape [b, m].
n_node: array shape [b, 2]
batch_size: batch size b.
k: Returns top k eigenvectors.
k_excl: The top (trivial) eigenvalues / -vectors to exclude.
q: Factor in magnetic laplacian. Default 0.25.
q_absolute: If true `q` will be used, otherwise `q / m_imag / 2`.
norm_comps_sep: If true first imaginary part is separately normalized.
l2_norm: If true we use l2 normalization and otherwise the abs max value.
Will be treated as false if `norm_comps_sep` is true.
sign_rotate: If true we decide on the sign based on max real values and
rotate the imaginary part.
use_symmetric_norm: symmetric (True) or row normalization (False).
Returns:
list with arrays of shape [<= k] containing the k eigenvalues.
list with arrays of shape [n_i, <= k] containing the k eigenvectors.
"""
n = n_node.sum(-1).max()
eigenvalues = np.zeros((batch_size, k), dtype=np.float64)
eigenvectors = np.zeros((batch_size, n, k), dtype=np.complex128)
n_node_wo_padding = n_node[:, 0]
  padding_mask = senders >= 0
for i in numba.prange(0, batch_size, 1):
eigenvalue, eigenvector, _ = eigv_magnetic_laplacian_numba(
        senders[i][padding_mask[i]],
        receivers[i][padding_mask[i]],
n_node[i],
padded_nodes_size=n_node_wo_padding[i],
k=k,
k_excl=k_excl,
q=q,
q_absolute=q_absolute,
norm_comps_sep=norm_comps_sep,
l2_norm=l2_norm,
sign_rotate=sign_rotate,
use_symmetric_norm=use_symmetric_norm)
eigenvalues[i, :eigenvalue.shape[0]] = eigenvalue
eigenvectors[i, :eigenvector.shape[0], :eigenvector.shape[1]] = eigenvector
return eigenvalues, eigenvectors
def eigv_magnetic_laplacian_numba_batch(
senders: np.ndarray,
receivers: np.ndarray,
n_node: np.ndarray,
k: int = 10,
k_excl: int = 1,
q: float = 0.25,
q_absolute: bool = True,
norm_comps_sep: bool = False,
l2_norm: bool = True,
sign_rotate: bool = False,
use_symmetric_norm: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
"""k *complex* eigenvectors of the smallest k eigenvectors of the magnetic laplacian.
Args:
senders: Origin of the edges of shape [m].
receivers: Target of the edges of shape [m].
n_node: array shape [b, 2]
k: Returns top k eigenvectors.
k_excl: The top (trivial) eigenvalues / -vectors to exclude.
q: Factor in magnetic laplacian. Default 0.25.
    q_absolute: If true `q` will be used as is, otherwise `q / m_imag` (see
      `eigv_magnetic_laplacian_numba`).
norm_comps_sep: If true real and imaginary part are separately normalized.
l2_norm: If true we use l2 normalization and otherwise the abs max value.
sign_rotate: If true we decide on the sign based on max real values and
rotate the imaginary part.
use_symmetric_norm: symmetric (True) or row normalization (False).
Returns:
array of shape [k] containing the k eigenvalues.
array of shape [n, k] containing the k eigenvectors.
"""
eigenvalues, eigenvectors = eigv_magnetic_laplacian_numba_parallel(
senders.astype(np.int64), receivers.astype(np.int64),
n_node.astype(np.int64), senders.shape[0], int(k), int(k_excl), float(q),
q_absolute, norm_comps_sep, l2_norm, sign_rotate, use_symmetric_norm)
return eigenvalues, eigenvectors
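# Illustrative sketch (added for documentation): a single directed 3-cycle.
# `n_node` uses the [num_real_nodes, num_padding_nodes] convention from the
# docstrings above; -1 entries in senders/receivers would mark padded edges.
def _example_eigv_magnetic_laplacian_numba_batch():
  """Returns eigenvalues of shape [1, 2] and eigenvectors of shape [1, 3, 2]."""
  senders = np.array([[0, 1, 2]])
  receivers = np.array([[1, 2, 0]])
  n_node = np.array([[3, 0]])
  return eigv_magnetic_laplacian_numba_batch(
      senders, receivers, n_node, k=2, k_excl=0, q=0.25)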
def sinusoid_position_encoding(
pos_seq: Tensor,
hidden_size: int,
max_timescale: float = 1e4,
min_timescale: float = 2.,
) -> Tensor:
"""Creates sinusoidal encodings.
Args:
    pos_seq: Tensor with positional ids of shape [B, L].
hidden_size: `int` dimension of the positional encoding vectors, D
max_timescale: `int` maximum timescale for the frequency
min_timescale: `int` minimum timescale for the frequency
Returns:
    An array of shape [B, L, D]
"""
freqs = np.arange(0, hidden_size, min_timescale)
inv_freq = max_timescale**(-freqs / hidden_size)
sinusoid_inp = jnp.einsum('bi,j->bij', pos_seq, inv_freq)
pos_emb = jnp.concatenate(
[jnp.sin(sinusoid_inp), jnp.cos(sinusoid_inp)], axis=-1)
return pos_emb
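# Illustrative sketch (added for documentation; the position ids below are
# made-up example values).
def _example_sinusoid_position_encoding():
  """Encodings for a batch of one length-4 sequence; output is [1, 4, 8]."""
  pos_seq = jnp.arange(4, dtype=jnp.float32)[None, :]  # [B=1, L=4]
  return sinusoid_position_encoding(pos_seq, hidden_size=8)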
| digraph_transformer-main | utils.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mostly standard comnponents or adaptions to mimic PyTorch's behaviour."""
import dataclasses
from typing import Callable, Optional, Union
import warnings
import haiku as hk
import jax
import jax.numpy as jnp
import jraph
import numpy as np
Tensor = Union[np.ndarray, jnp.DeviceArray]
@dataclasses.dataclass
class CallArgs:
"""Common arguments to __call__ for most modules."""
# Whether this is training or inference.
is_training: bool
# Whether local stats are used for batch norm when is_training=False.
test_local_stats: bool = False
class ExponentialMovingAverage(hk.Module):
"""Maintains an exponential moving average.
This uses the Adam debiasing procedure.
See https://arxiv.org/pdf/1412.6980.pdf for details.
"""
def __init__(
self,
decay,
zero_debias: bool = True,
warmup_length: int = 0,
init_value: float = 0,
name: Optional[str] = None,
):
"""Initializes an ExponentialMovingAverage module.
Args:
      decay: The chosen decay. Must be in ``[0, 1)``. Values close to 1 result in
slow decay; values close to ``0`` result in fast decay.
zero_debias: Whether to run with zero-debiasing.
      warmup_length: A non-negative integer. The EMA has no effect until the
        internal counter has reached `warmup_length`, at which point the
        decaying average is initialized to the current input value.
init_value: Value to warm start the moving average.
name: The name of the module.
"""
super().__init__(name=name)
self.decay = decay
self.warmup_length = warmup_length
self.zero_debias = zero_debias
self.init_value = init_value
self.init = hk.initializers.Constant(init_value)
if warmup_length < 0:
raise ValueError(
f'`warmup_length` is {warmup_length}, but should be non-negative.')
if warmup_length and zero_debias:
raise ValueError(
'Zero debiasing does not make sense when warming up the value of the '
'average to an initial value. Set zero_debias=False if setting '
'warmup_length to a non-zero value.')
if init_value != 0 and zero_debias:
raise ValueError(
          'Do not set an init value and zero_debias at the same time.')
def initialize(self, shape, dtype=jnp.float32):
"""If uninitialized sets the average to ``zeros`` of the given shape/dtype.
"""
if hasattr(shape, 'shape'):
warnings.warn(
'Passing a value into initialize instead of a shape/dtype '
'is deprecated. Update your code to use: '
'`ema.initialize(v.shape, v.dtype)`.',
category=DeprecationWarning)
shape, dtype = shape.shape, shape.dtype
hk.get_state('hidden', shape, dtype, init=self.init)
hk.get_state('average', shape, dtype, init=self.init)
def __call__(
self,
value: jnp.ndarray,
update_stats: bool = True,
) -> jnp.ndarray:
"""Updates the EMA and returns the new value.
Args:
value: The array-like object for which you would like to perform an
exponential decay on.
update_stats: A Boolean, whether to update the internal state of this
object to reflect the input value. When `update_stats` is False the
internal stats will remain unchanged.
Returns:
The exponentially weighted average of the input value.
"""
if not isinstance(value, jnp.ndarray):
value = jnp.asarray(value)
counter = hk.get_state(
'counter', (),
jnp.int32,
init=hk.initializers.Constant(-self.warmup_length))
counter = counter + 1
decay = jax.lax.convert_element_type(self.decay, value.dtype)
if self.warmup_length > 0:
decay = jax.lax.select(counter <= 0, 0.0, decay)
one = jnp.ones([], value.dtype)
hidden = hk.get_state('hidden', value.shape, value.dtype, init=self.init)
hidden = hidden * decay + value * (one - decay)
average = hidden
if self.zero_debias:
average /= (one - jnp.power(decay, counter))
if update_stats:
hk.set_state('counter', counter)
hk.set_state('hidden', hidden)
hk.set_state('average', average)
return average
@property
def average(self):
return hk.get_state('average')
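# Illustrative sketch (added for documentation): using the EMA inside a
# transformed Haiku function; the decay value is a made-up example.
def _example_exponential_moving_average():
  """With zero debiasing the first call returns the input value itself."""
  def fn(value):
    return ExponentialMovingAverage(decay=0.9)(value)

  init, apply = hk.without_apply_rng(hk.transform_with_state(fn))
  params, state = init(None, jnp.array(1.0))
  average, state = apply(params, state, jnp.array(1.0))  # average == 1.0
  return average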
class LayerNorm(hk.LayerNorm):
"""Wrapper to allow for same interface as BatchNorm."""
def __init__(self, **kwargs):
kwargs.setdefault('create_scale', True)
kwargs.setdefault('create_offset', True)
super().__init__(**kwargs)
def __call__(self,
x: Tensor,
call_args: Optional[CallArgs] = None,
mask: Optional[Tensor] = None) -> Tensor:
return super().__call__(x)
class BatchNorm(hk.BatchNorm):
"""Makes a BatchNorm Module that can be called with CallArgs."""
def __init__(self,
create_scale=False,
create_offset=True,
decay_rate=0.999,
eps: float = 1e-3,
initialize_running_stats: bool = True,
name: Optional[str] = None,
**kwargs):
super().__init__(
create_scale=create_scale,
create_offset=create_offset,
decay_rate=decay_rate,
eps=eps,
name=name,
**kwargs)
if initialize_running_stats:
self.mean_ema = ExponentialMovingAverage(
decay_rate,
name='mean_ema',
zero_debias=False,
init_value=0.,
warmup_length=0)
self.var_ema = ExponentialMovingAverage(
decay_rate,
name='var_ema',
zero_debias=False,
init_value=1.,
warmup_length=0)
def __call__(self,
x: Tensor,
call_args: CallArgs,
mask: Optional[Tensor] = None):
return self.forward(
x,
is_training=call_args.is_training,
test_local_stats=call_args.test_local_stats,
mask=mask)
def forward(
self,
inputs: Tensor,
is_training: bool,
mask: Optional[Tensor] = None,
test_local_stats: bool = False,
scale: Optional[Tensor] = None,
offset: Optional[Tensor] = None,
) -> Tensor:
"""Computes the normalized version of the input with optional masking.
Args:
inputs: An array, where the data format is ``[..., C]``.
is_training: Whether this is during training.
mask: If provided, mask must broadcast to inputs where `false` elements
are masked out for calculating the running statistics.
test_local_stats: Whether local stats are used when is_training=False.
scale: An array up to n-D. The shape of this tensor must be broadcastable
to the shape of ``inputs``. This is the scale applied to the normalized
inputs. This cannot be passed in if the module was constructed with
``create_scale=True``.
offset: An array up to n-D. The shape of this tensor must be broadcastable
to the shape of ``inputs``. This is the offset applied to the normalized
inputs. This cannot be passed in if the module was constructed with
``create_offset=True``.
Returns:
The array, normalized across all but the last dimension.
"""
if self.create_scale and scale is not None:
raise ValueError(
'Cannot pass `scale` at call time if `create_scale=True`.')
if self.create_offset and offset is not None:
raise ValueError(
'Cannot pass `offset` at call time if `create_offset=True`.')
channel_index = self.channel_index
if channel_index < 0:
channel_index += inputs.ndim
if self.axis is not None:
axis = self.axis
else:
axis = [i for i in range(inputs.ndim) if i != channel_index]
if is_training or test_local_stats:
if mask is None:
mask = jnp.ones_like(inputs)
n_elements = jnp.sum(mask, axis, keepdims=True)
inputs *= mask
isum = jnp.sum(inputs, axis, keepdims=True)
isum_of_squares = jnp.sum(jnp.square(inputs), axis, keepdims=True)
if self.cross_replica_axis and jax.device_count() > 1:
isum = jax.lax.psum(
isum,
axis_name=self.cross_replica_axis,
axis_index_groups=self.cross_replica_axis_index_groups)
isum_of_squares = jax.lax.psum(
isum_of_squares,
axis_name=self.cross_replica_axis,
axis_index_groups=self.cross_replica_axis_index_groups)
n_elements = jax.lax.psum(
n_elements,
axis_name=self.cross_replica_axis,
axis_index_groups=self.cross_replica_axis_index_groups)
mean = isum / n_elements
mean_of_squares = isum_of_squares / n_elements
var = mean_of_squares - jnp.square(mean)
else:
mean = self.mean_ema.average.astype(inputs.dtype)
var = self.var_ema.average.astype(inputs.dtype)
if is_training:
self.mean_ema(mean)
self.var_ema(var)
w_shape = [1 if i in axis else inputs.shape[i] for i in range(inputs.ndim)]
w_dtype = inputs.dtype
if self.create_scale:
scale = hk.get_parameter('scale', w_shape, w_dtype, self.scale_init)
elif scale is None:
scale = np.ones([], dtype=w_dtype)
if self.create_offset:
offset = hk.get_parameter('offset', w_shape, w_dtype, self.offset_init)
elif offset is None:
offset = np.zeros([], dtype=w_dtype)
eps = jax.lax.convert_element_type(self.eps, var.dtype)
inv = jax.lax.rsqrt(var + eps)
scaled = scale * (inputs - mean) * inv + offset
    # It is technically not required to enforce zeros in the output
    if mask is not None:
      scaled *= mask
return scaled
UpdateFn = Callable[[jraph.NodeFeatures], jraph.NodeFeatures]
class GraphConvolution(hk.Module):
"""Returns a method that applies a Graph Convolution layer.
This implementation also allows for edge features like the OGB sample code.
Graph Convolutional layer as in https://arxiv.org/abs/1609.02907,
NOTE: This implementation does not add an activation after aggregation.
If you are stacking layers, you may want to add an activation between
each layer.
Attributes:
update_node_fn: function used to update the nodes. In the paper a single
layer MLP is used.
update_edge_fn: function used to aggregates the edge features.
aggregate_nodes_fn: function used to aggregates the sender nodes.
activation: to be applied. Default is relu.
add_self_edges: whether to add self edges to nodes in the graph as in the
      paper definition of GCN; the number of graph edges must match in either
      case. Defaults to False.
bidirectional: if True also messages in opposite edge direction are passed
Returns:
A method that applies a Graph Convolution layer.
"""
def __init__(
self,
forw_update_node_fn: UpdateFn,
forw_update_edge_fn: UpdateFn,
backw_update_node_fn: Optional[UpdateFn] = None,
backw_update_edge_fn: Optional[UpdateFn] = None,
aggregate_nodes_fn: jraph.AggregateEdgesToNodesFn = jraph.segment_sum,
activation: Callable[[Tensor], Tensor] = jax.nn.relu,
add_self_edges: bool = False,
name: Optional[str] = None):
super().__init__(name)
self.forw_update_node_fn = forw_update_node_fn
self.forw_update_edge_fn = forw_update_edge_fn
self.backw_update_node_fn = backw_update_node_fn
self.backw_update_edge_fn = backw_update_edge_fn
self.aggregate_nodes_fn = aggregate_nodes_fn
self.activation = activation
self.add_self_edges = add_self_edges
def __call__(self, graph: jraph.GraphsTuple):
"""Applies a Graph Convolution layer."""
orig_nodes, orig_edges, receivers, senders, _, _, _ = graph
# Equivalent to jnp.sum(n_node), but jittable
total_num_nodes = jax.tree_util.tree_leaves(orig_nodes)[0].shape[0]
if self.add_self_edges:
# We add self edges to the senders and receivers so that each node
# includes itself in aggregation.
# In principle, a `GraphsTuple` should partition by n_edge, but in
# this case it is not required since a GCN is agnostic to whether
# the `GraphsTuple` is a batch of graphs or a single large graph.
conv_receivers = jnp.concatenate((receivers, jnp.arange(total_num_nodes)),
axis=0)
conv_senders = jnp.concatenate((senders, jnp.arange(total_num_nodes)),
axis=0)
else:
conv_senders = senders
conv_receivers = receivers
# First pass nodes through the node updater.
transf_nodes = self.forw_update_node_fn(orig_nodes)
edges = self.forw_update_edge_fn(orig_edges)
# Calculate the normalization values.
def count_edges(x): return jraph.segment_sum( # pylint: disable=g-long-lambda
jnp.ones_like(conv_senders), x, total_num_nodes)
sender_degree = count_edges(conv_senders) + 1.
receiver_degree = count_edges(conv_receivers) + 1.
norm = (jax.lax.rsqrt(sender_degree)[conv_senders] *
jax.lax.rsqrt(receiver_degree)[conv_receivers])[:, None]
# Aggregate the pre normalized nodes.
nodes = self.aggregate_nodes_fn(
norm * self.activation(transf_nodes[conv_senders] + edges),
conv_receivers, total_num_nodes)
if self.backw_update_node_fn and self.backw_update_edge_fn:
backw_nodes = self.backw_update_node_fn(orig_nodes)
edges = self.backw_update_edge_fn(orig_edges)
backw_nodes = self.aggregate_nodes_fn(
          norm * self.activation(backw_nodes[conv_receivers] + edges),
conv_senders, total_num_nodes)
nodes += backw_nodes
root_emb = hk.get_parameter(
'root_emb',
shape=[1, transf_nodes.shape[-1]],
dtype=jnp.float32,
init=hk.initializers.RandomNormal()).astype(transf_nodes.dtype)
nodes += self.activation(transf_nodes + root_emb) / \
receiver_degree[:, None]
# pylint: enable=g-long-lambda
return graph._replace(nodes=self.activation(nodes))
class MLP(hk.Module):
"""A simple MLP implementation."""
def __init__(self,
dim: int,
activation=jax.nn.relu,
n_layers: int = 2,
with_norm: bool = True,
final_activation: bool = True,
name: Optional[str] = None):
super().__init__(name=name)
self.dim = dim
self.activation = activation
self.n_layers = n_layers
self.with_norm = with_norm
self.final_activation = final_activation
def __call__(self, x: Tensor) -> Tensor:
return mlp(
x,
dim=self.dim,
activation=self.activation,
n_layers=self.n_layers,
with_norm=self.with_norm,
final_activation=self.final_activation)
def mlp(x: Tensor,
dim: int,
activation=jax.nn.relu,
n_layers: int = 2,
with_norm: bool = True,
final_activation: bool = True,
name: Optional[str] = None):
"""Simple MLP layer with LayerNorm.
Args:
x: tensor of shape [b, *].
dim: hidden and output dimensions, D.
activation: a non-linearity. Default jax.nn.relu.
n_layers: `int` number of layers. Default 2.
with_norm: `bool` include LayerNorm. Default True.
final_activation: `bool` include activation as last layer. Default True.
name: name of the Sequential/MLP module.
Returns:
A tensor of shape [b, D]
"""
layers = []
for idx in range(n_layers):
layers.append(hk.Linear(dim, name=f'{name}_linear{idx}' if name else None))
if with_norm:
norm = LayerNorm(
axis=-1, name=f'{name}_layer_norm{idx}' if name else None)
layers.append(norm)
layers.append(activation)
if not final_activation:
layers = layers[:-1]
return hk.Sequential(layers, name=name)(x)
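# Illustrative sketch (added for documentation): applying `mlp` under
# `hk.transform`; input and hidden sizes are made-up example values.
def _example_mlp():
  """Maps a [4, 8] input to a [4, 16] output with a 2-layer MLP."""
  def fn(x):
    return mlp(x, dim=16, n_layers=2)

  init, apply = hk.without_apply_rng(hk.transform(fn))
  x = jnp.ones((4, 8))
  params = init(jax.random.PRNGKey(0), x)
  return apply(params, x)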
class MultiHeadAttention(hk.Module):
"""Multi-headed attention (MHA) module.
This module extends the haiku implementation by optional biases in the
linear transformations and dropout_p on the attention matrix.
Rough sketch:
- Compute keys (K), queries (Q), and values (V) as projections of inputs.
- Attention weights are computed as W = softmax(QK^T / sqrt(key_size)).
- Output is another projection of WV^T.
For more detail, see the original Transformer paper:
"Attention is all you need" https://arxiv.org/abs/1706.03762.
Glossary of shapes:
- T: Sequence length.
- D: Vector (embedding) size.
- H: Number of attention heads.
"""
def __init__(
self,
num_heads: int,
key_size: int,
w_init: Optional[hk.initializers.Initializer] = None,
value_size: Optional[int] = None,
model_size: Optional[int] = None,
dropout_p: float = 0.2,
with_bias: bool = False,
re_im_separate_projection: bool = False,
name: Optional[str] = None,
):
"""Initialises the module.
Args:
num_heads: Number of independent attention heads (H).
key_size: The size of keys (K) and queries used for attention.
w_init: Initialiser for weights in the linear map.
value_size: Optional size of the value projection (V). If None, defaults
to the key size (K).
model_size: Optional size of the output embedding (D'). If None, defaults
to the key size multiplied by the number of heads (K * H).
      dropout_p: Dropout probability applied to the attention weights after
        the softmax.
      with_bias: If False (default), the linear projections will not have a
        bias.
      re_im_separate_projection: If True, real and imaginary components are
        projected without weight sharing.
name: Optional name for this module.
"""
super().__init__(name=name)
self.num_heads = num_heads
self.key_size = key_size
self.value_size = value_size or key_size
self.model_size = model_size or key_size * num_heads
self.dropout_p = dropout_p
self.with_bias = with_bias
self.re_im_separate_projection = re_im_separate_projection
self.w_init = w_init
def __call__(
self,
query: Tensor,
key: Tensor,
value: Tensor,
is_training: bool,
logit_offset: Optional[Tensor] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
"""Computes (optionally masked) MHA with queries, keys & values.
This module broadcasts over zero or more 'batch-like' leading dimensions.
Args:
query: Embeddings sequence used to compute queries; shape [..., T', D_q].
key: Embeddings sequence used to compute keys; shape [..., T, D_k].
value: Embeddings sequence used to compute values; shape [..., T, D_v].
      is_training: If True, dropout is applied to the attention weights.
      logit_offset: Optional offset/bias added to the attention logits before
        the mask and the softmax (broadcast to [..., T', T, D_o]). A
        head-specific linear transformation maps it to the heads.
mask: Optional mask applied to attention weights; shape [..., H=1, T', T]
or [..., T', T].
Returns:
A new sequence of embeddings, consisting of a projection of the
attention-weighted value projections; shape [..., T', D'].
"""
# In shape hints below, we suppress the leading dims [...] for brevity.
# Hence e.g. [A, B] should be read in every case as [..., A, B].
*leading_dims, sequence_length, _ = query.shape
projection = self._linear_projection
# Compute key/query/values (overload K/Q/V to denote the respective sizes).
query_heads = projection(query, self.key_size, 'query') # [T', H, Q=K]
key_heads = projection(key, self.key_size, 'key') # [T, H, K]
value_heads = projection(value, self.value_size, 'value') # [T, H, V]
# Compute attention weights.
attn_logits = jnp.einsum('...thd,...Thd->...htT', query_heads, key_heads)
attn_logits = jnp.real(attn_logits) # In case the logits are complex
attn_logits = attn_logits / jnp.sqrt(self.key_size).astype(value.dtype)
# E.g. to apply relative positional encodings or add edge bias
if logit_offset is not None:
logit_offset = hk.Linear(self.num_heads)(logit_offset)
new_order = list(range(logit_offset.ndim - 3)) + [
logit_offset.ndim - 1, logit_offset.ndim - 3, logit_offset.ndim - 2
]
logit_offset = logit_offset.transpose(*new_order)
attn_logits = attn_logits + logit_offset
if mask is not None:
if mask.ndim == attn_logits.ndim - 1:
mask = mask[..., None, :, :]
elif mask.ndim != attn_logits.ndim:
raise ValueError(
f'Mask dimensionality {mask.ndim} must match logits dimensionality '
f'{attn_logits.ndim}.')
attn_logits = jnp.where(mask, attn_logits, -1e30)
attn_weights = jax.nn.softmax(attn_logits) # [H, T', T]
if is_training and self.dropout_p > 0:
attn_weights = hk.dropout(
hk.next_rng_key(), self.dropout_p, attn_weights)
# Weight the values by the attention and flatten the head vectors.
attn = jnp.einsum('...htT,...Thd->...thd', attn_weights, value_heads)
attn = jnp.reshape(attn, (*leading_dims, sequence_length, -1)) # [T', H*V]
# Apply another projection to get the final embeddings.
final_projection = hk.Linear(self.model_size, w_init=self.w_init)
return final_projection(attn) # [T', D']
@hk.transparent
def _linear_projection(
self,
x: Tensor,
head_size: int,
name: Optional[str] = None,
) -> Tensor:
lin = hk.Linear(
self.num_heads * head_size,
w_init=self.w_init,
name=name,
with_bias=self.with_bias)
if jnp.iscomplexobj(x):
if self.re_im_separate_projection:
y_re = lin(jnp.real(x))
lin_im = hk.Linear(
self.num_heads * head_size,
w_init=self.w_init,
name=name,
with_bias=self.with_bias)
y_im = lin_im(jnp.imag(x))
else:
y_re = lin(jnp.real(x))
y_im = lin(jnp.imag(x))
y = y_re + 1j * y_im
else:
y = lin(x)
*leading_dims, _ = x.shape
return y.reshape((*leading_dims, self.num_heads, head_size))
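# Example usage of MultiHeadAttention (a minimal sketch; shapes are
# illustrative). With the defaults, model_size == num_heads * key_size, and
# `rng=None` in apply() is fine because no dropout is used when
# is_training=False.
#
#   import haiku as hk
#   import jax
#   import jax.numpy as jnp
#
#   def attend(q, k, v):
#     return MultiHeadAttention(num_heads=4, key_size=8)(
#         q, k, v, is_training=False)
#
#   f = hk.transform(attend)
#   q = jnp.ones([2, 5, 32])   # [batch, T', D_q]
#   kv = jnp.ones([2, 7, 32])  # [batch, T, D_k] == [batch, T, D_v]
#   params = f.init(jax.random.PRNGKey(0), q, kv, kv)
#   out = f.apply(params, None, q, kv, kv)  # out.shape == (2, 5, 32)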
| digraph_transformer-main | layers.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parses python *function* into an AST with data and control flow edges.
This file is intentionally left to be as close as possible to `python-graphs`.
"""
from typing import Any, Dict, List, Set, Tuple, Union
import uuid
from absl import logging
import astunparse
import gast as ast
import numpy as np
from python_graphs import control_flow
from python_graphs import data_flow
from python_graphs import instruction as instruction_module
from python_graphs import program_graph as pg
from python_graphs import program_graph_dataclasses as pb
from six.moves import builtins
NEWLINE_TOKEN = '#NEWLINE#'
UNINDENT_TOKEN = '#UNINDENT#'
INDENT_TOKEN = '#INDENT#'
# Using the same names as OGBN-Code2
MASK_TOKEN = '_mask_'
NONE_TOKEN = '__NONE__'
UNKNOWN_TOKEN = '__UNK__'
# Aligned with OGB. Perhaps not all needed, but we keep them to avoid edge cases
FEATURE_FIELD_ORDER = ('name', 'arg', 'value', 's', 'n', 'id', 'attr')
# Some operations with commutative properties
# Exclude `And` as well as `Or` if you want to detect exceptions (e.g., `and`
# does not evaluate its second argument if the first resolves to false)
COMMUTATIVE_OPERATORS = ('And', 'Or', 'Add', 'Mult', 'BitOr', 'BitXor',
'BitAnd')
COMMUTATIVE_EDGE_TYPES = ('body', 'finalbody', 'orelse')
FUNC_DEF = (ast.FunctionDef, ast.AsyncFunctionDef)
def py2ogbgraph(program: str,
attr2idx: Dict[str, int],
type2idx: Dict[str, int],
mask_function_name: bool = True,
max_characters_token: int = 100) -> Tuple[Dict[str, Any], str]:
"""The main function that converts a function into a graph.
This is done similarly to the OGB Code2 dataset with the notable exception
that nodes are not connected sequentially. Specifically, we construct the
graph in a data-centric manner to achieve certain properties.
Args:
program: The function as string.
attr2idx: maps attribute values to ids (OGB default).
type2idx: maps attribute types to ids (OGB default).
mask_function_name: If True, we mask out the function name (including
recursive calls).
max_characters_token: Limit on the maximum number of characters for values.
  Returns:
    A tuple of a dictionary that (mostly) contains np.ndarrays with the values
    required to construct the graph, and the original function name. The data
    structure is intentionally chosen to be as close as possible to the OGB
    data.
"""
program_node = ast.parse(program, mode='exec')
graph = get_program_graph(program_node)
ast_nodes = list(ast.walk(program_node))
assert all([not edge.has_back_edge for edge in graph.edges])
assert len(ast_nodes) == len(graph.nodes)
  # Mask out the target function name (initialized to None so the final
  # `return` does not fail when masking is disabled)
  function_name = None
  if mask_function_name:
    function_name = ast_nodes[1].name
    assert isinstance(ast_nodes[1], FUNC_DEF), (
        'To mask the method name, the 1st node in the AST must be a '
        '(Async)FunctionDef')
node = graph.get_node_by_ast_node(ast_nodes[1])
assert hasattr(node, 'fields')
assert 'name' in node.fields
assert node.fields['name'] == function_name
ast_nodes[1].name = node.fields['name'] = MASK_TOKEN
    # Check for recursive calls and mask them as well
    for ast_node in ast_nodes:
      if isinstance(ast_node, ast.Call) and isinstance(ast_node.func, ast.Name):
        # Only mask calls that refer to the masked function itself.
        if ast_node.func.id != function_name:
          continue
        func_defs = list(graph.get_nodes_by_function_name(MASK_TOKEN))
        if not func_defs:
          continue
        ast_node.func.id = MASK_TOKEN
        for child in graph.children(graph.get_node_by_ast_node(ast_node)):
          if (isinstance(child.ast_node, ast.Name) and
              child.ast_node.id == function_name):
            child.ast_node.id = MASK_TOKEN
          if 'name' in child.fields and child.fields['name'] == function_name:
            child.fields['name'] = MASK_TOKEN
ogb_walker = OGB_ASTWalker()
ogb_walker.visit(program_node)
graph2dataset_id = dict()
dataset2graph_id = dict()
for id_dataset, attributed_node in enumerate(ogb_walker.nodes):
    id_graph = graph.get_node_by_ast_node(attributed_node['node'])
    assert id_graph.id not in graph2dataset_id, (
        f'Found id_graph={id_graph.id} twice')
    assert id_dataset not in dataset2graph_id, (
        f'Found id_dataset={id_dataset} twice')
    graph2dataset_id[id_graph.id] = id_dataset
    dataset2graph_id[id_dataset] = id_graph.id
edge_index = []
edges_deduplicated = list({(edge.id1, edge.id2, edge.field_name,
edge.type.value) for edge in graph.edges})
for id1, id2, _, _ in edges_deduplicated:
edge_index.append((graph2dataset_id[id1], graph2dataset_id[id2]))
edge_index = np.array(edge_index).transpose()
edge_coalesced_order = np.lexsort(np.flip(edge_index, axis=0))
edge_index = edge_index[:, edge_coalesced_order]
edge_type = []
# Similarly to the labels, the encodings need to be handled at runtime
edge_name = []
edge_order = []
for edge_idx in edge_coalesced_order:
id1, id2, field_name, type_ = edges_deduplicated[edge_idx]
order = 0
if field_name is None:
field_name = NONE_TOKEN
elif ':' in field_name:
splitted_name = field_name.split(':')
field_name = ':'.join(splitted_name[:-1])
if field_name not in COMMUTATIVE_EDGE_TYPES:
order = int(splitted_name[-1])
elif field_name == 'left':
field_name = 'inputs'
elif field_name == 'right':
field_name = 'inputs'
if not any([
c for c in graph.children(graph.get_node_by_id(id1))
if c.ast_type in COMMUTATIVE_OPERATORS
]):
order = 1
edge_type.append(type_)
edge_name.append(field_name.encode('utf-8'))
edge_order.append(order)
node_feat_raw = []
node_feat = []
dfs_order = []
depth = []
attributed = []
for idx, attributed_node in enumerate(ogb_walker.nodes):
ast_node = attributed_node['node']
node_type = attributed_node['type']
fields = graph.get_node_by_ast_node(ast_node).fields
for field in FEATURE_FIELD_ORDER:
if field in fields:
attribute = fields[field]
break
else:
if fields.values():
attribute = list(fields.values())[0]
else:
attribute = None
is_attributed = attribute is not None
if is_attributed:
node_attr = str(attribute)[:max_characters_token]
node_attr = node_attr.replace('\n', '').replace('\r', '')
else:
node_attr = NONE_TOKEN
node_feat_raw.append(
(str(node_type).encode('utf-8'), str(node_attr).encode('utf-8')))
node_feat.append((type2idx.get(node_type,
len(type2idx) - 1),
attr2idx.get(node_attr,
len(attr2idx) - 1)))
dfs_order.append(idx)
depth.append(attributed_node['depth'])
attributed.append(is_attributed)
data = dict()
# Nodes
data['node_feat_raw'] = np.array(node_feat_raw, dtype='object')
data['node_feat'] = np.array(node_feat, dtype=np.int64)
data['node_dfs_order'] = np.array(dfs_order, dtype=np.int64).reshape(-1, 1)
data['node_depth'] = np.array(depth, dtype=np.int64).reshape(-1, 1)
data['node_is_attributed'] = np.array(
attributed, dtype=np.int64).reshape(-1, 1)
# Edges
data['edge_index'] = edge_index
data['edge_type'] = np.array(edge_type, dtype=np.int64).reshape(-1, 1)
data['edge_name'] = np.array(edge_name, dtype='object').reshape(-1, 1)
data['edge_order'] = np.array(edge_order, dtype=np.int64).reshape(-1, 1)
# Sizes
data['num_nodes'] = len(data['node_feat'])
data['num_edges'] = len(data['edge_index'][0])
return data, function_name
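# Example usage of `py2ogbgraph` (a minimal sketch): the `attr2idx`/`type2idx`
# vocabularies normally come from the OGB Code2 mapping files; the
# single-entry placeholder dicts below make every token fall back to the last
# (unknown) index.
#
#   source = 'def add(a, b):\n    return a + b\n'
#   data, fn_name = py2ogbgraph(
#       source, attr2idx={UNKNOWN_TOKEN: 0}, type2idx={UNKNOWN_TOKEN: 0})
#   print(fn_name)  # 'add'; inside the graph the name is replaced by '_mask_'
#   print(data['num_nodes'], data['num_edges'])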
class OGB_ASTWalker(ast.NodeVisitor): # pylint: disable=invalid-name
"""Minimal version of the Open Graph Benchmark ASTWalker."""
def __init__(self):
self.node_id = 0
self.stack = []
self.nodes = []
def generic_visit(self, node: ast.AST):
# encapsulate all node features in a dict
self.nodes.append({
'type': type(node).__name__,
'node': node,
'depth': len(self.stack)
})
# DFS traversal logic
self.stack.append(node)
super().generic_visit(node)
self.stack.pop()
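# Example usage of OGB_ASTWalker (a minimal sketch): the walker records one
# dict per AST node in DFS order, together with its depth in the tree.
#
#   walker = OGB_ASTWalker()
#   walker.visit(ast.parse('def f(x):\n    return x\n'))
#   for entry in walker.nodes:
#     print(entry['depth'], entry['type'])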
def get_program_graph(program: Union[str, ast.AST]):
"""Constructs a program graph to represent the given program."""
if isinstance(program, ast.AST):
program_node = program
else:
program_node = ast.parse(program, mode='exec')
program_graph = pg.ProgramGraph()
# Perform control flow analysis.
control_flow_graph = control_flow.get_control_flow_graph(program_node)
# Add AST_NODE program graph nodes corresponding to Instructions in the
# control flow graph.
for control_flow_node in control_flow_graph.get_control_flow_nodes():
program_graph.add_node_from_instruction(control_flow_node.instruction)
# Add AST_NODE program graph nodes corresponding to AST nodes.
for ast_node in ast.walk(program_node):
if not program_graph.contains_ast_node(ast_node):
pg_node = pg.make_node_from_ast_node(ast_node)
program_graph.add_node(pg_node)
root = program_graph.get_node_by_ast_node(program_node)
program_graph.root_id = root.id
# Add AST edges (FIELD). Also add AST_LIST and AST_VALUE program graph nodes.
for ast_node in ast.walk(program_node):
node = program_graph.get_node_by_ast_node(ast_node)
setattr(node, 'fields', {})
for field_name, value in list(ast.iter_fields(ast_node)):
if isinstance(value, list):
last_item = None
for index, item in enumerate(value):
list_field_name = make_list_field_name(field_name, index)
          if isinstance(item, ast.AST):
            if last_item is not None:
              # AST lists are assumed to be homogeneous, so the previous item
              # must also be an AST node.
              assert isinstance(last_item, ast.AST)
            program_graph.add_new_edge(ast_node, item, pb.EdgeType.FIELD,
                                       list_field_name)
          else:
            if last_item is not None:
              assert not isinstance(last_item, ast.AST)
            node.fields[list_field_name] = item
last_item = item
elif isinstance(value, ast.AST):
program_graph.add_new_edge(ast_node, value, pb.EdgeType.FIELD,
field_name)
else:
node.fields[field_name] = value
# Perform data flow analysis.
analysis = data_flow.LastAccessAnalysis()
for node in control_flow_graph.get_enter_control_flow_nodes():
analysis.visit(node)
# Add control flow edges (NEXT_SYNTAX) - as originally done by python graphs
# for CFG_NEXT.
for control_flow_node in control_flow_graph.get_control_flow_nodes():
instruction = control_flow_node.instruction
for next_control_flow_node in control_flow_node.next:
next_instruction = next_control_flow_node.instruction
program_graph.add_new_edge(
instruction.node,
next_instruction.node,
edge_type=pb.EdgeType.NEXT_SYNTAX)
# edge_type=pb.EdgeType.CFG_NEXT)
  # Add data flow edges (LAST_WRITE; LAST_READ edges are not added here).
for control_flow_node in control_flow_graph.get_control_flow_nodes():
# Start with the most recent accesses before this instruction.
last_accesses = control_flow_node.get_label('last_access_in').copy()
for access in control_flow_node.instruction.accesses:
# Extract the node and identifiers for the current access.
pg_node = program_graph.get_node_by_access(access)
access_name = instruction_module.access_name(access)
write_identifier = instruction_module.access_identifier(
access_name, 'write')
for write in last_accesses.get(write_identifier, []):
write_pg_node = program_graph.get_node_by_access(write)
program_graph.add_new_edge(
write_pg_node, pg_node, edge_type=pb.EdgeType.LAST_WRITE)
# Update the state to refer to this access as the most recent one.
if instruction_module.access_is_write(access):
last_accesses[write_identifier] = [access]
# Add COMPUTED_FROM edges.
for node in ast.walk(program_node):
if isinstance(node, ast.Assign):
for value_node in ast.walk(node.value):
if isinstance(value_node, ast.Name):
# TODO(dbieber): If possible, improve precision of these edges.
for target in node.targets:
program_graph.add_new_edge(
value_node, target, edge_type=pb.EdgeType.COMPUTED_FROM)
# Add CALLS, FORMAL_ARG_NAME and RETURNS_TO edges.
for node in ast.walk(program_node):
if isinstance(node, ast.Call):
if isinstance(node.func, ast.Name):
# TODO(dbieber): Use data flow analysis instead of all function defs.
func_defs = list(program_graph.get_nodes_by_function_name(node.func.id))
# For any possible last writes that are a function definition, add the
# formal_arg_name and returns_to edges.
if not func_defs:
# TODO(dbieber): Add support for additional classes of functions,
# such as attributes of known objects and builtins.
if node.func.id in dir(builtins):
message = 'Function is builtin.'
else:
message = 'Cannot statically determine the function being called.'
logging.debug('%s (%s)', message, node.func.id)
for func_def in func_defs:
fn_node = func_def.node
# Add calls edge from the call node to the function definition.
program_graph.add_new_edge(node, fn_node, edge_type=pb.EdgeType.CALLS)
# Add returns_to edges from the function's return statements to the
# call node.
for inner_node in ast.walk(func_def.node):
# TODO(dbieber): Determine if the returns_to should instead go to
# the next instruction after the Call node instead.
if isinstance(inner_node, ast.Return):
program_graph.add_new_edge(
inner_node, node, edge_type=pb.EdgeType.RETURNS_TO)
# Add formal_arg_name edges from the args of the Call node to the
# args in the FunctionDef.
for index, arg in enumerate(node.args):
formal_arg = None
if index < len(fn_node.args.args):
formal_arg = fn_node.args.args[index]
elif fn_node.args.vararg:
# Since args.vararg is a string, we use the arguments node.
# TODO(dbieber): Use a node specifically for the vararg.
formal_arg = fn_node.args
if formal_arg is not None:
# Note: formal_arg can be an AST node or a string.
program_graph.add_new_edge(
arg, formal_arg, edge_type=pb.EdgeType.FORMAL_ARG_NAME)
else:
# TODO(dbieber): If formal_arg is None, then remove all
# formal_arg_name edges for this FunctionDef.
logging.debug('formal_arg is None')
for keyword in node.keywords:
name = keyword.arg
formal_arg = None
for arg in fn_node.args.args:
if isinstance(arg, ast.Name) and arg.id == name:
formal_arg = arg
break
else:
if fn_node.args.kwarg:
# Since args.kwarg is a string, we use the arguments node.
# TODO(dbieber): Use a node specifically for the kwarg.
formal_arg = fn_node.args
if formal_arg is not None:
program_graph.add_new_edge(
keyword.value,
formal_arg,
edge_type=pb.EdgeType.FORMAL_ARG_NAME)
else:
# TODO(dbieber): If formal_arg is None, then remove all
# formal_arg_name edges for this FunctionDef.
logging.debug('formal_arg is None')
else:
# TODO(dbieber): Add a special case for Attributes.
logging.debug(
'Cannot statically determine the function being called. (%s)',
astunparse.unparse(node.func).strip())
refined_cf_visitor = ControlFlowVisitor(program_graph, control_flow_graph)
refined_cf_visitor.run(root)
for control_flow_node in refined_cf_visitor.graph.get_control_flow_nodes():
instruction = control_flow_node.instruction
for next_control_flow_node in control_flow_node.next:
next_instruction = next_control_flow_node.instruction
program_graph.add_new_edge(
instruction.node,
next_instruction.node,
edge_type=pb.EdgeType.CFG_NEXT)
return program_graph
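# Example usage of `get_program_graph` (a minimal sketch): build a program
# graph for a toy function and count the edge types that were added.
#
#   import collections
#
#   graph = get_program_graph('def f(x):\n    y = x + 1\n    return y\n')
#   print(collections.Counter(edge.type for edge in graph.edges))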
class CustomControlFlowWalker(ast.NodeVisitor):
  """Analyzes the possible orders in which the instructions can be executed.
  """
def __init__(self, program_graph: pg.ProgramGraph,
cfg_graph: control_flow.ControlFlowGraph):
self.program_graph = program_graph
self.cfg_graph = cfg_graph
    self.children_order: Dict[int, Dict[str, List[Any]]] = dict()
self.node_with_id_visited = set()
def return_exceptional_cf_node(self,
node: pg.ProgramGraphNode) -> Set[ast.AST]:
if (type(node.ast_node) in [
ast.Assert, ast.Break, ast.Raise, ast.Continue, ast.Return, ast.Yield
]):
return {node.ast_node}
else:
return set()
  def prune_exceptional_cf_nodes(
      self, node: pg.ProgramGraphNode,
      exceptional_nodes: Set[ast.AST]) -> Set[ast.AST]:
    # `exceptional_nodes` contains AST instances, so filter by type instead of
    # subtracting the AST classes themselves (which would be a no-op).
    if type(node.ast_node) in [ast.For, ast.While]:
      return {n for n in exceptional_nodes
              if not isinstance(n, (ast.Break, ast.Continue))}
    elif isinstance(node.ast_node, ast.If):
      return {n for n in exceptional_nodes
              if not isinstance(n, (ast.Assert, ast.Raise))}
    else:
      return exceptional_nodes
  def generic_visit(self, node) -> Tuple[Set[int], Set[int], Set[ast.AST]]:
neighbors = self.program_graph.neighbors_map[node.id]
children = []
for edge, _ in neighbors:
# Look at outgoing ast edges for the children
if edge.id1 != node.id or edge.type != pb.EdgeType.FIELD:
continue
child = self.program_graph.get_node_by_id(edge.id2)
descendants, depends_on, exceptional_nodes = super().visit(child)
children.append(
(edge.id2, edge, descendants, depends_on, exceptional_nodes))
node_depends_on = set()
for edge, _ in neighbors:
# Look at incoming data flow dependencies
if (edge.id2 == node.id and
edge.type in [pb.EdgeType.LAST_WRITE, pb.EdgeType.COMPUTED_FROM] and
# Only allow edge dependence in sequential order
edge.id1 in self.node_with_id_visited):
node_depends_on.add(edge.id1)
self.node_with_id_visited.add(node.id)
if not children:
return {node.id}, node_depends_on, self.return_exceptional_cf_node(node)
blocks = {
field:
[child for child in children if child[1].field_name.startswith(field)]
for field in COMMUTATIVE_EDGE_TYPES
}
self.children_order[node.id] = {}
for field, block in blocks.items():
current_parent = -1
predecessor_list = list()
successor_list = list()
if block:
entry_node_id = block[0][0]
else:
entry_node_id = None
# Required to determine if exceptional node overrules dataflow
children_order = []
exceptional_node_order = 0
for idx, (_, _, descendants, depends_on,
exceptional_nodes) in enumerate(block):
children_order.append(exceptional_node_order)
predecessor_list.append(set())
successor_list.append(set())
if not depends_on and not exceptional_nodes:
if current_parent >= 0:
predecessor_list[idx].add(current_parent)
successor_list[current_parent].add(idx)
continue
for previous_child_idx in reversed(range(idx)):
if children_order[previous_child_idx] < exceptional_node_order:
break
for dependence in depends_on:
if dependence in block[previous_child_idx][2]:
predecessor_list[idx].add(previous_child_idx)
successor_list[previous_child_idx].add(idx)
children_order[idx] = max(children_order[-1],
children_order[previous_child_idx] + 1)
if exceptional_nodes:
current_parent = idx
for previous_child_idx in range(idx):
if not successor_list[previous_child_idx]:
successor_list[previous_child_idx].add(idx)
predecessor_list[idx].add(previous_child_idx)
exceptional_node_order = max(children_order) + 1
children_order[idx] = exceptional_node_order
elif not predecessor_list[idx] and current_parent >= 0:
predecessor_list[idx].add(current_parent)
successor_list[current_parent].add(idx)
self.children_order[node.id][field] = [
entry_node_id, predecessor_list, successor_list
]
agg_descendants = set().union(
*[descendants for _, _, descendants, _, _ in children])
agg_descendants |= {node.id}
agg_depends_on = set().union(
*[depends_on for _, _, _, depends_on, _ in children])
agg_depends_on |= node_depends_on
agg_depends_on -= agg_descendants
agg_exceptional_nodes = set().union(
*[exceptional_nodes for _, _, _, _, exceptional_nodes in children])
agg_exceptional_nodes |= self.return_exceptional_cf_node(node)
agg_exceptional_nodes = self.prune_exceptional_cf_nodes(
node, agg_exceptional_nodes)
return agg_descendants, agg_depends_on, agg_exceptional_nodes
class ControlFlowVisitor(control_flow.ControlFlowVisitor):
  """Overrides how `bodies` in the AST are ordered, using the analysis of the
  orders in which instructions can be executed.
  """
def __init__(self, program_graph: pg.ProgramGraph,
cfg_graph: control_flow.ControlFlowGraph):
super().__init__()
self.program_graph = program_graph
self.refined_cf_walker = CustomControlFlowWalker(program_graph, cfg_graph)
def run(self, root: pg.ProgramGraphNode):
self.refined_cf_walker.visit(root)
start_block = self.graph.start_block
end_block = self.visit(root.ast_node, start_block)
exit_block = self.new_block(
node=root.ast_node, label='<exit>', prunable=False)
end_block.add_exit(exit_block)
self.graph.compact()
def visit_list(self, items, current_block):
"""Visit each of the items in a list from the AST."""
if len(items) < 2:
for item in items:
current_block = self.visit(item, current_block)
return current_block
parent = self.program_graph.parent(
self.program_graph.get_node_by_ast_node(items[0]))
children_order = self.refined_cf_walker.children_order[parent.id]
children_order = [
(field, co)
for field, co in children_order.items()
if co[0] == self.program_graph.get_node_by_ast_node(items[0]).id
][0]
field, (_, predecessor_list, successor_list) = children_order
assert not predecessor_list[0], 'First entry cannot have predecessor'
assert not successor_list[-1], 'Last entry cannot have successor'
entry_block = current_block
item_idx_to_block = []
for item_idx, predecessors in enumerate(predecessor_list):
item = items[item_idx]
current_block = self.new_block(
node=item if predecessors else entry_block.node, # For consistency
label=f'field_{item_idx}')
if not predecessors:
entry_block.add_exit(current_block)
else:
for pred_idx in predecessors:
item_idx_to_block[pred_idx].add_exit(current_block)
current_block = self.visit(item, current_block)
item_idx_to_block.append(current_block)
after_block = self.new_block(node=entry_block.node, label='after_block')
for item_idx, successor in enumerate(successor_list):
if not successor:
item_idx_to_block[item_idx].add_exit(after_block)
return after_block
def make_list_field_name(field_name, index):
return '{}:{}'.format(field_name, index)
def parse_list_field_name(list_field_name):
field_name, index = list_field_name.split(':')
index = int(index)
return field_name, index
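# The two helpers above are inverses for ordinary (colon-free) field names:
#
#   make_list_field_name('body', 2)    # == 'body:2'
#   parse_list_field_name('body:2')    # == ('body', 2)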
def unique_id():
"""Returns a unique id that is suitable for identifying graph nodes."""
return uuid.uuid4().int & ((1 << 64) - 1)
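# Example: the bit mask truncates the uuid to 64 bits, so
#
#   unique_id() < 2 ** 64  # always True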
| digraph_transformer-main | dataflow_parser.py |